repo_name
string
path
string
copies
string
size
string
content
string
license
string
tbalden/android_kernel_htc_m9pw
arch/m32r/lib/delay.c
13612
2985
/* * linux/arch/m32r/lib/delay.c * * Copyright (c) 2002 Hitoshi Yamamoto, Hirokazu Takata * Copyright (c) 2004 Hirokazu Takata */ #include <linux/param.h> #include <linux/module.h> #ifdef CONFIG_SMP #include <linux/sched.h> #include <asm/current.h> #include <asm/smp.h> #endif /* CONFIG_SMP */ #include <asm/processor.h> void __delay(unsigned long loops) { #ifdef CONFIG_ISA_DUAL_ISSUE __asm__ __volatile__ ( "beqz %0, 2f \n\t" "addi %0, #-1 \n\t" " .fillinsn \n\t" "1: \n\t" "cmpz %0 || addi %0, #-1 \n\t" "bc 2f || cmpz %0 \n\t" "bc 2f || addi %0, #-1 \n\t" "cmpz %0 || addi %0, #-1 \n\t" "bc 2f || cmpz %0 \n\t" "bnc 1b || addi %0, #-1 \n\t" " .fillinsn \n\t" "2: \n\t" : "+r" (loops) : "r" (0) : "cbit" ); #else __asm__ __volatile__ ( "beqz %0, 2f \n\t" " .fillinsn \n\t" "1: \n\t" "addi %0, #-1 \n\t" "blez %0, 2f \n\t" "addi %0, #-1 \n\t" "blez %0, 2f \n\t" "addi %0, #-1 \n\t" "blez %0, 2f \n\t" "addi %0, #-1 \n\t" "bgtz %0, 1b \n\t" " .fillinsn \n\t" "2: \n\t" : "+r" (loops) : "r" (0) ); #endif } void __const_udelay(unsigned long xloops) { #if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2) /* * loops [1] = (xloops >> 32) [sec] * loops_per_jiffy [1/jiffy] * * HZ [jiffy/sec] * = (xloops >> 32) [sec] * (loops_per_jiffy * HZ) [1/sec] * = (((xloops * loops_per_jiffy) >> 32) * HZ) [1] * * NOTE: * - '[]' depicts variable's dimension in the above equation. * - "rac" instruction rounds the accumulator in word size. 
*/ __asm__ __volatile__ ( "srli %0, #1 \n\t" "mulwhi %0, %1 ; a0 \n\t" "mulwu1 %0, %1 ; a1 \n\t" "sadd ; a0 += (a1 >> 16) \n\t" "rac a0, a0, #1 \n\t" "mvfacmi %0, a0 \n\t" : "+r" (xloops) : "r" (current_cpu_data.loops_per_jiffy) : "a0", "a1" ); #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) /* * u64 ull; * ull = (u64)xloops * (u64)current_cpu_data.loops_per_jiffy; * xloops = (ull >> 32); */ __asm__ __volatile__ ( "and3 r4, %0, #0xffff \n\t" "and3 r5, %1, #0xffff \n\t" "mul r4, r5 \n\t" "srl3 r6, %0, #16 \n\t" "srli r4, #16 \n\t" "mul r5, r6 \n\t" "add r4, r5 \n\t" "and3 r5, %0, #0xffff \n\t" "srl3 r6, %1, #16 \n\t" "mul r5, r6 \n\t" "add r4, r5 \n\t" "srl3 r5, %0, #16 \n\t" "srli r4, #16 \n\t" "mul r5, r6 \n\t" "add r4, r5 \n\t" "mv %0, r4 \n\t" : "+r" (xloops) : "r" (current_cpu_data.loops_per_jiffy) : "r4", "r5", "r6" ); #else #error unknown isa configuration #endif __delay(xloops * HZ); } void __udelay(unsigned long usecs) { __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */ } void __ndelay(unsigned long nsecs) { __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ } EXPORT_SYMBOL(__delay); EXPORT_SYMBOL(__const_udelay); EXPORT_SYMBOL(__udelay); EXPORT_SYMBOL(__ndelay);
gpl-2.0
Loller79/Solid_Kernel-GEEHRC
arch/powerpc/boot/cpm-serial.c
13868
5276
/* * CPM serial console support. * * Copyright 2007 Freescale Semiconductor, Inc. * Author: Scott Wood <scottwood@freescale.com> * * It is assumed that the firmware (or the platform file) has already set * up the port. */ #include "types.h" #include "io.h" #include "ops.h" #include "page.h" struct cpm_scc { u32 gsmrl; u32 gsmrh; u16 psmr; u8 res1[2]; u16 todr; u16 dsr; u16 scce; u8 res2[2]; u16 sccm; u8 res3; u8 sccs; u8 res4[8]; }; struct cpm_smc { u8 res1[2]; u16 smcmr; u8 res2[2]; u8 smce; u8 res3[3]; u8 smcm; u8 res4[5]; }; struct cpm_param { u16 rbase; u16 tbase; u8 rfcr; u8 tfcr; u16 mrblr; u32 rstate; u8 res1[4]; u16 rbptr; u8 res2[6]; u32 tstate; u8 res3[4]; u16 tbptr; u8 res4[6]; u16 maxidl; u16 idlc; u16 brkln; u16 brkec; u16 brkcr; u16 rmask; u8 res5[4]; }; struct cpm_bd { u16 sc; /* Status and Control */ u16 len; /* Data length in buffer */ u8 *addr; /* Buffer address in host memory */ }; static void *cpcr; static struct cpm_param *param; static struct cpm_smc *smc; static struct cpm_scc *scc; static struct cpm_bd *tbdf, *rbdf; static u32 cpm_cmd; static void *cbd_addr; static u32 cbd_offset; static void (*do_cmd)(int op); static void (*enable_port)(void); static void (*disable_port)(void); #define CPM_CMD_STOP_TX 4 #define CPM_CMD_RESTART_TX 6 #define CPM_CMD_INIT_RX_TX 0 static void cpm1_cmd(int op) { while (in_be16(cpcr) & 1) ; out_be16(cpcr, (op << 8) | cpm_cmd | 1); while (in_be16(cpcr) & 1) ; } static void cpm2_cmd(int op) { while (in_be32(cpcr) & 0x10000) ; out_be32(cpcr, op | cpm_cmd | 0x10000); while (in_be32(cpcr) & 0x10000) ; } static void smc_disable_port(void) { do_cmd(CPM_CMD_STOP_TX); out_be16(&smc->smcmr, in_be16(&smc->smcmr) & ~3); } static void scc_disable_port(void) { do_cmd(CPM_CMD_STOP_TX); out_be32(&scc->gsmrl, in_be32(&scc->gsmrl) & ~0x30); } static void smc_enable_port(void) { out_be16(&smc->smcmr, in_be16(&smc->smcmr) | 3); do_cmd(CPM_CMD_RESTART_TX); } static void scc_enable_port(void) { out_be32(&scc->gsmrl, 
in_be32(&scc->gsmrl) | 0x30); do_cmd(CPM_CMD_RESTART_TX); } static int cpm_serial_open(void) { disable_port(); out_8(&param->rfcr, 0x10); out_8(&param->tfcr, 0x10); out_be16(&param->mrblr, 1); out_be16(&param->maxidl, 0); out_be16(&param->brkec, 0); out_be16(&param->brkln, 0); out_be16(&param->brkcr, 0); rbdf = cbd_addr; rbdf->addr = (u8 *)rbdf - 1; rbdf->sc = 0xa000; rbdf->len = 1; tbdf = rbdf + 1; tbdf->addr = (u8 *)rbdf - 2; tbdf->sc = 0x2000; tbdf->len = 1; sync(); out_be16(&param->rbase, cbd_offset); out_be16(&param->tbase, cbd_offset + sizeof(struct cpm_bd)); do_cmd(CPM_CMD_INIT_RX_TX); enable_port(); return 0; } static void cpm_serial_putc(unsigned char c) { while (tbdf->sc & 0x8000) barrier(); sync(); tbdf->addr[0] = c; eieio(); tbdf->sc |= 0x8000; } static unsigned char cpm_serial_tstc(void) { barrier(); return !(rbdf->sc & 0x8000); } static unsigned char cpm_serial_getc(void) { unsigned char c; while (!cpm_serial_tstc()) ; sync(); c = rbdf->addr[0]; eieio(); rbdf->sc |= 0x8000; return c; } int cpm_console_init(void *devp, struct serial_console_data *scdp) { void *vreg[2]; u32 reg[2]; int is_smc = 0, is_cpm2 = 0; void *parent, *muram; void *muram_addr; unsigned long muram_offset, muram_size; if (dt_is_compatible(devp, "fsl,cpm1-smc-uart")) { is_smc = 1; } else if (dt_is_compatible(devp, "fsl,cpm2-scc-uart")) { is_cpm2 = 1; } else if (dt_is_compatible(devp, "fsl,cpm2-smc-uart")) { is_cpm2 = 1; is_smc = 1; } if (is_smc) { enable_port = smc_enable_port; disable_port = smc_disable_port; } else { enable_port = scc_enable_port; disable_port = scc_disable_port; } if (is_cpm2) do_cmd = cpm2_cmd; else do_cmd = cpm1_cmd; if (getprop(devp, "fsl,cpm-command", &cpm_cmd, 4) < 4) return -1; if (dt_get_virtual_reg(devp, vreg, 2) < 2) return -1; if (is_smc) smc = vreg[0]; else scc = vreg[0]; param = vreg[1]; parent = get_parent(devp); if (!parent) return -1; if (dt_get_virtual_reg(parent, &cpcr, 1) < 1) return -1; muram = finddevice("/soc/cpm/muram/data"); if (!muram) 
return -1; /* For bootwrapper-compatible device trees, we assume that the first * entry has at least 128 bytes, and that #address-cells/#data-cells * is one for both parent and child. */ if (dt_get_virtual_reg(muram, &muram_addr, 1) < 1) return -1; if (getprop(muram, "reg", reg, 8) < 8) return -1; muram_offset = reg[0]; muram_size = reg[1]; /* Store the buffer descriptors at the end of the first muram chunk. * For SMC ports on CPM2-based platforms, relocate the parameter RAM * just before the buffer descriptors. */ cbd_offset = muram_offset + muram_size - 2 * sizeof(struct cpm_bd); if (is_cpm2 && is_smc) { u16 *smc_base = (u16 *)param; u16 pram_offset; pram_offset = cbd_offset - 64; pram_offset = _ALIGN_DOWN(pram_offset, 64); disable_port(); out_be16(smc_base, pram_offset); param = muram_addr - muram_offset + pram_offset; } cbd_addr = muram_addr - muram_offset + cbd_offset; scdp->open = cpm_serial_open; scdp->putc = cpm_serial_putc; scdp->getc = cpm_serial_getc; scdp->tstc = cpm_serial_tstc; return 0; }
gpl-2.0
paprikon/android_kernel_htc_bliss
arch/m68k/platform/5307/gpio.c
14892
1361
/* * Coldfire generic GPIO support * * (C) Copyright 2009, Steven King <sfking@fdwdc.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfgpio.h> static struct mcf_gpio_chip mcf_gpio_chips[] = { { .gpio_chip = { .label = "PP", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value, .ngpio = 16, }, .pddr = (void __iomem *) MCFSIM_PADDR, .podr = (void __iomem *) MCFSIM_PADAT, .ppdr = (void __iomem *) MCFSIM_PADAT, }, }; static int __init mcf_gpio_init(void) { unsigned i = 0; while (i < ARRAY_SIZE(mcf_gpio_chips)) (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]); return 0; } core_initcall(mcf_gpio_init);
gpl-2.0
MichaelQQ/Linux-PE
drivers/pinctrl/pinctrl-imx1-core.c
45
17171
/* * Core driver for the imx pin controller in imx1/21/27 * * Copyright (C) 2013 Pengutronix * Author: Markus Pargmann <mpa@pengutronix.de> * * Based on pinctrl-imx.c: * Author: Dong Aisheng <dong.aisheng@linaro.org> * Copyright (C) 2012 Freescale Semiconductor, Inc. * Copyright (C) 2012 Linaro Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/bitops.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/machine.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/slab.h> #include "core.h" #include "pinctrl-imx1.h" struct imx1_pinctrl { struct device *dev; struct pinctrl_dev *pctl; void __iomem *base; const struct imx1_pinctrl_soc_info *info; }; /* * MX1 register offsets */ #define MX1_DDIR 0x00 #define MX1_OCR 0x04 #define MX1_ICONFA 0x0c #define MX1_ICONFB 0x10 #define MX1_GIUS 0x20 #define MX1_GPR 0x38 #define MX1_PUEN 0x40 #define MX1_PORT_STRIDE 0x100 /* * MUX_ID format defines */ #define MX1_MUX_FUNCTION(val) (BIT(0) & val) #define MX1_MUX_GPIO(val) ((BIT(1) & val) >> 1) #define MX1_MUX_DIR(val) ((BIT(2) & val) >> 2) #define MX1_MUX_OCONF(val) (((BIT(4) | BIT(5)) & val) >> 4) #define MX1_MUX_ICONFA(val) (((BIT(8) | BIT(9)) & val) >> 8) #define MX1_MUX_ICONFB(val) (((BIT(10) | BIT(11)) & val) >> 10) /* * IMX1 IOMUXC manages the pins based on ports. Each port has 32 pins. IOMUX * control register are seperated into function, output configuration, input * configuration A, input configuration B, GPIO in use and data direction. * * Those controls that are represented by 1 bit have a direct mapping between * bit position and pin id. 
If they are represented by 2 bit, the lower 16 pins * are in the first register and the upper 16 pins in the second (next) * register. pin_id is stored in bit (pin_id%16)*2 and the bit above. */ /* * Calculates the register offset from a pin_id */ static void __iomem *imx1_mem(struct imx1_pinctrl *ipctl, unsigned int pin_id) { unsigned int port = pin_id / 32; return ipctl->base + port * MX1_PORT_STRIDE; } /* * Write to a register with 2 bits per pin. The function will automatically * use the next register if the pin is managed in the second register. */ static void imx1_write_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, u32 value, u32 reg_offset) { void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset; int offset = (pin_id % 16) * 2; /* offset, regardless of register used */ int mask = ~(0x3 << offset); /* Mask for 2 bits at offset */ u32 old_val; u32 new_val; dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n", reg, offset, value); /* Use the next register if the pin's port pin number is >=16 */ if (pin_id % 32 >= 16) reg += 0x04; /* Get current state of pins */ old_val = readl(reg); old_val &= mask; new_val = value & 0x3; /* Make sure value is really 2 bit */ new_val <<= offset; new_val |= old_val;/* Set new state for pin_id */ writel(new_val, reg); } static void imx1_write_bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, u32 value, u32 reg_offset) { void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset; int offset = pin_id % 32; int mask = ~BIT_MASK(offset); u32 old_val; u32 new_val; /* Get current state of pins */ old_val = readl(reg); old_val &= mask; new_val = value & 0x1; /* Make sure value is really 1 bit */ new_val <<= offset; new_val |= old_val;/* Set new state for pin_id */ writel(new_val, reg); } static int imx1_read_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, u32 reg_offset) { void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset; int offset = pin_id % 16; /* Use the next register if the pin's port pin 
number is >=16 */ if (pin_id % 32 >= 16) reg += 0x04; return (readl(reg) & (BIT(offset) | BIT(offset+1))) >> offset; } static int imx1_read_bit(struct imx1_pinctrl *ipctl, unsigned int pin_id, u32 reg_offset) { void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset; int offset = pin_id % 32; return !!(readl(reg) & BIT(offset)); } static const inline struct imx1_pin_group *imx1_pinctrl_find_group_by_name( const struct imx1_pinctrl_soc_info *info, const char *name) { const struct imx1_pin_group *grp = NULL; int i; for (i = 0; i < info->ngroups; i++) { if (!strcmp(info->groups[i].name, name)) { grp = &info->groups[i]; break; } } return grp; } static int imx1_get_groups_count(struct pinctrl_dev *pctldev) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; return info->ngroups; } static const char *imx1_get_group_name(struct pinctrl_dev *pctldev, unsigned selector) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; return info->groups[selector].name; } static int imx1_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector, const unsigned int **pins, unsigned *npins) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; if (selector >= info->ngroups) return -EINVAL; *pins = info->groups[selector].pin_ids; *npins = info->groups[selector].npins; return 0; } static void imx1_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned offset) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); seq_printf(s, "GPIO %d, function %d, direction %d, oconf %d, iconfa %d, iconfb %d", imx1_read_bit(ipctl, offset, MX1_GIUS), imx1_read_bit(ipctl, offset, MX1_GPR), imx1_read_bit(ipctl, offset, MX1_DDIR), imx1_read_2bit(ipctl, offset, MX1_OCR), imx1_read_2bit(ipctl, offset, MX1_ICONFA), imx1_read_2bit(ipctl, offset, MX1_ICONFB)); } static int 
imx1_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np, struct pinctrl_map **map, unsigned *num_maps) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; const struct imx1_pin_group *grp; struct pinctrl_map *new_map; struct device_node *parent; int map_num = 1; int i, j; /* * first find the group of this node and check if we need create * config maps for pins */ grp = imx1_pinctrl_find_group_by_name(info, np->name); if (!grp) { dev_err(info->dev, "unable to find group for node %s\n", np->name); return -EINVAL; } for (i = 0; i < grp->npins; i++) map_num++; new_map = kmalloc(sizeof(struct pinctrl_map) * map_num, GFP_KERNEL); if (!new_map) return -ENOMEM; *map = new_map; *num_maps = map_num; /* create mux map */ parent = of_get_parent(np); if (!parent) { kfree(new_map); return -EINVAL; } new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; new_map[0].data.mux.function = parent->name; new_map[0].data.mux.group = np->name; of_node_put(parent); /* create config map */ new_map++; for (i = j = 0; i < grp->npins; i++) { new_map[j].type = PIN_MAP_TYPE_CONFIGS_PIN; new_map[j].data.configs.group_or_pin = pin_get_name(pctldev, grp->pins[i].pin_id); new_map[j].data.configs.configs = &grp->pins[i].config; new_map[j].data.configs.num_configs = 1; j++; } dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", (*map)->data.mux.function, (*map)->data.mux.group, map_num); return 0; } static void imx1_dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps) { kfree(map); } static const struct pinctrl_ops imx1_pctrl_ops = { .get_groups_count = imx1_get_groups_count, .get_group_name = imx1_get_group_name, .get_group_pins = imx1_get_group_pins, .pin_dbg_show = imx1_pin_dbg_show, .dt_node_to_map = imx1_dt_node_to_map, .dt_free_map = imx1_dt_free_map, }; static int imx1_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector, unsigned group) { struct imx1_pinctrl *ipctl = 
pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; const struct imx1_pin *pins; unsigned int npins; int i; /* * Configure the mux mode for each pin in the group for a specific * function. */ pins = info->groups[group].pins; npins = info->groups[group].npins; WARN_ON(!pins || !npins); dev_dbg(ipctl->dev, "enable function %s group %s\n", info->functions[selector].name, info->groups[group].name); for (i = 0; i < npins; i++) { unsigned int mux = pins[i].mux_id; unsigned int pin_id = pins[i].pin_id; unsigned int afunction = MX1_MUX_FUNCTION(mux); unsigned int gpio_in_use = MX1_MUX_GPIO(mux); unsigned int direction = MX1_MUX_DIR(mux); unsigned int gpio_oconf = MX1_MUX_OCONF(mux); unsigned int gpio_iconfa = MX1_MUX_ICONFA(mux); unsigned int gpio_iconfb = MX1_MUX_ICONFB(mux); dev_dbg(pctldev->dev, "%s, pin 0x%x, function %d, gpio %d, direction %d, oconf %d, iconfa %d, iconfb %d\n", __func__, pin_id, afunction, gpio_in_use, direction, gpio_oconf, gpio_iconfa, gpio_iconfb); imx1_write_bit(ipctl, pin_id, gpio_in_use, MX1_GIUS); imx1_write_bit(ipctl, pin_id, direction, MX1_DDIR); if (gpio_in_use) { imx1_write_2bit(ipctl, pin_id, gpio_oconf, MX1_OCR); imx1_write_2bit(ipctl, pin_id, gpio_iconfa, MX1_ICONFA); imx1_write_2bit(ipctl, pin_id, gpio_iconfb, MX1_ICONFB); } else { imx1_write_bit(ipctl, pin_id, afunction, MX1_GPR); } } return 0; } static int imx1_pmx_get_funcs_count(struct pinctrl_dev *pctldev) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; return info->nfunctions; } static const char *imx1_pmx_get_func_name(struct pinctrl_dev *pctldev, unsigned selector) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; return info->functions[selector].name; } static int imx1_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector, const char * const **groups, unsigned * const num_groups) { struct 
imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; *groups = info->functions[selector].groups; *num_groups = info->functions[selector].num_groups; return 0; } static const struct pinmux_ops imx1_pmx_ops = { .get_functions_count = imx1_pmx_get_funcs_count, .get_function_name = imx1_pmx_get_func_name, .get_function_groups = imx1_pmx_get_groups, .enable = imx1_pmx_enable, }; static int imx1_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin_id, unsigned long *config) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); *config = imx1_read_bit(ipctl, pin_id, MX1_PUEN); return 0; } static int imx1_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin_id, unsigned long *configs, unsigned num_configs) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; int i; for (i = 0; i != num_configs; ++i) { imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", info->pins[pin_id].name); } return 0; } static void imx1_pinconf_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned pin_id) { unsigned long config; imx1_pinconf_get(pctldev, pin_id, &config); seq_printf(s, "0x%lx", config); } static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned group) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; struct imx1_pin_group *grp; unsigned long config; const char *name; int i, ret; if (group > info->ngroups) return; seq_puts(s, "\n"); grp = &info->groups[group]; for (i = 0; i < grp->npins; i++) { name = pin_get_name(pctldev, grp->pins[i].pin_id); ret = imx1_pinconf_get(pctldev, grp->pins[i].pin_id, &config); if (ret) return; seq_printf(s, "%s: 0x%lx", name, config); } } static const struct pinconf_ops imx1_pinconf_ops = { .pin_config_get = imx1_pinconf_get, 
.pin_config_set = imx1_pinconf_set, .pin_config_dbg_show = imx1_pinconf_dbg_show, .pin_config_group_dbg_show = imx1_pinconf_group_dbg_show, }; static struct pinctrl_desc imx1_pinctrl_desc = { .pctlops = &imx1_pctrl_ops, .pmxops = &imx1_pmx_ops, .confops = &imx1_pinconf_ops, .owner = THIS_MODULE, }; static int imx1_pinctrl_parse_groups(struct device_node *np, struct imx1_pin_group *grp, struct imx1_pinctrl_soc_info *info, u32 index) { int size; const __be32 *list; int i; dev_dbg(info->dev, "group(%d): %s\n", index, np->name); /* Initialise group */ grp->name = np->name; /* * the binding format is fsl,pins = <PIN MUX_ID CONFIG> */ list = of_get_property(np, "fsl,pins", &size); /* we do not check return since it's safe node passed down */ if (!size || size % 12) { dev_notice(info->dev, "Not a valid fsl,pins property (%s)\n", np->name); return -EINVAL; } grp->npins = size / 12; grp->pins = devm_kzalloc(info->dev, grp->npins * sizeof(struct imx1_pin), GFP_KERNEL); grp->pin_ids = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int), GFP_KERNEL); if (!grp->pins || !grp->pin_ids) return -ENOMEM; for (i = 0; i < grp->npins; i++) { grp->pins[i].pin_id = be32_to_cpu(*list++); grp->pins[i].mux_id = be32_to_cpu(*list++); grp->pins[i].config = be32_to_cpu(*list++); grp->pin_ids[i] = grp->pins[i].pin_id; } return 0; } static int imx1_pinctrl_parse_functions(struct device_node *np, struct imx1_pinctrl_soc_info *info, u32 index) { struct device_node *child; struct imx1_pmx_func *func; struct imx1_pin_group *grp; int ret; static u32 grp_index; u32 i = 0; dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name); func = &info->functions[index]; /* Initialise function */ func->name = np->name; func->num_groups = of_get_child_count(np); if (func->num_groups <= 0) return -EINVAL; func->groups = devm_kzalloc(info->dev, func->num_groups * sizeof(char *), GFP_KERNEL); if (!func->groups) return -ENOMEM; for_each_child_of_node(np, child) { func->groups[i] = child->name; grp = 
&info->groups[grp_index++]; ret = imx1_pinctrl_parse_groups(child, grp, info, i++); if (ret == -ENOMEM) return ret; } return 0; } static int imx1_pinctrl_parse_dt(struct platform_device *pdev, struct imx1_pinctrl *pctl, struct imx1_pinctrl_soc_info *info) { struct device_node *np = pdev->dev.of_node; struct device_node *child; int ret; u32 nfuncs = 0; u32 ngroups = 0; u32 ifunc = 0; if (!np) return -ENODEV; for_each_child_of_node(np, child) { ++nfuncs; ngroups += of_get_child_count(child); } if (!nfuncs) { dev_err(&pdev->dev, "No pin functions defined\n"); return -EINVAL; } info->nfunctions = nfuncs; info->functions = devm_kzalloc(&pdev->dev, nfuncs * sizeof(struct imx1_pmx_func), GFP_KERNEL); info->ngroups = ngroups; info->groups = devm_kzalloc(&pdev->dev, ngroups * sizeof(struct imx1_pin_group), GFP_KERNEL); if (!info->functions || !info->groups) return -ENOMEM; for_each_child_of_node(np, child) { ret = imx1_pinctrl_parse_functions(child, info, ifunc++); if (ret == -ENOMEM) return -ENOMEM; } return 0; } int imx1_pinctrl_core_probe(struct platform_device *pdev, struct imx1_pinctrl_soc_info *info) { struct imx1_pinctrl *ipctl; struct resource *res; struct pinctrl_desc *pctl_desc; int ret; if (!info || !info->pins || !info->npins) { dev_err(&pdev->dev, "wrong pinctrl info\n"); return -EINVAL; } info->dev = &pdev->dev; /* Create state holders etc for this driver */ ipctl = devm_kzalloc(&pdev->dev, sizeof(*ipctl), GFP_KERNEL); if (!ipctl) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENOENT; ipctl->base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (!ipctl->base) return -ENOMEM; pctl_desc = &imx1_pinctrl_desc; pctl_desc->name = dev_name(&pdev->dev); pctl_desc->pins = info->pins; pctl_desc->npins = info->npins; ret = imx1_pinctrl_parse_dt(pdev, ipctl, info); if (ret) { dev_err(&pdev->dev, "fail to probe dt properties\n"); return ret; } ipctl->info = info; ipctl->dev = info->dev; 
platform_set_drvdata(pdev, ipctl); ipctl->pctl = pinctrl_register(pctl_desc, &pdev->dev, ipctl); if (!ipctl->pctl) { dev_err(&pdev->dev, "could not register IMX pinctrl driver\n"); return -EINVAL; } dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); return 0; } int imx1_pinctrl_core_remove(struct platform_device *pdev) { struct imx1_pinctrl *ipctl = platform_get_drvdata(pdev); pinctrl_unregister(ipctl->pctl); return 0; }
gpl-2.0
kyleterry/linux
drivers/pci/rom.c
45
5868
/* * drivers/pci/rom.c * * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com> * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com> * * PCI ROM access routines */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/slab.h> #include "pci.h" /** * pci_enable_rom - enable ROM decoding for a PCI device * @pdev: PCI device to enable * * Enable ROM decoding on @dev. This involves simply turning on the last * bit of the PCI ROM BAR. Note that some cards may share address decoders * between the ROM and other resources, so enabling it may disable access * to MMIO registers or other card memory. */ int pci_enable_rom(struct pci_dev *pdev) { struct resource *res = pdev->resource + PCI_ROM_RESOURCE; struct pci_bus_region region; u32 rom_addr; if (!res->flags) return -1; pcibios_resource_to_bus(pdev, &region, res); pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); rom_addr &= ~PCI_ROM_ADDRESS_MASK; rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); return 0; } /** * pci_disable_rom - disable ROM decoding for a PCI device * @pdev: PCI device to disable * * Disable ROM decoding on a PCI device by turning off the last bit in the * ROM BAR. */ void pci_disable_rom(struct pci_dev *pdev) { u32 rom_addr; pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); } /** * pci_get_rom_size - obtain the actual size of the ROM image * @pdev: target PCI device * @rom: kernel virtual pointer to image of ROM * @size: size of PCI window * return: size of actual ROM image * * Determine the actual length of the ROM image. * The PCI window size could be much larger than the * actual image size. 
*/ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) { void __iomem *image; int last_image; image = rom; do { void __iomem *pds; /* Standard PCI ROMs start out with these bytes 55 AA */ if (readb(image) != 0x55) { dev_err(&pdev->dev, "Invalid ROM contents\n"); break; } if (readb(image + 1) != 0xAA) break; /* get the PCI data structure and check its signature */ pds = image + readw(image + 24); if (readb(pds) != 'P') break; if (readb(pds + 1) != 'C') break; if (readb(pds + 2) != 'I') break; if (readb(pds + 3) != 'R') break; last_image = readb(pds + 21) & 0x80; /* this length is reliable */ image += readw(pds + 16) * 512; } while (!last_image); /* never return a size larger than the PCI resource window */ /* there are known ROMs that get the size wrong */ return min((size_t)(image - rom), size); } /** * pci_map_rom - map a PCI ROM to kernel space * @pdev: pointer to pci device struct * @size: pointer to receive size of pci window over ROM * * Return: kernel virtual pointer to image of ROM * * Map a PCI ROM into kernel space. If ROM is boot video ROM, * the shadow BIOS copy will be returned instead of the * actual ROM. */ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; loff_t start; void __iomem *rom; /* * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy * memory map if the VGA enable bit of the Bridge Control register is * set for embedded VGA. 
*/ if (res->flags & IORESOURCE_ROM_SHADOW) { /* primary video rom always starts here */ start = (loff_t)0xC0000; *size = 0x20000; /* cover C000:0 through E000:0 */ } else { if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); return (void __iomem *)(unsigned long) pci_resource_start(pdev, PCI_ROM_RESOURCE); } else { /* assign the ROM an address if it doesn't have one */ if (res->parent == NULL && pci_assign_resource(pdev,PCI_ROM_RESOURCE)) return NULL; start = pci_resource_start(pdev, PCI_ROM_RESOURCE); *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); if (*size == 0) return NULL; /* Enable ROM space decodes */ if (pci_enable_rom(pdev)) return NULL; } } rom = ioremap(start, *size); if (!rom) { /* restore enable if ioremap fails */ if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW | IORESOURCE_ROM_COPY))) pci_disable_rom(pdev); return NULL; } /* * Try to find the true size of the ROM since sometimes the PCI window * size is much larger than the actual size of the ROM. * True size is important if the ROM is going to be copied. */ *size = pci_get_rom_size(pdev, rom, *size); return rom; } /** * pci_unmap_rom - unmap the ROM from kernel space * @pdev: pointer to pci device struct * @rom: virtual address of the previous mapping * * Remove a mapping of a previously mapped ROM */ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) return; iounmap(rom); /* Disable again before continuing, leave enabled if pci=rom */ if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) pci_disable_rom(pdev); } /** * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy * @pdev: pointer to pci device struct * * Free the copied ROM if we allocated one. 
*/ void pci_cleanup_rom(struct pci_dev *pdev) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; if (res->flags & IORESOURCE_ROM_COPY) { kfree((void*)(unsigned long)res->start); res->flags &= ~IORESOURCE_ROM_COPY; res->start = 0; res->end = 0; } } EXPORT_SYMBOL(pci_map_rom); EXPORT_SYMBOL(pci_unmap_rom); EXPORT_SYMBOL_GPL(pci_enable_rom); EXPORT_SYMBOL_GPL(pci_disable_rom);
gpl-2.0
baixobaixa/limbo-android
jni/qemu/blockdev.c
45
33418
/* * QEMU host block devices * * Copyright (c) 2003-2008 Fabrice Bellard * * This work is licensed under the terms of the GNU GPL, version 2 or * later. See the COPYING file in the top-level directory. */ #include "block.h" #include "blockdev.h" #include "monitor.h" #include "qerror.h" #include "qemu-option.h" #include "qemu-config.h" #include "qemu-objects.h" #include "sysemu.h" #include "block_int.h" #include "qmp-commands.h" #include "trace.h" #include "arch_init.h" static QTAILQ_HEAD(drivelist, DriveInfo) drives = QTAILQ_HEAD_INITIALIZER(drives); static const char *const if_name[IF_COUNT] = { [IF_NONE] = "none", [IF_IDE] = "ide", [IF_SCSI] = "scsi", [IF_FLOPPY] = "floppy", [IF_PFLASH] = "pflash", [IF_MTD] = "mtd", [IF_SD] = "sd", [IF_VIRTIO] = "virtio", [IF_XEN] = "xen", }; static const int if_max_devs[IF_COUNT] = { /* * Do not change these numbers! They govern how drive option * index maps to unit and bus. That mapping is ABI. * * All controllers used to imlement if=T drives need to support * if_max_devs[T] units, for any T with if_max_devs[T] != 0. * Otherwise, some index values map to "impossible" bus, unit * values. * * For instance, if you change [IF_SCSI] to 255, -drive * if=scsi,index=12 no longer means bus=1,unit=5, but * bus=0,unit=12. With an lsi53c895a controller (7 units max), * the drive can't be set up. Regression. */ [IF_IDE] = 2, [IF_SCSI] = 7, }; /* * We automatically delete the drive when a device using it gets * unplugged. Questionable feature, but we can't just drop it. * Device models call blockdev_mark_auto_del() to schedule the * automatic deletion, and generic qdev code calls blockdev_auto_del() * when deletion is actually safe. 
*/ void blockdev_mark_auto_del(BlockDriverState *bs) { DriveInfo *dinfo = drive_get_by_blockdev(bs); if (bs->job) { block_job_cancel(bs->job); } if (dinfo) { dinfo->auto_del = 1; } } void blockdev_auto_del(BlockDriverState *bs) { DriveInfo *dinfo = drive_get_by_blockdev(bs); if (dinfo && dinfo->auto_del) { drive_put_ref(dinfo); } } static int drive_index_to_bus_id(BlockInterfaceType type, int index) { int max_devs = if_max_devs[type]; return max_devs ? index / max_devs : 0; } static int drive_index_to_unit_id(BlockInterfaceType type, int index) { int max_devs = if_max_devs[type]; return max_devs ? index % max_devs : index; } QemuOpts *drive_def(const char *optstr) { return qemu_opts_parse(qemu_find_opts("drive"), optstr, 0); } QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file, const char *optstr) { QemuOpts *opts; char buf[32]; opts = drive_def(optstr); if (!opts) { return NULL; } if (type != IF_DEFAULT) { qemu_opt_set(opts, "if", if_name[type]); } if (index >= 0) { snprintf(buf, sizeof(buf), "%d", index); qemu_opt_set(opts, "index", buf); } if (file) qemu_opt_set(opts, "file", file); return opts; } DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit) { DriveInfo *dinfo; /* seek interface, bus and unit */ QTAILQ_FOREACH(dinfo, &drives, next) { if (dinfo->type == type && dinfo->bus == bus && dinfo->unit == unit) return dinfo; } return NULL; } DriveInfo *drive_get_by_index(BlockInterfaceType type, int index) { return drive_get(type, drive_index_to_bus_id(type, index), drive_index_to_unit_id(type, index)); } int drive_get_max_bus(BlockInterfaceType type) { int max_bus; DriveInfo *dinfo; max_bus = -1; QTAILQ_FOREACH(dinfo, &drives, next) { if(dinfo->type == type && dinfo->bus > max_bus) max_bus = dinfo->bus; } return max_bus; } /* Get a block device. This should only be used for single-drive devices (e.g. SD/Floppy/MTD). Multi-disk devices (scsi/ide) should use the appropriate bus. 
*/ DriveInfo *drive_get_next(BlockInterfaceType type) { static int next_block_unit[IF_COUNT]; return drive_get(type, 0, next_block_unit[type]++); } DriveInfo *drive_get_by_blockdev(BlockDriverState *bs) { DriveInfo *dinfo; QTAILQ_FOREACH(dinfo, &drives, next) { if (dinfo->bdrv == bs) { return dinfo; } } return NULL; } static void bdrv_format_print(void *opaque, const char *name) { error_printf(" %s", name); } static void drive_uninit(DriveInfo *dinfo) { qemu_opts_del(dinfo->opts); bdrv_delete(dinfo->bdrv); g_free(dinfo->id); QTAILQ_REMOVE(&drives, dinfo, next); g_free(dinfo); } void drive_put_ref(DriveInfo *dinfo) { assert(dinfo->refcount); if (--dinfo->refcount == 0) { drive_uninit(dinfo); } } void drive_get_ref(DriveInfo *dinfo) { dinfo->refcount++; } typedef struct { QEMUBH *bh; DriveInfo *dinfo; } DrivePutRefBH; static void drive_put_ref_bh(void *opaque) { DrivePutRefBH *s = opaque; drive_put_ref(s->dinfo); qemu_bh_delete(s->bh); g_free(s); } /* * Release a drive reference in a BH * * It is not possible to use drive_put_ref() from a callback function when the * callers still need the drive. In such cases we schedule a BH to release the * reference. */ static void drive_put_ref_bh_schedule(DriveInfo *dinfo) { DrivePutRefBH *s; s = g_new(DrivePutRefBH, 1); s->bh = qemu_bh_new(drive_put_ref_bh, s); s->dinfo = dinfo; qemu_bh_schedule(s->bh); } static int parse_block_error_action(const char *buf, int is_read) { if (!strcmp(buf, "ignore")) { return BLOCK_ERR_IGNORE; } else if (!is_read && !strcmp(buf, "enospc")) { return BLOCK_ERR_STOP_ENOSPC; } else if (!strcmp(buf, "stop")) { return BLOCK_ERR_STOP_ANY; } else if (!strcmp(buf, "report")) { return BLOCK_ERR_REPORT; } else { error_report("'%s' invalid %s error action", buf, is_read ? 
"read" : "write"); return -1; } } static bool do_check_io_limits(BlockIOLimit *io_limits) { bool bps_flag; bool iops_flag; assert(io_limits); bps_flag = (io_limits->bps[BLOCK_IO_LIMIT_TOTAL] != 0) && ((io_limits->bps[BLOCK_IO_LIMIT_READ] != 0) || (io_limits->bps[BLOCK_IO_LIMIT_WRITE] != 0)); iops_flag = (io_limits->iops[BLOCK_IO_LIMIT_TOTAL] != 0) && ((io_limits->iops[BLOCK_IO_LIMIT_READ] != 0) || (io_limits->iops[BLOCK_IO_LIMIT_WRITE] != 0)); if (bps_flag || iops_flag) { return false; } return true; } DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi) { const char *buf; const char *file = NULL; char devname[128]; const char *serial; const char *mediastr = ""; BlockInterfaceType type; enum { MEDIA_DISK, MEDIA_CDROM } media; int bus_id, unit_id; int cyls, heads, secs, translation; BlockDriver *drv = NULL; int max_devs; int index; int ro = 0; int bdrv_flags = 0; int on_read_error, on_write_error; const char *devaddr; DriveInfo *dinfo; BlockIOLimit io_limits; int snapshot = 0; bool copy_on_read; int ret; translation = BIOS_ATA_TRANSLATION_AUTO; media = MEDIA_DISK; /* extract parameters */ bus_id = qemu_opt_get_number(opts, "bus", 0); unit_id = qemu_opt_get_number(opts, "unit", -1); index = qemu_opt_get_number(opts, "index", -1); cyls = qemu_opt_get_number(opts, "cyls", 0); heads = qemu_opt_get_number(opts, "heads", 0); secs = qemu_opt_get_number(opts, "secs", 0); snapshot = qemu_opt_get_bool(opts, "snapshot", 0); ro = qemu_opt_get_bool(opts, "readonly", 0); copy_on_read = qemu_opt_get_bool(opts, "copy-on-read", false); file = qemu_opt_get(opts, "file"); serial = qemu_opt_get(opts, "serial"); if ((buf = qemu_opt_get(opts, "if")) != NULL) { pstrcpy(devname, sizeof(devname), buf); for (type = 0; type < IF_COUNT && strcmp(buf, if_name[type]); type++) ; if (type == IF_COUNT) { error_report("unsupported bus type '%s'", buf); return NULL; } } else { type = default_to_scsi ? 
IF_SCSI : IF_IDE; pstrcpy(devname, sizeof(devname), if_name[type]); } max_devs = if_max_devs[type]; if (cyls || heads || secs) { if (cyls < 1 || (type == IF_IDE && cyls > 16383)) { error_report("invalid physical cyls number"); return NULL; } if (heads < 1 || (type == IF_IDE && heads > 16)) { error_report("invalid physical heads number"); return NULL; } if (secs < 1 || (type == IF_IDE && secs > 63)) { error_report("invalid physical secs number"); return NULL; } } if ((buf = qemu_opt_get(opts, "trans")) != NULL) { if (!cyls) { error_report("'%s' trans must be used with cyls, heads and secs", buf); return NULL; } if (!strcmp(buf, "none")) translation = BIOS_ATA_TRANSLATION_NONE; else if (!strcmp(buf, "lba")) translation = BIOS_ATA_TRANSLATION_LBA; else if (!strcmp(buf, "auto")) translation = BIOS_ATA_TRANSLATION_AUTO; else { error_report("'%s' invalid translation type", buf); return NULL; } } if ((buf = qemu_opt_get(opts, "media")) != NULL) { if (!strcmp(buf, "disk")) { media = MEDIA_DISK; } else if (!strcmp(buf, "cdrom")) { if (cyls || secs || heads) { error_report("CHS can't be set with media=%s", buf); return NULL; } media = MEDIA_CDROM; } else { error_report("'%s' invalid media", buf); return NULL; } } if ((buf = qemu_opt_get(opts, "cache")) != NULL) { if (bdrv_parse_cache_flags(buf, &bdrv_flags) != 0) { error_report("invalid cache option"); return NULL; } } #ifdef CONFIG_LINUX_AIO if ((buf = qemu_opt_get(opts, "aio")) != NULL) { if (!strcmp(buf, "native")) { bdrv_flags |= BDRV_O_NATIVE_AIO; } else if (!strcmp(buf, "threads")) { /* this is the default */ } else { error_report("invalid aio option"); return NULL; } } #endif if ((buf = qemu_opt_get(opts, "format")) != NULL) { if (strcmp(buf, "?") == 0) { error_printf("Supported formats:"); bdrv_iterate_format(bdrv_format_print, NULL); error_printf("\n"); return NULL; } drv = bdrv_find_whitelisted_format(buf); if (!drv) { error_report("'%s' invalid format", buf); return NULL; } } /* disk I/O throttling */ 
io_limits.bps[BLOCK_IO_LIMIT_TOTAL] = qemu_opt_get_number(opts, "bps", 0); io_limits.bps[BLOCK_IO_LIMIT_READ] = qemu_opt_get_number(opts, "bps_rd", 0); io_limits.bps[BLOCK_IO_LIMIT_WRITE] = qemu_opt_get_number(opts, "bps_wr", 0); io_limits.iops[BLOCK_IO_LIMIT_TOTAL] = qemu_opt_get_number(opts, "iops", 0); io_limits.iops[BLOCK_IO_LIMIT_READ] = qemu_opt_get_number(opts, "iops_rd", 0); io_limits.iops[BLOCK_IO_LIMIT_WRITE] = qemu_opt_get_number(opts, "iops_wr", 0); if (!do_check_io_limits(&io_limits)) { error_report("bps(iops) and bps_rd/bps_wr(iops_rd/iops_wr) " "cannot be used at the same time"); return NULL; } on_write_error = BLOCK_ERR_STOP_ENOSPC; if ((buf = qemu_opt_get(opts, "werror")) != NULL) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO && type != IF_NONE) { error_report("werror is not supported by this bus type"); return NULL; } on_write_error = parse_block_error_action(buf, 0); if (on_write_error < 0) { return NULL; } } on_read_error = BLOCK_ERR_REPORT; if ((buf = qemu_opt_get(opts, "rerror")) != NULL) { if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI && type != IF_NONE) { error_report("rerror is not supported by this bus type"); return NULL; } on_read_error = parse_block_error_action(buf, 1); if (on_read_error < 0) { return NULL; } } if ((devaddr = qemu_opt_get(opts, "addr")) != NULL) { if (type != IF_VIRTIO) { error_report("addr is not supported by this bus type"); return NULL; } } /* compute bus and unit according index */ if (index != -1) { if (bus_id != 0 || unit_id != -1) { error_report("index cannot be used with bus and unit"); return NULL; } bus_id = drive_index_to_bus_id(type, index); unit_id = drive_index_to_unit_id(type, index); } /* if user doesn't specify a unit_id, * try to find the first free */ if (unit_id == -1) { unit_id = 0; while (drive_get(type, bus_id, unit_id) != NULL) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } /* check unit id */ if (max_devs && unit_id >= 
max_devs) { error_report("unit %d too big (max is %d)", unit_id, max_devs - 1); return NULL; } /* * catch multiple definitions */ if (drive_get(type, bus_id, unit_id) != NULL) { error_report("drive with bus=%d, unit=%d (index=%d) exists", bus_id, unit_id, index); return NULL; } /* init */ dinfo = g_malloc0(sizeof(*dinfo)); if ((buf = qemu_opts_id(opts)) != NULL) { dinfo->id = g_strdup(buf); } else { /* no id supplied -> create one */ dinfo->id = g_malloc0(32); if (type == IF_IDE || type == IF_SCSI) mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd"; if (max_devs) snprintf(dinfo->id, 32, "%s%i%s%i", devname, bus_id, mediastr, unit_id); else snprintf(dinfo->id, 32, "%s%s%i", devname, mediastr, unit_id); } dinfo->bdrv = bdrv_new(dinfo->id); dinfo->devaddr = devaddr; dinfo->type = type; dinfo->bus = bus_id; dinfo->unit = unit_id; dinfo->opts = opts; dinfo->refcount = 1; if (serial) { pstrcpy(dinfo->serial, sizeof(dinfo->serial), serial); } QTAILQ_INSERT_TAIL(&drives, dinfo, next); bdrv_set_on_error(dinfo->bdrv, on_read_error, on_write_error); /* disk I/O throttling */ bdrv_set_io_limits(dinfo->bdrv, &io_limits); switch(type) { case IF_IDE: case IF_SCSI: case IF_XEN: case IF_NONE: switch(media) { case MEDIA_DISK: if (cyls != 0) { bdrv_set_geometry_hint(dinfo->bdrv, cyls, heads, secs); bdrv_set_translation_hint(dinfo->bdrv, translation); } break; case MEDIA_CDROM: dinfo->media_cd = 1; break; } break; case IF_SD: case IF_FLOPPY: case IF_PFLASH: case IF_MTD: break; case IF_VIRTIO: /* add virtio block device */ opts = qemu_opts_create(qemu_find_opts("device"), NULL, 0); if (arch_type == QEMU_ARCH_S390X) { qemu_opt_set(opts, "driver", "virtio-blk-s390"); } else { qemu_opt_set(opts, "driver", "virtio-blk-pci"); } qemu_opt_set(opts, "drive", dinfo->id); if (devaddr) qemu_opt_set(opts, "addr", devaddr); break; default: abort(); } if (!file || !*file) { return dinfo; } if (snapshot) { /* always use cache=unsafe with snapshot */ bdrv_flags &= ~BDRV_O_CACHE_MASK; bdrv_flags |= 
(BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH); } if (copy_on_read) { bdrv_flags |= BDRV_O_COPY_ON_READ; } if (runstate_check(RUN_STATE_INMIGRATE)) { bdrv_flags |= BDRV_O_INCOMING; } if (media == MEDIA_CDROM) { /* CDROM is fine for any interface, don't check. */ ro = 1; } else if (ro == 1) { if (type != IF_SCSI && type != IF_VIRTIO && type != IF_FLOPPY && type != IF_NONE && type != IF_PFLASH) { error_report("readonly not supported by this bus type"); goto err; } } bdrv_flags |= ro ? 0 : BDRV_O_RDWR; ret = bdrv_open(dinfo->bdrv, file, bdrv_flags, drv); if (ret < 0) { error_report("could not open disk image %s: %s", file, strerror(-ret)); goto err; } if (bdrv_key_required(dinfo->bdrv)) autostart = 0; return dinfo; err: bdrv_delete(dinfo->bdrv); g_free(dinfo->id); QTAILQ_REMOVE(&drives, dinfo, next); g_free(dinfo); return NULL; } void do_commit(Monitor *mon, const QDict *qdict) { const char *device = qdict_get_str(qdict, "device"); BlockDriverState *bs; int ret; if (!strcmp(device, "all")) { ret = bdrv_commit_all(); if (ret == -EBUSY) { qerror_report(QERR_DEVICE_IN_USE, device); return; } } else { bs = bdrv_find(device); if (!bs) { qerror_report(QERR_DEVICE_NOT_FOUND, device); return; } ret = bdrv_commit(bs); if (ret == -EBUSY) { qerror_report(QERR_DEVICE_IN_USE, device); return; } } } static void blockdev_do_action(int kind, void *data, Error **errp) { BlockdevAction action; BlockdevActionList list; action.kind = kind; action.data = data; list.value = &action; list.next = NULL; qmp_transaction(&list, errp); } void qmp_blockdev_snapshot_sync(const char *device, const char *snapshot_file, bool has_format, const char *format, bool has_mode, enum NewImageMode mode, Error **errp) { BlockdevSnapshot snapshot = { .device = (char *) device, .snapshot_file = (char *) snapshot_file, .has_format = has_format, .format = (char *) format, .has_mode = has_mode, .mode = mode, }; blockdev_do_action(BLOCKDEV_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC, &snapshot, errp); } /* New and old 
BlockDriverState structs for group snapshots */ typedef struct BlkTransactionStates { BlockDriverState *old_bs; BlockDriverState *new_bs; QSIMPLEQ_ENTRY(BlkTransactionStates) entry; } BlkTransactionStates; /* * 'Atomic' group snapshots. The snapshots are taken as a set, and if any fail * then we do not pivot any of the devices in the group, and abandon the * snapshots */ void qmp_transaction(BlockdevActionList *dev_list, Error **errp) { int ret = 0; BlockdevActionList *dev_entry = dev_list; BlkTransactionStates *states, *next; QSIMPLEQ_HEAD(snap_bdrv_states, BlkTransactionStates) snap_bdrv_states; QSIMPLEQ_INIT(&snap_bdrv_states); /* drain all i/o before any snapshots */ bdrv_drain_all(); /* We don't do anything in this loop that commits us to the snapshot */ while (NULL != dev_entry) { BlockdevAction *dev_info = NULL; BlockDriver *proto_drv; BlockDriver *drv; int flags; enum NewImageMode mode; const char *new_image_file; const char *device; const char *format = "qcow2"; dev_info = dev_entry->value; dev_entry = dev_entry->next; states = g_malloc0(sizeof(BlkTransactionStates)); QSIMPLEQ_INSERT_TAIL(&snap_bdrv_states, states, entry); switch (dev_info->kind) { case BLOCKDEV_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC: device = dev_info->blockdev_snapshot_sync->device; if (!dev_info->blockdev_snapshot_sync->has_mode) { dev_info->blockdev_snapshot_sync->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; } new_image_file = dev_info->blockdev_snapshot_sync->snapshot_file; if (dev_info->blockdev_snapshot_sync->has_format) { format = dev_info->blockdev_snapshot_sync->format; } mode = dev_info->blockdev_snapshot_sync->mode; break; default: abort(); } drv = bdrv_find_format(format); if (!drv) { error_set(errp, QERR_INVALID_BLOCK_FORMAT, format); goto delete_and_fail; } states->old_bs = bdrv_find(device); if (!states->old_bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); goto delete_and_fail; } if (!bdrv_is_inserted(states->old_bs)) { error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); goto 
delete_and_fail; } if (bdrv_in_use(states->old_bs)) { error_set(errp, QERR_DEVICE_IN_USE, device); goto delete_and_fail; } if (!bdrv_is_read_only(states->old_bs)) { if (bdrv_flush(states->old_bs)) { error_set(errp, QERR_IO_ERROR); goto delete_and_fail; } } flags = states->old_bs->open_flags; proto_drv = bdrv_find_protocol(new_image_file); if (!proto_drv) { error_set(errp, QERR_INVALID_BLOCK_FORMAT, format); goto delete_and_fail; } /* create new image w/backing file */ if (mode != NEW_IMAGE_MODE_EXISTING) { ret = bdrv_img_create(new_image_file, format, states->old_bs->filename, states->old_bs->drv->format_name, NULL, -1, flags); if (ret) { error_set(errp, QERR_OPEN_FILE_FAILED, new_image_file); goto delete_and_fail; } } /* We will manually add the backing_hd field to the bs later */ states->new_bs = bdrv_new(""); ret = bdrv_open(states->new_bs, new_image_file, flags | BDRV_O_NO_BACKING, drv); if (ret != 0) { error_set(errp, QERR_OPEN_FILE_FAILED, new_image_file); goto delete_and_fail; } } /* Now we are going to do the actual pivot. 
Everything up to this point * is reversible, but we are committed at this point */ QSIMPLEQ_FOREACH(states, &snap_bdrv_states, entry) { /* This removes our old bs from the bdrv_states, and adds the new bs */ bdrv_append(states->new_bs, states->old_bs); } /* success */ goto exit; delete_and_fail: /* * failure, and it is all-or-none; abandon each new bs, and keep using * the original bs for all images */ QSIMPLEQ_FOREACH(states, &snap_bdrv_states, entry) { if (states->new_bs) { bdrv_delete(states->new_bs); } } exit: QSIMPLEQ_FOREACH_SAFE(states, &snap_bdrv_states, entry, next) { g_free(states); } return; } static void eject_device(BlockDriverState *bs, int force, Error **errp) { if (bdrv_in_use(bs)) { error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs)); return; } if (!bdrv_dev_has_removable_media(bs)) { error_set(errp, QERR_DEVICE_NOT_REMOVABLE, bdrv_get_device_name(bs)); return; } if (bdrv_dev_is_medium_locked(bs) && !bdrv_dev_is_tray_open(bs)) { bdrv_dev_eject_request(bs, force); if (!force) { error_set(errp, QERR_DEVICE_LOCKED, bdrv_get_device_name(bs)); return; } } bdrv_close(bs); } void qmp_eject(const char *device, bool has_force, bool force, Error **errp) { BlockDriverState *bs; bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } eject_device(bs, force, errp); } void qmp_block_passwd(const char *device, const char *password, Error **errp) { BlockDriverState *bs; int err; bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } err = bdrv_set_key(bs, password); if (err == -EINVAL) { error_set(errp, QERR_DEVICE_NOT_ENCRYPTED, bdrv_get_device_name(bs)); return; } else if (err < 0) { error_set(errp, QERR_INVALID_PASSWORD); return; } } static void qmp_bdrv_open_encrypted(BlockDriverState *bs, const char *filename, int bdrv_flags, BlockDriver *drv, const char *password, Error **errp) { if (bdrv_open(bs, filename, bdrv_flags, drv) < 0) { error_set(errp, QERR_OPEN_FILE_FAILED, 
filename); return; } if (bdrv_key_required(bs)) { if (password) { if (bdrv_set_key(bs, password) < 0) { error_set(errp, QERR_INVALID_PASSWORD); } } else { error_set(errp, QERR_DEVICE_ENCRYPTED, bdrv_get_device_name(bs), bdrv_get_encrypted_filename(bs)); } } else if (password) { error_set(errp, QERR_DEVICE_NOT_ENCRYPTED, bdrv_get_device_name(bs)); } } void qmp_change_blockdev(const char *device, const char *filename, bool has_format, const char *format, Error **errp) { BlockDriverState *bs; BlockDriver *drv = NULL; int bdrv_flags; Error *err = NULL; bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } if (format) { drv = bdrv_find_whitelisted_format(format); if (!drv) { error_set(errp, QERR_INVALID_BLOCK_FORMAT, format); return; } } eject_device(bs, 0, &err); if (error_is_set(&err)) { error_propagate(errp, err); return; } bdrv_flags = bdrv_is_read_only(bs) ? 0 : BDRV_O_RDWR; bdrv_flags |= bdrv_is_snapshot(bs) ? BDRV_O_SNAPSHOT : 0; qmp_bdrv_open_encrypted(bs, filename, bdrv_flags, drv, NULL, errp); } /* throttling disk I/O limits */ void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd, int64_t bps_wr, int64_t iops, int64_t iops_rd, int64_t iops_wr, Error **errp) { BlockIOLimit io_limits; BlockDriverState *bs; bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } io_limits.bps[BLOCK_IO_LIMIT_TOTAL] = bps; io_limits.bps[BLOCK_IO_LIMIT_READ] = bps_rd; io_limits.bps[BLOCK_IO_LIMIT_WRITE] = bps_wr; io_limits.iops[BLOCK_IO_LIMIT_TOTAL]= iops; io_limits.iops[BLOCK_IO_LIMIT_READ] = iops_rd; io_limits.iops[BLOCK_IO_LIMIT_WRITE]= iops_wr; if (!do_check_io_limits(&io_limits)) { error_set(errp, QERR_INVALID_PARAMETER_COMBINATION); return; } bs->io_limits = io_limits; bs->slice_time = BLOCK_IO_SLICE_TIME; if (!bs->io_limits_enabled && bdrv_io_limits_enabled(bs)) { bdrv_io_limits_enable(bs); } else if (bs->io_limits_enabled && !bdrv_io_limits_enabled(bs)) { 
bdrv_io_limits_disable(bs); } else { if (bs->block_timer) { qemu_mod_timer(bs->block_timer, qemu_get_clock_ns(vm_clock)); } } } int do_drive_del(Monitor *mon, const QDict *qdict, QObject **ret_data) { const char *id = qdict_get_str(qdict, "id"); BlockDriverState *bs; bs = bdrv_find(id); if (!bs) { qerror_report(QERR_DEVICE_NOT_FOUND, id); return -1; } if (bdrv_in_use(bs)) { qerror_report(QERR_DEVICE_IN_USE, id); return -1; } /* quiesce block driver; prevent further io */ bdrv_drain_all(); bdrv_flush(bs); bdrv_close(bs); /* if we have a device attached to this BlockDriverState * then we need to make the drive anonymous until the device * can be removed. If this is a drive with no device backing * then we can just get rid of the block driver state right here. */ if (bdrv_get_attached_dev(bs)) { bdrv_make_anon(bs); } else { drive_uninit(drive_get_by_blockdev(bs)); } return 0; } void qmp_block_resize(const char *device, int64_t size, Error **errp) { BlockDriverState *bs; bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } if (size < 0) { error_set(errp, QERR_INVALID_PARAMETER_VALUE, "size", "a >0 size"); return; } switch (bdrv_truncate(bs, size)) { case 0: break; case -ENOMEDIUM: error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); break; case -ENOTSUP: error_set(errp, QERR_UNSUPPORTED); break; case -EACCES: error_set(errp, QERR_DEVICE_IS_READ_ONLY, device); break; case -EBUSY: error_set(errp, QERR_DEVICE_IN_USE, device); break; default: error_set(errp, QERR_UNDEFINED_ERROR); break; } } static QObject *qobject_from_block_job(BlockJob *job) { return qobject_from_jsonf("{ 'type': %s," "'device': %s," "'len': %" PRId64 "," "'offset': %" PRId64 "," "'speed': %" PRId64 " }", job->job_type->job_type, bdrv_get_device_name(job->bs), job->len, job->offset, job->speed); } static void block_stream_cb(void *opaque, int ret) { BlockDriverState *bs = opaque; QObject *obj; trace_block_stream_cb(bs, bs->job, ret); assert(bs->job); obj = 
qobject_from_block_job(bs->job); if (ret < 0) { QDict *dict = qobject_to_qdict(obj); qdict_put(dict, "error", qstring_from_str(strerror(-ret))); } if (block_job_is_cancelled(bs->job)) { monitor_protocol_event(QEVENT_BLOCK_JOB_CANCELLED, obj); } else { monitor_protocol_event(QEVENT_BLOCK_JOB_COMPLETED, obj); } qobject_decref(obj); drive_put_ref_bh_schedule(drive_get_by_blockdev(bs)); } void qmp_block_stream(const char *device, bool has_base, const char *base, bool has_speed, int64_t speed, Error **errp) { BlockDriverState *bs; BlockDriverState *base_bs = NULL; Error *local_err = NULL; bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } if (base) { base_bs = bdrv_find_backing_image(bs, base); if (base_bs == NULL) { error_set(errp, QERR_BASE_NOT_FOUND, base); return; } } stream_start(bs, base_bs, base, has_speed ? speed : 0, block_stream_cb, bs, &local_err); if (error_is_set(&local_err)) { error_propagate(errp, local_err); return; } /* Grab a reference so hotplug does not delete the BlockDriverState from * underneath us. 
*/
    drive_get_ref(drive_get_by_blockdev(bs));
    trace_qmp_block_stream(bs, bs->job);
}

/* Look up the background job running on @device.
 * Returns NULL when the device does not exist or has no active job. */
static BlockJob *find_block_job(const char *device)
{
    BlockDriverState *bs;

    bs = bdrv_find(device);
    if (!bs || !bs->job) {
        return NULL;
    }
    return bs->job;
}

/* QMP: set the throttling speed of the job running on @device.
 * Reports DeviceNotActive via @errp when no job is running there;
 * other errors are reported by block_job_set_speed() itself. */
void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
{
    BlockJob *job = find_block_job(device);

    if (!job) {
        error_set(errp, QERR_DEVICE_NOT_ACTIVE, device);
        return;
    }

    block_job_set_speed(job, speed, errp);
}

/* QMP: request cancellation of the job running on @device.
 * Cancellation is asynchronous; completion is signalled later through
 * the job's completion callback. */
void qmp_block_job_cancel(const char *device, Error **errp)
{
    BlockJob *job = find_block_job(device);

    if (!job) {
        error_set(errp, QERR_DEVICE_NOT_ACTIVE, device);
        return;
    }

    trace_qmp_block_job_cancel(job);
    block_job_cancel(job);
}

/* bdrv_iterate() callback: if @bs has a job, append a BlockJobInfo
 * describing it to the list whose tail pointer is passed in @opaque. */
static void do_qmp_query_block_jobs_one(void *opaque, BlockDriverState *bs)
{
    BlockJobInfoList **prev = opaque;
    BlockJob *job = bs->job;

    if (job) {
        BlockJobInfoList *elem;
        BlockJobInfo *info = g_new(BlockJobInfo, 1);
        *info = (BlockJobInfo){
            .type   = g_strdup(job->job_type->job_type),
            .device = g_strdup(bdrv_get_device_name(bs)),
            .len    = job->len,
            .offset = job->offset,
            .speed  = job->speed,
        };

        elem = g_new0(BlockJobInfoList, 1);
        elem->value = info;
        /* Link onto the tail and advance the tail pointer. */
        (*prev)->next = elem;
        *prev = elem;
    }
}

/* QMP: return a list describing every active block job.
 * Never fails; an empty list is returned when no jobs are running. */
BlockJobInfoList *qmp_query_block_jobs(Error **errp)
{
    /* Dummy is a fake list element for holding the head pointer */
    BlockJobInfoList dummy = {};
    BlockJobInfoList *prev = &dummy;

    bdrv_iterate(do_qmp_query_block_jobs_one, &prev);

    return dummy.next;
}
gpl-2.0
platux/vlc
modules/gui/qt4/dialogs/gototime.cpp
45
3841
/***************************************************************************** * gototime.cpp : GotoTime and About dialogs **************************************************************************** * Copyright (C) 2007 the VideoLAN team * $Id$ * * Authors: Jean-Baptiste Kempf <jb (at) videolan.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. 
*****************************************************************************/

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include "dialogs/gototime.hpp"
#include "input_manager.hpp"

#include <QTabWidget>
#include <QLabel>
#include <QTimeEdit>
#include <QGroupBox>
#include <QDialogButtonBox>
#include <QPushButton>

/* Build the modeless "Go to Time" tool window: a HH:mm:ss time editor
 * with Go/Cancel buttons and a reset-to-zero button.  The previous
 * window geometry is restored from the Qt settings. */
GotoTimeDialog::GotoTimeDialog( intf_thread_t *_p_intf)
               : QVLCDialog( (QWidget*)_p_intf->p_sys->p_mi, _p_intf )
{
    setWindowFlags( Qt::Tool );
    setWindowTitle( qtr( "Go to Time" ) );
    setWindowRole( "vlc-goto-time" );

    QGridLayout *mainLayout = new QGridLayout( this );
    mainLayout->setSizeConstraint( QLayout::SetFixedSize );

    /* "Go" is the default button (triggered by Enter). */
    QPushButton *gotoButton = new QPushButton( qtr( "&Go" ) );
    QPushButton *cancelButton = new QPushButton( qtr( "&Cancel" ) );
    QDialogButtonBox *buttonBox = new QDialogButtonBox;
    gotoButton->setDefault( true );
    buttonBox->addButton( gotoButton, QDialogButtonBox::AcceptRole );
    buttonBox->addButton( cancelButton, QDialogButtonBox::RejectRole );

    QLabel *timeIntro = new QLabel( qtr( "Go to time" ) + ":" );
    timeIntro->setWordWrap( true );
    timeIntro->setAlignment( Qt::AlignCenter );

    /* timeEdit is a class member; it holds the target position. */
    timeEdit = new QTimeEdit();
    timeEdit->setDisplayFormat( "HH'H':mm'm':ss's'" );
    timeEdit->setAlignment( Qt::AlignRight );
    timeEdit->setSizePolicy( QSizePolicy::Expanding, QSizePolicy::Minimum );

    /* Small icon-only button that resets the editor to 00:00:00. */
    QPushButton *resetButton = new QPushButton( QIcon(":/update"), "" );
    resetButton->setToolTip( qtr("Reset") );

    /* Layout: [label][time editor][reset] on row 0, buttons on row 1. */
    mainLayout->addWidget( timeIntro, 0, 0, 1, 1 );
    mainLayout->addWidget( timeEdit, 0, 1, 1, 1 );
    mainLayout->addWidget( resetButton, 0, 2, 1, 1 );
    mainLayout->addWidget( buttonBox, 1, 0, 1, 3 );

    /* "Go" performs the seek via close(); Cancel just hides the dialog. */
    BUTTONACT( gotoButton, close() );
    BUTTONACT( cancelButton, cancel() );
    BUTTONACT( resetButton, reset() );

    QVLCTools::restoreWidgetPosition( p_intf, "gototimedialog", this );
}

/* Persist the window geometry on destruction. */
GotoTimeDialog::~GotoTimeDialog()
{
    QVLCTools::saveWidgetPosition( p_intf, "gototimedialog", this );
}

/* Show/hide the dialog.  When opening with an active input, preload the
 * editor with the current playback position (input "time" is in
 * microseconds; CLOCK_FREQ converts to seconds). */
void GotoTimeDialog::toggleVisible()
{
    reset();
    if ( !isVisible() && THEMIM->getIM()->hasInput() )
    {
        int64_t i_time = var_GetInteger( THEMIM->getInput(), "time" );
        timeEdit->setTime( timeEdit->time().addSecs( i_time / CLOCK_FREQ ) );
    }
    QVLCDialog::toggleVisible();
    if(isVisible()) activateWindow();
}

/* Cancel: discard the edited time and hide the dialog. */
void GotoTimeDialog::cancel()
{
    reset();
    toggleVisible();
}

/* "Go": seek the current input to the edited time, then hide.
 * msecsTo() yields milliseconds; * 1000 converts to the microsecond
 * unit expected by the input's "time" variable. */
void GotoTimeDialog::close()
{
    if ( THEMIM->getIM()->hasInput() )
    {
        int64_t i_time = (int64_t)
            ( QTime( 0, 0, 0 ).msecsTo( timeEdit->time() ) ) * 1000;
        var_SetInteger( THEMIM->getInput(), "time", i_time );
    }
    toggleVisible();
}

/* Reset the editor back to 00:00:00. */
void GotoTimeDialog::reset()
{
    timeEdit->setTime( QTime( 0, 0, 0) );
}
gpl-2.0
sigma-random/asuswrt-merlin
release/src/router/ffmpeg/libavcodec/arm/vp8dsp_init_arm.c
45
8608
/** * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdint.h> #include "libavcodec/vp8dsp.h" void ff_vp8_luma_dc_wht_neon(DCTELEM block[4][4][16], DCTELEM dc[16]); void ff_vp8_luma_dc_wht_dc_neon(DCTELEM block[4][4][16], DCTELEM dc[16]); void ff_vp8_idct_add_neon(uint8_t *dst, DCTELEM block[16], int stride); void ff_vp8_idct_dc_add_neon(uint8_t *dst, DCTELEM block[16], int stride); void ff_vp8_idct_dc_add4y_neon(uint8_t *dst, DCTELEM block[4][16], int stride); void ff_vp8_idct_dc_add4uv_neon(uint8_t *dst, DCTELEM block[4][16], int stride); void ff_vp8_v_loop_filter16_neon(uint8_t *dst, int stride, int flim_E, int flim_I, int hev_thresh); void ff_vp8_h_loop_filter16_neon(uint8_t *dst, int stride, int flim_E, int flim_I, int hev_thresh); void ff_vp8_v_loop_filter8uv_neon(uint8_t *dstU, uint8_t *dstV, int stride, int flim_E, int flim_I, int hev_thresh); void ff_vp8_h_loop_filter8uv_neon(uint8_t *dstU, uint8_t *dstV, int stride, int flim_E, int flim_I, int hev_thresh); void ff_vp8_v_loop_filter16_inner_neon(uint8_t *dst, int stride, int flim_E, int flim_I, int hev_thresh); void ff_vp8_h_loop_filter16_inner_neon(uint8_t *dst, int stride, int flim_E, int flim_I, int hev_thresh); void ff_vp8_v_loop_filter8uv_inner_neon(uint8_t *dstU, uint8_t *dstV, int stride, int flim_E, int 
flim_I, int hev_thresh);
void ff_vp8_h_loop_filter8uv_inner_neon(uint8_t *dstU, uint8_t *dstV,
                                        int stride, int flim_E, int flim_I,
                                        int hev_thresh);

/* "Simple" loop filter: single-threshold variant over a 16-pixel edge. */
void ff_vp8_v_loop_filter16_simple_neon(uint8_t *dst, int stride, int flim);
void ff_vp8_h_loop_filter16_simple_neon(uint8_t *dst, int stride, int flim);

/*
 * Prototype generator for the NEON motion-compensation functions.
 * Every MC function shares the (dst, dststride, src, srcstride, h, x, y)
 * signature used by the VP8DSPContext function tables below.
 */
#define VP8_MC(n) \
    void ff_put_vp8_##n##_neon(uint8_t *dst, int dststride, \
                               uint8_t *src, int srcstride, \
                               int h, int x, int y)

/* Declare the full sub-pel (epel) prototype set for one block width w. */
#define VP8_EPEL(w) \
    VP8_MC(pixels ## w); \
    VP8_MC(epel ## w ## _h4); \
    VP8_MC(epel ## w ## _h6); \
    VP8_MC(epel ## w ## _v4); \
    VP8_MC(epel ## w ## _h4v4); \
    VP8_MC(epel ## w ## _h6v4); \
    VP8_MC(epel ## w ## _v6); \
    VP8_MC(epel ## w ## _h4v6); \
    VP8_MC(epel ## w ## _h6v6)

VP8_EPEL(16);
VP8_EPEL(8);
VP8_EPEL(4);

/* Bilinear MC prototypes (h, v and hv variants per width). */
VP8_MC(bilin16_h);
VP8_MC(bilin16_v);
VP8_MC(bilin16_hv);
VP8_MC(bilin8_h);
VP8_MC(bilin8_v);
VP8_MC(bilin8_hv);
VP8_MC(bilin4_h);
VP8_MC(bilin4_v);
VP8_MC(bilin4_hv);

/*
 * Install the ARM NEON implementations into the VP8 DSP function tables.
 *
 * Judging from the names assigned below, the pixel tables are indexed as
 * [width][v][h]: width 0/1/2 -> 16/8/4 pixels; for the epel table the
 * second/third index selects no filter (0), 4-tap (1) or 6-tap (2).
 */
av_cold void ff_vp8dsp_init_arm(VP8DSPContext *dsp)
{
    if (HAVE_NEON) {
        /* Inverse transforms. */
        dsp->vp8_luma_dc_wht    = ff_vp8_luma_dc_wht_neon;
        dsp->vp8_luma_dc_wht_dc = ff_vp8_luma_dc_wht_dc_neon;
        dsp->vp8_idct_add       = ff_vp8_idct_add_neon;
        dsp->vp8_idct_dc_add    = ff_vp8_idct_dc_add_neon;
        dsp->vp8_idct_dc_add4y  = ff_vp8_idct_dc_add4y_neon;
        dsp->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_neon;

        /* Normal, inner and simple loop filters. */
        dsp->vp8_v_loop_filter16y       = ff_vp8_v_loop_filter16_neon;
        dsp->vp8_h_loop_filter16y       = ff_vp8_h_loop_filter16_neon;
        dsp->vp8_v_loop_filter8uv       = ff_vp8_v_loop_filter8uv_neon;
        dsp->vp8_h_loop_filter8uv       = ff_vp8_h_loop_filter8uv_neon;
        dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_neon;
        dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_neon;
        dsp->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_neon;
        dsp->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_neon;
        dsp->vp8_v_loop_filter_simple   = ff_vp8_v_loop_filter16_simple_neon;
        dsp->vp8_h_loop_filter_simple   = ff_vp8_h_loop_filter16_simple_neon;

        /*
         * 6-tap (epel) MC. NOTE(review): for width 16 only the 6-tap
         * slots are filled; the 4-tap epel16 slots keep whatever default
         * was installed earlier — presumably intentional, verify against
         * the generic vp8dsp init.
         */
        dsp->put_vp8_epel_pixels_tab[0][0][0] = ff_put_vp8_pixels16_neon;
        dsp->put_vp8_epel_pixels_tab[0][0][2] = ff_put_vp8_epel16_h6_neon;
        dsp->put_vp8_epel_pixels_tab[0][2][0] = ff_put_vp8_epel16_v6_neon;
        dsp->put_vp8_epel_pixels_tab[0][2][2] = ff_put_vp8_epel16_h6v6_neon;

        dsp->put_vp8_epel_pixels_tab[1][0][0] = ff_put_vp8_pixels8_neon;
        dsp->put_vp8_epel_pixels_tab[1][0][1] = ff_put_vp8_epel8_h4_neon;
        dsp->put_vp8_epel_pixels_tab[1][0][2] = ff_put_vp8_epel8_h6_neon;
        dsp->put_vp8_epel_pixels_tab[1][1][0] = ff_put_vp8_epel8_v4_neon;
        dsp->put_vp8_epel_pixels_tab[1][1][1] = ff_put_vp8_epel8_h4v4_neon;
        dsp->put_vp8_epel_pixels_tab[1][1][2] = ff_put_vp8_epel8_h6v4_neon;
        dsp->put_vp8_epel_pixels_tab[1][2][0] = ff_put_vp8_epel8_v6_neon;
        dsp->put_vp8_epel_pixels_tab[1][2][1] = ff_put_vp8_epel8_h4v6_neon;
        dsp->put_vp8_epel_pixels_tab[1][2][2] = ff_put_vp8_epel8_h6v6_neon;

        dsp->put_vp8_epel_pixels_tab[2][0][0] = ff_put_vp8_pixels4_neon;
        dsp->put_vp8_epel_pixels_tab[2][0][1] = ff_put_vp8_epel4_h4_neon;
        dsp->put_vp8_epel_pixels_tab[2][0][2] = ff_put_vp8_epel4_h6_neon;
        dsp->put_vp8_epel_pixels_tab[2][1][0] = ff_put_vp8_epel4_v4_neon;
        dsp->put_vp8_epel_pixels_tab[2][1][1] = ff_put_vp8_epel4_h4v4_neon;
        dsp->put_vp8_epel_pixels_tab[2][1][2] = ff_put_vp8_epel4_h6v4_neon;
        dsp->put_vp8_epel_pixels_tab[2][2][0] = ff_put_vp8_epel4_v6_neon;
        dsp->put_vp8_epel_pixels_tab[2][2][1] = ff_put_vp8_epel4_h4v6_neon;
        dsp->put_vp8_epel_pixels_tab[2][2][2] = ff_put_vp8_epel4_h6v6_neon;

        /*
         * Bilinear MC: the same _h/_v/_hv routine serves both the 4-tap
         * and 6-tap index slots (indices 1 and 2 map to one function).
         */
        dsp->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_neon;
        dsp->put_vp8_bilinear_pixels_tab[0][0][1] = ff_put_vp8_bilin16_h_neon;
        dsp->put_vp8_bilinear_pixels_tab[0][0][2] = ff_put_vp8_bilin16_h_neon;
        dsp->put_vp8_bilinear_pixels_tab[0][1][0] = ff_put_vp8_bilin16_v_neon;
        dsp->put_vp8_bilinear_pixels_tab[0][1][1] = ff_put_vp8_bilin16_hv_neon;
        dsp->put_vp8_bilinear_pixels_tab[0][1][2] = ff_put_vp8_bilin16_hv_neon;
        dsp->put_vp8_bilinear_pixels_tab[0][2][0] = ff_put_vp8_bilin16_v_neon;
        dsp->put_vp8_bilinear_pixels_tab[0][2][1] = ff_put_vp8_bilin16_hv_neon;
        dsp->put_vp8_bilinear_pixels_tab[0][2][2] = ff_put_vp8_bilin16_hv_neon;

        dsp->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_neon;
        dsp->put_vp8_bilinear_pixels_tab[1][0][1] = ff_put_vp8_bilin8_h_neon;
        dsp->put_vp8_bilinear_pixels_tab[1][0][2] = ff_put_vp8_bilin8_h_neon;
        dsp->put_vp8_bilinear_pixels_tab[1][1][0] = ff_put_vp8_bilin8_v_neon;
        dsp->put_vp8_bilinear_pixels_tab[1][1][1] = ff_put_vp8_bilin8_hv_neon;
        dsp->put_vp8_bilinear_pixels_tab[1][1][2] = ff_put_vp8_bilin8_hv_neon;
        dsp->put_vp8_bilinear_pixels_tab[1][2][0] = ff_put_vp8_bilin8_v_neon;
        dsp->put_vp8_bilinear_pixels_tab[1][2][1] = ff_put_vp8_bilin8_hv_neon;
        dsp->put_vp8_bilinear_pixels_tab[1][2][2] = ff_put_vp8_bilin8_hv_neon;

        dsp->put_vp8_bilinear_pixels_tab[2][0][0] = ff_put_vp8_pixels4_neon;
        dsp->put_vp8_bilinear_pixels_tab[2][0][1] = ff_put_vp8_bilin4_h_neon;
        dsp->put_vp8_bilinear_pixels_tab[2][0][2] = ff_put_vp8_bilin4_h_neon;
        dsp->put_vp8_bilinear_pixels_tab[2][1][0] = ff_put_vp8_bilin4_v_neon;
        dsp->put_vp8_bilinear_pixels_tab[2][1][1] = ff_put_vp8_bilin4_hv_neon;
        dsp->put_vp8_bilinear_pixels_tab[2][1][2] = ff_put_vp8_bilin4_hv_neon;
        dsp->put_vp8_bilinear_pixels_tab[2][2][0] = ff_put_vp8_bilin4_v_neon;
        dsp->put_vp8_bilinear_pixels_tab[2][2][1] = ff_put_vp8_bilin4_hv_neon;
        dsp->put_vp8_bilinear_pixels_tab[2][2][2] = ff_put_vp8_bilin4_hv_neon;
    }
}
gpl-2.0
tezet/kernel-milestone2
drivers/i2c/busses/i2c-iop3xx.c
557
13219
/* ------------------------------------------------------------------------- */
/* i2c-iop3xx.c i2c driver algorithms for Intel XScale IOP3xx & IXP46x       */
/* ------------------------------------------------------------------------- */
/* Copyright (C) 2003 Peter Milne, D-TACQ Solutions Ltd
 * <Peter dot Milne at D hyphen TACQ dot com>
 *
 * With acknowledgements to i2c-algo-ibm_ocp.c by
 * Ian DaSilva, MontaVista Software, Inc. idasilva@mvista.com
 *
 * And i2c-algo-pcf.c, which was created by Simon G. Vogl and Hans Berglund:
 *
 * Copyright (C) 1995-1997 Simon G. Vogl, 1998-2000 Hans Berglund
 *
 * And which acknowledged Kyösti Mälkki <kmalkki@cc.hut.fi>,
 * Frodo Looijaard <frodol@dds.nl>, Martin Bailey<mbailey@littlefeet-inc.com>
 *
 * Major cleanup by Deepak Saxena <dsaxena@plexity.net>, 01/2005:
 *
 * - Use driver model to pass per-chip info instead of hardcoding and #ifdefs
 * - Use ioremap/__raw_readl/__raw_writel instead of direct dereference
 * - Make it work with IXP46x chips
 * - Cleanup function names, coding style, etc
 *
 * - writing to slave address causes latchup on iop331.
 *   fix: driver refuses to address self.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>

#include <asm/io.h>

#include "i2c-iop3xx.h"

/* global unit counter: hands out a sequential id per probed adapter */
static int i2c_id;

/*
 * Build the on-the-wire address byte for a message: the 7-bit slave
 * address goes in bits 7..1 and bit 0 carries the R/W direction
 * (1 = read, 0 = write).
 */
static inline unsigned char
iic_cook_addr(struct i2c_msg *msg)
{
	unsigned char addr;

	addr = (msg->addr << 1);

	if (msg->flags & I2C_M_RD)
		addr |= 1;

	/*
	 * I2C_M_REV_DIR_ADDR: protocol-testing flag that inverts the
	 * Read/Write bit.
	 */
	if (msg->flags & I2C_M_REV_DIR_ADDR)
		addr ^= 1;

	return addr;
}

/*
 * Reset the I2C unit: assert unit reset, clear all status bits, then
 * drop the control register. Follows devman 9.3.
 */
static void
iop3xx_i2c_reset(struct i2c_algo_iop3xx_data *iop3xx_adap)
{
	/* Follows devman 9.3 */
	__raw_writel(IOP3XX_ICR_UNIT_RESET, iop3xx_adap->ioaddr + CR_OFFSET);
	__raw_writel(IOP3XX_ISR_CLEARBITS, iop3xx_adap->ioaddr + SR_OFFSET);
	__raw_writel(0, iop3xx_adap->ioaddr + CR_OFFSET);
}

/*
 * Enable the unit and the interrupt sources this driver uses, and record
 * in SR_enabled which status bits the irq handler should react to.
 */
static void
iop3xx_i2c_enable(struct i2c_algo_iop3xx_data *iop3xx_adap)
{
	u32 cr = IOP3XX_ICR_GCD | IOP3XX_ICR_SCLEN | IOP3XX_ICR_UE;

	/*
	 * Every time unit enable is asserted, GPOD needs to be cleared
	 * on IOP3XX to avoid data corruption on the bus.
	 */
#if defined(CONFIG_ARCH_IOP32X) || defined(CONFIG_ARCH_IOP33X)
	/* Bus 0 and bus 1 use different GPIO line pairs. */
	if (iop3xx_adap->id == 0) {
		gpio_line_set(IOP3XX_GPIO_LINE(7), GPIO_LOW);
		gpio_line_set(IOP3XX_GPIO_LINE(6), GPIO_LOW);
	} else {
		gpio_line_set(IOP3XX_GPIO_LINE(5), GPIO_LOW);
		gpio_line_set(IOP3XX_GPIO_LINE(4), GPIO_LOW);
	}
#endif
	/* NB SR bits not same position as CR IE bits :-( */
	iop3xx_adap->SR_enabled =
		IOP3XX_ISR_ALD | IOP3XX_ISR_BERRD |
		IOP3XX_ISR_RXFULL | IOP3XX_ISR_TXEMPTY;

	cr |= IOP3XX_ICR_ALD_IE | IOP3XX_ICR_BERR_IE |
		IOP3XX_ICR_RXFULL_IE | IOP3XX_ICR_TXEMPTY_IE;

	__raw_writel(cr, iop3xx_adap->ioaddr + CR_OFFSET);
}

/*
 * Clear the transaction-control bits (start/stop/transfer-byte and SCL
 * enable) after a transfer, leaving the unit itself enabled.
 */
static void
iop3xx_i2c_transaction_cleanup(struct i2c_algo_iop3xx_data *iop3xx_adap)
{
	unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET);

	cr &= ~(IOP3XX_ICR_MSTART | IOP3XX_ICR_TBYTE |
		IOP3XX_ICR_MSTOP | IOP3XX_ICR_SCLEN);

	__raw_writel(cr, iop3xx_adap->ioaddr + CR_OFFSET);
}

/*
 * NB: the handler has to clear the source of the interrupt!
* Then it passes the SR flags of interest to BH via adap data */ static irqreturn_t iop3xx_i2c_irq_handler(int this_irq, void *dev_id) { struct i2c_algo_iop3xx_data *iop3xx_adap = dev_id; u32 sr = __raw_readl(iop3xx_adap->ioaddr + SR_OFFSET); if ((sr &= iop3xx_adap->SR_enabled)) { __raw_writel(sr, iop3xx_adap->ioaddr + SR_OFFSET); iop3xx_adap->SR_received |= sr; wake_up_interruptible(&iop3xx_adap->waitq); } return IRQ_HANDLED; } /* check all error conditions, clear them , report most important */ static int iop3xx_i2c_error(u32 sr) { int rc = 0; if ((sr & IOP3XX_ISR_BERRD)) { if ( !rc ) rc = -I2C_ERR_BERR; } if ((sr & IOP3XX_ISR_ALD)) { if ( !rc ) rc = -I2C_ERR_ALD; } return rc; } static inline u32 iop3xx_i2c_get_srstat(struct i2c_algo_iop3xx_data *iop3xx_adap) { unsigned long flags; u32 sr; spin_lock_irqsave(&iop3xx_adap->lock, flags); sr = iop3xx_adap->SR_received; iop3xx_adap->SR_received = 0; spin_unlock_irqrestore(&iop3xx_adap->lock, flags); return sr; } /* * sleep until interrupted, then recover and analyse the SR * saved by handler */ typedef int (* compare_func)(unsigned test, unsigned mask); /* returns 1 on correct comparison */ static int iop3xx_i2c_wait_event(struct i2c_algo_iop3xx_data *iop3xx_adap, unsigned flags, unsigned* status, compare_func compare) { unsigned sr = 0; int interrupted; int done; int rc = 0; do { interrupted = wait_event_interruptible_timeout ( iop3xx_adap->waitq, (done = compare( sr = iop3xx_i2c_get_srstat(iop3xx_adap) ,flags )), 1 * HZ; ); if ((rc = iop3xx_i2c_error(sr)) < 0) { *status = sr; return rc; } else if (!interrupted) { *status = sr; return -ETIMEDOUT; } } while(!done); *status = sr; return 0; } /* * Concrete compare_funcs */ static int all_bits_clear(unsigned test, unsigned mask) { return (test & mask) == 0; } static int any_bits_set(unsigned test, unsigned mask) { return (test & mask) != 0; } static int iop3xx_i2c_wait_tx_done(struct i2c_algo_iop3xx_data *iop3xx_adap, int *status) { return iop3xx_i2c_wait_event( 
iop3xx_adap, IOP3XX_ISR_TXEMPTY | IOP3XX_ISR_ALD | IOP3XX_ISR_BERRD, status, any_bits_set); } static int iop3xx_i2c_wait_rx_done(struct i2c_algo_iop3xx_data *iop3xx_adap, int *status) { return iop3xx_i2c_wait_event( iop3xx_adap, IOP3XX_ISR_RXFULL | IOP3XX_ISR_ALD | IOP3XX_ISR_BERRD, status, any_bits_set); } static int iop3xx_i2c_wait_idle(struct i2c_algo_iop3xx_data *iop3xx_adap, int *status) { return iop3xx_i2c_wait_event( iop3xx_adap, IOP3XX_ISR_UNITBUSY, status, all_bits_clear); } static int iop3xx_i2c_send_target_addr(struct i2c_algo_iop3xx_data *iop3xx_adap, struct i2c_msg* msg) { unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET); int status; int rc; /* avoid writing to my slave address (hangs on 80331), * forbidden in Intel developer manual */ if (msg->addr == MYSAR) { return -EBUSY; } __raw_writel(iic_cook_addr(msg), iop3xx_adap->ioaddr + DBR_OFFSET); cr &= ~(IOP3XX_ICR_MSTOP | IOP3XX_ICR_NACK); cr |= IOP3XX_ICR_MSTART | IOP3XX_ICR_TBYTE; __raw_writel(cr, iop3xx_adap->ioaddr + CR_OFFSET); rc = iop3xx_i2c_wait_tx_done(iop3xx_adap, &status); return rc; } static int iop3xx_i2c_write_byte(struct i2c_algo_iop3xx_data *iop3xx_adap, char byte, int stop) { unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET); int status; int rc = 0; __raw_writel(byte, iop3xx_adap->ioaddr + DBR_OFFSET); cr &= ~IOP3XX_ICR_MSTART; if (stop) { cr |= IOP3XX_ICR_MSTOP; } else { cr &= ~IOP3XX_ICR_MSTOP; } cr |= IOP3XX_ICR_TBYTE; __raw_writel(cr, iop3xx_adap->ioaddr + CR_OFFSET); rc = iop3xx_i2c_wait_tx_done(iop3xx_adap, &status); return rc; } static int iop3xx_i2c_read_byte(struct i2c_algo_iop3xx_data *iop3xx_adap, char* byte, int stop) { unsigned long cr = __raw_readl(iop3xx_adap->ioaddr + CR_OFFSET); int status; int rc = 0; cr &= ~IOP3XX_ICR_MSTART; if (stop) { cr |= IOP3XX_ICR_MSTOP | IOP3XX_ICR_NACK; } else { cr &= ~(IOP3XX_ICR_MSTOP | IOP3XX_ICR_NACK); } cr |= IOP3XX_ICR_TBYTE; __raw_writel(cr, iop3xx_adap->ioaddr + CR_OFFSET); rc = 
iop3xx_i2c_wait_rx_done(iop3xx_adap, &status); *byte = __raw_readl(iop3xx_adap->ioaddr + DBR_OFFSET); return rc; } static int iop3xx_i2c_writebytes(struct i2c_adapter *i2c_adap, const char *buf, int count) { struct i2c_algo_iop3xx_data *iop3xx_adap = i2c_adap->algo_data; int ii; int rc = 0; for (ii = 0; rc == 0 && ii != count; ++ii) rc = iop3xx_i2c_write_byte(iop3xx_adap, buf[ii], ii==count-1); return rc; } static int iop3xx_i2c_readbytes(struct i2c_adapter *i2c_adap, char *buf, int count) { struct i2c_algo_iop3xx_data *iop3xx_adap = i2c_adap->algo_data; int ii; int rc = 0; for (ii = 0; rc == 0 && ii != count; ++ii) rc = iop3xx_i2c_read_byte(iop3xx_adap, &buf[ii], ii==count-1); return rc; } /* * Description: This function implements combined transactions. Combined * transactions consist of combinations of reading and writing blocks of data. * FROM THE SAME ADDRESS * Each transfer (i.e. a read or a write) is separated by a repeated start * condition. */ static int iop3xx_i2c_handle_msg(struct i2c_adapter *i2c_adap, struct i2c_msg* pmsg) { struct i2c_algo_iop3xx_data *iop3xx_adap = i2c_adap->algo_data; int rc; rc = iop3xx_i2c_send_target_addr(iop3xx_adap, pmsg); if (rc < 0) { return rc; } if ((pmsg->flags&I2C_M_RD)) { return iop3xx_i2c_readbytes(i2c_adap, pmsg->buf, pmsg->len); } else { return iop3xx_i2c_writebytes(i2c_adap, pmsg->buf, pmsg->len); } } /* * master_xfer() - main read/write entry */ static int iop3xx_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct i2c_algo_iop3xx_data *iop3xx_adap = i2c_adap->algo_data; int im = 0; int ret = 0; int status; iop3xx_i2c_wait_idle(iop3xx_adap, &status); iop3xx_i2c_reset(iop3xx_adap); iop3xx_i2c_enable(iop3xx_adap); for (im = 0; ret == 0 && im != num; im++) { ret = iop3xx_i2c_handle_msg(i2c_adap, &msgs[im]); } iop3xx_i2c_transaction_cleanup(iop3xx_adap); if(ret) return ret; return im; } static u32 iop3xx_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } 
/* Algorithm vtable handed to the i2c core. */
static const struct i2c_algorithm iop3xx_i2c_algo = {
	.master_xfer	= iop3xx_i2c_master_xfer,
	.functionality	= iop3xx_i2c_func,
};

/*
 * Tear down one adapter: mask the unit's interrupt sources, unmap the
 * registers, release the MMIO region and free the allocations made in
 * probe().
 *
 * NOTE(review): neither i2c_del_adapter() nor free_irq() is called here,
 * although probe() registers the adapter and requests the irq — verify
 * against the i2c core / irq subsystem requirements.
 */
static int
iop3xx_i2c_remove(struct platform_device *pdev)
{
	struct i2c_adapter *padapter = platform_get_drvdata(pdev);
	struct i2c_algo_iop3xx_data *adapter_data =
		(struct i2c_algo_iop3xx_data *)padapter->algo_data;
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	unsigned long cr = __raw_readl(adapter_data->ioaddr + CR_OFFSET);

	/*
	 * Disable the actual HW unit
	 */
	cr &= ~(IOP3XX_ICR_ALD_IE | IOP3XX_ICR_BERR_IE |
		IOP3XX_ICR_RXFULL_IE | IOP3XX_ICR_TXEMPTY_IE);
	__raw_writel(cr, adapter_data->ioaddr + CR_OFFSET);

	iounmap((void __iomem *)adapter_data->ioaddr);
	release_mem_region(res->start, IOP3XX_I2C_IO_SIZE);
	kfree(adapter_data);
	kfree(padapter);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

/*
 * Probe one platform device: allocate the adapter and per-chip data,
 * claim and map the MMIO region, hook the irq, initialise the hardware
 * and register the numbered adapter with the i2c core.
 * Error paths unwind via the labels at the bottom.
 */
static int
iop3xx_i2c_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret, irq;
	struct i2c_adapter *new_adapter;
	struct i2c_algo_iop3xx_data *adapter_data;

	new_adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
	if (!new_adapter) {
		ret = -ENOMEM;
		goto out;
	}

	adapter_data = kzalloc(sizeof(struct i2c_algo_iop3xx_data), GFP_KERNEL);
	if (!adapter_data) {
		ret = -ENOMEM;
		goto free_adapter;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto free_both;
	}

	if (!request_mem_region(res->start, IOP3XX_I2C_IO_SIZE, pdev->name)) {
		ret = -EBUSY;
		goto free_both;
	}

	/* set the adapter enumeration # */
	adapter_data->id = i2c_id++;

	/*
	 * NOTE(review): the ioremap() cookie is stored as a plain u32 —
	 * works only where pointers are 32-bit; verify i2c-iop3xx.h.
	 */
	adapter_data->ioaddr = (u32)ioremap(res->start, IOP3XX_I2C_IO_SIZE);
	if (!adapter_data->ioaddr) {
		ret = -ENOMEM;
		goto release_region;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto unmap;
	}
	/*
	 * NOTE(review): a failing request_irq()'s own error code is
	 * replaced by -EIO here.
	 */
	ret = request_irq(irq, iop3xx_i2c_irq_handler, 0,
				pdev->name, adapter_data);
	if (ret) {
		ret = -EIO;
		goto unmap;
	}

	/*
	 * memcpy does not append a NUL, but new_adapter came from
	 * kzalloc so the name stays terminated — assumes
	 * strlen(pdev->name) < sizeof(new_adapter->name); TODO confirm.
	 */
	memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
	new_adapter->owner = THIS_MODULE;
	new_adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
	new_adapter->dev.parent = &pdev->dev;
	new_adapter->nr = pdev->id;

	/*
	 * Default values...should these come in from board code?
	 */
	new_adapter->timeout = HZ;
	new_adapter->algo = &iop3xx_i2c_algo;

	init_waitqueue_head(&adapter_data->waitq);
	spin_lock_init(&adapter_data->lock);

	iop3xx_i2c_reset(adapter_data);
	iop3xx_i2c_enable(adapter_data);

	platform_set_drvdata(pdev, new_adapter);
	new_adapter->algo_data = adapter_data;

	/*
	 * NOTE(review): the return value of i2c_add_numbered_adapter()
	 * is ignored; registration failure goes unnoticed — verify.
	 */
	i2c_add_numbered_adapter(new_adapter);

	return 0;

unmap:
	iounmap((void __iomem *)adapter_data->ioaddr);

release_region:
	release_mem_region(res->start, IOP3XX_I2C_IO_SIZE);

free_both:
	kfree(adapter_data);

free_adapter:
	kfree(new_adapter);

out:
	return ret;
}

static struct platform_driver iop3xx_i2c_driver = {
	.probe		= iop3xx_i2c_probe,
	.remove		= iop3xx_i2c_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "IOP3xx-I2C",
	},
};

static int __init
i2c_iop3xx_init (void)
{
	return platform_driver_register(&iop3xx_i2c_driver);
}

static void __exit
i2c_iop3xx_exit (void)
{
	platform_driver_unregister(&iop3xx_i2c_driver);
	return;
}

module_init (i2c_iop3xx_init);
module_exit (i2c_iop3xx_exit);

MODULE_AUTHOR("D-TACQ Solutions Ltd <www.d-tacq.com>");
MODULE_DESCRIPTION("IOP3xx iic algorithm and driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:IOP3xx-I2C");
gpl-2.0
TeamWin/android_kernel_samsung_sprat
drivers/media/pci/bt8xx/bttv-cards.c
2093
149880
/* bttv-cards.c this file has configuration informations - card-specific stuff like the big tvcards array for the most part Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) & Marcus Metzler (mocm@thp.uni-koeln.de) (c) 1999-2001 Gerd Knorr <kraxel@goldbach.in-berlin.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/delay.h> #include <linux/module.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/firmware.h> #include <net/checksum.h> #include <asm/unaligned.h> #include <asm/io.h> #include "bttvp.h" #include <media/v4l2-common.h> #include <media/tvaudio.h> #include "bttv-audio-hook.h" /* fwd decl */ static void boot_msp34xx(struct bttv *btv, int pin); static void hauppauge_eeprom(struct bttv *btv); static void avermedia_eeprom(struct bttv *btv); static void osprey_eeprom(struct bttv *btv, const u8 ee[256]); static void modtec_eeprom(struct bttv *btv); static void init_PXC200(struct bttv *btv); static void init_RTV24(struct bttv *btv); static void rv605_muxsel(struct bttv *btv, unsigned int input); static void eagle_muxsel(struct bttv *btv, unsigned int input); static void xguard_muxsel(struct bttv *btv, unsigned int input); static void ivc120_muxsel(struct bttv *btv, unsigned int input); static void gvc1100_muxsel(struct bttv 
*btv, unsigned int input); static void PXC200_muxsel(struct bttv *btv, unsigned int input); static void picolo_tetra_muxsel(struct bttv *btv, unsigned int input); static void picolo_tetra_init(struct bttv *btv); static void tibetCS16_muxsel(struct bttv *btv, unsigned int input); static void tibetCS16_init(struct bttv *btv); static void kodicom4400r_muxsel(struct bttv *btv, unsigned int input); static void kodicom4400r_init(struct bttv *btv); static void sigmaSLC_muxsel(struct bttv *btv, unsigned int input); static void sigmaSQ_muxsel(struct bttv *btv, unsigned int input); static void geovision_muxsel(struct bttv *btv, unsigned int input); static void phytec_muxsel(struct bttv *btv, unsigned int input); static void gv800s_muxsel(struct bttv *btv, unsigned int input); static void gv800s_init(struct bttv *btv); static void td3116_muxsel(struct bttv *btv, unsigned int input); static int terratec_active_radio_upgrade(struct bttv *btv); static int tea5757_read(struct bttv *btv); static int tea5757_write(struct bttv *btv, int value); static void identify_by_eeprom(struct bttv *btv, unsigned char eeprom_data[256]); static int pvr_boot(struct bttv *btv); /* config variables */ static unsigned int triton1; static unsigned int vsfx; static unsigned int latency = UNSET; int no_overlay=-1; static unsigned int card[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET }; static unsigned int pll[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET }; static unsigned int tuner[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET }; static unsigned int svhs[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET }; static unsigned int remote[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET }; static unsigned int audiodev[BTTV_MAX]; static unsigned int saa6588[BTTV_MAX]; static struct bttv *master[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = NULL }; static unsigned int autoload = UNSET; static unsigned int gpiomask = UNSET; static unsigned int audioall = UNSET; static unsigned int audiomux[5] = { [ 0 ... 
4 ] = UNSET }; /* insmod options */ module_param(triton1, int, 0444); module_param(vsfx, int, 0444); module_param(no_overlay, int, 0444); module_param(latency, int, 0444); module_param(gpiomask, int, 0444); module_param(audioall, int, 0444); module_param(autoload, int, 0444); module_param_array(card, int, NULL, 0444); module_param_array(pll, int, NULL, 0444); module_param_array(tuner, int, NULL, 0444); module_param_array(svhs, int, NULL, 0444); module_param_array(remote, int, NULL, 0444); module_param_array(audiodev, int, NULL, 0444); module_param_array(audiomux, int, NULL, 0444); MODULE_PARM_DESC(triton1,"set ETBF pci config bit " "[enable bug compatibility for triton1 + others]"); MODULE_PARM_DESC(vsfx,"set VSFX pci config bit " "[yet another chipset flaw workaround]"); MODULE_PARM_DESC(latency,"pci latency timer"); MODULE_PARM_DESC(card,"specify TV/grabber card model, see CARDLIST file for a list"); MODULE_PARM_DESC(pll,"specify installed crystal (0=none, 28=28 MHz, 35=35 MHz)"); MODULE_PARM_DESC(tuner,"specify installed tuner type"); MODULE_PARM_DESC(autoload, "obsolete option, please do not use anymore"); MODULE_PARM_DESC(audiodev, "specify audio device:\n" "\t\t-1 = no audio\n" "\t\t 0 = autodetect (default)\n" "\t\t 1 = msp3400\n" "\t\t 2 = tda7432\n" "\t\t 3 = tvaudio"); MODULE_PARM_DESC(saa6588, "if 1, then load the saa6588 RDS module, default (0) is to use the card definition."); MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)" " [some VIA/SIS chipsets are known to have problem with overlay]"); /* ----------------------------------------------------------------------- */ /* list of card IDs for bt878+ cards */ static struct CARD { unsigned id; int cardnr; char *name; } cards[] = { { 0x13eb0070, BTTV_BOARD_HAUPPAUGE878, "Hauppauge WinTV" }, { 0x39000070, BTTV_BOARD_HAUPPAUGE878, "Hauppauge WinTV-D" }, { 0x45000070, BTTV_BOARD_HAUPPAUGEPVR, "Hauppauge WinTV/PVR" }, { 0xff000070, BTTV_BOARD_OSPREY1x0, "Osprey-100" }, { 
0xff010070, BTTV_BOARD_OSPREY2x0_SVID,"Osprey-200" }, { 0xff020070, BTTV_BOARD_OSPREY500, "Osprey-500" }, { 0xff030070, BTTV_BOARD_OSPREY2000, "Osprey-2000" }, { 0xff040070, BTTV_BOARD_OSPREY540, "Osprey-540" }, { 0xff070070, BTTV_BOARD_OSPREY440, "Osprey-440" }, { 0x00011002, BTTV_BOARD_ATI_TVWONDER, "ATI TV Wonder" }, { 0x00031002, BTTV_BOARD_ATI_TVWONDERVE,"ATI TV Wonder/VE" }, { 0x6606107d, BTTV_BOARD_WINFAST2000, "Leadtek WinFast TV 2000" }, { 0x6607107d, BTTV_BOARD_WINFASTVC100, "Leadtek WinFast VC 100" }, { 0x6609107d, BTTV_BOARD_WINFAST2000, "Leadtek TV 2000 XP" }, { 0x263610b4, BTTV_BOARD_STB2, "STB TV PCI FM, Gateway P/N 6000704" }, { 0x264510b4, BTTV_BOARD_STB2, "STB TV PCI FM, Gateway P/N 6000704" }, { 0x402010fc, BTTV_BOARD_GVBCTV3PCI, "I-O Data Co. GV-BCTV3/PCI" }, { 0x405010fc, BTTV_BOARD_GVBCTV4PCI, "I-O Data Co. GV-BCTV4/PCI" }, { 0x407010fc, BTTV_BOARD_GVBCTV5PCI, "I-O Data Co. GV-BCTV5/PCI" }, { 0xd01810fc, BTTV_BOARD_GVBCTV5PCI, "I-O Data Co. GV-BCTV5/PCI" }, { 0x001211bd, BTTV_BOARD_PINNACLE, "Pinnacle PCTV" }, /* some cards ship with byteswapped IDs ... */ { 0x1200bd11, BTTV_BOARD_PINNACLE, "Pinnacle PCTV [bswap]" }, { 0xff00bd11, BTTV_BOARD_PINNACLE, "Pinnacle PCTV [bswap]" }, /* this seems to happen as well ... 
*/ { 0xff1211bd, BTTV_BOARD_PINNACLE, "Pinnacle PCTV" }, { 0x3000121a, BTTV_BOARD_VOODOOTV_200, "3Dfx VoodooTV 200" }, { 0x263710b4, BTTV_BOARD_VOODOOTV_FM, "3Dfx VoodooTV FM" }, { 0x3060121a, BTTV_BOARD_STB2, "3Dfx VoodooTV 100/ STB OEM" }, { 0x3000144f, BTTV_BOARD_MAGICTVIEW063, "(Askey Magic/others) TView99 CPH06x" }, { 0xa005144f, BTTV_BOARD_MAGICTVIEW063, "CPH06X TView99-Card" }, { 0x3002144f, BTTV_BOARD_MAGICTVIEW061, "(Askey Magic/others) TView99 CPH05x" }, { 0x3005144f, BTTV_BOARD_MAGICTVIEW061, "(Askey Magic/others) TView99 CPH061/06L (T1/LC)" }, { 0x5000144f, BTTV_BOARD_MAGICTVIEW061, "Askey CPH050" }, { 0x300014ff, BTTV_BOARD_MAGICTVIEW061, "TView 99 (CPH061)" }, { 0x300214ff, BTTV_BOARD_PHOEBE_TVMAS, "Phoebe TV Master (CPH060)" }, { 0x00011461, BTTV_BOARD_AVPHONE98, "AVerMedia TVPhone98" }, { 0x00021461, BTTV_BOARD_AVERMEDIA98, "AVermedia TVCapture 98" }, { 0x00031461, BTTV_BOARD_AVPHONE98, "AVerMedia TVPhone98" }, { 0x00041461, BTTV_BOARD_AVERMEDIA98, "AVerMedia TVCapture 98" }, { 0x03001461, BTTV_BOARD_AVERMEDIA98, "VDOMATE TV TUNER CARD" }, { 0x1117153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue (Philips PAL B/G)" }, { 0x1118153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue (Temic PAL B/G)" }, { 0x1119153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue (Philips PAL I)" }, { 0x111a153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue (Temic PAL I)" }, { 0x1123153b, BTTV_BOARD_TERRATVRADIO, "Terratec TV Radio+" }, { 0x1127153b, BTTV_BOARD_TERRATV, "Terratec TV+ (V1.05)" }, /* clashes with FlyVideo *{ 0x18521852, BTTV_BOARD_TERRATV, "Terratec TV+ (V1.10)" }, */ { 0x1134153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue (LR102)" }, { 0x1135153b, BTTV_BOARD_TERRATVALUER, "Terratec TValue Radio" }, /* LR102 */ { 0x5018153b, BTTV_BOARD_TERRATVALUE, "Terratec TValue" }, /* ?? */ { 0xff3b153b, BTTV_BOARD_TERRATVALUER, "Terratec TValue Radio" }, /* ?? 
*/ { 0x400015b0, BTTV_BOARD_ZOLTRIX_GENIE, "Zoltrix Genie TV" }, { 0x400a15b0, BTTV_BOARD_ZOLTRIX_GENIE, "Zoltrix Genie TV" }, { 0x400d15b0, BTTV_BOARD_ZOLTRIX_GENIE, "Zoltrix Genie TV / Radio" }, { 0x401015b0, BTTV_BOARD_ZOLTRIX_GENIE, "Zoltrix Genie TV / Radio" }, { 0x401615b0, BTTV_BOARD_ZOLTRIX_GENIE, "Zoltrix Genie TV / Radio" }, { 0x1430aa00, BTTV_BOARD_PV143, "Provideo PV143A" }, { 0x1431aa00, BTTV_BOARD_PV143, "Provideo PV143B" }, { 0x1432aa00, BTTV_BOARD_PV143, "Provideo PV143C" }, { 0x1433aa00, BTTV_BOARD_PV143, "Provideo PV143D" }, { 0x1433aa03, BTTV_BOARD_PV143, "Security Eyes" }, { 0x1460aa00, BTTV_BOARD_PV150, "Provideo PV150A-1" }, { 0x1461aa01, BTTV_BOARD_PV150, "Provideo PV150A-2" }, { 0x1462aa02, BTTV_BOARD_PV150, "Provideo PV150A-3" }, { 0x1463aa03, BTTV_BOARD_PV150, "Provideo PV150A-4" }, { 0x1464aa04, BTTV_BOARD_PV150, "Provideo PV150B-1" }, { 0x1465aa05, BTTV_BOARD_PV150, "Provideo PV150B-2" }, { 0x1466aa06, BTTV_BOARD_PV150, "Provideo PV150B-3" }, { 0x1467aa07, BTTV_BOARD_PV150, "Provideo PV150B-4" }, { 0xa132ff00, BTTV_BOARD_IVC100, "IVC-100" }, { 0xa1550000, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550001, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550002, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550003, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550100, BTTV_BOARD_IVC200, "IVC-200G" }, { 0xa1550101, BTTV_BOARD_IVC200, "IVC-200G" }, { 0xa1550102, BTTV_BOARD_IVC200, "IVC-200G" }, { 0xa1550103, BTTV_BOARD_IVC200, "IVC-200G" }, { 0xa1550800, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550801, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550802, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa1550803, BTTV_BOARD_IVC200, "IVC-200" }, { 0xa182ff00, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff01, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff02, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff03, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff04, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff05, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff06, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff07, 
BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff08, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff09, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0a, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0b, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0c, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0d, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0e, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xa182ff0f, BTTV_BOARD_IVC120, "IVC-120G" }, { 0xf0500000, BTTV_BOARD_IVCE8784, "IVCE-8784" }, { 0xf0500001, BTTV_BOARD_IVCE8784, "IVCE-8784" }, { 0xf0500002, BTTV_BOARD_IVCE8784, "IVCE-8784" }, { 0xf0500003, BTTV_BOARD_IVCE8784, "IVCE-8784" }, { 0x41424344, BTTV_BOARD_GRANDTEC, "GrandTec Multi Capture" }, { 0x01020304, BTTV_BOARD_XGUARD, "Grandtec Grand X-Guard" }, { 0x18501851, BTTV_BOARD_CHRONOS_VS2, "FlyVideo 98 (LR50)/ Chronos Video Shuttle II" }, { 0xa0501851, BTTV_BOARD_CHRONOS_VS2, "FlyVideo 98 (LR50)/ Chronos Video Shuttle II" }, { 0x18511851, BTTV_BOARD_FLYVIDEO98EZ, "FlyVideo 98EZ (LR51)/ CyberMail AV" }, { 0x18521852, BTTV_BOARD_TYPHOON_TVIEW, "FlyVideo 98FM (LR50)/ Typhoon TView TV/FM Tuner" }, { 0x41a0a051, BTTV_BOARD_FLYVIDEO_98FM, "Lifeview FlyVideo 98 LR50 Rev Q" }, { 0x18501f7f, BTTV_BOARD_FLYVIDEO_98, "Lifeview Flyvideo 98" }, { 0x010115cb, BTTV_BOARD_GMV1, "AG GMV1" }, { 0x010114c7, BTTV_BOARD_MODTEC_205, "Modular Technology MM201/MM202/MM205/MM210/MM215 PCTV" }, { 0x10b42636, BTTV_BOARD_HAUPPAUGE878, "STB ???" 
}, { 0x217d6606, BTTV_BOARD_WINFAST2000, "Leadtek WinFast TV 2000" }, { 0xfff6f6ff, BTTV_BOARD_WINFAST2000, "Leadtek WinFast TV 2000" }, { 0x03116000, BTTV_BOARD_SENSORAY311_611, "Sensoray 311" }, { 0x06116000, BTTV_BOARD_SENSORAY311_611, "Sensoray 611" }, { 0x00790e11, BTTV_BOARD_WINDVR, "Canopus WinDVR PCI" }, { 0xa0fca1a0, BTTV_BOARD_ZOLTRIX, "Face to Face Tvmax" }, { 0x82b2aa6a, BTTV_BOARD_SIMUS_GVC1100, "SIMUS GVC1100" }, { 0x146caa0c, BTTV_BOARD_PV951, "ituner spectra8" }, { 0x200a1295, BTTV_BOARD_PXC200, "ImageNation PXC200A" }, { 0x40111554, BTTV_BOARD_PV_BT878P_9B, "Prolink Pixelview PV-BT" }, { 0x17de0a01, BTTV_BOARD_KWORLD, "Mecer TV/FM/Video Tuner" }, { 0x01051805, BTTV_BOARD_PICOLO_TETRA_CHIP, "Picolo Tetra Chip #1" }, { 0x01061805, BTTV_BOARD_PICOLO_TETRA_CHIP, "Picolo Tetra Chip #2" }, { 0x01071805, BTTV_BOARD_PICOLO_TETRA_CHIP, "Picolo Tetra Chip #3" }, { 0x01081805, BTTV_BOARD_PICOLO_TETRA_CHIP, "Picolo Tetra Chip #4" }, { 0x15409511, BTTV_BOARD_ACORP_Y878F, "Acorp Y878F" }, { 0x53534149, BTTV_BOARD_SSAI_SECURITY, "SSAI Security Video Interface" }, { 0x5353414a, BTTV_BOARD_SSAI_ULTRASOUND, "SSAI Ultrasound Video Interface" }, /* likely broken, vendor id doesn't match the other magic views ... * { 0xa0fca04f, BTTV_BOARD_MAGICTVIEW063, "Guillemot Maxi TV Video 3" }, */ /* Duplicate PCI ID, reconfigure for this board during the eeprom read. 
* { 0x13eb0070, BTTV_BOARD_HAUPPAUGE_IMPACTVCB, "Hauppauge ImpactVCB" }, */ { 0x109e036e, BTTV_BOARD_CONCEPTRONIC_CTVFMI2, "Conceptronic CTVFMi v2"}, /* DVB cards (using pci function .1 for mpeg data xfer) */ { 0x001c11bd, BTTV_BOARD_PINNACLESAT, "Pinnacle PCTV Sat" }, { 0x01010071, BTTV_BOARD_NEBULA_DIGITV, "Nebula Electronics DigiTV" }, { 0x20007063, BTTV_BOARD_PC_HDTV, "pcHDTV HD-2000 TV"}, { 0x002611bd, BTTV_BOARD_TWINHAN_DST, "Pinnacle PCTV SAT CI" }, { 0x00011822, BTTV_BOARD_TWINHAN_DST, "Twinhan VisionPlus DVB" }, { 0xfc00270f, BTTV_BOARD_TWINHAN_DST, "ChainTech digitop DST-1000 DVB-S" }, { 0x07711461, BTTV_BOARD_AVDVBT_771, "AVermedia AverTV DVB-T 771" }, { 0x07611461, BTTV_BOARD_AVDVBT_761, "AverMedia AverTV DVB-T 761" }, { 0xdb1018ac, BTTV_BOARD_DVICO_DVBT_LITE, "DViCO FusionHDTV DVB-T Lite" }, { 0xdb1118ac, BTTV_BOARD_DVICO_DVBT_LITE, "Ultraview DVB-T Lite" }, { 0xd50018ac, BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE, "DViCO FusionHDTV 5 Lite" }, { 0x00261822, BTTV_BOARD_TWINHAN_DST, "DNTV Live! 
Mini "}, { 0xd200dbc0, BTTV_BOARD_DVICO_FUSIONHDTV_2, "DViCO FusionHDTV 2" }, { 0x763c008a, BTTV_BOARD_GEOVISION_GV600, "GeoVision GV-600" }, { 0x18011000, BTTV_BOARD_ENLTV_FM_2, "Encore ENL TV-FM-2" }, { 0x763d800a, BTTV_BOARD_GEOVISION_GV800S, "GeoVision GV-800(S) (master)" }, { 0x763d800b, BTTV_BOARD_GEOVISION_GV800S_SL, "GeoVision GV-800(S) (slave)" }, { 0x763d800c, BTTV_BOARD_GEOVISION_GV800S_SL, "GeoVision GV-800(S) (slave)" }, { 0x763d800d, BTTV_BOARD_GEOVISION_GV800S_SL, "GeoVision GV-800(S) (slave)" }, { 0x15401830, BTTV_BOARD_PV183, "Provideo PV183-1" }, { 0x15401831, BTTV_BOARD_PV183, "Provideo PV183-2" }, { 0x15401832, BTTV_BOARD_PV183, "Provideo PV183-3" }, { 0x15401833, BTTV_BOARD_PV183, "Provideo PV183-4" }, { 0x15401834, BTTV_BOARD_PV183, "Provideo PV183-5" }, { 0x15401835, BTTV_BOARD_PV183, "Provideo PV183-6" }, { 0x15401836, BTTV_BOARD_PV183, "Provideo PV183-7" }, { 0x15401837, BTTV_BOARD_PV183, "Provideo PV183-8" }, { 0x3116f200, BTTV_BOARD_TVT_TD3116, "Tongwei Video Technology TD-3116" }, { 0x02280279, BTTV_BOARD_APOSONIC_WDVR, "Aposonic W-DVR" }, { 0, -1, NULL } }; /* ----------------------------------------------------------------------- */ /* array with description for bt848 / bt878 tv/grabber cards */ struct tvcard bttv_tvcards[] = { /* ---- card 0x00 ---------------------------------- */ [BTTV_BOARD_UNKNOWN] = { .name = " *** UNKNOWN/GENERIC *** ", .video_inputs = 4, .svhs = 2, .muxsel = MUXSEL(2, 3, 1, 0), .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MIRO] = { .name = "MIRO PCTV", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 0, 0, 0 }, .gpiomute = 10, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_HAUPPAUGE] = { .name = "Hauppauge (bt848)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, 
[BTTV_BOARD_STB] = { .name = "STB, Gateway P/N 6000699 (bt848)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 4, 0, 2, 3 }, .gpiomute = 1, .no_msp34xx = 1, .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, }, /* ---- card 0x04 ---------------------------------- */ [BTTV_BOARD_INTEL] = { .name = "Intel Create and Share PCI/ Smart Video Recorder III", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 2, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0 }, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_DIAMOND] = { .name = "Diamond DTV2000", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 3, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0, 1, 0, 1 }, .gpiomute = 3, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_AVERMEDIA] = { .name = "AVerMedia TVPhone", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 3, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomask = 0x0f, .gpiomux = { 0x0c, 0x04, 0x08, 0x04 }, /* 0x04 for some cards ?? 
*/ .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= avermedia_tvphone_audio, .has_remote = 1, }, [BTTV_BOARD_MATRIX_VISION] = { .name = "MATRIX-Vision MV-Delta", .video_inputs = 5, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 0, 0), .gpiomux = { 0 }, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x08 ---------------------------------- */ [BTTV_BOARD_FLYVIDEO] = { .name = "Lifeview FlyVideo II (Bt848) LR26 / MAXI TV Video PCI2 LR26", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xc00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0xc00, 0x800, 0x400 }, .gpiomute = 0xc00, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TURBOTV] = { .name = "IMS/IXmicro TurboTV", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 3, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 1, 2, 3 }, .pll = PLL_28, .tuner_type = TUNER_TEMIC_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_HAUPPAUGE878] = { .name = "Hauppauge (bt878)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x0f, /* old: 7 */ .muxsel = MUXSEL(2, 0, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MIROPRO] = { .name = "MIRO PCTV pro", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x3014f, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x20001,0x10001, 0, 0 }, .gpiomute = 10, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x0c ---------------------------------- */ [BTTV_BOARD_ADSTECH_TV] = { .name = "ADS Technologies Channel Surfer TV (bt848)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 13, 14, 11, 7 }, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_AVERMEDIA98] = { .name = "AVerMedia TVCapture 98", .video_inputs = 3, /* .audio_inputs= 4, */ .svhs = 2, .gpiomask = 15, .muxsel = 
MUXSEL(2, 3, 1, 1), .gpiomux = { 13, 14, 11, 7 }, .msp34xx_alt = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= avermedia_tv_stereo_audio, .no_gpioirq = 1, }, [BTTV_BOARD_VHX] = { .name = "Aimslab Video Highway Xtreme (VHX)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 2, 1, 3 }, /* old: {0, 1, 2, 3, 4} */ .gpiomute = 4, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ZOLTRIX] = { .name = "Zoltrix TV-Max", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 1, 0 }, .gpiomute = 10, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x10 ---------------------------------- */ [BTTV_BOARD_PIXVIEWPLAYTV] = { .name = "Prolink Pixelview PlayTV (bt878)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x01fe00, .muxsel = MUXSEL(2, 3, 1, 1), /* 2003-10-20 by "Anton A. 
Arapov" <arapov@mail.ru> */ .gpiomux = { 0x001e00, 0, 0x018000, 0x014000 }, .gpiomute = 0x002000, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_WINVIEW_601] = { .name = "Leadtek WinView 601", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x8300f8, .muxsel = MUXSEL(2, 3, 1, 1, 0), .gpiomux = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 }, .gpiomute = 0xcfa007, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .volume_gpio = winview_volume, .has_radio = 1, }, [BTTV_BOARD_AVEC_INTERCAP] = { .name = "AVEC Intercapture", .video_inputs = 3, /* .audio_inputs= 2, */ .svhs = 2, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 0, 0, 0 }, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_LIFE_FLYKIT] = { .name = "Lifeview FlyVideo II EZ /FlyKit LR38 Bt848 (capture only)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 0x8dff00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0 }, .no_msp34xx = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x14 ---------------------------------- */ [BTTV_BOARD_CEI_RAFFLES] = { .name = "CEI Raffles Card", .video_inputs = 3, /* .audio_inputs= 3, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1, 1), .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_CONFERENCETV] = { .name = "Lifeview FlyVideo 98/ Lucky Star Image World ConferenceTV LR50", .video_inputs = 4, /* .audio_inputs= 2, tuner, line in */ .svhs = 2, .gpiomask = 0x1800, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_PHOEBE_TVMAS] = { .name = "Askey CPH050/ Phoebe Tv Master + FM", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xc00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 0x800, 0x400 }, .gpiomute = 0xc00, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MODTEC_205] = { .name = "Modular 
Technology MM201/MM202/MM205/MM210/MM215 PCTV, bt878", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .has_dig_in = 1, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 0), /* input 2 is digital */ /* .digital_mode= DIGITAL_MODE_CAMERA, */ .gpiomux = { 0, 0, 0, 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ALPS_TSBB5_PAL_I, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x18 ---------------------------------- */ [BTTV_BOARD_MAGICTVIEW061] = { .name = "Askey CPH05X/06X (bt878) [many vendors]", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xe00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = {0x400, 0x400, 0x400, 0x400 }, .gpiomute = 0xc00, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .has_remote = 1, .has_radio = 1, /* not every card has radio */ }, [BTTV_BOARD_VOBIS_BOOSTAR] = { .name = "Terratec TerraTV+ Version 1.0 (Bt848)/ Terra TValue Version 1.0/ Vobis TV-Boostar", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x1f0fff, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x20000, 0x30000, 0x10000, 0 }, .gpiomute = 0x40000, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= terratv_audio, }, [BTTV_BOARD_HAUPPAUG_WCAM] = { .name = "Hauppauge WinCam newer (bt878)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 7, .muxsel = MUXSEL(2, 0, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MAXI] = { .name = "Lifeview FlyVideo 98/ MAXI TV Video PCI2 LR50", .video_inputs = 4, /* .audio_inputs= 2, */ .svhs = 2, .gpiomask = 0x1800, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_SECAM, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x1c ---------------------------------- */ [BTTV_BOARD_TERRATV] = { .name = "Terratec TerraTV+ Version 1.1 (bt878)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x1f0fff, .muxsel = 
MUXSEL(2, 3, 1, 1), .gpiomux = { 0x20000, 0x30000, 0x10000, 0x00000 }, .gpiomute = 0x40000, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= terratv_audio, /* GPIO wiring: External 20 pin connector (for Active Radio Upgrade board) gpio00: i2c-sda gpio01: i2c-scl gpio02: om5610-data gpio03: om5610-clk gpio04: om5610-wre gpio05: om5610-stereo gpio06: rds6588-davn gpio07: Pin 7 n.c. gpio08: nIOW gpio09+10: nIOR, nSEL ?? (bt878) gpio09: nIOR (bt848) gpio10: nSEL (bt848) Sound Routing: gpio16: u2-A0 (1st 4052bt) gpio17: u2-A1 gpio18: u2-nEN gpio19: u4-A0 (2nd 4052) gpio20: u4-A1 u4-nEN - GND Btspy: 00000 : Cdrom (internal audio input) 10000 : ext. Video audio input 20000 : TV Mono a0000 : TV Mono/2 1a0000 : TV Stereo 30000 : Radio 40000 : Mute */ }, [BTTV_BOARD_PXC200] = { /* Jannik Fritsch <jannik@techfak.uni-bielefeld.de> */ .name = "Imagenation PXC200", .video_inputs = 5, /* .audio_inputs= 1, */ .svhs = 1, /* was: 4 */ .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 0, 0), .gpiomux = { 0 }, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .muxsel_hook = PXC200_muxsel, }, [BTTV_BOARD_FLYVIDEO_98] = { .name = "Lifeview FlyVideo 98 LR50", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x1800, /* 0x8dfe00 */ .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x0800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_IPROTV] = { .name = "Formac iProTV, Formac ProTV I (bt848)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 1, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 0, 0, 0 }, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x20 ---------------------------------- */ [BTTV_BOARD_INTEL_C_S_PCI] = { .name = "Intel Create and Share PCI/ Smart Video Recorder III", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 2, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0 }, .tuner_type = TUNER_ABSENT, 
.tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TERRATVALUE] = { .name = "Terratec TerraTValue Version Bt878", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xffff00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x500, 0, 0x300, 0x900 }, .gpiomute = 0x900, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_WINFAST2000] = { .name = "Leadtek WinFast 2000/ WinFast 2000 XP", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, /* TV, CVid, SVid, CVid over SVid connector */ .muxsel = MUXSEL(2, 3, 1, 1, 0), /* Alexander Varakin <avarakin@hotmail.com> [stereo version] */ .gpiomask = 0xb33000, .gpiomux = { 0x122000,0x1000,0x0000,0x620000 }, .gpiomute = 0x800000, /* Audio Routing for "WinFast 2000 XP" (no tv stereo !) gpio23 -- hef4052:nEnable (0x800000) gpio12 -- hef4052:A1 gpio13 -- hef4052:A0 0x0000: external audio 0x1000: FM 0x2000: TV 0x3000: n.c. Note: There exists another variant "Winfast 2000" with tv stereo !? Note: eeprom only contains FF and pci subsystem id 107d:6606 */ .pll = PLL_28, .has_radio = 1, .tuner_type = TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */ .tuner_addr = ADDR_UNSET, .audio_mode_gpio= winfast2000_audio, .has_remote = 1, }, [BTTV_BOARD_CHRONOS_VS2] = { .name = "Lifeview FlyVideo 98 LR50 / Chronos Video Shuttle II", .video_inputs = 4, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 0x1800, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x24 ---------------------------------- */ [BTTV_BOARD_TYPHOON_TVIEW] = { .name = "Lifeview FlyVideo 98FM LR50 / Typhoon TView TV/FM Tuner", .video_inputs = 4, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 0x1800, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x800, 0x1000, 0x1000 }, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .has_radio = 1, }, [BTTV_BOARD_PXELVWPLTVPRO] = { .name 
= "Prolink PixelView PlayTV pro", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xff, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x21, 0x20, 0x24, 0x2c }, .gpiomute = 0x29, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MAGICTVIEW063] = { .name = "Askey CPH06X TView99", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x551e00, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0x551400, 0x551200, 0, 0 }, .gpiomute = 0x551c00, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, .has_remote = 1, }, [BTTV_BOARD_PINNACLE] = { .name = "Pinnacle PCTV Studio/Rave", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x03000F, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 0xd0001, 0, 0 }, .gpiomute = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x28 ---------------------------------- */ [BTTV_BOARD_STB2] = { .name = "STB TV PCI FM, Gateway P/N 6000704 (bt878), 3Dfx VoodooTV 100", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 4, 0, 2, 3 }, .gpiomute = 1, .no_msp34xx = 1, .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, }, [BTTV_BOARD_AVPHONE98] = { .name = "AVerMedia TVPhone 98", .video_inputs = 3, /* .audio_inputs= 4, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 13, 4, 11, 7 }, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .has_radio = 1, .audio_mode_gpio= avermedia_tvphone_audio, }, [BTTV_BOARD_PV951] = { .name = "ProVideo PV951", /* pic16c54 */ .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 0, 0}, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ONAIR_TV] = { .name = "Little OnAir TV", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, 
.gpiomask = 0xe00b, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0xff9ff6, 0xff9ff6, 0xff1ff7, 0 }, .gpiomute = 0xff3ffc, .no_msp34xx = 1, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x2c ---------------------------------- */ [BTTV_BOARD_SIGMA_TVII_FM] = { .name = "Sigma TVII-FM", .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 3, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 1, 0, 2 }, .gpiomute = 3, .no_msp34xx = 1, .pll = PLL_NONE, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MATRIX_VISION2] = { .name = "MATRIX-Vision MV-Delta 2", .video_inputs = 5, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 0, 0), .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ZOLTRIX_GENIE] = { .name = "Zoltrix Genie TV/FM", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xbcf03f, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0xbc803f, 0xbc903f, 0xbcb03f, 0 }, .gpiomute = 0xbcb03f, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_TEMIC_4039FR5_NTSC, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TERRATVRADIO] = { .name = "Terratec TV/Radio+", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x70000, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x20000, 0x30000, 0x10000, 0 }, .gpiomute = 0x40000, .no_msp34xx = 1, .pll = PLL_35, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, .has_radio = 1, }, /* ---- card 0x30 ---------------------------------- */ [BTTV_BOARD_DYNALINK] = { .name = "Askey CPH03x/ Dynalink Magic TView", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = {2,0,0,0 }, .gpiomute = 1, .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_GVBCTV3PCI] = { .name = "IODATA GV-BCTV3/PCI", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x010f00, .muxsel = MUXSEL(2, 3, 0, 0), .gpiomux = 
{0x10000, 0, 0x10000, 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ALPS_TSHC6_NTSC, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= gvbctv3pci_audio, }, [BTTV_BOARD_PXELVWPLTVPAK] = { .name = "Prolink PV-BT878P+4E / PixelView PlayTV PAK / Lenco MXTV-9578 CP", .video_inputs = 5, /* .audio_inputs= 1, */ .svhs = 3, .has_dig_in = 1, .gpiomask = 0xAA0000, .muxsel = MUXSEL(2, 3, 1, 1, 0), /* in 4 is digital */ /* .digital_mode= DIGITAL_MODE_CAMERA, */ .gpiomux = { 0x20000, 0, 0x80000, 0x80000 }, .gpiomute = 0xa8000, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, .has_remote = 1, /* GPIO wiring: (different from Rev.4C !) GPIO17: U4.A0 (first hef4052bt) GPIO19: U4.A1 GPIO20: U5.A1 (second hef4052bt) GPIO21: U4.nEN GPIO22: BT832 Reset Line GPIO23: A5,A0, U5,nEN Note: At i2c=0x8a is a Bt832 chip, which changes to 0x88 after being reset via GPIO22 */ }, [BTTV_BOARD_EAGLE] = { .name = "Eagle Wireless Capricorn2 (bt878A)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 0, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .pll = PLL_28, .tuner_type = UNSET /* TUNER_ALPS_TMDH2_NTSC */, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x34 ---------------------------------- */ [BTTV_BOARD_PINNACLEPRO] = { /* David Härdeman <david@2gen.com> */ .name = "Pinnacle PCTV Studio Pro", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 0x03000F, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 0xd0001, 0, 0 }, .gpiomute = 10, /* sound path (5 sources): MUX1 (mask 0x03), Enable Pin 0x08 (0=enable, 1=disable) 0= ext. 
Audio IN 1= from MUX2 2= Mono TV sound from Tuner 3= not connected MUX2 (mask 0x30000): 0,2,3= from MSP34xx 1= FM stereo Radio from Tuner */ .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TVIEW_RDS_FM] = { /* Claas Langbehn <claas@bigfoot.com>, Sven Grothklags <sven@upb.de> */ .name = "Typhoon TView RDS + FM Stereo / KNC1 TV Station RDS", .video_inputs = 4, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 0x1c, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 0x10, 8 }, .gpiomute = 4, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .has_radio = 1, }, [BTTV_BOARD_LIFETEC_9415] = { /* Tim Röstermundt <rosterm@uni-muenster.de> in de.comp.os.unix.linux.hardware: options bttv card=0 pll=1 radio=1 gpiomask=0x18e0 gpiomux =0x44c71f,0x44d71f,0,0x44d71f,0x44dfff options tuner type=5 */ .name = "Lifeview FlyVideo 2000 /FlyVideo A2/ Lifetec LT 9415 TV [LR90]", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x18e0, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x0000,0x0800,0x1000,0x1000 }, .gpiomute = 0x18e0, /* For cards with tda9820/tda9821: 0x0000: Tuner normal stereo 0x0080: Tuner A2 SAP (second audio program = Zweikanalton) 0x0880: Tuner A2 stereo */ .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_BESTBUY_EASYTV] = { /* Miguel Angel Alvarez <maacruz@navegalia.com> old Easy TV BT848 version (model CPH031) */ .name = "Askey CPH031/ BESTBUY Easy TV", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xF, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 2, 0, 0, 0 }, .gpiomute = 10, .pll = PLL_28, .tuner_type = TUNER_TEMIC_PAL, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x38 ---------------------------------- */ [BTTV_BOARD_FLYVIDEO_98FM] = { /* Gordon Heydon <gjheydon@bigfoot.com ('98) */ .name = "Lifeview FlyVideo 98FM LR50", .video_inputs = 4, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 0x1800, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0x800, 0x1000, 0x1000 
}, .gpiomute = 0x1800, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, }, /* This is the ultimate cheapo capture card * just a BT848A on a small PCB! * Steve Hosgood <steve@equiinet.com> */ [BTTV_BOARD_GRANDTEC] = { .name = "GrandTec 'Grand Video Capture' (Bt848)", .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .gpiomask = 0, .muxsel = MUXSEL(3, 1), .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_35, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ASKEY_CPH060] = { /* Daniel Herrington <daniel.herrington@home.com> */ .name = "Askey CPH060/ Phoebe TV Master Only (No FM)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xe00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x400, 0x400, 0x400, 0x400 }, .gpiomute = 0x800, .pll = PLL_28, .tuner_type = TUNER_TEMIC_4036FY5_NTSC, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ASKEY_CPH03X] = { /* Matti Mottus <mottus@physic.ut.ee> */ .name = "Askey CPH03x TV Capturer", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x03000F, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 2, 0, 0, 0 }, .gpiomute = 1, .pll = PLL_28, .tuner_type = TUNER_TEMIC_PAL, .tuner_addr = ADDR_UNSET, .has_remote = 1, }, /* ---- card 0x3c ---------------------------------- */ [BTTV_BOARD_MM100PCTV] = { /* Philip Blundell <philb@gnu.org> */ .name = "Modular Technology MM100PCTV", .video_inputs = 2, /* .audio_inputs= 2, */ .svhs = NO_SVHS, .gpiomask = 11, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 0, 0, 1 }, .gpiomute = 8, .pll = PLL_35, .tuner_type = TUNER_TEMIC_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_GMV1] = { /* Adrian Cox <adrian@humboldt.co.uk */ .name = "AG Electronics GMV1", .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .gpiomask = 0xF, .muxsel = MUXSEL(2, 2), .gpiomux = { }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_BESTBUY_EASYTV2] = { /* Miguel Angel Alvarez <maacruz@navegalia.com> new Easy TV 
BT878 version (model CPH061) special thanks to Informatica Mieres for providing the card */ .name = "Askey CPH061/ BESTBUY Easy TV (bt878)", .video_inputs = 3, /* .audio_inputs= 2, */ .svhs = 2, .gpiomask = 0xFF, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 1, 0, 4, 4 }, .gpiomute = 9, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_ATI_TVWONDER] = { /* Lukas Gebauer <geby@volny.cz> */ .name = "ATI TV-Wonder", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xf03f, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0xbffe, 0, 0xbfff, 0 }, .gpiomute = 0xbffe, .pll = PLL_28, .tuner_type = TUNER_TEMIC_4006FN5_MULTI_PAL, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x40 ---------------------------------- */ [BTTV_BOARD_ATI_TVWONDERVE] = { /* Lukas Gebauer <geby@volny.cz> */ .name = "ATI TV-Wonder VE", .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 1, .muxsel = MUXSEL(2, 3, 0, 1), .gpiomux = { 0, 0, 1, 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_TEMIC_4006FN5_MULTI_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_FLYVIDEO2000] = { /* DeeJay <deejay@westel900.net (2000S) */ .name = "Lifeview FlyVideo 2000S LR90", .video_inputs = 3, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 0x18e0, .muxsel = MUXSEL(2, 3, 0, 1), /* Radio changed from 1e80 to 0x800 to make FlyVideo2000S in .hu happy (gm)*/ /* -dk-???: set mute=0x1800 for tda9874h daughterboard */ .gpiomux = { 0x0000,0x0800,0x1000,0x1000 }, .gpiomute = 0x1800, .audio_mode_gpio= fv2000s_audio, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TERRATVALUER] = { .name = "Terratec TValueRadio", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0xffff00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x500, 0x500, 0x300, 0x900 }, .gpiomute = 0x900, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .has_radio = 1, }, [BTTV_BOARD_GVBCTV4PCI] = { /* TANAKA 
Kei <peg00625@nifty.com> */ .name = "IODATA GV-BCTV4/PCI", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x010f00, .muxsel = MUXSEL(2, 3, 0, 0), .gpiomux = {0x10000, 0, 0x10000, 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_SHARP_2U5JF5540_NTSC, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= gvbctv3pci_audio, }, /* ---- card 0x44 ---------------------------------- */ [BTTV_BOARD_VOODOOTV_FM] = { .name = "3Dfx VoodooTV FM (Euro)", /* try "insmod msp3400 simple=0" if you have * sound problems with this card. */ .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 0x4f8a00, /* 0x100000: 1=MSP enabled (0=disable again) * 0x010000: Connected to "S0" on tda9880 (0=Pal/BG, 1=NTSC) */ .gpiomux = {0x947fff, 0x987fff,0x947fff,0x947fff }, .gpiomute = 0x947fff, /* tvtuner, radio, external,internal, mute, stereo * tuner, Composit, SVid, Composit-on-Svid-adapter */ .muxsel = MUXSEL(2, 3, 0, 1), .tuner_type = TUNER_MT2032, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, }, [BTTV_BOARD_VOODOOTV_200] = { .name = "VoodooTV 200 (USA)", /* try "insmod msp3400 simple=0" if you have * sound problems with this card. 
*/ .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 0x4f8a00, /* 0x100000: 1=MSP enabled (0=disable again) * 0x010000: Connected to "S0" on tda9880 (0=Pal/BG, 1=NTSC) */ .gpiomux = {0x947fff, 0x987fff,0x947fff,0x947fff }, .gpiomute = 0x947fff, /* tvtuner, radio, external,internal, mute, stereo * tuner, Composit, SVid, Composit-on-Svid-adapter */ .muxsel = MUXSEL(2, 3, 0, 1), .tuner_type = TUNER_MT2032, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, }, [BTTV_BOARD_AIMMS] = { /* Philip Blundell <pb@nexus.co.uk> */ .name = "Active Imaging AIMMS", .video_inputs = 1, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .muxsel = MUXSEL(2), .gpiomask = 0 }, [BTTV_BOARD_PV_BT878P_PLUS] = { /* Tomasz Pyra <hellfire@sedez.iq.pl> */ .name = "Prolink Pixelview PV-BT878P+ (Rev.4C,8E)", .video_inputs = 3, /* .audio_inputs= 4, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 11, 7 }, /* TV and Radio with same GPIO ! */ .gpiomute = 13, .pll = PLL_28, .tuner_type = TUNER_LG_PAL_I_FM, .tuner_addr = ADDR_UNSET, .has_remote = 1, /* GPIO wiring: GPIO0: U4.A0 (hef4052bt) GPIO1: U4.A1 GPIO2: U4.A1 (second hef4052bt) GPIO3: U4.nEN, U5.A0, A5.nEN GPIO8-15: vrd866b ? 
*/ }, [BTTV_BOARD_FLYVIDEO98EZ] = { .name = "Lifeview FlyVideo 98EZ (capture only) LR51", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 2, /* AV1, AV2, SVHS, CVid adapter on SVHS */ .muxsel = MUXSEL(2, 3, 1, 1), .pll = PLL_28, .no_msp34xx = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x48 ---------------------------------- */ [BTTV_BOARD_PV_BT878P_9B] = { /* Dariusz Kowalewski <darekk@automex.pl> */ .name = "Prolink Pixelview PV-BT878P+9B (PlayTV Pro rev.9B FM+NICAM)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x3f, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x01, 0x00, 0x03, 0x03 }, .gpiomute = 0x09, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= pvbt878p9b_audio, /* Note: not all cards have stereo */ .has_radio = 1, /* Note: not all cards have radio */ .has_remote = 1, /* GPIO wiring: GPIO0: A0 hef4052 GPIO1: A1 hef4052 GPIO3: nEN hef4052 GPIO8-15: vrd866b GPIO20,22,23: R30,R29,R28 */ }, [BTTV_BOARD_SENSORAY311_611] = { /* Clay Kunz <ckunz@mail.arc.nasa.gov> */ /* you must jumper JP5 for the 311 card (PC/104+) to work */ .name = "Sensoray 311/611", .video_inputs = 5, /* .audio_inputs= 0, */ .svhs = 4, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 0, 0), .gpiomux = { 0 }, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_RV605] = { /* Miguel Freitas <miguel@cetuc.puc-rio.br> */ .name = "RemoteVision MX (RV605)", .video_inputs = 16, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x00, .gpiomask2 = 0x07ff, .muxsel = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), .no_msp34xx = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .muxsel_hook = rv605_muxsel, }, [BTTV_BOARD_POWERCLR_MTV878] = { .name = "Powercolor MTV878/ MTV878R/ MTV878F", .video_inputs = 3, /* .audio_inputs= 2, */ .svhs = 2, .gpiomask = 0x1C800F, /* Bit0-2: Audio select, 8-12:remote control 14:remote valid 15:remote reset */ .muxsel = MUXSEL(2, 1, 
1), .gpiomux = { 0, 1, 2, 2 }, .gpiomute = 4, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, }, /* ---- card 0x4c ---------------------------------- */ [BTTV_BOARD_WINDVR] = { /* Masaki Suzuki <masaki@btree.org> */ .name = "Canopus WinDVR PCI (COMPAQ Presario 3524JP, 5112JP)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x140007, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= windvr_audio, }, [BTTV_BOARD_GRANDTEC_MULTI] = { .name = "GrandTec Multi Capture Card (Bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_KWORLD] = { .name = "Jetway TV/Capture JW-TV878-FBK, Kworld KW-TV878RF", .video_inputs = 4, /* .audio_inputs= 3, */ .svhs = 2, .gpiomask = 7, /* Tuner, SVid, SVHS, SVid to SVHS connector */ .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 4, 4 },/* Yes, this tuner uses the same audio output for TV and FM radio! * This card lacks external Audio In, so we mute it on Ext. & Int. * The PCB can take a sbx1637/sbx1673, wiring unknown. * This card lacks PCI subsystem ID, sigh. * gpiomux =1: lower volume, 2+3: mute * btwincap uses 0x80000/0x80003 */ .gpiomute = 4, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, /* Samsung TCPA9095PC27A (BG+DK), philips compatible, w/FM, stereo and radio signal strength indicators work fine. */ .has_radio = 1, /* GPIO Info: GPIO0,1: HEF4052 A0,A1 GPIO2: HEF4052 nENABLE GPIO3-7: n.c. GPIO8-13: IRDC357 data0-5 (data6 n.c. ?) [chip not present on my card] GPIO14,15: ?? GPIO16-21: n.c. GPIO22,23: ?? ?? 
: mtu8b56ep microcontroller for IR (GPIO wiring unknown)*/ }, [BTTV_BOARD_DSP_TCVIDEO] = { /* Arthur Tetzlaff-Deas, DSP Design Ltd <software@dspdesign.com> */ .name = "DSP Design TCVIDEO", .video_inputs = 4, .svhs = NO_SVHS, .muxsel = MUXSEL(2, 3, 1, 0), .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x50 ---------------------------------- */ [BTTV_BOARD_HAUPPAUGEPVR] = { .name = "Hauppauge WinTV PVR", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 0, 1, 1), .pll = PLL_28, .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .gpiomask = 7, .gpiomux = {7}, }, [BTTV_BOARD_GVBCTV5PCI] = { .name = "IODATA GV-BCTV5/PCI", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x0f0f80, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = {0x030000, 0x010000, 0, 0 }, .gpiomute = 0x020000, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_NTSC_M, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= gvbctv5pci_audio, .has_radio = 1, }, [BTTV_BOARD_OSPREY1x0] = { .name = "Osprey 100/150 (878)", /* 0x1(2|3)-45C6-C1 */ .video_inputs = 4, /* id-inputs-clock */ /* .audio_inputs= 0, */ .svhs = 3, .muxsel = MUXSEL(3, 2, 0, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY1x0_848] = { .name = "Osprey 100/150 (848)", /* 0x04-54C0-C1 & older boards */ .video_inputs = 3, /* .audio_inputs= 0, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, /* ---- card 0x54 ---------------------------------- */ [BTTV_BOARD_OSPREY101_848] = { .name = "Osprey 101 (848)", /* 0x05-40C0-C1 */ .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .muxsel = MUXSEL(3, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY1x1] = { .name = "Osprey 101/151", /* 0x1(4|5)-0004-C4 */ .video_inputs = 1, /* 
.audio_inputs= 0, */ .svhs = NO_SVHS, .muxsel = MUXSEL(0), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY1x1_SVID] = { .name = "Osprey 101/151 w/ svid", /* 0x(16|17|20)-00C4-C1 */ .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .muxsel = MUXSEL(0, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY2xx] = { .name = "Osprey 200/201/250/251", /* 0x1(8|9|E|F)-0004-C4 */ .video_inputs = 1, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .muxsel = MUXSEL(0), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, /* ---- card 0x58 ---------------------------------- */ [BTTV_BOARD_OSPREY2x0_SVID] = { .name = "Osprey 200/250", /* 0x1(A|B)-00C4-C1 */ .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = 1, .muxsel = MUXSEL(0, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY2x0] = { .name = "Osprey 210/220/230", /* 0x1(A|B)-04C0-C1 */ .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = 1, .muxsel = MUXSEL(2, 3), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY500] = { .name = "Osprey 500", /* 500 */ .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = 1, .muxsel = MUXSEL(2, 3), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, [BTTV_BOARD_OSPREY540] = { .name = "Osprey 540", /* 540 */ .video_inputs = 4, /* .audio_inputs= 1, */ .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, /* ---- card 0x5C ---------------------------------- */ [BTTV_BOARD_OSPREY2000] = { .name = "Osprey 2000", /* 2000 */ .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = 1, .muxsel = MUXSEL(2, 3), .pll = PLL_28, .tuner_type = TUNER_ABSENT, 
.tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, /* must avoid, conflicts with the bt860 */ }, [BTTV_BOARD_IDS_EAGLE] = { /* M G Berberich <berberic@forwiss.uni-passau.de> */ .name = "IDS Eagle", .video_inputs = 4, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0, .muxsel = MUXSEL(2, 2, 2, 2), .muxsel_hook = eagle_muxsel, .no_msp34xx = 1, .pll = PLL_28, }, [BTTV_BOARD_PINNACLESAT] = { .name = "Pinnacle PCTV Sat", .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel = MUXSEL(3, 1), .pll = PLL_28, .no_gpioirq = 1, .has_dvb = 1, }, [BTTV_BOARD_FORMAC_PROTV] = { .name = "Formac ProTV II (bt878)", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 3, .gpiomask = 2, /* TV, Comp1, Composite over SVID con, SVID */ .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 2, 0, 0 }, .pll = PLL_28, .has_radio = 1, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, /* sound routing: GPIO=0x00,0x01,0x03: mute (?) 0x02: both TV and radio (tuner: FM1216/I) The card has onboard audio connectors labeled "cdrom" and "board", not soldered here, though unknown wiring. Card lacks: external audio in, pci subsystem id. 
*/ }, /* ---- card 0x60 ---------------------------------- */ [BTTV_BOARD_MACHTV] = { .name = "MachTV", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 3}, .gpiomute = 4, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .pll = PLL_28, }, [BTTV_BOARD_EURESYS_PICOLO] = { .name = "Euresys Picolo", .video_inputs = 3, /* .audio_inputs= 0, */ .svhs = 2, .gpiomask = 0, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel = MUXSEL(2, 0, 1), .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_PV150] = { /* Luc Van Hoeylandt <luc@e-magic.be> */ .name = "ProVideo PV150", /* 0x4f */ .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0, .muxsel = MUXSEL(2, 3), .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_AD_TVK503] = { /* Hiroshi Takekawa <sian@big.or.jp> */ /* This card lacks subsystem ID */ .name = "AD-TVK503", /* 0x63 */ .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x001e8007, .muxsel = MUXSEL(2, 3, 1, 0), /* Tuner, Radio, external, internal, off, on */ .gpiomux = { 0x08, 0x0f, 0x0a, 0x08 }, .gpiomute = 0x0f, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, .audio_mode_gpio= adtvk503_audio, }, /* ---- card 0x64 ---------------------------------- */ [BTTV_BOARD_HERCULES_SM_TV] = { .name = "Hercules Smart TV Stereo", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x00, .muxsel = MUXSEL(2, 3, 1, 1), .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, /* Notes: - card lacks subsystem ID - stereo variant w/ daughter board with tda9874a @0xb0 - Audio Routing: always from tda9874 independent of GPIO (?) 
external line in: unknown - Other chips: em78p156elp @ 0x96 (probably IR remote control) hef4053 (instead 4052) for unknown function */ }, [BTTV_BOARD_PACETV] = { .name = "Pace TV & Radio Card", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, /* Tuner, CVid, SVid, CVid over SVid connector */ .muxsel = MUXSEL(2, 3, 1, 1), .gpiomask = 0, .no_tda7432 = 1, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, .has_radio = 1, .pll = PLL_28, /* Bt878, Bt832, FI1246 tuner; no pci subsystem id only internal line out: (4pin header) RGGL Radio must be decoded by msp3410d (not routed through)*/ /* .digital_mode = DIGITAL_MODE_CAMERA, todo! */ }, [BTTV_BOARD_IVC200] = { /* Chris Willing <chris@vislab.usyd.edu.au> */ .name = "IVC-200", .video_inputs = 1, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0xdf, .muxsel = MUXSEL(2), .pll = PLL_28, }, [BTTV_BOARD_IVCE8784] = { .name = "IVCE-8784", .video_inputs = 1, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0xdf, .muxsel = MUXSEL(2), .pll = PLL_28, }, [BTTV_BOARD_XGUARD] = { .name = "Grand X-Guard / Trust 814PCI", .video_inputs = 16, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .gpiomask2 = 0xff, .muxsel = MUXSEL(2,2,2,2, 3,3,3,3, 1,1,1,1, 0,0,0,0), .muxsel_hook = xguard_muxsel, .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, }, /* ---- card 0x68 ---------------------------------- */ [BTTV_BOARD_NEBULA_DIGITV] = { .name = "Nebula Electronics DigiTV", .video_inputs = 1, .svhs = NO_SVHS, .muxsel = MUXSEL(2, 3, 1, 0), .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .has_dvb = 1, .has_remote = 1, .gpiomask = 0x1b, .no_gpioirq = 1, }, [BTTV_BOARD_PV143] = { /* Jorge Boncompte - DTI2 <jorge@dti2.net> */ .name = "ProVideo PV143", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 
0, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VD009X1_VD011_MINIDIN] = { /* M.Klahr@phytec.de */ .name = "PHYTEC VD-009-X1 VD-011 MiniDIN (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 3, .gpiomask = 0x00, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VD009X1_VD011_COMBI] = { .name = "PHYTEC VD-009-X1 VD-011 Combi (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 3, .gpiomask = 0x00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x6c ---------------------------------- */ [BTTV_BOARD_VD009_MINIDIN] = { .name = "PHYTEC VD-009 MiniDIN (bt878)", .video_inputs = 10, /* .audio_inputs= 0, */ .svhs = 9, .gpiomask = 0x00, .gpiomask2 = 0x03, /* used for external vodeo mux */ .muxsel = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 0), .muxsel_hook = phytec_muxsel, .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VD009_COMBI] = { .name = "PHYTEC VD-009 Combi (bt878)", .video_inputs = 10, /* .audio_inputs= 0, */ .svhs = 9, .gpiomask = 0x00, .gpiomask2 = 0x03, /* used for external vodeo mux */ .muxsel = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 1), .muxsel_hook = phytec_muxsel, .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_IVC100] = { .name = "IVC-100", .video_inputs = 4, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0xdf, .muxsel = MUXSEL(2, 3, 1, 0), .pll = PLL_28, }, [BTTV_BOARD_IVC120] = { /* IVC-120G - Alan Garfield <alan@fromorbit.com> */ .name = "IVC-120G", .video_inputs = 16, /* .audio_inputs= 0, 
*/ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, /* card has no svhs */ .no_msp34xx = 1, .no_tda7432 = 1, .gpiomask = 0x00, .muxsel = MUXSEL(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .muxsel_hook = ivc120_muxsel, .pll = PLL_28, }, /* ---- card 0x70 ---------------------------------- */ [BTTV_BOARD_PC_HDTV] = { .name = "pcHDTV HD-2000 TV", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1, 0), .tuner_type = TUNER_PHILIPS_FCV1236D, .tuner_addr = ADDR_UNSET, .has_dvb = 1, }, [BTTV_BOARD_TWINHAN_DST] = { .name = "Twinhan DST + clones", .no_msp34xx = 1, .no_tda7432 = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_video = 1, .has_dvb = 1, }, [BTTV_BOARD_WINFASTVC100] = { .name = "Winfast VC100", .video_inputs = 3, /* .audio_inputs= 0, */ .svhs = 1, /* Vid In, SVid In, Vid over SVid in connector */ .muxsel = MUXSEL(3, 1, 1, 3), .no_msp34xx = 1, .no_tda7432 = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .pll = PLL_28, }, [BTTV_BOARD_TEV560] = { .name = "Teppro TEV-560/InterVision IV-560", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 3, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 1, 1, 1, 1 }, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .pll = PLL_35, }, /* ---- card 0x74 ---------------------------------- */ [BTTV_BOARD_SIMUS_GVC1100] = { .name = "SIMUS GVC1100", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .muxsel = MUXSEL(2, 2, 2, 2), .gpiomask = 0x3F, .muxsel_hook = gvc1100_muxsel, }, [BTTV_BOARD_NGSTV_PLUS] = { /* Carlos Silva r3pek@r3pek.homelinux.org || card 0x75 */ .name = "NGS NGSTV+", .video_inputs = 3, .svhs = 2, .gpiomask = 0x008007, .muxsel = MUXSEL(2, 3, 0, 0), .gpiomux = { 0, 0, 0, 0 }, .gpiomute = 0x000003, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .has_remote = 1, }, [BTTV_BOARD_LMLBT4] = { /* http://linuxmedialabs.com */ 
.name = "LMLBT4", .video_inputs = 4, /* IN1,IN2,IN3,IN4 */ /* .audio_inputs= 0, */ .svhs = NO_SVHS, .muxsel = MUXSEL(2, 3, 1, 0), .no_msp34xx = 1, .no_tda7432 = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_TEKRAM_M205] = { /* Helmroos Harri <harri.helmroos@pp.inet.fi> */ .name = "Tekram M205 PRO", .video_inputs = 3, /* .audio_inputs= 1, */ .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .svhs = 2, .gpiomask = 0x68, .muxsel = MUXSEL(2, 3, 1), .gpiomux = { 0x68, 0x68, 0x61, 0x61 }, .pll = PLL_28, }, /* ---- card 0x78 ---------------------------------- */ [BTTV_BOARD_CONTVFMI] = { /* Javier Cendan Ares <jcendan@lycos.es> */ /* bt878 TV + FM without subsystem ID */ .name = "Conceptronic CONTVFMi", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x008007, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 2 }, .gpiomute = 3, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .has_remote = 1, .has_radio = 1, }, [BTTV_BOARD_PICOLO_TETRA_CHIP] = { /*Eric DEBIEF <debief@telemsa.com>*/ /*EURESYS Picolo Tetra : 4 Conexant Fusion 878A, no audio, video input set with analog multiplexers GPIO controlled*/ /* adds picolo_tetra_muxsel(), picolo_tetra_init(), the following declaration strucure, and #define BTTV_BOARD_PICOLO_TETRA_CHIP*/ /*0x79 in bttv.h*/ .name = "Euresys Picolo Tetra", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0, .gpiomask2 = 0x3C<<16,/*Set the GPIO[18]->GPIO[21] as output pin.==> drive the video inputs through analog multiplexers*/ .no_msp34xx = 1, .no_tda7432 = 1, /*878A input is always MUX0, see above.*/ .muxsel = MUXSEL(2, 2, 2, 2), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .pll = PLL_28, .muxsel_hook = picolo_tetra_muxsel,/*Required as it doesn't follow the classic input selection policy*/ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_SPIRIT_TV] = { /* Spirit TV Tuner from http://spiritmodems.com.au */ /* Stafford 
Goodsell <surge@goliath.homeunix.org> */ .name = "Spirit TV Tuner", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x0000000f, .muxsel = MUXSEL(2, 1, 1), .gpiomux = { 0x02, 0x00, 0x00, 0x00 }, .tuner_type = TUNER_TEMIC_PAL, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, }, [BTTV_BOARD_AVDVBT_771] = { /* Wolfram Joost <wojo@frokaschwei.de> */ .name = "AVerMedia AVerTV DVB-T 771", .video_inputs = 2, .svhs = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .muxsel = MUXSEL(3, 3), .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, .has_dvb = 1, .no_gpioirq = 1, .has_remote = 1, }, /* ---- card 0x7c ---------------------------------- */ [BTTV_BOARD_AVDVBT_761] = { /* Matt Jesson <dvb@jesson.eclipse.co.uk> */ /* Based on the Nebula card data - added remote and new card number - BTTV_BOARD_AVDVBT_761, see also ir-kbd-gpio.c */ .name = "AverMedia AverTV DVB-T 761", .video_inputs = 2, .svhs = 1, .muxsel = MUXSEL(3, 1, 2, 0), /* Comp0, S-Video, ?, ? */ .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .has_dvb = 1, .no_gpioirq = 1, .has_remote = 1, }, [BTTV_BOARD_MATRIX_VISIONSQ] = { /* andre.schwarz@matrix-vision.de */ .name = "MATRIX Vision Sigma-SQ", .video_inputs = 16, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x0, .muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3), .muxsel_hook = sigmaSQ_muxsel, .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MATRIX_VISIONSLC] = { /* andre.schwarz@matrix-vision.de */ .name = "MATRIX Vision Sigma-SLC", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x0, .muxsel = MUXSEL(2, 2, 2, 2), .muxsel_hook = sigmaSLC_muxsel, .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* BTTV_BOARD_APAC_VIEWCOMP */ [BTTV_BOARD_APAC_VIEWCOMP] = { /* Attila Kondoros <attila.kondoros@chello.hu> */ /* bt878 TV + FM 
0x00000000 subsystem ID */ .name = "APAC Viewcomp 878(AMAX)", .video_inputs = 2, /* .audio_inputs= 1, */ .svhs = NO_SVHS, .gpiomask = 0xFF, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 0, 0, 0 }, .gpiomute = 10, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL, .tuner_addr = ADDR_UNSET, .has_remote = 1, /* miniremote works, see ir-kbd-gpio.c */ .has_radio = 1, /* not every card has radio */ }, /* ---- card 0x80 ---------------------------------- */ [BTTV_BOARD_DVICO_DVBT_LITE] = { /* Chris Pascoe <c.pascoe@itee.uq.edu.au> */ .name = "DViCO FusionHDTV DVB-T Lite", .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, .no_video = 1, .has_dvb = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VGEAR_MYVCD] = { /* Steven <photon38@pchome.com.tw> */ .name = "V-Gear MyVCD", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x3f, .muxsel = MUXSEL(2, 3, 1, 0), .gpiomux = {0x31, 0x31, 0x31, 0x31 }, .gpiomute = 0x31, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_NTSC_M, .tuner_addr = ADDR_UNSET, .has_radio = 0, }, [BTTV_BOARD_SUPER_TV] = { /* Rick C <cryptdragoon@gmail.com> */ .name = "Super TV Tuner", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1, 0), .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, .gpiomask = 0x008007, .gpiomux = { 0, 0x000001,0,0 }, .has_radio = 1, }, [BTTV_BOARD_TIBET_CS16] = { /* Chris Fanning <video4linux@haydon.net> */ .name = "Tibet Systems 'Progress DVR' CS16", .video_inputs = 16, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), .pll = PLL_28, .no_msp34xx = 1, .no_tda7432 = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .muxsel_hook = tibetCS16_muxsel, }, [BTTV_BOARD_KODICOM_4400R] = { /* Bill Brack <wbrack@mmm.com.hk> */ /* * Note that, because of the card's wiring, the "master" * BT878A chip (i.e. 
the one which controls the analog switch * and must use this card type) is the 2nd one detected. The * other 3 chips should use card type 0x85, whose description * follows this one. There is a EEPROM on the card (which is * connected to the I2C of one of those other chips), but is * not currently handled. There is also a facility for a * "monitor", which is also not currently implemented. */ .name = "Kodicom 4400R (master)", .video_inputs = 16, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, /* GPIO bits 0-9 used for analog switch: * 00 - 03: camera selector * 04 - 06: channel (controller) selector * 07: data (1->on, 0->off) * 08: strobe * 09: reset * bit 16 is input from sync separator for the channel */ .gpiomask = 0x0003ff, .no_gpioirq = 1, .muxsel = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), .pll = PLL_28, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel_hook = kodicom4400r_muxsel, }, [BTTV_BOARD_KODICOM_4400R_SL] = { /* Bill Brack <wbrack@mmm.com.hk> */ /* Note that, for reasons unknown, the "master" BT878A chip (i.e. the * one which controls the analog switch, and must use the card type) * is the 2nd one detected. 
The other 3 chips should use this card * type */ .name = "Kodicom 4400R (slave)", .video_inputs = 16, /* .audio_inputs= 0, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0x010000, .no_gpioirq = 1, .muxsel = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), .pll = PLL_28, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel_hook = kodicom4400r_muxsel, }, /* ---- card 0x86---------------------------------- */ [BTTV_BOARD_ADLINK_RTV24] = { /* Michael Henson <mhenson@clarityvi.com> */ /* Adlink RTV24 with special unlock codes */ .name = "Adlink RTV24", .video_inputs = 4, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1, 0), .tuner_type = UNSET, .tuner_addr = ADDR_UNSET, .pll = PLL_28, }, /* ---- card 0x87---------------------------------- */ [BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = { /* Michael Krufky <mkrufky@m1k.net> */ .name = "DViCO FusionHDTV 5 Lite", .tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */ .tuner_addr = ADDR_UNSET, .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1), .gpiomask = 0x00e00007, .gpiomux = { 0x00400005, 0, 0x00000001, 0 }, .gpiomute = 0x00c00007, .no_msp34xx = 1, .no_tda7432 = 1, .has_dvb = 1, }, /* ---- card 0x88---------------------------------- */ [BTTV_BOARD_ACORP_Y878F] = { /* Mauro Carvalho Chehab <mchehab@infradead.org> */ .name = "Acorp Y878F", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x01fe00, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x001e00, 0, 0x018000, 0x014000 }, .gpiomute = 0x002000, .pll = PLL_28, .tuner_type = TUNER_YMEC_TVF66T5_B_DFF, .tuner_addr = 0xc1 >>1, .has_radio = 1, }, /* ---- card 0x89 ---------------------------------- */ [BTTV_BOARD_CONCEPTRONIC_CTVFMI2] = { .name = "Conceptronic CTVFMi v2", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x001c0007, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 2 }, .gpiomute = 3, .pll = PLL_28, .tuner_type = TUNER_TENA_9533_DI, .tuner_addr = ADDR_UNSET, 
.has_remote = 1, .has_radio = 1, }, /* ---- card 0x8a ---------------------------------- */ [BTTV_BOARD_PV_BT878P_2E] = { .name = "Prolink Pixelview PV-BT878P+ (Rev.2E)", .video_inputs = 5, /* .audio_inputs= 1, */ .svhs = 3, .has_dig_in = 1, .gpiomask = 0x01fe00, .muxsel = MUXSEL(2, 3, 1, 1, 0), /* in 4 is digital */ /* .digital_mode= DIGITAL_MODE_CAMERA, */ .gpiomux = { 0x00400, 0x10400, 0x04400, 0x80000 }, .gpiomute = 0x12400, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_LG_PAL_FM, .tuner_addr = ADDR_UNSET, .has_remote = 1, }, /* ---- card 0x8b ---------------------------------- */ [BTTV_BOARD_PV_M4900] = { /* Sérgio Fortier <sergiofortier@yahoo.com.br> */ .name = "Prolink PixelView PlayTV MPEG2 PV-M4900", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x3f, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x21, 0x20, 0x24, 0x2c }, .gpiomute = 0x29, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_YMEC_TVF_5533MF, .tuner_addr = ADDR_UNSET, .has_radio = 1, .has_remote = 1, }, /* ---- card 0x8c ---------------------------------- */ /* Has four Bt878 chips behind a PCI bridge, each chip has: one external BNC composite input (mux 2) three internal composite inputs (unknown muxes) an 18-bit stereo A/D (CS5331A), which has: one external stereo unblanced (RCA) audio connection one (or 3?) internal stereo balanced (XLR) audio connection input is selected via gpio to a 14052B mux (mask=0x300, unbal=0x000, bal=0x100, ??=0x200,0x300) gain is controlled via an X9221A chip on the I2C bus @0x28 sample rate is controlled via gpio to an MK1413S (mask=0x3, 32kHz=0x0, 44.1kHz=0x1, 48kHz=0x2, ??=0x3) There is neither a tuner nor an svideo input. 
*/ [BTTV_BOARD_OSPREY440] = { .name = "Osprey 440", .video_inputs = 4, /* .audio_inputs= 2, */ .svhs = NO_SVHS, .muxsel = MUXSEL(2, 3, 0, 1), /* 3,0,1 are guesses */ .gpiomask = 0x303, .gpiomute = 0x000, /* int + 32kHz */ .gpiomux = { 0, 0, 0x000, 0x100}, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .no_msp34xx = 1, .no_tda7432 = 1, }, /* ---- card 0x8d ---------------------------------- */ [BTTV_BOARD_ASOUND_SKYEYE] = { .name = "Asound Skyeye PCTV", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 15, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 2, 0, 0, 0 }, .gpiomute = 1, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_NTSC, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x8e ---------------------------------- */ [BTTV_BOARD_SABRENT_TVFM] = { .name = "Sabrent TV-FM (bttv version)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x108007, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 100000, 100002, 100002, 100000 }, .no_msp34xx = 1, .no_tda7432 = 1, .pll = PLL_28, .tuner_type = TUNER_TNF_5335MF, .tuner_addr = ADDR_UNSET, .has_radio = 1, }, /* ---- card 0x8f ---------------------------------- */ [BTTV_BOARD_HAUPPAUGE_IMPACTVCB] = { .name = "Hauppauge ImpactVCB (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x0f, /* old: 7 */ .muxsel = MUXSEL(0, 1, 3, 2), /* Composite 0-3 */ .no_msp34xx = 1, .no_tda7432 = 1, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_MACHTV_MAGICTV] = { /* Julian Calaby <julian.calaby@gmail.com> * Slightly different from original MachTV definition (0x60) * FIXME: RegSpy says gpiomask should be "0x001c800f", but it * stuffs up remote chip. 
Bug is a pin on the jaecs is not set * properly (methinks) causing no keyup bits being set */ .name = "MagicTV", /* rebranded MachTV */ .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 7, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 3 }, .gpiomute = 4, .tuner_type = TUNER_TEMIC_4009FR5_PAL, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, .has_remote = 1, }, [BTTV_BOARD_SSAI_SECURITY] = { .name = "SSAI Security Video Interface", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .muxsel = MUXSEL(0, 1, 2, 3), .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_SSAI_ULTRASOUND] = { .name = "SSAI Ultrasound Video Interface", .video_inputs = 2, /* .audio_inputs= 0, */ .svhs = 1, .muxsel = MUXSEL(2, 0, 1, 3), .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, /* ---- card 0x94---------------------------------- */ [BTTV_BOARD_DVICO_FUSIONHDTV_2] = { .name = "DViCO FusionHDTV 2", .tuner_type = TUNER_PHILIPS_FCV1236D, .tuner_addr = ADDR_UNSET, .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .muxsel = MUXSEL(2, 3, 1), .gpiomask = 0x00e00007, .gpiomux = { 0x00400005, 0, 0x00000001, 0 }, .gpiomute = 0x00c00007, .no_msp34xx = 1, .no_tda7432 = 1, }, /* ---- card 0x95---------------------------------- */ [BTTV_BOARD_TYPHOON_TVTUNERPCI] = { .name = "Typhoon TV-Tuner PCI (50684)", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x3014f, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0x20001,0x10001, 0, 0 }, .gpiomute = 10, .pll = PLL_28, .tuner_type = TUNER_PHILIPS_PAL_I, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_GEOVISION_GV600] = { /* emhn@usb.ve */ .name = "Geovision GV-600", .video_inputs = 16, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x0, .muxsel = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), .muxsel_hook = geovision_muxsel, .gpiomux = { 0 }, .no_msp34xx = 1, .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_KOZUMI_KTV_01C] = { /* Mauro 
Lacy <mauro@lacy.com.ar> * Based on MagicTV and Conceptronic CONTVFMi */ .name = "Kozumi KTV-01C", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, .gpiomask = 0x008007, .muxsel = MUXSEL(2, 3, 1, 1), .gpiomux = { 0, 1, 2, 2 }, /* CONTVFMi */ .gpiomute = 3, /* CONTVFMi */ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* TCL MK3 */ .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, .has_remote = 1, }, [BTTV_BOARD_ENLTV_FM_2] = { /* Encore TV Tuner Pro ENL TV-FM-2 Mauro Carvalho Chehab <mchehab@infradead.org */ .name = "Encore ENL TV-FM-2", .video_inputs = 3, /* .audio_inputs= 1, */ .svhs = 2, /* bit 6 -> IR disabled bit 18/17 = 00 -> mute 01 -> enable external audio input 10 -> internal audio input (mono?) 11 -> internal audio input */ .gpiomask = 0x060040, .muxsel = MUXSEL(2, 3, 3), .gpiomux = { 0x60000, 0x60000, 0x20000, 0x20000 }, .gpiomute = 0, .tuner_type = TUNER_TCL_MF02GIP_5N, .tuner_addr = ADDR_UNSET, .pll = PLL_28, .has_radio = 1, .has_remote = 1, }, [BTTV_BOARD_VD012] = { /* D.Heer@Phytec.de */ .name = "PHYTEC VD-012 (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = NO_SVHS, .gpiomask = 0x00, .muxsel = MUXSEL(0, 2, 3, 1), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VD012_X1] = { /* D.Heer@Phytec.de */ .name = "PHYTEC VD-012-X1 (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 3, .gpiomask = 0x00, .muxsel = MUXSEL(2, 3, 1), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_VD012_X2] = { /* D.Heer@Phytec.de */ .name = "PHYTEC VD-012-X2 (bt878)", .video_inputs = 4, /* .audio_inputs= 0, */ .svhs = 3, .gpiomask = 0x00, .muxsel = MUXSEL(3, 2, 1), .gpiomux = { 0, 0, 0, 0 }, /* card has no audio */ .pll = PLL_28, .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, }, [BTTV_BOARD_GEOVISION_GV800S] = { /* Bruno Christo <bchristo@inf.ufsm.br> * * GeoVision 
GV-800(S) has 4 Conexant Fusion 878A: * 1 audio input per BT878A = 4 audio inputs * 4 video inputs per BT878A = 16 video inputs * This is the first BT878A chip of the GV-800(S). It's the * "master" chip and it controls the video inputs through an * analog multiplexer (a CD22M3494) via some GPIO pins. The * slaves should use card type 0x9e (following this one). * There is a EEPROM on the card which is currently not handled. * The audio input is not working yet. */ .name = "Geovision GV-800(S) (master)", .video_inputs = 4, /* .audio_inputs= 1, */ .tuner_type = TUNER_ABSENT, .tuner_addr = ADDR_UNSET, .svhs = NO_SVHS, .gpiomask = 0xf107f, .no_gpioirq = 1, .muxsel = MUXSEL(2, 2, 2, 2), .pll = PLL_28, .no_msp34xx = 1, .no_tda7432 = 1, .muxsel_hook = gv800s_muxsel, }, [BTTV_BOARD_GEOVISION_GV800S_SL] = { /* Bruno Christo <bchristo@inf.ufsm.br> * * GeoVision GV-800(S) has 4 Conexant Fusion 878A: * 1 audio input per BT878A = 4 audio inputs * 4 video inputs per BT878A = 16 video inputs * The 3 other BT878A chips are "slave" chips of the GV-800(S) * and should use this card type. * The audio input is not working yet. 
*/
        .name           = "Geovision GV-800(S) (slave)",
        .video_inputs   = 4,
        /* .audio_inputs= 1, */
        .tuner_type     = TUNER_ABSENT,
        .tuner_addr     = ADDR_UNSET,
        .svhs           = NO_SVHS,
        .gpiomask       = 0x00,
        .no_gpioirq     = 1,
        .muxsel         = MUXSEL(2, 2, 2, 2),
        .pll            = PLL_28,
        .no_msp34xx     = 1,
        .no_tda7432     = 1,
        .muxsel_hook    = gv800s_muxsel,
},
[BTTV_BOARD_PV183] = {
        .name           = "ProVideo PV183", /* 0x9f */
        .video_inputs   = 2,
        /* .audio_inputs= 0, */
        .svhs           = NO_SVHS,
        .gpiomask       = 0,
        .muxsel         = MUXSEL(2, 3),
        .gpiomux        = { 0 },
        .no_msp34xx     = 1,
        .pll            = PLL_28,
        .tuner_type     = TUNER_ABSENT,
        .tuner_addr     = ADDR_UNSET,
},
[BTTV_BOARD_TVT_TD3116] = {
        .name           = "Tongwei Video Technology TD-3116",
        .video_inputs   = 16,
        .gpiomask       = 0xc00ff,
        .muxsel         = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
        .muxsel_hook    = td3116_muxsel,
        .svhs           = NO_SVHS,
        .pll            = PLL_28,
        .tuner_type     = TUNER_ABSENT,
},
[BTTV_BOARD_APOSONIC_WDVR] = {
        .name           = "Aposonic W-DVR",
        .video_inputs   = 4,
        .svhs           = NO_SVHS,
        .muxsel         = MUXSEL(2, 3, 1, 0),
        .tuner_type     = TUNER_ABSENT,
},
};

static const unsigned int bttv_num_tvcards = ARRAY_SIZE(bttv_tvcards);

/* ----------------------------------------------------------------------- */

/* Copy of the board EEPROM contents; filled in elsewhere in this file. */
static unsigned char eeprom_data[256];

/*
 * identify card
 *
 * Determine the board type from the PCI subsystem ID (looked up in the
 * cards[] table declared elsewhere in this file), then apply insmod
 * overrides: card= replaces the detected type, and audiomux=/audioall=/
 * gpiomask= patch the gpiomux/gpiomask fields of the selected
 * bttv_tvcards[] entry in place.
 */
void bttv_idcard(struct bttv *btv)
{
        unsigned int gpiobits;
        int i, type;

        /* read PCI subsystem ID */
        btv->cardid = btv->c.pci->subsystem_device << 16;
        btv->cardid |= btv->c.pci->subsystem_vendor;

        /* 0 and 0xffffffff mean "no usable subsystem ID programmed" */
        if (0 != btv->cardid && 0xffffffff != btv->cardid) {
                /* look for the card */
                for (type = -1, i = 0; cards[i].id != 0; i++)
                        if (cards[i].id == btv->cardid)
                                type = i;
                if (type != -1) {
                        /* found it */
                        pr_info("%d: detected: %s [card=%d], PCI subsystem ID is %04x:%04x\n",
                                btv->c.nr, cards[type].name, cards[type].cardnr,
                                btv->cardid & 0xffff,
                                (btv->cardid >> 16) & 0xffff);
                        btv->c.type = cards[type].cardnr;
                } else {
                        /* 404: subsystem ID present but not in cards[] */
                        pr_info("%d: subsystem: %04x:%04x (UNKNOWN)\n",
                                btv->c.nr, btv->cardid & 0xffff,
                                (btv->cardid >> 16) & 0xffff);
                        pr_debug("please mail id, board name and the correct card= insmod option to linux-media@vger.kernel.org\n");
                }
        }

        /* let the user override the autodetected type */
        if (card[btv->c.nr] < bttv_num_tvcards)
                btv->c.type = card[btv->c.nr];

        /* print which card config we are using */
        pr_info("%d: using: %s [card=%d,%s]\n",
                btv->c.nr, bttv_tvcards[btv->c.type].name, btv->c.type,
                card[btv->c.nr] < bttv_num_tvcards ? "insmod option" : "autodetected");

        /* overwrite gpio stuff ?? */
        if (UNSET == audioall && UNSET == audiomux[0])
                return;

        if (UNSET != audiomux[0]) {
                /* per-input mux values given; OR them into the mask too.
                 * NOTE(review): assumes audiomux[] has at least
                 * ARRAY_SIZE(bttv_tvcards->gpiomux) elements — its
                 * declaration is not visible here; confirm at definition. */
                gpiobits = 0;
                for (i = 0; i < ARRAY_SIZE(bttv_tvcards->gpiomux); i++) {
                        bttv_tvcards[btv->c.type].gpiomux[i] = audiomux[i];
                        gpiobits |= audiomux[i];
                }
        } else {
                /* single audioall value applied to every input */
                gpiobits = audioall;
                for (i = 0; i < ARRAY_SIZE(bttv_tvcards->gpiomux); i++) {
                        bttv_tvcards[btv->c.type].gpiomux[i] = audioall;
                }
        }
        /* explicit gpiomask= wins; otherwise use the OR of the mux values */
        bttv_tvcards[btv->c.type].gpiomask = (UNSET != gpiomask) ? gpiomask : gpiobits;
        pr_info("%d: gpio config override: mask=0x%x, mux=",
                btv->c.nr, bttv_tvcards[btv->c.type].gpiomask);
        for (i = 0; i < ARRAY_SIZE(bttv_tvcards->gpiomux); i++) {
                pr_cont("%s0x%x", i ? "," : "", bttv_tvcards[btv->c.type].gpiomux[i]);
        }
        pr_cont("\n");
}

/*
 * (most) board specific initialisations go here
 */

/* Some Modular Technology cards have an eeprom, but no subsystem ID */
static void identify_by_eeprom(struct bttv *btv, unsigned char eeprom_data[256])
{
        int type = -1;

        /* match known EEPROM signatures; leave btv->c.type alone on no match */
        if (0 == strncmp(eeprom_data,"GET MM20xPCTV",13))
                type = BTTV_BOARD_MODTEC_205;
        else if (0 == strncmp(eeprom_data+20,"Picolo",7))
                type = BTTV_BOARD_EURESYS_PICOLO;
        else if (eeprom_data[0] == 0x84 && eeprom_data[2]== 0)
                type = BTTV_BOARD_HAUPPAUGE; /* old bt848 */

        if (-1 != type) {
                btv->c.type = type;
                pr_info("%d: detected by eeprom: %s [card=%d]\n",
                        btv->c.nr, bttv_tvcards[btv->c.type].name, btv->c.type);
        }
}

/*
 * Read the FlyVideo board strapping from the GPIO pins and derive the
 * board variant, tuner type, and radio/remote capabilities.  Updates
 * btv->tuner_type, btv->has_radio and btv->audio_mode_gpio.
 */
static void flyvideo_gpio(struct bttv *btv)
{
        int gpio, has_remote, has_radio, is_capture_only;
        int is_lr90, has_tda9820_tda9821;
        int tuner_type = UNSET, ttype;

        gpio_inout(0xffffff, 0);
        udelay(8);  /* without this we would see the 0x1800 mask */
        gpio = gpio_read();
        /* FIXME: must restore OUR_EN ??? */

        /* all cards provide GPIO info, some have an additional eeprom
         * LR50: GPIO coding can be found lower right CP1 .. CP9
         *       CP9=GPIO23 .. CP1=GPIO15; when OPEN, the corresponding GPIO reads 1.
         *       GPIO14-12: n.c.
         * LR90: GP9=GPIO23 .. GP1=GPIO15 (right above the bt878)
         * lowest 3 bytes are remote control codes (no handshake needed)
         * xxxFFF: No remote control chip soldered
         * xxxF00(LR26/LR50), xxxFE0(LR90): Remote control chip (LVA001 or CF45) soldered
         * Note: Some bits are Audio_Mask !
         */
        ttype = (gpio & 0x0f0000) >> 16;
        switch (ttype) {
        case 0x0:
                tuner_type = 2;  /* NTSC, e.g. TPI8NSR11P */
                break;
        case 0x2:
                tuner_type = 39; /* LG NTSC (newer TAPC series) TAPC-H701P */
                break;
        case 0x4:
                tuner_type = 5;  /* Philips PAL TPI8PSB02P, TPI8PSB12P, TPI8PSB12D or FI1216, FM1216 */
                break;
        case 0x6:
                tuner_type = 37; /* LG PAL (newer TAPC series) TAPC-G702P */
                break;
        case 0xC:
                tuner_type = 3;  /* Philips SECAM(+PAL) FQ1216ME or FI1216MF */
                break;
        default:
                pr_info("%d: FlyVideo_gpio: unknown tuner type\n", btv->c.nr);
                break;
        }

        has_remote          = gpio & 0x800000;
        has_radio           = gpio & 0x400000;
        /* unknown                   0x200000;
         * unknown2                  0x100000; */
        is_capture_only     = !(gpio & 0x008000); /* GPIO15 */
        has_tda9820_tda9821 = !(gpio & 0x004000);
        is_lr90             = !(gpio & 0x002000); /* else LR26/LR50 (LR38/LR51 f. capture only) */
        /*
         * gpio & 0x001000    output bit for audio routing
         */
        if (is_capture_only)
                tuner_type = TUNER_ABSENT; /* No tuner present */

        pr_info("%d: FlyVideo Radio=%s RemoteControl=%s Tuner=%d gpio=0x%06x\n",
                btv->c.nr, has_radio ? "yes" : "no",
                has_remote ? "yes" : "no", tuner_type, gpio);
        pr_info("%d: FlyVideo LR90=%s tda9821/tda9820=%s capture_only=%s\n",
                btv->c.nr, is_lr90 ? "yes" : "no",
                has_tda9820_tda9821 ? "yes" : "no",
                is_capture_only ? "yes" : "no");

        /* only set if known tuner autodetected, else let insmod option through */
        if (tuner_type != UNSET)
                btv->tuner_type = tuner_type;
        btv->has_radio = has_radio;

        /* LR90 Audio Routing is done by 2 hef4052, so Audio_Mask has 4 bits: 0x001c80
         * LR26/LR50 only has 1 hef4052, Audio_Mask 0x000c00
         * Audio options: from tuner, from tda9821/tda9821(mono,stereo,sap), from tda9874, ext., mute */
        if (has_tda9820_tda9821)
                btv->audio_mode_gpio = lt9415_audio;
        /* todo: if(has_tda9874) btv->audio_mode_gpio = fv2000s_audio; */
}

/* Indexed by the Miro/Pinnacle board id read from GPIO in
 * miro_pinnacle_gpio() below: tuner type per board id. */
static int miro_tunermap[] = { 0,6,2,3, 4,5,6,0, 3,0,4,5, 5,2,16,1,
                               14,2,17,1, 4,1,4,3, 1,2,16,1, 4,4,4,4 };
/* Same index: nonzero when the board uses an FM tuner chip rather than
 * the "matchbox" radio interface. */
static int miro_fmtuner[]  = { 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,1,
                               1,1,1,1, 1,1,1,0, 0,0,0,0, 0,1,0,0 };

/*
 * Identify Miro/Pinnacle boards from the id strapped onto GPIO bits
 * 10-15.  ids below 32 are classic boards looked up in the tables
 * above; higher ids (stored inverted) are newer boards with a
 * Microtune MT2032 tuner.  Presence of an MSP34xx on I2C upgrades the
 * board type to the "pro" variant.
 */
static void miro_pinnacle_gpio(struct bttv *btv)
{
        int id, msp, gpio;
        char *info;

        gpio_inout(0xffffff, 0);
        gpio = gpio_read();
        id = ((gpio>>10) & 63) - 1;
        msp = bttv_I2CRead(btv, I2C_ADDR_MSP3400, "MSP34xx");
        if (id < 32) {
                btv->tuner_type = miro_tunermap[id];
                if (0 == (gpio & 0x20)) {
                        btv->has_radio = 1;
                        if (!miro_fmtuner[id]) {
                                /* radio is wired through the "matchbox"
                                 * bit-bang interface on GPIO 6-9 */
                                btv->has_matchbox = 1;
                                btv->mbox_we    = (1<<6);
                                btv->mbox_most  = (1<<7);
                                btv->mbox_clk   = (1<<8);
                                btv->mbox_data  = (1<<9);
                                btv->mbox_mask  = (1<<6)|(1<<7)|(1<<8)|(1<<9);
                        }
                } else {
                        btv->has_radio = 0;
                }
                if (-1 != msp) {
                        if (btv->c.type == BTTV_BOARD_MIRO)
                                btv->c.type = BTTV_BOARD_MIROPRO;
                        if (btv->c.type == BTTV_BOARD_PINNACLE)
                                btv->c.type = BTTV_BOARD_PINNACLEPRO;
                }
                pr_info("%d: miro: id=%d tuner=%d radio=%s stereo=%s\n",
                        btv->c.nr, id+1, btv->tuner_type,
                        !btv->has_radio ? "no" :
                        (btv->has_matchbox ? "matchbox" : "fmtuner"),
                        (-1 == msp) ? "no" : "yes");
        } else {
                /* new cards with microtune tuner */
                id = 63 - id;
                btv->has_radio = 0;
                switch (id) {
                case 1:
                        info = "PAL / mono";
                        btv->tda9887_conf = TDA9887_INTERCARRIER;
                        break;
                case 2:
                        info = "PAL+SECAM / stereo";
                        btv->has_radio = 1;
                        btv->tda9887_conf = TDA9887_QSS;
                        break;
                case 3:
                        info = "NTSC / stereo";
                        btv->has_radio = 1;
                        btv->tda9887_conf = TDA9887_QSS;
                        break;
                case 4:
                        info = "PAL+SECAM / mono";
                        btv->tda9887_conf = TDA9887_QSS;
                        break;
                case 5:
                        info = "NTSC / mono";
                        btv->tda9887_conf = TDA9887_INTERCARRIER;
                        break;
                case 6:
                        info = "NTSC / stereo";
                        btv->tda9887_conf = TDA9887_INTERCARRIER;
                        break;
                case 7:
                        info = "PAL / stereo";
                        btv->tda9887_conf = TDA9887_INTERCARRIER;
                        break;
                default:
                        info = "oops: unknown card";
                        break;
                }
                if (-1 != msp)
                        btv->c.type = BTTV_BOARD_PINNACLEPRO;
                pr_info("%d: pinnacle/mt: id=%d info=\"%s\" radio=%s\n",
                        btv->c.nr, id, info, btv->has_radio ? "yes" : "no");
                btv->tuner_type = TUNER_MT2032;
        }
}

/* GPIO21   L: buffer active, H: buffer inactive */
#define LM1882_SYNC_DRIVE     0x200000L

/* One-time GPIO setup for the IDS Eagle frame grabber. */
static void init_ids_eagle(struct bttv *btv)
{
        gpio_inout(0xffffff,0xFFFF37);
        gpio_write(0x200020);

        /* flash strobe inverter ?! */
        gpio_write(0x200024);

        /* switch sync drive off */
        gpio_bits(LM1882_SYNC_DRIVE,LM1882_SYNC_DRIVE);

        /* set BT848 muxsel to 2 */
        btaor((2)<<5, ~(2<<5), BT848_IFORM);
}

/* Muxsel helper for the IDS Eagle.
 * the eagles does not use the standard muxsel-bits but
 * has its own multiplexer */
static void eagle_muxsel(struct bttv *btv, unsigned int input)
{
	gpio_bits(3, input & 3);

	/* composite */
	/* set chroma ADC to sleep */
	btor(BT848_ADC_C_SLEEP, BT848_ADC);
	/* set to composite video */
	btand(~BT848_CONTROL_COMP, BT848_E_CONTROL);
	btand(~BT848_CONTROL_COMP, BT848_O_CONTROL);

	/* switch sync drive off */
	gpio_bits(LM1882_SYNC_DRIVE,LM1882_SYNC_DRIVE);
}

/* GVC-1100: select one of four inputs via a per-input GPIO mask. */
static void gvc1100_muxsel(struct bttv *btv, unsigned int input)
{
	static const int masks[] = {0x30, 0x01, 0x12, 0x23};
	gpio_write(masks[input%4]);
}

/* LMLBT4x initialization - to allow access to GPIO bits for sensors input and
   alarms output

   GPIObit    | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
   assignment | TI | O3|INx| O2| O1|IN4|IN3|IN2|IN1|   |   |

   IN - sensor inputs, INx - sensor inputs and TI XORed together
   O1,O2,O3 - alarm outputs (relays)

   OUT ENABLE   1   1   0 . 1   1   0   0 . 0   0   0    0   = 0x6C0 */

static void init_lmlbt4x(struct bttv *btv)
{
	pr_debug("LMLBT4x init\n");
	btwrite(0x000000, BT848_GPIO_REG_INP);
	gpio_inout(0xffffff, 0x0006C0);
	gpio_write(0x000000);
}

/* SigmaSQ: input select on GPIO[0:3], 8 inputs. */
static void sigmaSQ_muxsel(struct bttv *btv, unsigned int input)
{
	unsigned int inmux = input % 8;
	gpio_inout( 0xf, 0xf );
	gpio_bits( 0xf, inmux );
}

/* SigmaSLC: input select on GPIO[9:10], 4 inputs. */
static void sigmaSLC_muxsel(struct bttv *btv, unsigned int input)
{
	unsigned int inmux = input % 4;
	gpio_inout( 3<<9, 3<<9 );
	gpio_bits( 3<<9, inmux<<9 );
}

/* GeoVision: input select on GPIO[0:3], 16 inputs. */
static void geovision_muxsel(struct bttv *btv, unsigned int input)
{
	unsigned int inmux = input % 16;
	gpio_inout(0xf, 0xf);
	gpio_bits(0xf, inmux);
}

/*
 * The TD3116 has 2 74HC4051 muxes wired to the MUX0 input of a bt878.
 * The first 74HC4051 has the lower 8 inputs, the second one the higher 8.
 * The muxes are controlled via a 74HC373 latch which is connected to
 * GPIOs 0-7. GPIO 18 is connected to the LE signal of the latch.
 * Q0 of the latch is connected to the Enable (~E) input of the first
 * 74HC4051. Q1 - Q3 are connected to S0 - S2 of the same 74HC4051.
 * Q4 - Q7 are connected to the second 74HC4051 in the same way.
 */

/* Clock @value into the 74HC373 latch by pulsing LE (GPIO 18). */
static void td3116_latch_value(struct bttv *btv, u32 value)
{
	gpio_bits((1<<18) | 0xff, value);
	gpio_bits((1<<18) | 0xff, (1<<18) | value);
	udelay(1);
	gpio_bits((1<<18) | 0xff, value);
}

/* Select one of the 16 TD3116 inputs (see the latch wiring above). */
static void td3116_muxsel(struct bttv *btv, unsigned int input)
{
	u32 value;
	u32 highbit;

	/* highbit selects between the two 74HC4051 muxes */
	highbit = (input & 0x8) >> 3 ;

	/* Disable outputs and set value in the mux */
	value = 0x11; /* Disable outputs */
	value |= ((input & 0x7) << 1) << (4 * highbit);
	td3116_latch_value(btv, value);

	/* Enable the correct output */
	value &= ~0x11;
	value |= ((highbit ^ 0x1) << 4) | highbit;
	td3116_latch_value(btv, value);
}

/* ----------------------------------------------------------------------- */

static void bttv_reset_audio(struct bttv *btv)
{
	/*
	 * BT878A has a audio-reset register.
	 * 1. This register is an audio reset function but it is in
	 *    function-0 (video capture) address space.
	 * 2. It is enough to do this once per power-up of the card.
	 * 3. There is a typo in the Conexant doc -- it is not at
	 *    0x5B, but at 0x058. (B is an odd-number, obviously a typo!).
	 * --//Shrikumar 030609
	 */
	if (btv->id != 878)
		return;

	if (bttv_debug)
		pr_debug("%d: BT878A ARESET\n", btv->c.nr);
	btwrite((1<<7), 0x058);
	udelay(10);
	btwrite( 0, 0x058);
}

/* initialization part one -- before registering i2c bus */
void bttv_init_card1(struct bttv *btv)
{
	switch (btv->c.type) {
	case BTTV_BOARD_HAUPPAUGE:
	case BTTV_BOARD_HAUPPAUGE878:
		boot_msp34xx(btv,5);
		break;
	case BTTV_BOARD_VOODOOTV_200:
	case BTTV_BOARD_VOODOOTV_FM:
		boot_msp34xx(btv,20);
		break;
	case BTTV_BOARD_AVERMEDIA98:
		boot_msp34xx(btv,11);
		break;
	case BTTV_BOARD_HAUPPAUGEPVR:
		pvr_boot(btv);
		break;
	case BTTV_BOARD_TWINHAN_DST:
	case BTTV_BOARD_AVDVBT_771:
	case BTTV_BOARD_PINNACLESAT:
		btv->use_i2c_hw = 1;
		break;
	case BTTV_BOARD_ADLINK_RTV24:
		init_RTV24( btv );
		break;
	}
	if (!bttv_tvcards[btv->c.type].has_dvb)
		bttv_reset_audio(btv);
}

/* initialization part two -- after registering i2c bus */
void bttv_init_card2(struct bttv *btv)
{
	btv->tuner_type = UNSET;

	if (BTTV_BOARD_UNKNOWN == btv->c.type) {
		bttv_readee(btv,eeprom_data,0xa0);
		identify_by_eeprom(btv,eeprom_data);
	}

	/* board-specific fixups and eeprom parsing */
	switch (btv->c.type) {
	case BTTV_BOARD_MIRO:
	case BTTV_BOARD_MIROPRO:
	case BTTV_BOARD_PINNACLE:
	case BTTV_BOARD_PINNACLEPRO:
		/* miro/pinnacle */
		miro_pinnacle_gpio(btv);
		break;
	case BTTV_BOARD_FLYVIDEO_98:
	case BTTV_BOARD_MAXI:
	case BTTV_BOARD_LIFE_FLYKIT:
	case BTTV_BOARD_FLYVIDEO:
	case BTTV_BOARD_TYPHOON_TVIEW:
	case BTTV_BOARD_CHRONOS_VS2:
	case BTTV_BOARD_FLYVIDEO_98FM:
	case BTTV_BOARD_FLYVIDEO2000:
	case BTTV_BOARD_FLYVIDEO98EZ:
	case BTTV_BOARD_CONFERENCETV:
	case BTTV_BOARD_LIFETEC_9415:
		flyvideo_gpio(btv);
		break;
	case BTTV_BOARD_HAUPPAUGE:
	case BTTV_BOARD_HAUPPAUGE878:
	case BTTV_BOARD_HAUPPAUGEPVR:
		/* pick up some config infos from the eeprom */
		bttv_readee(btv,eeprom_data,0xa0);
		hauppauge_eeprom(btv);
		break;
	case BTTV_BOARD_AVERMEDIA98:
	case BTTV_BOARD_AVPHONE98:
		bttv_readee(btv,eeprom_data,0xa0);
		avermedia_eeprom(btv);
		break;
	case BTTV_BOARD_PXC200:
		init_PXC200(btv);
		break;
	case BTTV_BOARD_PICOLO_TETRA_CHIP:
		picolo_tetra_init(btv);
		break;
	case BTTV_BOARD_VHX:
		btv->has_radio    = 1;
		btv->has_matchbox = 1;
		btv->mbox_we      = 0x20;
		btv->mbox_most    = 0;
		btv->mbox_clk     = 0x08;
		btv->mbox_data    = 0x10;
		btv->mbox_mask    = 0x38;
		break;
	case BTTV_BOARD_VOBIS_BOOSTAR:
	case BTTV_BOARD_TERRATV:
		terratec_active_radio_upgrade(btv);
		break;
	case BTTV_BOARD_MAGICTVIEW061:
		if (btv->cardid == 0x3002144f) {
			btv->has_radio=1;
			pr_info("%d: radio detected by subsystem id (CPH05x)\n",
				btv->c.nr);
		}
		break;
	case BTTV_BOARD_STB2:
		if (btv->cardid == 0x3060121a) {
			/* Fix up entry for 3DFX VoodooTV 100,
			   which is an OEM STB card variant. */
			btv->has_radio=0;
			btv->tuner_type=TUNER_TEMIC_NTSC;
		}
		break;
	case BTTV_BOARD_OSPREY1x0:
	case BTTV_BOARD_OSPREY1x0_848:
	case BTTV_BOARD_OSPREY101_848:
	case BTTV_BOARD_OSPREY1x1:
	case BTTV_BOARD_OSPREY1x1_SVID:
	case BTTV_BOARD_OSPREY2xx:
	case BTTV_BOARD_OSPREY2x0_SVID:
	case BTTV_BOARD_OSPREY2x0:
	case BTTV_BOARD_OSPREY440:
	case BTTV_BOARD_OSPREY500:
	case BTTV_BOARD_OSPREY540:
	case BTTV_BOARD_OSPREY2000:
		bttv_readee(btv,eeprom_data,0xa0);
		osprey_eeprom(btv, eeprom_data);
		break;
	case BTTV_BOARD_IDS_EAGLE:
		init_ids_eagle(btv);
		break;
	case BTTV_BOARD_MODTEC_205:
		bttv_readee(btv,eeprom_data,0xa0);
		modtec_eeprom(btv);
		break;
	case BTTV_BOARD_LMLBT4:
		init_lmlbt4x(btv);
		break;
	case BTTV_BOARD_TIBET_CS16:
		tibetCS16_init(btv);
		break;
	case BTTV_BOARD_KODICOM_4400R:
		kodicom4400r_init(btv);
		break;
	case BTTV_BOARD_GEOVISION_GV800S:
		gv800s_init(btv);
		break;
	}

	/* pll configuration */
	if (!(btv->id==848 && btv->revision==0x11)) {
		/* defaults from card list */
		if (PLL_28 == bttv_tvcards[btv->c.type].pll) {
			btv->pll.pll_ifreq=28636363;
			btv->pll.pll_crystal=BT848_IFORM_XT0;
		}
		if (PLL_35 == bttv_tvcards[btv->c.type].pll) {
			btv->pll.pll_ifreq=35468950;
			btv->pll.pll_crystal=BT848_IFORM_XT1;
		}
		/* insmod options can override */
		switch (pll[btv->c.nr]) {
		case 0: /* none */
			btv->pll.pll_crystal = 0;
			btv->pll.pll_ifreq   = 0;
			btv->pll.pll_ofreq   = 0;
			break;
		case 1: /* 28 MHz */
		case 28:
			btv->pll.pll_ifreq   = 28636363;
			btv->pll.pll_ofreq   = 0;
			btv->pll.pll_crystal = BT848_IFORM_XT0;
			break;
		case 2: /* 35 MHz */
		case 35:
			btv->pll.pll_ifreq   = 35468950;
			btv->pll.pll_ofreq   = 0;
			btv->pll.pll_crystal = BT848_IFORM_XT1;
			break;
		}
	}
	btv->pll.pll_current = -1;

	/* tuner configuration (from card list / autodetect / insmod option) */
	if (UNSET != bttv_tvcards[btv->c.type].tuner_type)
		if (UNSET == btv->tuner_type)
			btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type;
	if (UNSET != tuner[btv->c.nr])
		btv->tuner_type = tuner[btv->c.nr];

	if (btv->tuner_type == TUNER_ABSENT)
		pr_info("%d: tuner absent\n", btv->c.nr);
	else if (btv->tuner_type == UNSET)
		pr_warn("%d: tuner type unset\n", btv->c.nr);
	else
		pr_info("%d: tuner type=%d\n", btv->c.nr, btv->tuner_type);

	if (autoload != UNSET) {
		pr_warn("%d: the autoload option is obsolete\n", btv->c.nr);
		pr_warn("%d: use option msp3400, tda7432 or tvaudio to override which audio module should be used\n",
			btv->c.nr);
	}

	if (UNSET == btv->tuner_type)
		btv->tuner_type = TUNER_ABSENT;

	/* digital input is always the last video input when present */
	btv->dig = bttv_tvcards[btv->c.type].has_dig_in ?
		   bttv_tvcards[btv->c.type].video_inputs - 1 : UNSET;
	btv->svhs = bttv_tvcards[btv->c.type].svhs == NO_SVHS ?
		    UNSET : bttv_tvcards[btv->c.type].svhs;
	if (svhs[btv->c.nr] != UNSET)
		btv->svhs = svhs[btv->c.nr];
	if (remote[btv->c.nr] != UNSET)
		btv->has_remote = remote[btv->c.nr];

	if (bttv_tvcards[btv->c.type].has_radio)
		btv->has_radio = 1;
	if (bttv_tvcards[btv->c.type].has_remote)
		btv->has_remote = 1;
	if (!bttv_tvcards[btv->c.type].no_gpioirq)
		btv->gpioirq = 1;
	if (bttv_tvcards[btv->c.type].volume_gpio)
		btv->volume_gpio = bttv_tvcards[btv->c.type].volume_gpio;
	if (bttv_tvcards[btv->c.type].audio_mode_gpio)
		btv->audio_mode_gpio = bttv_tvcards[btv->c.type].audio_mode_gpio;

	if (btv->tuner_type == TUNER_ABSENT)
		return;  /* no tuner or related drivers to load */

	if (btv->has_saa6588 || saa6588[btv->c.nr]) {
		/* Probe for RDS receiver chip */
		static const unsigned short addrs[] = {
			0x20 >> 1,
			0x22 >> 1,
			I2C_CLIENT_END
		};
		struct v4l2_subdev *sd;

		sd = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
			&btv->c.i2c_adap, "saa6588", 0, addrs);
		btv->has_saa6588 = (sd != NULL);
	}

	/* try to detect audio/fader chips */

	/* First check if the user specified the audio chip via a module
	   option. */
	switch (audiodev[btv->c.nr]) {
	case -1:
		return;	/* do not load any audio module */

	case 0: /* autodetect */
		break;

	case 1: {
		/* The user specified that we should probe for msp3400 */
		static const unsigned short addrs[] = {
			I2C_ADDR_MSP3400 >> 1,
			I2C_ADDR_MSP3400_ALT >> 1,
			I2C_CLIENT_END
		};

		btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
			&btv->c.i2c_adap, "msp3400", 0, addrs);
		if (btv->sd_msp34xx)
			return;
		goto no_audio;
	}

	case 2: {
		/* The user specified that we should probe for tda7432 */
		static const unsigned short addrs[] = {
			I2C_ADDR_TDA7432 >> 1,
			I2C_CLIENT_END
		};

		if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
				&btv->c.i2c_adap, "tda7432", 0, addrs))
			return;
		goto no_audio;
	}

	case 3: {
		/* The user specified that we should probe for tvaudio */
		btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
			&btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs());
		if (btv->sd_tvaudio)
			return;
		goto no_audio;
	}

	default:
		pr_warn("%d: unknown audiodev value!\n", btv->c.nr);
		return;
	}

	/* There were no overrides, so now we try to discover this through the
	   card definition */

	/* probe for msp3400 first: this driver can detect whether or not
	   it really is a msp3400, so it will return NULL when the device
	   found is really something else (e.g. a tea6300). */
	if (!bttv_tvcards[btv->c.type].no_msp34xx) {
		btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
			&btv->c.i2c_adap, "msp3400", 0,
			I2C_ADDRS(I2C_ADDR_MSP3400 >> 1));
	} else if (bttv_tvcards[btv->c.type].msp34xx_alt) {
		btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
			&btv->c.i2c_adap, "msp3400", 0,
			I2C_ADDRS(I2C_ADDR_MSP3400_ALT >> 1));
	}

	/* If we found a msp34xx, then we're done. */
	if (btv->sd_msp34xx)
		return;

	/* Now see if we can find one of the tvaudio devices. */
	btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
		&btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs());
	if (btv->sd_tvaudio) {
		/* There may be two tvaudio chips on the card, so try to
		   find another. */
		v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
			&btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs());
	}

	/* it might also be a tda7432. */
	if (!bttv_tvcards[btv->c.type].no_tda7432) {
		static const unsigned short addrs[] = {
			I2C_ADDR_TDA7432 >> 1,
			I2C_CLIENT_END
		};

		btv->sd_tda7432 = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
			&btv->c.i2c_adap, "tda7432", 0, addrs);
		if (btv->sd_tda7432)
			return;
	}
	if (btv->sd_tvaudio)
		return;

no_audio:
	pr_warn("%d: audio absent, no audio device found!\n", btv->c.nr);
}


/* initialize the tuner */
void bttv_init_tuner(struct bttv *btv)
{
	int addr = ADDR_UNSET;

	if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr)
		addr = bttv_tvcards[btv->c.type].tuner_addr;

	if (btv->tuner_type != TUNER_ABSENT) {
		struct tuner_setup tun_setup;

		/* Load tuner module before issuing tuner config call! */
		if (btv->has_radio)
			v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
				&btv->c.i2c_adap, "tuner",
				0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
		v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
				&btv->c.i2c_adap, "tuner",
				0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
		v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
				&btv->c.i2c_adap, "tuner",
				0, v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD));

		tun_setup.mode_mask = T_ANALOG_TV;
		tun_setup.type = btv->tuner_type;
		tun_setup.addr = addr;

		if (btv->has_radio)
			tun_setup.mode_mask |= T_RADIO;

		bttv_call_all(btv, tuner, s_type_addr, &tun_setup);
	}

	if (btv->tda9887_conf) {
		struct v4l2_priv_tun_config tda9887_cfg;

		tda9887_cfg.tuner = TUNER_TDA9887;
		tda9887_cfg.priv = &btv->tda9887_conf;

		bttv_call_all(btv, tuner, s_config, &tda9887_cfg);
	}
}

/* ----------------------------------------------------------------------- */

/* Modtec: identify the tuner from the ASCII string at eeprom offset 0x1e. */
static void modtec_eeprom(struct bttv *btv)
{
	if( strncmp(&(eeprom_data[0x1e]),"Temic 4066 FY5",14) ==0) {
		btv->tuner_type=TUNER_TEMIC_4066FY5_PAL_I;
		pr_info("%d: Modtec: Tuner autodetected by eeprom: %s\n",
			btv->c.nr, &eeprom_data[0x1e]);
	} else if (strncmp(&(eeprom_data[0x1e]),"Alps TSBB5",10) ==0) {
		btv->tuner_type=TUNER_ALPS_TSBB5_PAL_I;
		pr_info("%d: Modtec: Tuner autodetected by eeprom: %s\n",
			btv->c.nr, &eeprom_data[0x1e]);
	} else if (strncmp(&(eeprom_data[0x1e]),"Philips FM1246",14) ==0) {
		btv->tuner_type=TUNER_PHILIPS_NTSC;
		pr_info("%d: Modtec: Tuner autodetected by eeprom: %s\n",
			btv->c.nr, &eeprom_data[0x1e]);
	} else {
		pr_info("%d: Modtec: Unknown TunerString: %s\n",
			btv->c.nr, &eeprom_data[0x1e]);
	}
}

/* Parse the Hauppauge analog eeprom and apply tuner/radio/model fixups. */
static void hauppauge_eeprom(struct bttv *btv)
{
	struct tveeprom tv;

	tveeprom_hauppauge_analog(&btv->i2c_client, &tv, eeprom_data);
	btv->tuner_type = tv.tuner_type;
	btv->has_radio  = tv.has_radio;

	pr_info("%d: Hauppauge eeprom indicates model#%d\n",
		btv->c.nr, tv.model);

	/*
	 * Some of the 878 boards have duplicate PCI IDs. Switch the board
	 * type based on model #.
	 */
	if(tv.model == 64900) {
		pr_info("%d: Switching board type from %s to %s\n",
			btv->c.nr,
			bttv_tvcards[btv->c.type].name,
			bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
		btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB;
	}

	/* The 61334 needs the msp3410 to do the radio demod to get sound */
	if (tv.model == 61334)
		btv->radio_uses_msp_demodulator = 1;
}

/*
 * Detect the Terratec "Active Radio Upgrade" by writing a known value to
 * the TEA5757 and reading it back; enable radio/matchbox/RDS on success.
 * Always returns 0.
 */
static int terratec_active_radio_upgrade(struct bttv *btv)
{
	int freq;

	btv->has_radio    = 1;
	btv->has_matchbox = 1;
	btv->mbox_we      = 0x10;
	btv->mbox_most    = 0x20;
	btv->mbox_clk     = 0x08;
	btv->mbox_data    = 0x04;
	btv->mbox_mask    = 0x3c;

	btv->mbox_iow     = 1 << 8;
	btv->mbox_ior     = 1 << 9;
	btv->mbox_csel    = 1 << 10;

	freq=88000/62.5;
	tea5757_write(btv, 5 * freq + 0x358); /* write 0x1ed8 */
	if (0x1ed8 == tea5757_read(btv)) {
		pr_info("%d: Terratec Active Radio Upgrade found\n",
			btv->c.nr);
		btv->has_radio    = 1;
		btv->has_saa6588  = 1;
		btv->has_matchbox = 1;
	} else {
		btv->has_radio    = 0;
		btv->has_matchbox = 0;
	}
	return 0;
}


/* ----------------------------------------------------------------------- */

/*
 * minimal bootstrap for the WinTV/PVR -- upload altera firmware.
 *
 * The hcwamc.rbf firmware file is on the Hauppauge driver CD.  Have
 * a look at Pvr/pvr45xxx.EXE (self-extracting zip archive, can be
 * unpacked with unzip).
 */
#define PVR_GPIO_DELAY    10

#define BTTV_ALT_DATA     0x000001
#define BTTV_ALT_DCLK     0x100000
#define BTTV_ALT_NCONFIG  0x800000

/*
 * Bit-bang @microlen bytes of Altera configuration data into the FPGA
 * over GPIO (nCONFIG pulse, then LSB-first data clocked on DCLK).
 * Always returns 0.
 */
static int pvr_altera_load(struct bttv *btv, const u8 *micro, u32 microlen)
{
	u32 n;
	u8 bits;
	int i;

	gpio_inout(0xffffff,BTTV_ALT_DATA|BTTV_ALT_DCLK|BTTV_ALT_NCONFIG);
	gpio_write(0);
	udelay(PVR_GPIO_DELAY);

	gpio_write(BTTV_ALT_NCONFIG);
	udelay(PVR_GPIO_DELAY);

	for (n = 0; n < microlen; n++) {
		bits = micro[n];
		for (i = 0 ; i < 8 ; i++) {
			gpio_bits(BTTV_ALT_DCLK,0);
			if (bits & 0x01)
				gpio_bits(BTTV_ALT_DATA,BTTV_ALT_DATA);
			else
				gpio_bits(BTTV_ALT_DATA,0);
			gpio_bits(BTTV_ALT_DCLK,BTTV_ALT_DCLK);
			bits >>= 1;
		}
	}
	gpio_bits(BTTV_ALT_DCLK,0);
	udelay(PVR_GPIO_DELAY);

	/* begin Altera init loop (Not necessary,but doesn't hurt) */
	for (i = 0 ; i < 30 ; i++) {
		gpio_bits(BTTV_ALT_DCLK,0);
		gpio_bits(BTTV_ALT_DCLK,BTTV_ALT_DCLK);
	}
	gpio_bits(BTTV_ALT_DCLK,0);
	return 0;
}

/* Request the hcwamc.rbf firmware and upload it to the PVR's Altera. */
static int pvr_boot(struct bttv *btv)
{
	const struct firmware *fw_entry;
	int rc;

	rc = request_firmware(&fw_entry, "hcwamc.rbf", &btv->c.pci->dev);
	if (rc != 0) {
		pr_warn("%d: no altera firmware [via hotplug]\n", btv->c.nr);
		return rc;
	}
	rc = pvr_altera_load(btv, fw_entry->data, fw_entry->size);
	pr_info("%d: altera firmware upload %s\n",
		btv->c.nr, (rc < 0) ? "failed" : "ok");
	release_firmware(fw_entry);
	return rc;
}

/* ----------------------------------------------------------------------- */
/* some osprey specific stuff                                              */

/*
 * Parse the Osprey eeprom: either an antique "MMAC" format (848 boards)
 * or checksummed descriptors at offsets 0x40..0x7f whose type word maps
 * to a board id.  May override btv->c.type unless the user forced a card.
 */
static void osprey_eeprom(struct bttv *btv, const u8 ee[256])
{
	int i;
	u32 serial = 0;
	int cardid = -1;

	/* This code will never actually get called in this case.... */
	if (btv->c.type == BTTV_BOARD_UNKNOWN) {
		/* this might be an antique...
		   check for MMAC label in eeprom */
		if (!strncmp(ee, "MMAC", 4)) {
			u8 checksum = 0;
			for (i = 0; i < 21; i++)
				checksum += ee[i];
			if (checksum != ee[21])
				return;
			cardid = BTTV_BOARD_OSPREY1x0_848;
			/* serial number is stored as ASCII digits */
			for (i = 12; i < 21; i++)
				serial *= 10, serial += ee[i] - '0';
		}
	} else {
		unsigned short type;

		/* scan the four 16-byte descriptor slots for a valid
		 * internet checksum */
		for (i = 4*16; i < 8*16; i += 16) {
			u16 checksum = ip_compute_csum(ee + i, 16);

			if ((checksum&0xff) + (checksum>>8) == 0xff)
				break;
		}
		if (i >= 8*16)
			return;
		ee += i;

		/* found a valid descriptor */
		type = get_unaligned_be16((__be16 *)(ee+4));

		switch(type) {
		/* 848 based */
		case 0x0004:
			cardid = BTTV_BOARD_OSPREY1x0_848;
			break;
		case 0x0005:
			cardid = BTTV_BOARD_OSPREY101_848;
			break;

		/* 878 based */
		case 0x0012:
		case 0x0013:
			cardid = BTTV_BOARD_OSPREY1x0;
			break;
		case 0x0014:
		case 0x0015:
			cardid = BTTV_BOARD_OSPREY1x1;
			break;
		case 0x0016:
		case 0x0017:
		case 0x0020:
			cardid = BTTV_BOARD_OSPREY1x1_SVID;
			break;
		case 0x0018:
		case 0x0019:
		case 0x001E:
		case 0x001F:
			cardid = BTTV_BOARD_OSPREY2xx;
			break;
		case 0x001A:
		case 0x001B:
			cardid = BTTV_BOARD_OSPREY2x0_SVID;
			break;
		case 0x0040:
			cardid = BTTV_BOARD_OSPREY500;
			break;
		case 0x0050:
		case 0x0056:
			cardid = BTTV_BOARD_OSPREY540;
			/* bttv_osprey_540_init(btv); */
			break;
		case 0x0060:
		case 0x0070:
		case 0x00A0:
			cardid = BTTV_BOARD_OSPREY2x0;
			/* enable output on select control lines */
			gpio_inout(0xffffff,0x000303);
			break;
		case 0x00D8:
			cardid = BTTV_BOARD_OSPREY440;
			break;
		default:
			/* unknown...leave generic, but get serial # */
			pr_info("%d: osprey eeprom: unknown card type 0x%04x\n",
				btv->c.nr, type);
			break;
		}
		serial = get_unaligned_be32((__be32 *)(ee+6));
	}

	pr_info("%d: osprey eeprom: card=%d '%s' serial=%u\n",
		btv->c.nr, cardid,
		cardid > 0 ? bttv_tvcards[cardid].name : "Unknown", serial);

	if (cardid<0 || btv->c.type == cardid)
		return;

	/* card type isn't set correctly */
	if (card[btv->c.nr] < bttv_num_tvcards) {
		pr_warn("%d: osprey eeprom: Not overriding user specified card type\n",
			btv->c.nr);
	} else {
		pr_info("%d: osprey eeprom: Changing card type from %d to %d\n",
			btv->c.nr, btv->c.type, cardid);
		btv->c.type = cardid;
	}
}

/* ----------------------------------------------------------------------- */
/* AVermedia specific stuff, from  bktr_card.c                             */

static int tuner_0_table[] = {
	TUNER_PHILIPS_NTSC,  TUNER_PHILIPS_PAL /* PAL-BG*/,
	TUNER_PHILIPS_PAL,   TUNER_PHILIPS_PAL /* PAL-I*/,
	TUNER_PHILIPS_PAL,   TUNER_PHILIPS_PAL,
	TUNER_PHILIPS_SECAM, TUNER_PHILIPS_SECAM,
	TUNER_PHILIPS_SECAM, TUNER_PHILIPS_PAL,
	TUNER_PHILIPS_FM1216ME_MK3 };

static int tuner_1_table[] = {
	TUNER_TEMIC_NTSC,  TUNER_TEMIC_PAL,
	TUNER_TEMIC_PAL,   TUNER_TEMIC_PAL,
	TUNER_TEMIC_PAL,   TUNER_TEMIC_PAL,
	TUNER_TEMIC_4012FY5, TUNER_TEMIC_4012FY5, /* TUNER_TEMIC_SECAM */
	TUNER_TEMIC_4012FY5, TUNER_TEMIC_PAL};

/* Decode tuner make/format and remote presence from eeprom bytes
 * 0x41/0x42 (same layout as used by bktr_card.c). */
static void avermedia_eeprom(struct bttv *btv)
{
	int tuner_make, tuner_tv_fm, tuner_format, tuner_type = 0;

	tuner_make   = (eeprom_data[0x41] & 0x7);
	tuner_tv_fm  = (eeprom_data[0x41] & 0x18) >> 3;
	tuner_format = (eeprom_data[0x42] & 0xf0) >> 4;
	btv->has_remote = (eeprom_data[0x42] & 0x01);

	if (tuner_make == 0 || tuner_make == 2)
		if (tuner_format <= 0x0a)
			tuner_type = tuner_0_table[tuner_format];
	if (tuner_make == 1)
		if (tuner_format <= 9)
			tuner_type = tuner_1_table[tuner_format];

	if (tuner_make == 4)
		if (tuner_format == 0x09)
			tuner_type = TUNER_LG_NTSC_NEW_TAPC; /* TAPC-G702P */

	pr_info("%d: Avermedia eeprom[0x%02x%02x]: tuner=",
		btv->c.nr, eeprom_data[0x41], eeprom_data[0x42]);
	if (tuner_type) {
		btv->tuner_type = tuner_type;
		pr_cont("%d", tuner_type);
	} else
		pr_cont("Unknown type");
	pr_cont(" radio:%s remote control:%s\n",
		tuner_tv_fm ? "yes" : "no",
		btv->has_remote ? "yes" : "no");
}

/*
 * For Voodoo TV/FM and Voodoo 200.
 * These cards' tuners use a TDA9880
 * analog demod, which is not I2C controlled like the newer and more common
 * TDA9887 series.  Instead is has two tri-state input pins, S0 and S1,
 * that control the IF for the video and audio.  Apparently, bttv GPIO
 * 0x10000 is connected to S0.  S0 low selects a 38.9 MHz VIF for B/G/D/K/I
 * (i.e., PAL) while high selects 45.75 MHz for M/N (i.e., NTSC).
 */
u32 bttv_tda9880_setnorm(struct bttv *btv, u32 gpiobits)
{
	/* only touch S0 when the tuner is the audio source */
	if (btv->audio_input == TVAUDIO_INPUT_TUNER) {
		if (bttv_tvnorms[btv->tvnorm].v4l2_id & V4L2_STD_MN)
			gpiobits |= 0x10000;
		else
			gpiobits &= ~0x10000;
	}

	gpio_bits(bttv_tvcards[btv->c.type].gpiomask, gpiobits);
	return gpiobits;
}


/*
 * reset/enable the MSP on some Hauppauge cards
 * Thanks to Kyösti Mälkki (kmalkki@cc.hut.fi)!
 *
 * Hauppauge:  pin  5
 * Voodoo:     pin 20
 */
static void boot_msp34xx(struct bttv *btv, int pin)
{
	int mask = (1 << pin);

	/* pulse the reset line connected to @pin low, then high again */
	gpio_inout(mask,mask);
	gpio_bits(mask,0);
	mdelay(2);
	udelay(500);
	gpio_bits(mask,mask);

	if (bttv_gpio)
		bttv_gpio_tracking(btv,"msp34xx");
	if (bttv_verbose)
		pr_info("%d: Hauppauge/Voodoo msp34xx: reset line init [%d]\n",
			btv->c.nr, pin);
}

/* ----------------------------------------------------------------------- */
/*  Imagenation L-Model PXC200 Framegrabber */
/* This is basically the same procedure as
 * used by Alessandro Rubini in his pxc200
 * driver, but using BTTV functions */

static void init_PXC200(struct bttv *btv)
{
	static int vals[] = { 0x08, 0x09, 0x0a, 0x0b, 0x0d, 0x0d, 0x01, 0x02,
			      0x03, 0x04, 0x05, 0x06, 0x00 };
	unsigned int i;
	int tmp;
	u32 val;

	/* Initialise GPIO-connected stuff */
	gpio_inout(0xffffff, (1<<13));
	gpio_write(0);
	udelay(3);
	gpio_write(1<<13);
	/* GPIO inputs are pulled up, so no need to drive
	 * reset pin any longer */
	gpio_bits(0xffffff, 0);
	if (bttv_gpio)
		bttv_gpio_tracking(btv,"pxc200");

	/*  we could/should try and reset/control the AD pots? but
	    right now we simply turned off the crushing.  Without
	    this the AGC drifts drifts
	    remember the EN is reverse logic -->
	    setting BT848_ADC_AGC_EN disable the AGC
	    tboult@eecs.lehigh.edu
	*/
	btwrite(BT848_ADC_RESERVED|BT848_ADC_AGC_EN, BT848_ADC);

	/*	Initialise MAX517 DAC */
	pr_info("Setting DAC reference voltage level ...\n");
	bttv_I2CWrite(btv,0x5E,0,0x80,1);

	/*	Initialise 12C508 PIC */
	/*	The I2CWrite and I2CRead commands are actually to the
	 *	same chips - but the R/W bit is included in the address
	 *	argument so the numbers are different */

	pr_info("Initialising 12C508 PIC chip ...\n");

	/* First of all, enable the clock line. This is used in the PXC200-F */
	val = btread(BT848_GPIO_DMA_CTL);
	val |= BT848_GPIO_DMA_CTL_GPCLKMODE;
	btwrite(val, BT848_GPIO_DMA_CTL);

	/* Then, push to 0 the reset pin long enough to reset the *
	 * device same as above for the reset line, but not the same
	 * value sent to the GPIO-connected stuff
	 * which one is the good one? */
	gpio_inout(0xffffff,(1<<2));
	gpio_write(0);
	udelay(10);
	gpio_write(1<<2);

	for (i = 0; i < ARRAY_SIZE(vals); i++) {
		tmp=bttv_I2CWrite(btv,0x1E,0,vals[i],1);
		if (tmp != -1) {
			pr_info("I2C Write(%2.2x) = %i\nI2C Read () = %2.2x\n\n",
				vals[i],tmp,bttv_I2CRead(btv,0x1F,NULL));
		}
	}
	pr_info("PXC200 Initialised\n");
}



/* ----------------------------------------------------------------------- */
/*
 *  The Adlink RTV-24 (aka Angelo) has some special initialisation to unlock
 *  it. This apparently involves the following procedure for each 878 chip:
 *
 *  1) write 0x00C3FEFF to the GPIO_OUT_EN register
 *
 *  2)  write to GPIO_DATA
 *      - 0x0E
 *      - sleep 1ms
 *      - 0x10 + 0x0E
 *      - sleep 10ms
 *      - 0x0E
 *     read from GPIO_DATA into buf (uint_32)
 *      - if ( data>>18 & 0x01 != 0) || ( buf>>19 & 0x01 != 1 )
 *                 error. ERROR_CPLD_Check_Failed stop.
 *
 *  3) write to GPIO_DATA
 *      - write 0x4400 + 0x0E
 *      - sleep 10ms
 *      - write 0x4410 + 0x0E
 *      - sleep 1ms
 *      - write 0x0E
 *     read from GPIO_DATA into buf (uint_32)
 *      - if ( buf>>18 & 0x01 ) || ( buf>>19 & 0x01 != 0 )
 *                 error. ERROR_CPLD_Check_Failed.
 */
/* ----------------------------------------------------------------------- */
static void
init_RTV24 (struct bttv *btv)
{
	uint32_t dataRead = 0;
	long watchdog_value = 0x0E;

	pr_info("%d: Adlink RTV-24 initialisation in progress ...\n",
		btv->c.nr);

	btwrite (0x00c3feff, BT848_GPIO_OUT_EN);

	/* step 2 of the unlock sequence documented above */
	btwrite (0 + watchdog_value, BT848_GPIO_DATA);
	msleep (1);
	btwrite (0x10 + watchdog_value, BT848_GPIO_DATA);
	msleep (10);
	btwrite (0 + watchdog_value, BT848_GPIO_DATA);

	dataRead = btread (BT848_GPIO_DATA);

	if ((((dataRead >> 18) & 0x01) != 0) ||
	    (((dataRead >> 19) & 0x01) != 1)) {
		pr_info("%d: Adlink RTV-24 initialisation(1) ERROR_CPLD_Check_Failed (read %d)\n",
			btv->c.nr, dataRead);
	}

	/* step 3 of the unlock sequence */
	btwrite (0x4400 + watchdog_value, BT848_GPIO_DATA);
	msleep (10);
	btwrite (0x4410 + watchdog_value, BT848_GPIO_DATA);
	msleep (1);
	btwrite (watchdog_value, BT848_GPIO_DATA);
	msleep (1);
	dataRead = btread (BT848_GPIO_DATA);

	if ((((dataRead >> 18) & 0x01) != 0) ||
	    (((dataRead >> 19) & 0x01) != 0)) {
		pr_info("%d: Adlink RTV-24 initialisation(2) ERROR_CPLD_Check_Failed (read %d)\n",
			btv->c.nr, dataRead);

		return;
	}

	pr_info("%d: Adlink RTV-24 initialisation complete\n", btv->c.nr);
}



/* ----------------------------------------------------------------------- */
/* Miro Pro radio stuff -- the tea5757 is connected to some GPIO ports     */
/*
 * Copyright (c) 1999 Csaba Halasz <qgehali@uni-miskolc.hu>
 * This code is placed under the terms of the GNU General Public License
 *
 * Brutally hacked by Dan Sheridan <dan.sheridan@contact.org.uk> djs52 8/3/00
 */

/* Drive @bit low on the bit-banged TEA5757 bus (with optional
 * ior/iow/csel strobing for the Terratec wiring). */
static void bus_low(struct bttv *btv, int bit)
{
	if (btv->mbox_ior) {
		gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
			  btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
		udelay(5);
	}

	gpio_bits(bit,0);
	udelay(5);

	if (btv->mbox_ior) {
		gpio_bits(btv->mbox_iow | btv->mbox_csel, 0);
		udelay(5);
	}
}

/* Drive @bit high on the bit-banged TEA5757 bus. */
static void bus_high(struct bttv *btv, int bit)
{
	if (btv->mbox_ior) {
		gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
			  btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
		udelay(5);
	}

	gpio_bits(bit,bit);
	udelay(5);

	if (btv->mbox_ior) {
		gpio_bits(btv->mbox_iow | btv->mbox_csel, 0);
		udelay(5);
	}
}

/* Sample @bit from the bit-banged TEA5757 bus; non-zero when set. */
static int bus_in(struct bttv *btv, int bit)
{
	if (btv->mbox_ior) {
		gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
			  btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
		udelay(5);

		gpio_bits(btv->mbox_iow | btv->mbox_csel, 0);
		udelay(5);
	}
	return gpio_read() & (bit);
}

/* TEA5757 register bits */
#define TEA_FREQ		0:14
#define TEA_BUFFER		15:15

#define TEA_SIGNAL_STRENGTH	16:17

#define TEA_PORT1		18:18
#define TEA_PORT0		19:19

#define TEA_BAND		20:21
#define TEA_BAND_FM		0
#define TEA_BAND_MW		1
#define TEA_BAND_LW		2
#define TEA_BAND_SW		3

#define TEA_MONO		22:22
#define TEA_ALLOW_STEREO	0
#define TEA_FORCE_MONO		1

#define TEA_SEARCH_DIRECTION	23:23
#define TEA_SEARCH_DOWN		0
#define TEA_SEARCH_UP		1

#define TEA_STATUS		24:24
#define TEA_STATUS_TUNED	0
#define TEA_STATUS_SEARCHING	1

/* Low-level stuff */
/* Clock 24 bits out of the TEA5757, MSB first.  Returns the value read,
 * or -1 when the chip never pulled DATA low within 1 second. */
static int tea5757_read(struct bttv *btv)
{
	unsigned long timeout;
	int value = 0;
	int i;

	/* better safe than sorry */
	gpio_inout(btv->mbox_mask, btv->mbox_clk | btv->mbox_we);

	if (btv->mbox_ior) {
		gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
			  btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
		udelay(5);
	}

	if (bttv_gpio)
		bttv_gpio_tracking(btv,"tea5757 read");

	bus_low(btv,btv->mbox_we);
	bus_low(btv,btv->mbox_clk);

	udelay(10);
	timeout= jiffies + msecs_to_jiffies(1000);

	/* wait for DATA line to go low; error if it doesn't */
	while (bus_in(btv,btv->mbox_data) && time_before(jiffies, timeout))
		schedule();
	if (bus_in(btv,btv->mbox_data)) {
		pr_warn("%d: tea5757: read timeout\n", btv->c.nr);
		return -1;
	}

	dprintk("%d: tea5757:", btv->c.nr);
	for (i = 0; i < 24; i++) {
		udelay(5);
		bus_high(btv,btv->mbox_clk);
		udelay(5);
		dprintk_cont("%c",
			     bus_in(btv, btv->mbox_most) == 0 ? 'T' : '-');
		bus_low(btv,btv->mbox_clk);
		value <<= 1;
		value |= (bus_in(btv,btv->mbox_data) == 0)?0:1; /* MSB first */
		dprintk_cont("%c",
			     bus_in(btv, btv->mbox_most) == 0 ? 'S' : 'M');
	}
	dprintk_cont("\n");
	dprintk("%d: tea5757: read 0x%X\n", btv->c.nr, value);
	return value;
}

/* Clock a 25-bit value into the TEA5757, MSB first.  Always returns 0. */
static int tea5757_write(struct bttv *btv, int value)
{
	int i;
	int reg = value;

	gpio_inout(btv->mbox_mask,
		   btv->mbox_clk | btv->mbox_we | btv->mbox_data);

	if (btv->mbox_ior) {
		gpio_bits(btv->mbox_ior | btv->mbox_iow | btv->mbox_csel,
			  btv->mbox_ior | btv->mbox_iow | btv->mbox_csel);
		udelay(5);
	}
	if (bttv_gpio)
		bttv_gpio_tracking(btv,"tea5757 write");

	dprintk("%d: tea5757: write 0x%X\n", btv->c.nr, value);
	bus_low(btv,btv->mbox_clk);
	bus_high(btv,btv->mbox_we);
	for (i = 0; i < 25; i++) {
		if (reg & 0x1000000)
			bus_high(btv,btv->mbox_data);
		else
			bus_low(btv,btv->mbox_data);
		reg <<= 1;
		bus_high(btv,btv->mbox_clk);
		udelay(10);
		bus_low(btv,btv->mbox_clk);
		udelay(10);
	}
	bus_low(btv,btv->mbox_we); /* unmute !!! */
	return 0;
}

void tea5757_set_freq(struct bttv *btv, unsigned short freq)
{
	dprintk("tea5757_set_freq %d\n",freq);
	tea5757_write(btv, 5 * freq + 0x358); /* add 10.7MHz (see docs) */
}

/* RemoteVision MX (rv605) muxsel helper [Miguel Freitas]
 *
 * This is needed because rv605 don't use a normal multiplex, but a crosspoint
 * switch instead (CD22M3494E). This IC can have multiple active connections
 * between Xn (input) and Yn (output) pins. We need to clear any existing
 * connection prior to establish a new one, pulsing the STROBE pin.
 *
 * The board hardwire Y0 (xpoint) to MUX1 and MUXOUT to Yin.
 * GPIO pins are wired as:
 *  GPIO[0:3] - AX[0:3] (xpoint) - P1[0:3] (microcontroller)
 *  GPIO[4:6] - AY[0:2] (xpoint) - P1[4:6] (microcontroller)
 *  GPIO[7]   - DATA (xpoint)    - P1[7] (microcontroller)
 *  GPIO[8]   -                  - P3[5] (microcontroller)
 *  GPIO[9]   - RESET (xpoint)   - P3[6] (microcontroller)
 *  GPIO[10]  - STROBE (xpoint)  - P3[7] (microcontroller)
 *  GPINTR    -                  - P3[4] (microcontroller)
 *
 * The microcontroller is a 80C32 like. It should be possible to change xpoint
 * configuration either directly (as we are doing) or using the microcontroller
 * which is also wired to I2C interface. I have no further info on the
 * microcontroller features, one would need to disassembly the firmware.
 * note: the vendor refused to give any information on this product, all
 * that stuff was found using a multimeter! :)
 */
/* Route camera @input through the CD22M3494E crosspoint: clear the old
 * connection (RESET pulse on bit 9), then latch the new one (DATA +
 * STROBE pulse on bits 7/10). */
static void rv605_muxsel(struct bttv *btv, unsigned int input)
{
	/* table translating the logical input number into the
	 * crosspoint X-address wiring of this board */
	static const u8 muxgpio[] = {
		0x3, 0x1, 0x2, 0x4, 0xf, 0x7, 0xe, 0x0,
		0xd, 0xb, 0xc, 0x6, 0x9, 0x5, 0x8, 0xa
	};

	gpio_bits(0x07f, muxgpio[input]);

	/* reset all conections */
	gpio_bits(0x200,0x200);
	mdelay(1);
	gpio_bits(0x200,0x000);
	mdelay(1);

	/* create a new connection */
	gpio_bits(0x480,0x480);
	mdelay(1);
	gpio_bits(0x480,0x080);
	mdelay(1);
}

/* Tibet Systems 'Progress DVR' CS16 muxsel helper [Chris Fanning]
 *
 * The CS16 (available on eBay cheap) is a PCI board with four Fusion
 * 878A chips, a PCI bridge, an Atmel microcontroller, four sync separator
 * chips, ten eight input analog multiplexors, a not chip and a few
 * other components.
 *
 * 16 inputs on a secondary bracket are provided and can be selected
 * from each of the four capture chips.  Two of the eight input
 * multiplexors are used to select from any of the 16 input signals.
 *
 * Unsupported hardware capabilities:
 *  . A video output monitor on the secondary bracket can be selected from
 *    one of the 878A chips.
 *  . Another passthrough but I haven't spent any time investigating it.
 *  . Digital I/O (logic level connected to GPIO) is available from an
 *    onboard header.
 *
 * The on chip input mux should always be set to 2.
 * GPIO[16:19] - Video input selection
 * GPIO[0:3]   - Video output monitor select (only available from one 878A)
 * GPIO[?:?]   - Digital I/O.
 *
 * There is an ATMEL microcontroller with an 8031 core on board.  I have not
 * determined what function (if any) it provides.  With the microcontroller
 * and sync separator chips a guess is that it might have to do with video
 * switching and maybe some digital I/O.
 */
/* Select one of the 16 bracket inputs via GPIO bits 16-19. */
static void tibetCS16_muxsel(struct bttv *btv, unsigned int input)
{
	/* video mux */
	gpio_bits(0x0f0000, input << 16);
}

/* One-time GPIO direction/level setup for the CS16. */
static void tibetCS16_init(struct bttv *btv)
{
	/* enable gpio bits, mask obtained via btSpy */
	gpio_inout(0xffffff, 0x0f7fff);
	gpio_write(0x0f7fff);
}

/*
 * The following routines for the Kodicom-4400r get a little mind-twisting.
 * There is a "master" controller and three "slave" controllers, together
 * an analog switch which connects any of 16 cameras to any of the BT87A's.
 * The analog switch is controlled by the "master", but the detection order
 * of the four BT878A chips is in an order which I just don't understand.
 * The "master" is actually the second controller to be detected.  The
 * logic on the board uses logical numbers for the 4 controllers, but
 * those numbers are different from the detection sequence.  When working
 * with the analog switch, we need to "map" from the detection sequence
 * over to the board's logical controller number.  This mapping sequence
 * is {3, 0, 2, 1}, i.e. the first controller to be detected is logical
 * unit 3, the second (which is the master) is logical unit 0, etc.
 * We need to maintain the status of the analog switch (which of the 16
 * cameras is connected to which of the 4 controllers).  Rather than
 * add to the bttv structure for this, we use the data reserved for
 * the mbox (unused for this card type).
 */

/*
 * First a routine to set the analog switch, which controls which camera
 * is routed to which controller.  The switch comprises an X-address
 * (gpio bits 0-3, representing the camera, ranging from 0-15), and a
 * Y-address (gpio bits 4-6, representing the controller, ranging from 0-3).
 * A data value (gpio bit 7) of '1' enables the switch, and '0' disables
 * the switch.  A STROBE bit (gpio bit 8) latches the data value into the
 * specified address.  The idea is to set the address and data, then bring
 * STROBE high, and finally bring STROBE back to low.
 */
static void kodicom4400r_write(struct bttv *btv,
			       unsigned char xaddr,
			       unsigned char yaddr,
			       unsigned char data) {
	unsigned int udata;

	udata = (data << 7) | ((yaddr&3) << 4) | (xaddr&0xf);
	gpio_bits(0x1ff, udata);		/* write ADDR and DAT */
	gpio_bits(0x1ff, udata | (1 << 8));	/* strobe high */
	gpio_bits(0x1ff, udata);		/* strobe low */
}

/*
 * Next the mux select.  Both the "master" and "slave" 'cards' (controllers)
 * use this routine.  The routine finds the "master" for the card, maps
 * the controller number from the detected position over to the logical
 * number, writes the appropriate data to the analog switch, and housekeeps
 * the local copy of the switch information.  The parameter 'input' is the
 * requested camera number (0 - 15).
 */
static void kodicom4400r_muxsel(struct bttv *btv, unsigned int input)
{
	char *sw_status;
	int xaddr, yaddr;
	struct bttv *mctlr;
	/* detection-order -> logical controller number (see comment above) */
	static unsigned char map[4] = {3, 0, 2, 1};

	mctlr = master[btv->c.nr];
	if (mctlr == NULL) {	/* ignore if master not yet detected */
		return;
	}
	yaddr = (btv->c.nr - mctlr->c.nr + 1) & 3; /* the '&' is for safety */
	yaddr = map[yaddr];
	/* switch state is kept in the (otherwise unused) mbox fields */
	sw_status = (char *)(&mctlr->mbox_we);
	xaddr = input & 0xf;
	/* Check if the controller/camera pair has changed, else ignore */
	if (sw_status[yaddr] != xaddr) {
		/* "open" the old switch, "close" the new one, save the new */
		kodicom4400r_write(mctlr, sw_status[yaddr], yaddr, 0);
		sw_status[yaddr] = xaddr;
		kodicom4400r_write(mctlr, xaddr, yaddr, 1);
	}
}

/*
 * During initialisation, we need to reset the analog switch.  We
 * also preset the switch to map the 4 connectors on the card to the
 * *user's* (see above description of kodicom4400r_muxsel) channels
 * 0 through 3
 */
static void kodicom4400r_init(struct bttv *btv)
{
	char *sw_status = (char *)(&btv->mbox_we);
	int ix;

	gpio_inout(0x0003ff, 0x0003ff);
	gpio_write(1 << 9);	/* reset MUX */
	gpio_write(0);
	/* Preset camera 0 to the 4 controllers */
	for (ix = 0; ix < 4; ix++) {
		sw_status[ix] = ix;
		kodicom4400r_write(btv, ix, ix, 1);
	}
	/*
	 * Since this is the "master", we need to set up the
	 * other three controller chips' pointers to this structure
	 * for later use in the muxsel routine.
	 */
	/* master is the 2nd chip detected, so fill nr-1 .. nr+2 */
	if ((btv->c.nr<1) || (btv->c.nr>BTTV_MAX-3))
		return;
	master[btv->c.nr-1] = btv;
	master[btv->c.nr]   = btv;
	master[btv->c.nr+1] = btv;
	master[btv->c.nr+2] = btv;
}

/* The Grandtec X-Guard framegrabber card uses two Dual 4-channel
 * video multiplexers to provide up to 16 video inputs. These
 * multiplexers are controlled by the lower 8 GPIO pins of the
 * bt878. The multiplexers probably Pericom PI5V331Q or similar.
* xxx0 is pin xxx of multiplexer U5, * yyy1 is pin yyy of multiplexer U2 */ #define ENA0 0x01 #define ENB0 0x02 #define ENA1 0x04 #define ENB1 0x08 #define IN10 0x10 #define IN00 0x20 #define IN11 0x40 #define IN01 0x80 static void xguard_muxsel(struct bttv *btv, unsigned int input) { static const int masks[] = { ENB0, ENB0|IN00, ENB0|IN10, ENB0|IN00|IN10, ENA0, ENA0|IN00, ENA0|IN10, ENA0|IN00|IN10, ENB1, ENB1|IN01, ENB1|IN11, ENB1|IN01|IN11, ENA1, ENA1|IN01, ENA1|IN11, ENA1|IN01|IN11, }; gpio_write(masks[input%16]); } static void picolo_tetra_init(struct bttv *btv) { /*This is the video input redirection fonctionality : I DID NOT USED IT. */ btwrite (0x08<<16,BT848_GPIO_DATA);/*GPIO[19] [==> 4053 B+C] set to 1 */ btwrite (0x04<<16,BT848_GPIO_DATA);/*GPIO[18] [==> 4053 A] set to 1*/ } static void picolo_tetra_muxsel (struct bttv* btv, unsigned int input) { dprintk("%d : picolo_tetra_muxsel => input = %d\n", btv->c.nr, input); /*Just set the right path in the analog multiplexers : channel 1 -> 4 ==> Analog Mux ==> MUX0*/ /*GPIO[20]&GPIO[21] used to choose the right input*/ btwrite (input<<20,BT848_GPIO_DATA); } /* * ivc120_muxsel [Added by Alan Garfield <alan@fromorbit.com>] * * The IVC120G security card has 4 i2c controlled TDA8540 matrix * swichers to provide 16 channels to MUX0. The TDA8540's have * 4 independent outputs and as such the IVC120G also has the * optional "Monitor Out" bus. This allows the card to be looking * at one input while the monitor is looking at another. * * Since I've couldn't be bothered figuring out how to add an * independent muxsel for the monitor bus, I've just set it to * whatever the card is looking at. 
 * OUT0 of the TDA8540's is connected to MUX0		(0x03)
 * OUT1 of the TDA8540's is connected to "Monitor Out"	(0x0C)
 *
 * TDA8540_ALT3 IN0-3 = Channel 13 - 16	(0x03)
 * TDA8540_ALT4 IN0-3 = Channel 1 - 4	(0x03)
 * TDA8540_ALT5 IN0-3 = Channel 5 - 8	(0x03)
 * TDA8540_ALT6 IN0-3 = Channel 9 - 12	(0x03)
 *
 */

/* All 7 possible sub-ids for the TDA8540 Matrix Switcher */
#define I2C_TDA8540        0x90
#define I2C_TDA8540_ALT1   0x92
#define I2C_TDA8540_ALT2   0x94
#define I2C_TDA8540_ALT3   0x96
#define I2C_TDA8540_ALT4   0x98
#define I2C_TDA8540_ALT5   0x9a
#define I2C_TDA8540_ALT6   0x9c

/* Route camera @input (0-15) to MUX0: pick the TDA8540 holding that
 * channel (matrix) and the input on it (key); program all four
 * switchers so only the selected one drives its outputs. */
static void ivc120_muxsel(struct bttv *btv, unsigned int input)
{
	/* Simple maths */
	int key = input % 4;
	int matrix = input / 4;

	dprintk("%d: ivc120_muxsel: Input - %02d | TDA - %02d | In - %02d\n",
		btv->c.nr, input, matrix, key);

	/* Handles the input selection on the TDA8540's */
	/* (key | key << 2) drives both OUT0/MUX0 and OUT1/Monitor from
	 * the same input */
	bttv_I2CWrite(btv, I2C_TDA8540_ALT3, 0x00,
		      ((matrix == 3) ? (key | key << 2) : 0x00), 1);  /* 13 - 16 */
	bttv_I2CWrite(btv, I2C_TDA8540_ALT4, 0x00,
		      ((matrix == 0) ? (key | key << 2) : 0x00), 1);  /* 1-4 */
	bttv_I2CWrite(btv, I2C_TDA8540_ALT5, 0x00,
		      ((matrix == 1) ? (key | key << 2) : 0x00), 1);  /* 5-8 */
	bttv_I2CWrite(btv, I2C_TDA8540_ALT6, 0x00,
		      ((matrix == 2) ? (key | key << 2) : 0x00), 1);  /* 9-12 */

	/* Handles the output enables on the TDA8540's */
	bttv_I2CWrite(btv, I2C_TDA8540_ALT3, 0x02,
		      ((matrix == 3) ? 0x03 : 0x00), 1);  /* 13 - 16 */
	bttv_I2CWrite(btv, I2C_TDA8540_ALT4, 0x02,
		      ((matrix == 0) ? 0x03 : 0x00), 1);  /* 1-4 */
	bttv_I2CWrite(btv, I2C_TDA8540_ALT5, 0x02,
		      ((matrix == 1) ? 0x03 : 0x00), 1);  /* 5-8 */
	bttv_I2CWrite(btv, I2C_TDA8540_ALT6, 0x02,
		      ((matrix == 2) ? 0x03 : 0x00), 1);  /* 9-12 */

	/* 878's MUX0 is already selected for input via muxsel values */
}

/* PXC200 muxsel helper
 * luke@syseng.anu.edu.au
 * another transplant
 * from Alessandro Rubini (rubini@linux.it)
 *
 * There are 4 kinds of cards:
 * PXC200L which is bt848
 * PXC200F which is bt848 with PIC controlling mux
 * PXC200AL which is bt878
 * PXC200AF which is bt878 with PIC controlling mux
 */
#define PX_CFG_PXC200F 0x01
#define PX_FLAG_PXC200A  0x00001000 /* a pxc200A is bt-878 based */
#define PX_I2C_PIC       0x0f
#define PX_PXC200A_CARDID 0x200a1295
#define PX_I2C_CMD_CFG   0x00

/* Select input @input on PXC200F/AF cards; probes the onboard PIC over
 * I2C first and bails out silently on non-PIC (L/AL) variants. */
static void PXC200_muxsel(struct bttv *btv, unsigned int input)
{
	int rc;
	long mux;
	int bitmask;
	unsigned char buf[2];

	/* Read PIC config to determine if this is a PXC200F */
	/* PX_I2C_CMD_CFG*/

	buf[0]=0;
	buf[1]=0;
	rc=bttv_I2CWrite(btv,(PX_I2C_PIC<<1),buf[0],buf[1],1);
	if (rc) {
		pr_debug("%d: PXC200_muxsel: pic cfg write failed:%d\n",
			 btv->c.nr, rc);
	  /* not PXC ? do nothing */
		return;
	}

	/* NOTE(review): a negative I2C error here would still be tested
	 * against PX_CFG_PXC200F below — presumably harmless, verify */
	rc=bttv_I2CRead(btv,(PX_I2C_PIC<<1),NULL);
	if (!(rc & PX_CFG_PXC200F)) {
		pr_debug("%d: PXC200_muxsel: not PXC200F rc:%d\n",
			 btv->c.nr, rc);
		return;
	}

	/* The multiplexer in the 200F is handled by the GPIO port */
	/* get correct mapping between inputs  */
	/*  mux = bttv_tvcards[btv->type].muxsel[input] & 3; */
	/* ** not needed!?
*/ mux = input; /* make sure output pins are enabled */ /* bitmask=0x30f; */ bitmask=0x302; /* check whether we have a PXC200A */ if (btv->cardid == PX_PXC200A_CARDID) { bitmask ^= 0x180; /* use 7 and 9, not 8 and 9 */ bitmask |= 7<<4; /* the DAC */ } btwrite(bitmask, BT848_GPIO_OUT_EN); bitmask = btread(BT848_GPIO_DATA); if (btv->cardid == PX_PXC200A_CARDID) bitmask = (bitmask & ~0x280) | ((mux & 2) << 8) | ((mux & 1) << 7); else /* older device */ bitmask = (bitmask & ~0x300) | ((mux & 3) << 8); btwrite(bitmask,BT848_GPIO_DATA); /* * Was "to be safe, set the bt848 to input 0" * Actually, since it's ok at load time, better not messing * with these bits (on PXC200AF you need to set mux 2 here) * * needed because bttv-driver sets mux before calling this function */ if (btv->cardid == PX_PXC200A_CARDID) btaor(2<<5, ~BT848_IFORM_MUXSEL, BT848_IFORM); else /* older device */ btand(~BT848_IFORM_MUXSEL,BT848_IFORM); pr_debug("%d: setting input channel to:%d\n", btv->c.nr, (int)mux); } static void phytec_muxsel(struct bttv *btv, unsigned int input) { unsigned int mux = input % 4; if (input == btv->svhs) mux = 0; gpio_bits(0x3, mux); } /* * GeoVision GV-800(S) functions * Bruno Christo <bchristo@inf.ufsm.br> */ /* This is a function to control the analog switch, which determines which * camera is routed to which controller. The switch comprises an X-address * (gpio bits 0-3, representing the camera, ranging from 0-15), and a * Y-address (gpio bits 4-6, representing the controller, ranging from 0-3). * A data value (gpio bit 18) of '1' enables the switch, and '0' disables * the switch. A STROBE bit (gpio bit 17) latches the data value into the * specified address. There is also a chip select (gpio bit 16). * The idea is to set the address and chip select together, bring * STROBE high, write the data, and finally bring STROBE back to low. 
 */
static void gv800s_write(struct bttv *btv,
			 unsigned char xaddr,
			 unsigned char yaddr,
			 unsigned char data)
{
	/* On the "master" 878A:
	* GPIO bits 0-9 are used for the analog switch:
	*   00 - 03:	camera selector
	*   04 - 06:	878A (controller) selector
	*   16: 	cselect
	*   17:		strobe
	*   18: 	data (1->on, 0->off)
	*   19:		reset
	*/
	const u32 ADDRESS = ((xaddr&0xf) | (yaddr&3)<<4);
	const u32 CSELECT = 1<<16;
	const u32 STROBE = 1<<17;
	const u32 DATA = data<<18;

	gpio_bits(0x1007f, ADDRESS | CSELECT);	/* write ADDRESS and CSELECT */
	gpio_bits(0x20000, STROBE);		/* STROBE high */
	gpio_bits(0x40000, DATA);		/* write DATA */
	/* ~STROBE masked with 0x20000 clears just the strobe bit */
	gpio_bits(0x20000, ~STROBE);		/* STROBE low */
}

/*
 * GeoVision GV-800(S) muxsel
 *
 * Each of the 4 cards (controllers) use this function.
 * The controller using this function selects the input through the GPIO pins
 * of the "master" card. A pointer to this card is stored in master[btv->c.nr].
 *
 * The parameter 'input' is the requested camera number (0-4) on the controller.
 * The map array has the address of each input. Note that the addresses in the
 * array are in the sequence the original GeoVision driver uses, that is, set
 * every controller to input 0, then to input 1, 2, 3, repeat. This means that
 * the physical "camera 1" connector corresponds to controller 0 input 0,
 * "camera 2" corresponds to controller 1 input 0, and so on.
 *
 * After getting the input address, the function then writes the appropriate
 * data to the analog switch, and housekeeps the local copy of the switch
 * information.
 */
static void gv800s_muxsel(struct bttv *btv, unsigned int input)
{
	struct bttv *mctlr;
	char *sw_status;
	int xaddr, yaddr;
	/* per-controller camera address table (see comment above) */
	static unsigned int map[4][4] = { { 0x0, 0x4, 0xa, 0x6 },
					  { 0x1, 0x5, 0xb, 0x7 },
					  { 0x2, 0x8, 0xc, 0xe },
					  { 0x3, 0x9, 0xd, 0xf } };

	input = input%4;
	mctlr = master[btv->c.nr];
	if (mctlr == NULL) {
		/* do nothing until the "master" is detected */
		return;
	}
	yaddr = (btv->c.nr - mctlr->c.nr) & 3;
	/* switch state lives in the master's otherwise-unused mbox fields */
	sw_status = (char *)(&mctlr->mbox_we);
	xaddr = map[yaddr][input] & 0xf;

	/* Check if the controller/camera pair has changed, ignore otherwise */
	if (sw_status[yaddr] != xaddr) {
		/* disable the old switch, enable the new one and save status */
		gv800s_write(mctlr, sw_status[yaddr], yaddr, 0);
		sw_status[yaddr] = xaddr;
		gv800s_write(mctlr, xaddr, yaddr, 1);
	}
}

/* GeoVision GV-800(S) "master" chip init */
static void gv800s_init(struct bttv *btv)
{
	char *sw_status = (char *)(&btv->mbox_we);
	int ix;

	gpio_inout(0xf107f, 0xf107f);
	gpio_write(1<<19); /* reset the analog MUX */
	gpio_write(0);

	/* Preset camera 0 to the 4 controllers */
	for (ix = 0; ix < 4; ix++) {
		sw_status[ix] = ix;
		gv800s_write(btv, ix, ix, 1);
	}

	/* Inputs on the "master" controller need this brightness fix */
	bttv_I2CWrite(btv, 0x18, 0x5, 0x90, 1);

	if (btv->c.nr > BTTV_MAX-4)
		return;
	/*
	 * Store the "master" controller pointer in the master
	 * array for later use in the muxsel function.
	master[btv->c.nr]   = btv;
	master[btv->c.nr+1] = btv;
	master[btv->c.nr+2] = btv;
	master[btv->c.nr+3] = btv;
}

/* ----------------------------------------------------------------------- */
/* motherboard chipset specific stuff                                      */

/* Scan known PCI-PCI bridge quirks at init time and set the module-level
 * workaround flags (triton1, vsfx, latency, no_overlay) accordingly. */
void __init bttv_check_chipset(void)
{
	int pcipci_fail = 0;
	struct pci_dev *dev = NULL;

	if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL))	/* should check if target is AGP */
		pcipci_fail = 1;
	if (pci_pci_problems & (PCIPCI_TRITON|PCIPCI_NATOMA|PCIPCI_VIAETBF))
		triton1 = 1;
	if (pci_pci_problems & PCIPCI_VSFX)
		vsfx = 1;
#ifdef PCIPCI_ALIMAGIK
	if (pci_pci_problems & PCIPCI_ALIMAGIK)
		latency = 0x0A;
#endif

	/* print warnings about any quirks found */
	if (triton1)
		pr_info("Host bridge needs ETBF enabled\n");
	if (vsfx)
		pr_info("Host bridge needs VSFX enabled\n");
	if (pcipci_fail) {
		pr_info("bttv and your chipset may not work together\n");
		if (!no_overlay) {
			pr_info("overlay will be disabled\n");
			no_overlay = 1;
		} else {
			pr_info("overlay forced. Use this option at your own risk.\n");
		}
	}
	if (UNSET != latency)
		pr_info("pci latency fixup [%d]\n", latency);
	/* 82441FX Natoma: just report the buffer control byte for debugging */
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82441, dev))) {
		unsigned char b;
		pci_read_config_byte(dev, 0x53, &b);
		if (bttv_debug)
			pr_info("Host bridge: 82441FX Natoma, bufcon=0x%02x\n",
				b);
	}
}

/* Apply the chipset workarounds (ETBF/VSFX enables, PCI latency timer)
 * to one bttv device.  Returns 0. */
int bttv_handle_chipset(struct bttv *btv)
{
	unsigned char command;

	if (!triton1 && !vsfx && UNSET == latency)
		return 0;

	if (bttv_verbose) {
		if (triton1)
			pr_info("%d: enabling ETBF (430FX/VP3 compatibility)\n",
				btv->c.nr);
		if (vsfx && btv->id >= 878)
			pr_info("%d: enabling VSFX\n", btv->c.nr);
		if (UNSET != latency)
			pr_info("%d: setting pci timer to %d\n",
				btv->c.nr, latency);
	}

	if (btv->id < 878) {
		/* bt848 (mis)uses a bit in the irq mask for etbf */
		if (triton1)
			btv->triton1 = BT848_INT_ETBF;
	} else {
		/* bt878 has a bit in the pci config space for it */
		pci_read_config_byte(btv->c.pci, BT878_DEVCTRL, &command);
		if (triton1)
			command |= BT878_EN_TBFX;
		if (vsfx)
			command |= BT878_EN_VSFX;
		pci_write_config_byte(btv->c.pci, BT878_DEVCTRL, command);
	}
	if (UNSET != latency)
		pci_write_config_byte(btv->c.pci, PCI_LATENCY_TIMER, latency);
	return 0;
}

/*
 * Local variables:
 * c-basic-offset: 8
 * End:
 */
gpl-2.0
run/perf-power7
drivers/media/platform/soc_camera/omap1_camera.c
2093
45488
/*
 * V4L2 SoC Camera driver for OMAP1 Camera Interface
 *
 * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
 *
 * Based on V4L2 Driver for i.MXL/i.MXL camera (CSI) host
 * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 * Copyright (C) 2009, Darius Augulis <augulis.darius@gmail.com>
 *
 * Based on PXA SoC camera driver
 * Copyright (C) 2006, Sascha Hauer, Pengutronix
 * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
 *
 * Hardware specific bits initialy based on former work by Matt Callow
 * drivers/media/platform/omap/omap1510cam.c
 * Copyright (C) 2006 Matt Callow
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <media/omap1_camera.h>
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
#include <media/videobuf-dma-contig.h>
#include <media/videobuf-dma-sg.h>

#include <linux/omap-dma.h>

#define DRIVER_NAME		"omap1-camera"
#define DRIVER_VERSION		"0.0.2"

/* OMAP DMA request line of the camera interface RX channel */
#define OMAP_DMA_CAMERA_IF_RX	20

/*
 * ---------------------------------------------------------------------------
 *  OMAP1 Camera Interface registers
 * ---------------------------------------------------------------------------
 */

#define REG_CTRLCLOCK		0x00
#define REG_IT_STATUS		0x04
#define REG_MODE		0x08
#define REG_STATUS		0x0C
#define REG_CAMDATA		0x10
#define REG_GPIO		0x14
#define REG_PEAK_COUNTER	0x18

/* CTRLCLOCK bit shifts */
#define LCLK_EN			BIT(7)
#define DPLL_EN			BIT(6)
#define MCLK_EN			BIT(5)
#define CAMEXCLK_EN		BIT(4)
#define POLCLK			BIT(3)
#define FOSCMOD_SHIFT		0
#define FOSCMOD_MASK		(0x7 << FOSCMOD_SHIFT)
#define FOSCMOD_12MHz		0x0
#define FOSCMOD_6MHz		0x2
#define FOSCMOD_9_6MHz		0x4
#define FOSCMOD_24MHz		0x5
#define FOSCMOD_8MHz		0x6

/* IT_STATUS bit shifts */
#define DATA_TRANSFER		BIT(5)
#define FIFO_FULL		BIT(4)
#define H_DOWN			BIT(3)
#define H_UP			BIT(2)
#define V_DOWN			BIT(1)
#define V_UP			BIT(0)

/* MODE bit shifts */
#define RAZ_FIFO		BIT(18)
#define EN_FIFO_FULL		BIT(17)
#define EN_NIRQ			BIT(16)
#define THRESHOLD_SHIFT		9
#define THRESHOLD_MASK		(0x7f << THRESHOLD_SHIFT)
#define DMA			BIT(8)
#define EN_H_DOWN		BIT(7)
#define EN_H_UP			BIT(6)
#define EN_V_DOWN		BIT(5)
#define EN_V_UP			BIT(4)
#define ORDERCAMD		BIT(3)

#define IRQ_MASK		(EN_V_UP | EN_V_DOWN | EN_H_UP | EN_H_DOWN | \
				 EN_NIRQ | EN_FIFO_FULL)

/* STATUS bit shifts */
#define HSTATUS			BIT(1)
#define VSTATUS			BIT(0)

/* GPIO bit shifts */
#define CAM_RST			BIT(0)

/* end of OMAP1 Camera Interface registers */

#define SOCAM_BUS_FLAGS	(V4L2_MBUS_MASTER | \
		V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
		V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | \
		V4L2_MBUS_DATA_ACTIVE_HIGH)

#define FIFO_SIZE		((THRESHOLD_MASK >> THRESHOLD_SHIFT) + 1)
#define FIFO_SHIFT		__fls(FIFO_SIZE)

#define DMA_BURST_SHIFT		(1 + OMAP_DMA_DATA_BURST_4)
#define DMA_BURST_SIZE		(1 << DMA_BURST_SHIFT)

#define DMA_ELEMENT_SHIFT	OMAP_DMA_DATA_TYPE_S32
#define DMA_ELEMENT_SIZE	(1 << DMA_ELEMENT_SHIFT)

#define DMA_FRAME_SHIFT_CONTIG	(FIFO_SHIFT - 1)
#define DMA_FRAME_SHIFT_SG	DMA_BURST_SHIFT

/* DMA frame size depends on the videobuf mode in use */
#define DMA_FRAME_SHIFT(x)	((x) == OMAP1_CAM_DMA_CONTIG ?
\
				DMA_FRAME_SHIFT_CONTIG : \
				DMA_FRAME_SHIFT_SG)
#define DMA_FRAME_SIZE(x)	(1 << DMA_FRAME_SHIFT(x))
#define DMA_SYNC		OMAP_DMA_SYNC_FRAME
#define THRESHOLD_LEVEL		DMA_FRAME_SIZE


#define MAX_VIDEO_MEM		4	/* arbitrary video memory limit in MB */


/*
 * Structures
 */

/* buffer for one video frame */
struct omap1_cam_buf {
	struct videobuf_buffer		vb;	/* must stay first: container_of() */
	enum v4l2_mbus_pixelcode	code;	/* media bus format of this frame */
	int				inwork;	/* set while prepare is running */
	struct scatterlist		*sgbuf;	/* current sg entry (SG mode) */
	int				sgcount;	/* sg entries consumed so far */
	int				bytes_left;	/* bytes still to transfer (SG mode) */
	enum videobuf_state		result;	/* outcome accumulated during DMA */
};

/* per-host-controller state */
struct omap1_cam_dev {
	struct soc_camera_host		soc_host;
	struct soc_camera_device	*icd;
	struct clk			*clk;

	unsigned int			irq;
	void __iomem			*base;

	int				dma_ch;

	struct omap1_cam_platform_data	*pdata;
	struct resource			*res;
	unsigned long			pflags;
	unsigned long			camexclk;

	struct list_head		capture;

	/* lock used to protect videobuf */
	spinlock_t			lock;

	/* Pointers to DMA buffers */
	struct omap1_cam_buf		*active;	/* being filled by DMA */
	struct omap1_cam_buf		*ready;		/* preprogrammed next buffer */

	enum omap1_cam_vb_mode		vb_mode;	/* CONTIG vs SG videobuf mode */
	int				(*mmap_mapper)(struct videobuf_queue *q,
						struct videobuf_buffer *buf,
						struct vm_area_struct *vma);

	/* flexible trailing register shadow, indexed by reg offset / 4 */
	u32				reg_cache[0];
};


/* Write a camera interface register and shadow the value in reg_cache. */
static void cam_write(struct omap1_cam_dev *pcdev, u16 reg, u32 val)
{
	pcdev->reg_cache[reg / sizeof(u32)] = val;
	__raw_writel(val, pcdev->base + reg);
}

/* Read a camera interface register, either from hardware or from the
 * software shadow (@from_cache). */
static u32 cam_read(struct omap1_cam_dev *pcdev, u16 reg, bool from_cache)
{
	return !from_cache ? __raw_readl(pcdev->base + reg) :
			pcdev->reg_cache[reg / sizeof(u32)];
}

#define CAM_READ(pcdev, reg) \
		cam_read(pcdev, REG_##reg, false)
#define CAM_WRITE(pcdev, reg, val) \
		cam_write(pcdev, REG_##reg, val)
#define CAM_READ_CACHE(pcdev, reg) \
		cam_read(pcdev, REG_##reg, true)

/*
 *  Videobuf operations
 */
/* videobuf .buf_setup: clamp buffer count to the per-mode minimum and to
 * the MAX_VIDEO_MEM memory budget. */
static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
		unsigned int *size)
{
	struct soc_camera_device *icd = vq->priv_data;
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct omap1_cam_dev *pcdev = ici->priv;

	*size = icd->sizeimage;

	if (!*count || *count < OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode))
		*count = OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode);

	if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
		*count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;

	dev_dbg(icd->parent,
			"%s: count=%d, size=%d\n", __func__, *count, *size);

	return 0;
}

/* Release a frame buffer's backing memory (mode-dependent) and mark it
 * for re-initialization.  Must not be called from interrupt context. */
static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf,
		enum omap1_cam_vb_mode vb_mode)
{
	struct videobuf_buffer *vb = &buf->vb;

	BUG_ON(in_interrupt());

	videobuf_waiton(vq, vb, 0, 0);

	if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
		videobuf_dma_contig_free(vq, vb);
	} else {
		struct soc_camera_device *icd = vq->priv_data;
		struct device *dev = icd->parent;
		struct videobuf_dmabuf *dma = videobuf_to_dma(vb);

		videobuf_dma_unmap(dev, dma);
		videobuf_dma_free(dma);
	}

	vb->state = VIDEOBUF_NEEDS_INIT;
}

/* videobuf .buf_prepare: (re)validate frame geometry/format and lock the
 * buffer's memory down if it still needs initialization. */
static int omap1_videobuf_prepare(struct videobuf_queue *vq,
		struct videobuf_buffer *vb, enum v4l2_field field)
{
	struct soc_camera_device *icd = vq->priv_data;
	struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct omap1_cam_dev *pcdev = ici->priv;
	int ret;

	WARN_ON(!list_empty(&vb->queue));

	BUG_ON(NULL == icd->current_fmt);

	buf->inwork = 1;

	if (buf->code != icd->current_fmt->code || vb->field != field ||
			vb->width  != icd->user_width ||
			vb->height != icd->user_height) {
		/* format or geometry changed: force re-initialization */
		buf->code  = icd->current_fmt->code;
		vb->width  = icd->user_width;
		vb->height = icd->user_height;
		vb->field  = field;
		vb->state  = VIDEOBUF_NEEDS_INIT;
	}

	vb->size = icd->sizeimage;

	if (vb->baddr && vb->bsize < vb->size) {
		/* user-provided buffer too small for the current format */
		ret = -EINVAL;
		goto out;
	}

	if (vb->state == VIDEOBUF_NEEDS_INIT) {
		ret = videobuf_iolock(vq, vb, NULL);
		if (ret)
			goto fail;

		vb->state = VIDEOBUF_PREPARED;
	}
	buf->inwork = 0;

	return 0;
fail:
	free_buffer(vq, buf, pcdev->vb_mode);
out:
	buf->inwork = 0;
	return ret;
}

/* Program the DMA destination (address + transfer size) for the next
 * block of @buf: the whole frame in CONTIG mode, or the current sg
 * entry in SG mode.  Marks the buffer VIDEOBUF_ERROR on bad sg state. */
static void set_dma_dest_params(int dma_ch, struct omap1_cam_buf *buf,
		enum omap1_cam_vb_mode vb_mode)
{
	dma_addr_t dma_addr;
	unsigned int block_size;

	if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
		dma_addr = videobuf_to_dma_contig(&buf->vb);
		block_size = buf->vb.size;
	} else {
		if (WARN_ON(!buf->sgbuf)) {
			buf->result = VIDEOBUF_ERROR;
			return;
		}
		dma_addr = sg_dma_address(buf->sgbuf);
		if (WARN_ON(!dma_addr)) {
			buf->sgbuf = NULL;
			buf->result = VIDEOBUF_ERROR;
			return;
		}
		block_size = sg_dma_len(buf->sgbuf);
		if (WARN_ON(!block_size)) {
			buf->sgbuf = NULL;
			buf->result = VIDEOBUF_ERROR;
			return;
		}
		if (unlikely(buf->bytes_left < block_size))
			block_size = buf->bytes_left;
		/* enforce DMA frame alignment; trims a misaligned entry */
		if (WARN_ON(dma_addr & (DMA_FRAME_SIZE(vb_mode) *
				DMA_ELEMENT_SIZE - 1))) {
			dma_addr = ALIGN(dma_addr, DMA_FRAME_SIZE(vb_mode) *
					DMA_ELEMENT_SIZE);
			block_size &= ~(DMA_FRAME_SIZE(vb_mode) *
					DMA_ELEMENT_SIZE - 1);
		}
		buf->bytes_left -= block_size;
		buf->sgcount++;
	}

	omap_set_dma_dest_params(dma_ch,
		OMAP_DMA_PORT_EMIFF, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0);
	omap_set_dma_transfer_params(dma_ch,
		OMAP_DMA_DATA_TYPE_S32, DMA_FRAME_SIZE(vb_mode),
		block_size >> (DMA_FRAME_SHIFT(vb_mode) + DMA_ELEMENT_SHIFT),
		DMA_SYNC, 0, 0);
}

/* Fetch (or reuse) the next buffer for capture; in CONTIG mode also
 * preprograms the DMA destination.  Returns NULL if the queue is empty. */
static struct omap1_cam_buf *prepare_next_vb(struct omap1_cam_dev *pcdev)
{
	struct omap1_cam_buf *buf;

	/*
	 * If there is already a buffer pointed out by the pcdev->ready,
	 * (re)use it, otherwise try to fetch and configure a new one.
	 */
	buf = pcdev->ready;
	if (!buf) {
		if (list_empty(&pcdev->capture))
			return buf;
		buf = list_entry(pcdev->capture.next,
				struct omap1_cam_buf, vb.queue);
		buf->vb.state = VIDEOBUF_ACTIVE;
		pcdev->ready = buf;
		list_del_init(&buf->vb.queue);
	}

	if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
		/*
		 * In CONTIG mode, we can safely enter next buffer parameters
		 * into the DMA programming register set after the DMA
		 * has already been activated on the previous buffer
		 */
		set_dma_dest_params(pcdev->dma_ch, buf, pcdev->vb_mode);
	} else {
		/*
		 * In SG mode, the above is not safe since there are probably
		 * a bunch of sgbufs from previous sglist still pending.
		 * Instead, mark the sglist fresh for the upcoming
		 * try_next_sgbuf().
		 */
		buf->sgbuf = NULL;
	}

	return buf;
}

/* Advance @buf to its next scatterlist entry (initializing the sglist
 * if it is fresh) and, if one is available, program it into the DMA
 * register set.  Returns the sg entry used, or NULL when exhausted. */
static struct scatterlist *try_next_sgbuf(int dma_ch, struct omap1_cam_buf *buf)
{
	struct scatterlist *sgbuf;

	if (likely(buf->sgbuf)) {
		/* current sglist is active */
		if (unlikely(!buf->bytes_left)) {
			/* indicate sglist complete */
			sgbuf = NULL;
		} else {
			/* process next sgbuf */
			sgbuf = sg_next(buf->sgbuf);
			if (WARN_ON(!sgbuf)) {
				buf->result = VIDEOBUF_ERROR;
			} else if (WARN_ON(!sg_dma_len(sgbuf))) {
				sgbuf = NULL;
				buf->result = VIDEOBUF_ERROR;
			}
		}
		buf->sgbuf = sgbuf;
	} else {
		/* sglist is fresh, initialize it before using */
		struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);

		sgbuf = dma->sglist;
		if (!(WARN_ON(!sgbuf))) {
			buf->sgbuf = sgbuf;
			buf->sgcount = 0;
			buf->bytes_left = buf->vb.size;
			buf->result = VIDEOBUF_DONE;
		}
	}
	if (sgbuf)
		/*
		 * Put our next sgbuf parameters (address, size)
		 * into the DMA programming register set.
		 */
		set_dma_dest_params(dma_ch, buf, OMAP1_CAM_DMA_SG);

	return sgbuf;
}

/* Kick off capture into pcdev->active: reset the FIFO with the pixel
 * clock stopped, start DMA, then re-enable the clock and release the
 * FIFO reset.  Ordering of the register writes matters. */
static void start_capture(struct omap1_cam_dev *pcdev)
{
	struct omap1_cam_buf *buf = pcdev->active;
	u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
	u32 mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN;

	if (WARN_ON(!buf))
		return;

	/*
	 * Enable start of frame interrupt, which we will use for activating
	 * our end of frame watchdog when capture actually starts.
	 */
	mode |= EN_V_UP;

	if (unlikely(ctrlclock & LCLK_EN))
		/* stop pixel clock before FIFO reset */
		CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
	/* reset FIFO */
	CAM_WRITE(pcdev, MODE, mode | RAZ_FIFO);

	omap_start_dma(pcdev->dma_ch);

	if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
		/*
		 * In SG mode, it's a good moment for fetching next sgbuf
		 * from the current sglist and, if available, already putting
		 * its parameters into the DMA programming register set.
		 */
		try_next_sgbuf(pcdev->dma_ch, buf);
	}

	/* (re)enable pixel clock */
	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | LCLK_EN);
	/* release FIFO reset */
	CAM_WRITE(pcdev, MODE, mode);
}

/* Stop the pixel clock and the DMA channel, leaving interrupts armed. */
static void suspend_capture(struct omap1_cam_dev *pcdev)
{
	u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);

	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
	omap_stop_dma(pcdev->dma_ch);
}

/* Mask all capture interrupts and the DMA enable bit in MODE. */
static void disable_capture(struct omap1_cam_dev *pcdev)
{
	u32 mode = CAM_READ_CACHE(pcdev, MODE);

	CAM_WRITE(pcdev, MODE, mode & ~(IRQ_MASK | DMA));
}

/* videobuf .buf_queue: append the buffer to the capture list; if no
 * capture is running, program the FIFO threshold and start one.
 * NOTE(review): presumably called with pcdev->lock held by the videobuf
 * core, as the in-body comment implies — confirm against the qops setup. */
static void omap1_videobuf_queue(struct videobuf_queue *vq,
		struct videobuf_buffer *vb)
{
	struct soc_camera_device *icd = vq->priv_data;
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct omap1_cam_dev *pcdev = ici->priv;
	struct omap1_cam_buf *buf;
	u32 mode;

	list_add_tail(&vb->queue, &pcdev->capture);
	vb->state = VIDEOBUF_QUEUED;

	if (pcdev->active) {
		/*
		 * Capture in progress, so don't touch pcdev->ready even if
		 * empty. Since the transfer of the DMA programming register
		 * set content to the DMA working register set is done
		 * automatically by the DMA hardware, this can pretty well
		 * happen while we are keeping the lock here. Leave fetching
		 * it from the queue to be done when a next DMA interrupt
		 * occures instead.
		 */
		return;
	}

	WARN_ON(pcdev->ready);

	buf = prepare_next_vb(pcdev);
	if (WARN_ON(!buf))
		return;

	pcdev->active = buf;
	pcdev->ready = NULL;

	dev_dbg(icd->parent,
		"%s: capture not active, setup FIFO, start DMA\n", __func__);
	mode = CAM_READ_CACHE(pcdev, MODE) & ~THRESHOLD_MASK;
	mode |= THRESHOLD_LEVEL(pcdev->vb_mode) << THRESHOLD_SHIFT;
	CAM_WRITE(pcdev, MODE, mode | EN_FIFO_FULL | DMA);

	if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
		/*
		 * In SG mode, the above prepare_next_vb() didn't actually
		 * put anything into the DMA programming register set,
		 * so we have to do it now, before activating DMA.
		 */
		try_next_sgbuf(pcdev->dma_ch, buf);
	}

	start_capture(pcdev);
}

/* videobuf .buf_release: log the buffer state and free its memory. */
static void omap1_videobuf_release(struct videobuf_queue *vq,
				 struct videobuf_buffer *vb)
{
	struct omap1_cam_buf *buf =
			container_of(vb, struct omap1_cam_buf, vb);
	struct soc_camera_device *icd = vq->priv_data;
	struct device *dev = icd->parent;
	struct soc_camera_host *ici = to_soc_camera_host(dev);
	struct omap1_cam_dev *pcdev = ici->priv;

	switch (vb->state) {
	case VIDEOBUF_DONE:
		dev_dbg(dev, "%s (done)\n", __func__);
		break;
	case VIDEOBUF_ACTIVE:
		dev_dbg(dev, "%s (active)\n", __func__);
		break;
	case VIDEOBUF_QUEUED:
		dev_dbg(dev, "%s (queued)\n", __func__);
		break;
	case VIDEOBUF_PREPARED:
		dev_dbg(dev, "%s (prepared)\n", __func__);
		break;
	default:
		dev_dbg(dev, "%s (unknown %d)\n", __func__, vb->state);
		break;
	}

	free_buffer(vq, buf, pcdev->vb_mode);
}

/* Complete the active buffer with @result and rotate in the next one.
 * NOTE(review): this function continues past the end of this chunk —
 * the remainder of its buffer-swap logic is not visible here. */
static void videobuf_done(struct omap1_cam_dev *pcdev,
		enum videobuf_state result)
{
	struct omap1_cam_buf *buf = pcdev->active;
	struct videobuf_buffer *vb;
	struct device *dev = pcdev->icd->parent;

	if (WARN_ON(!buf)) {
		suspend_capture(pcdev);
		disable_capture(pcdev);
		return;
	}

	if (result ==
VIDEOBUF_ERROR) suspend_capture(pcdev); vb = &buf->vb; if (waitqueue_active(&vb->done)) { if (!pcdev->ready && result != VIDEOBUF_ERROR) { /* * No next buffer has been entered into the DMA * programming register set on time (could be done only * while the previous DMA interurpt was processed, not * later), so the last DMA block, be it a whole buffer * if in CONTIG or its last sgbuf if in SG mode, is * about to be reused by the just autoreinitialized DMA * engine, and overwritten with next frame data. Best we * can do is stopping the capture as soon as possible, * hopefully before the next frame start. */ suspend_capture(pcdev); } vb->state = result; v4l2_get_timestamp(&vb->ts); if (result != VIDEOBUF_ERROR) vb->field_count++; wake_up(&vb->done); /* shift in next buffer */ buf = pcdev->ready; pcdev->active = buf; pcdev->ready = NULL; if (!buf) { /* * No next buffer was ready on time (see above), so * indicate error condition to force capture restart or * stop, depending on next buffer already queued or not. */ result = VIDEOBUF_ERROR; prepare_next_vb(pcdev); buf = pcdev->ready; pcdev->active = buf; pcdev->ready = NULL; } } else if (pcdev->ready) { /* * In both CONTIG and SG mode, the DMA engine has possibly * been already autoreinitialized with the preprogrammed * pcdev->ready buffer. We can either accept this fact * and just swap the buffers, or provoke an error condition * and restart capture. The former seems less intrusive. */ dev_dbg(dev, "%s: nobody waiting on videobuf, swap with next\n", __func__); pcdev->active = pcdev->ready; if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) { /* * In SG mode, we have to make sure that the buffer we * are putting back into the pcdev->ready is marked * fresh. */ buf->sgbuf = NULL; } pcdev->ready = buf; buf = pcdev->active; } else { /* * No next buffer has been entered into * the DMA programming register set on time. 
*/ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) { /* * In CONTIG mode, the DMA engine has already been * reinitialized with the current buffer. Best we can do * is not touching it. */ dev_dbg(dev, "%s: nobody waiting on videobuf, reuse it\n", __func__); } else { /* * In SG mode, the DMA engine has just been * autoreinitialized with the last sgbuf from the * current list. Restart capture in order to transfer * next frame start into the first sgbuf, not the last * one. */ if (result != VIDEOBUF_ERROR) { suspend_capture(pcdev); result = VIDEOBUF_ERROR; } } } if (!buf) { dev_dbg(dev, "%s: no more videobufs, stop capture\n", __func__); disable_capture(pcdev); return; } if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) { /* * In CONTIG mode, the current buffer parameters had already * been entered into the DMA programming register set while the * buffer was fetched with prepare_next_vb(), they may have also * been transferred into the runtime set and already active if * the DMA still running. */ } else { /* In SG mode, extra steps are required */ if (result == VIDEOBUF_ERROR) /* make sure we (re)use sglist from start on error */ buf->sgbuf = NULL; /* * In any case, enter the next sgbuf parameters into the DMA * programming register set. They will be used either during * nearest DMA autoreinitialization or, in case of an error, * on DMA startup below. */ try_next_sgbuf(pcdev->dma_ch, buf); } if (result == VIDEOBUF_ERROR) { dev_dbg(dev, "%s: videobuf error; reset FIFO, restart DMA\n", __func__); start_capture(pcdev); /* * In SG mode, the above also resulted in the next sgbuf * parameters being entered into the DMA programming register * set, making them ready for next DMA autoreinitialization. */ } /* * Finally, try fetching next buffer. * In CONTIG mode, it will also enter it into the DMA programming * register set, making it ready for next DMA autoreinitialization. 
*/ prepare_next_vb(pcdev); } static void dma_isr(int channel, unsigned short status, void *data) { struct omap1_cam_dev *pcdev = data; struct omap1_cam_buf *buf = pcdev->active; unsigned long flags; spin_lock_irqsave(&pcdev->lock, flags); if (WARN_ON(!buf)) { suspend_capture(pcdev); disable_capture(pcdev); goto out; } if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) { /* * In CONTIG mode, assume we have just managed to collect the * whole frame, hopefully before our end of frame watchdog is * triggered. Then, all we have to do is disabling the watchdog * for this frame, and calling videobuf_done() with success * indicated. */ CAM_WRITE(pcdev, MODE, CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN); videobuf_done(pcdev, VIDEOBUF_DONE); } else { /* * In SG mode, we have to process every sgbuf from the current * sglist, one after another. */ if (buf->sgbuf) { /* * Current sglist not completed yet, try fetching next * sgbuf, hopefully putting it into the DMA programming * register set, making it ready for next DMA * autoreinitialization. */ try_next_sgbuf(pcdev->dma_ch, buf); if (buf->sgbuf) goto out; /* * No more sgbufs left in the current sglist. This * doesn't mean that the whole videobuffer is already * complete, but only that the last sgbuf from the * current sglist is about to be filled. It will be * ready on next DMA interrupt, signalled with the * buf->sgbuf set back to NULL. */ if (buf->result != VIDEOBUF_ERROR) { /* * Video frame collected without errors so far, * we can prepare for collecting a next one * as soon as DMA gets autoreinitialized * after the current (last) sgbuf is completed. 
*/ buf = prepare_next_vb(pcdev); if (!buf) goto out; try_next_sgbuf(pcdev->dma_ch, buf); goto out; } } /* end of videobuf */ videobuf_done(pcdev, buf->result); } out: spin_unlock_irqrestore(&pcdev->lock, flags); } static irqreturn_t cam_isr(int irq, void *data) { struct omap1_cam_dev *pcdev = data; struct device *dev = pcdev->icd->parent; struct omap1_cam_buf *buf = pcdev->active; u32 it_status; unsigned long flags; it_status = CAM_READ(pcdev, IT_STATUS); if (!it_status) return IRQ_NONE; spin_lock_irqsave(&pcdev->lock, flags); if (WARN_ON(!buf)) { dev_warn(dev, "%s: unhandled camera interrupt, status == %#x\n", __func__, it_status); suspend_capture(pcdev); disable_capture(pcdev); goto out; } if (unlikely(it_status & FIFO_FULL)) { dev_warn(dev, "%s: FIFO overflow\n", __func__); } else if (it_status & V_DOWN) { /* end of video frame watchdog */ if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) { /* * In CONTIG mode, the watchdog is disabled with * successful DMA end of block interrupt, and reenabled * on next frame start. If we get here, there is nothing * to check, we must be out of sync. */ } else { if (buf->sgcount == 2) { /* * If exactly 2 sgbufs from the next sglist have * been programmed into the DMA engine (the * first one already transferred into the DMA * runtime register set, the second one still * in the programming set), then we are in sync. */ goto out; } } dev_notice(dev, "%s: unexpected end of video frame\n", __func__); } else if (it_status & V_UP) { u32 mode; if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) { /* * In CONTIG mode, we need this interrupt every frame * in oredr to reenable our end of frame watchdog. */ mode = CAM_READ_CACHE(pcdev, MODE); } else { /* * In SG mode, the below enabled end of frame watchdog * is kept on permanently, so we can turn this one shot * setup off. 
*/ mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_UP; } if (!(mode & EN_V_DOWN)) { /* (re)enable end of frame watchdog interrupt */ mode |= EN_V_DOWN; } CAM_WRITE(pcdev, MODE, mode); goto out; } else { dev_warn(dev, "%s: unhandled camera interrupt, status == %#x\n", __func__, it_status); goto out; } videobuf_done(pcdev, VIDEOBUF_ERROR); out: spin_unlock_irqrestore(&pcdev->lock, flags); return IRQ_HANDLED; } static struct videobuf_queue_ops omap1_videobuf_ops = { .buf_setup = omap1_videobuf_setup, .buf_prepare = omap1_videobuf_prepare, .buf_queue = omap1_videobuf_queue, .buf_release = omap1_videobuf_release, }; /* * SOC Camera host operations */ static void sensor_reset(struct omap1_cam_dev *pcdev, bool reset) { /* apply/release camera sensor reset if requested by platform data */ if (pcdev->pflags & OMAP1_CAMERA_RST_HIGH) CAM_WRITE(pcdev, GPIO, reset); else if (pcdev->pflags & OMAP1_CAMERA_RST_LOW) CAM_WRITE(pcdev, GPIO, !reset); } /* * The following two functions absolutely depend on the fact, that * there can be only one camera on OMAP1 camera sensor interface */ static int omap1_cam_add_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct omap1_cam_dev *pcdev = ici->priv; u32 ctrlclock; if (pcdev->icd) return -EBUSY; clk_enable(pcdev->clk); /* setup sensor clock */ ctrlclock = CAM_READ(pcdev, CTRLCLOCK); ctrlclock &= ~(CAMEXCLK_EN | MCLK_EN | DPLL_EN); CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); ctrlclock &= ~FOSCMOD_MASK; switch (pcdev->camexclk) { case 6000000: ctrlclock |= CAMEXCLK_EN | FOSCMOD_6MHz; break; case 8000000: ctrlclock |= CAMEXCLK_EN | FOSCMOD_8MHz | DPLL_EN; break; case 9600000: ctrlclock |= CAMEXCLK_EN | FOSCMOD_9_6MHz | DPLL_EN; break; case 12000000: ctrlclock |= CAMEXCLK_EN | FOSCMOD_12MHz; break; case 24000000: ctrlclock |= CAMEXCLK_EN | FOSCMOD_24MHz | DPLL_EN; default: break; } CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~DPLL_EN); /* enable internal clock */ ctrlclock |= MCLK_EN; 
CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); sensor_reset(pcdev, false); pcdev->icd = icd; dev_dbg(icd->parent, "OMAP1 Camera driver attached to camera %d\n", icd->devnum); return 0; } static void omap1_cam_remove_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct omap1_cam_dev *pcdev = ici->priv; u32 ctrlclock; BUG_ON(icd != pcdev->icd); suspend_capture(pcdev); disable_capture(pcdev); sensor_reset(pcdev, true); /* disable and release system clocks */ ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK); ctrlclock &= ~(MCLK_EN | DPLL_EN | CAMEXCLK_EN); CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); ctrlclock = (ctrlclock & ~FOSCMOD_MASK) | FOSCMOD_12MHz; CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | MCLK_EN); CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~MCLK_EN); clk_disable(pcdev->clk); pcdev->icd = NULL; dev_dbg(icd->parent, "OMAP1 Camera driver detached from camera %d\n", icd->devnum); } /* Duplicate standard formats based on host capability of byte swapping */ static const struct soc_mbus_lookup omap1_cam_formats[] = { { .code = V4L2_MBUS_FMT_UYVY8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_YUYV, .name = "YUYV", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_VYUY8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_YVYU, .name = "YVYU", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YUYV8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_UYVY, .name = "UYVY", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_YVYU8_2X8, .fmt = { .fourcc = V4L2_PIX_FMT_VYUY, .name = "VYUY", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = 
V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB555, .name = "RGB555", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB555X, .name = "RGB555X", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_BE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB565, .name = "RGB565", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_LE, .fmt = { .fourcc = V4L2_PIX_FMT_RGB565X, .name = "RGB565X", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_BE, .layout = SOC_MBUS_LAYOUT_PACKED, }, }, }; static int omap1_cam_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->parent; int formats = 0, ret; enum v4l2_mbus_pixelcode code; const struct soc_mbus_pixelfmt *fmt; ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); if (ret < 0) /* No more formats */ return 0; fmt = soc_mbus_get_fmtdesc(code); if (!fmt) { dev_warn(dev, "%s: unsupported format code #%d: %d\n", __func__, idx, code); return 0; } /* Check support for the requested bits-per-sample */ if (fmt->bits_per_sample != 8) return 0; switch (code) { case V4L2_MBUS_FMT_YUYV8_2X8: case V4L2_MBUS_FMT_YVYU8_2X8: case V4L2_MBUS_FMT_UYVY8_2X8: case V4L2_MBUS_FMT_VYUY8_2X8: case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE: case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: case V4L2_MBUS_FMT_RGB565_2X8_BE: case V4L2_MBUS_FMT_RGB565_2X8_LE: formats++; if (xlate) { xlate->host_fmt = soc_mbus_find_fmtdesc(code, omap1_cam_formats, ARRAY_SIZE(omap1_cam_formats)); xlate->code = code; xlate++; dev_dbg(dev, 
			"%s: providing format %s as byte swapped code #%d\n",
				__func__, xlate->host_fmt->name, code);
		}
		/*
		 * fall through: each supported code is additionally offered
		 * in pass-through mode by the default arm below
		 */
	default:
		if (xlate)
			dev_dbg(dev,
				"%s: providing format %s in pass-through mode\n",
				__func__, fmt->name);
	}
	/* count (and, if requested, fill in) the pass-through entry */
	formats++;
	if (xlate) {
		xlate->host_fmt = fmt;
		xlate->code = code;
		xlate++;
	}

	return formats;
}

/*
 * is_dma_aligned() - check that a line and a whole frame of the given
 * geometry satisfy the DMA element / frame size alignment constraints
 * of the selected videobuf mode.
 */
static bool is_dma_aligned(s32 bytes_per_line, unsigned int height,
		enum omap1_cam_vb_mode vb_mode)
{
	int size = bytes_per_line * height;

	return IS_ALIGNED(bytes_per_line, DMA_ELEMENT_SIZE) &&
		IS_ALIGNED(size, DMA_FRAME_SIZE(vb_mode) * DMA_ELEMENT_SIZE);
}

/*
 * dma_align() - try to adjust *width / *height so that the resulting
 * frame geometry is DMA aligned.
 *
 * Returns 1 if the geometry was already aligned, 0 if it has been
 * adjusted in place (rounded up when @enlarge is true, down otherwise),
 * or a negative error code if bytes per line could not be computed for
 * the given format.
 */
static int dma_align(int *width, int *height,
		const struct soc_mbus_pixelfmt *fmt,
		enum omap1_cam_vb_mode vb_mode, bool enlarge)
{
	s32 bytes_per_line = soc_mbus_bytes_per_line(*width, fmt);

	if (bytes_per_line < 0)
		return bytes_per_line;

	if (!is_dma_aligned(bytes_per_line, *height, vb_mode)) {
		/* pixel size as a power of two, derived from bytes per pixel */
		unsigned int pxalign = __fls(bytes_per_line / *width);
		unsigned int salign = DMA_FRAME_SHIFT(vb_mode) +
				DMA_ELEMENT_SHIFT - pxalign;
		/* when enlarging, give the height room to be rounded up */
		unsigned int incr = enlarge << salign;

		v4l_bound_align_image(width, 1, *width + incr, 0,
				height, 1, *height + incr, 0, salign);
		return 0;
	}
	return 1;
}

/*
 * subdev_call_with_sense() - wrap a v4l2_subdev video op with a
 * soc_camera_sense attached to the device, so that a pixel clock change
 * reported by the sensor can be validated against the platform limit.
 */
#define subdev_call_with_sense(pcdev, dev, icd, sd, function, args...)
\ ({ \ struct soc_camera_sense sense = { \ .master_clock = pcdev->camexclk, \ .pixel_clock_max = 0, \ }; \ int __ret; \ \ if (pcdev->pdata) \ sense.pixel_clock_max = pcdev->pdata->lclk_khz_max * 1000; \ icd->sense = &sense; \ __ret = v4l2_subdev_call(sd, video, function, ##args); \ icd->sense = NULL; \ \ if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { \ if (sense.pixel_clock > sense.pixel_clock_max) { \ dev_err(dev, \ "%s: pixel clock %lu set by the camera too high!\n", \ __func__, sense.pixel_clock); \ __ret = -EINVAL; \ } \ } \ __ret; \ }) static int set_mbus_format(struct omap1_cam_dev *pcdev, struct device *dev, struct soc_camera_device *icd, struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf, const struct soc_camera_format_xlate *xlate) { s32 bytes_per_line; int ret = subdev_call_with_sense(pcdev, dev, icd, sd, s_mbus_fmt, mf); if (ret < 0) { dev_err(dev, "%s: s_mbus_fmt failed\n", __func__); return ret; } if (mf->code != xlate->code) { dev_err(dev, "%s: unexpected pixel code change\n", __func__); return -EINVAL; } bytes_per_line = soc_mbus_bytes_per_line(mf->width, xlate->host_fmt); if (bytes_per_line < 0) { dev_err(dev, "%s: soc_mbus_bytes_per_line() failed\n", __func__); return bytes_per_line; } if (!is_dma_aligned(bytes_per_line, mf->height, pcdev->vb_mode)) { dev_err(dev, "%s: resulting geometry %ux%u not DMA aligned\n", __func__, mf->width, mf->height); return -EINVAL; } return 0; } static int omap1_cam_set_crop(struct soc_camera_device *icd, const struct v4l2_crop *crop) { const struct v4l2_rect *rect = &crop->c; const struct soc_camera_format_xlate *xlate = icd->current_fmt; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct omap1_cam_dev *pcdev = ici->priv; struct v4l2_mbus_framefmt mf; int ret; ret = subdev_call_with_sense(pcdev, dev, icd, sd, s_crop, crop); if (ret < 0) { dev_warn(dev, "%s: failed to crop to %ux%u@%u:%u\n", __func__, rect->width, 
rect->height, rect->left, rect->top); return ret; } ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) { dev_warn(dev, "%s: failed to fetch current format\n", __func__); return ret; } ret = dma_align(&mf.width, &mf.height, xlate->host_fmt, pcdev->vb_mode, false); if (ret < 0) { dev_err(dev, "%s: failed to align %ux%u %s with DMA\n", __func__, mf.width, mf.height, xlate->host_fmt->name); return ret; } if (!ret) { /* sensor returned geometry not DMA aligned, trying to fix */ ret = set_mbus_format(pcdev, dev, icd, sd, &mf, xlate); if (ret < 0) { dev_err(dev, "%s: failed to set format\n", __func__); return ret; } } icd->user_width = mf.width; icd->user_height = mf.height; return 0; } static int omap1_cam_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct omap1_cam_dev *pcdev = ici->priv; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); if (!xlate) { dev_warn(dev, "%s: format %#x not found\n", __func__, pix->pixelformat); return -EINVAL; } mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; ret = dma_align(&mf.width, &mf.height, xlate->host_fmt, pcdev->vb_mode, true); if (ret < 0) { dev_err(dev, "%s: failed to align %ux%u %s with DMA\n", __func__, pix->width, pix->height, xlate->host_fmt->name); return ret; } ret = set_mbus_format(pcdev, dev, icd, sd, &mf, xlate); if (ret < 0) { dev_err(dev, "%s: failed to set format\n", __func__); return ret; } pix->width = mf.width; pix->height = mf.height; pix->field = mf.field; pix->colorspace = mf.colorspace; icd->current_fmt = xlate; return 0; } static int omap1_cam_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct v4l2_subdev *sd 
= soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; int ret; /* TODO: limit to mx1 hardware capabilities */ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); if (!xlate) { dev_warn(icd->parent, "Format %#x not found\n", pix->pixelformat); return -EINVAL; } mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; /* limit to sensor capabilities */ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf); if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->field = mf.field; pix->colorspace = mf.colorspace; return 0; } static bool sg_mode; /* * Local mmap_mapper wrapper, * used for detecting videobuf-dma-contig buffer allocation failures * and switching to videobuf-dma-sg automatically for future attempts. */ static int omap1_cam_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) { struct soc_camera_device *icd = q->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct omap1_cam_dev *pcdev = ici->priv; int ret; ret = pcdev->mmap_mapper(q, buf, vma); if (ret == -ENOMEM) sg_mode = true; return ret; } static void omap1_cam_init_videobuf(struct videobuf_queue *q, struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct omap1_cam_dev *pcdev = ici->priv; if (!sg_mode) videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops, icd->parent, &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, sizeof(struct omap1_cam_buf), icd, &ici->host_lock); else videobuf_queue_sg_init(q, &omap1_videobuf_ops, icd->parent, &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, sizeof(struct omap1_cam_buf), icd, &ici->host_lock); /* use videobuf mode (auto)selected with the module parameter */ pcdev->vb_mode = sg_mode ? 
OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG; /* * Ensure we substitute the videobuf-dma-contig version of the * mmap_mapper() callback with our own wrapper, used for switching * automatically to videobuf-dma-sg on buffer allocation failure. */ if (!sg_mode && q->int_ops->mmap_mapper != omap1_cam_mmap_mapper) { pcdev->mmap_mapper = q->int_ops->mmap_mapper; q->int_ops->mmap_mapper = omap1_cam_mmap_mapper; } } static int omap1_cam_reqbufs(struct soc_camera_device *icd, struct v4l2_requestbuffers *p) { int i; /* * This is for locking debugging only. I removed spinlocks and now I * check whether .prepare is ever called on a linked buffer, or whether * a dma IRQ can occur for an in-work or unlinked buffer. Until now * it hadn't triggered */ for (i = 0; i < p->count; i++) { struct omap1_cam_buf *buf = container_of(icd->vb_vidq.bufs[i], struct omap1_cam_buf, vb); buf->inwork = 0; INIT_LIST_HEAD(&buf->vb.queue); } return 0; } static int omap1_cam_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { /* cap->name is set by the friendly caller:-> */ strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; return 0; } static int omap1_cam_set_bus_param(struct soc_camera_device *icd) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct omap1_cam_dev *pcdev = ici->priv; u32 pixfmt = icd->current_fmt->host_fmt->fourcc; const struct soc_camera_format_xlate *xlate; const struct soc_mbus_pixelfmt *fmt; struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; unsigned long common_flags; u32 ctrlclock, mode; int ret; ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); if (!ret) { common_flags = soc_mbus_config_compatible(&cfg, SOCAM_BUS_FLAGS); if (!common_flags) { dev_warn(dev, "Flags incompatible: camera 0x%x, host 0x%x\n", cfg.flags, SOCAM_BUS_FLAGS); return -EINVAL; } } else if (ret != -ENOIOCTLCMD) { 
return ret; } else { common_flags = SOCAM_BUS_FLAGS; } /* Make choices, possibly based on platform configuration */ if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) && (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) { if (!pcdev->pdata || pcdev->pdata->flags & OMAP1_CAMERA_LCLK_RISING) common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING; else common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING; } cfg.flags = common_flags; ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg); if (ret < 0 && ret != -ENOIOCTLCMD) { dev_dbg(dev, "camera s_mbus_config(0x%lx) returned %d\n", common_flags, ret); return ret; } ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK); if (ctrlclock & LCLK_EN) CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN); if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) { dev_dbg(dev, "CTRLCLOCK_REG |= POLCLK\n"); ctrlclock |= POLCLK; } else { dev_dbg(dev, "CTRLCLOCK_REG &= ~POLCLK\n"); ctrlclock &= ~POLCLK; } CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN); if (ctrlclock & LCLK_EN) CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock); /* select bus endianess */ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); fmt = xlate->host_fmt; mode = CAM_READ(pcdev, MODE) & ~(RAZ_FIFO | IRQ_MASK | DMA); if (fmt->order == SOC_MBUS_ORDER_LE) { dev_dbg(dev, "MODE_REG &= ~ORDERCAMD\n"); CAM_WRITE(pcdev, MODE, mode & ~ORDERCAMD); } else { dev_dbg(dev, "MODE_REG |= ORDERCAMD\n"); CAM_WRITE(pcdev, MODE, mode | ORDERCAMD); } return 0; } static unsigned int omap1_cam_poll(struct file *file, poll_table *pt) { struct soc_camera_device *icd = file->private_data; struct omap1_cam_buf *buf; buf = list_entry(icd->vb_vidq.stream.next, struct omap1_cam_buf, vb.stream); poll_wait(file, &buf->vb.done, pt); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) return POLLIN | POLLRDNORM; return 0; } static struct soc_camera_host_ops omap1_host_ops = { .owner = THIS_MODULE, .add = omap1_cam_add_device, .remove = omap1_cam_remove_device, .get_formats = omap1_cam_get_formats, .set_crop = 
omap1_cam_set_crop, .set_fmt = omap1_cam_set_fmt, .try_fmt = omap1_cam_try_fmt, .init_videobuf = omap1_cam_init_videobuf, .reqbufs = omap1_cam_reqbufs, .querycap = omap1_cam_querycap, .set_bus_param = omap1_cam_set_bus_param, .poll = omap1_cam_poll, }; static int omap1_cam_probe(struct platform_device *pdev) { struct omap1_cam_dev *pcdev; struct resource *res; struct clk *clk; void __iomem *base; unsigned int irq; int err = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || (int)irq <= 0) { err = -ENODEV; goto exit; } clk = clk_get(&pdev->dev, "armper_ck"); if (IS_ERR(clk)) { err = PTR_ERR(clk); goto exit; } pcdev = kzalloc(sizeof(*pcdev) + resource_size(res), GFP_KERNEL); if (!pcdev) { dev_err(&pdev->dev, "Could not allocate pcdev\n"); err = -ENOMEM; goto exit_put_clk; } pcdev->res = res; pcdev->clk = clk; pcdev->pdata = pdev->dev.platform_data; if (pcdev->pdata) { pcdev->pflags = pcdev->pdata->flags; pcdev->camexclk = pcdev->pdata->camexclk_khz * 1000; } switch (pcdev->camexclk) { case 6000000: case 8000000: case 9600000: case 12000000: case 24000000: break; default: /* pcdev->camexclk != 0 => pcdev->pdata != NULL */ dev_warn(&pdev->dev, "Incorrect sensor clock frequency %ld kHz, " "should be one of 0, 6, 8, 9.6, 12 or 24 MHz, " "please correct your platform data\n", pcdev->pdata->camexclk_khz); pcdev->camexclk = 0; case 0: dev_info(&pdev->dev, "Not providing sensor clock\n"); } INIT_LIST_HEAD(&pcdev->capture); spin_lock_init(&pcdev->lock); /* * Request the region. 
*/ if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) { err = -EBUSY; goto exit_kfree; } base = ioremap(res->start, resource_size(res)); if (!base) { err = -ENOMEM; goto exit_release; } pcdev->irq = irq; pcdev->base = base; sensor_reset(pcdev, true); err = omap_request_dma(OMAP_DMA_CAMERA_IF_RX, DRIVER_NAME, dma_isr, (void *)pcdev, &pcdev->dma_ch); if (err < 0) { dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n"); err = -EBUSY; goto exit_iounmap; } dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch); /* preconfigure DMA */ omap_set_dma_src_params(pcdev->dma_ch, OMAP_DMA_PORT_TIPB, OMAP_DMA_AMODE_CONSTANT, res->start + REG_CAMDATA, 0, 0); omap_set_dma_dest_burst_mode(pcdev->dma_ch, OMAP_DMA_DATA_BURST_4); /* setup DMA autoinitialization */ omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch); err = request_irq(pcdev->irq, cam_isr, 0, DRIVER_NAME, pcdev); if (err) { dev_err(&pdev->dev, "Camera interrupt register failed\n"); goto exit_free_dma; } pcdev->soc_host.drv_name = DRIVER_NAME; pcdev->soc_host.ops = &omap1_host_ops; pcdev->soc_host.priv = pcdev; pcdev->soc_host.v4l2_dev.dev = &pdev->dev; pcdev->soc_host.nr = pdev->id; err = soc_camera_host_register(&pcdev->soc_host); if (err) goto exit_free_irq; dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n"); return 0; exit_free_irq: free_irq(pcdev->irq, pcdev); exit_free_dma: omap_free_dma(pcdev->dma_ch); exit_iounmap: iounmap(base); exit_release: release_mem_region(res->start, resource_size(res)); exit_kfree: kfree(pcdev); exit_put_clk: clk_put(clk); exit: return err; } static int omap1_cam_remove(struct platform_device *pdev) { struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct omap1_cam_dev *pcdev = container_of(soc_host, struct omap1_cam_dev, soc_host); struct resource *res; free_irq(pcdev->irq, pcdev); omap_free_dma(pcdev->dma_ch); soc_camera_host_unregister(soc_host); iounmap(pcdev->base); res = pcdev->res; release_mem_region(res->start, 
resource_size(res)); clk_put(pcdev->clk); kfree(pcdev); dev_info(&pdev->dev, "OMAP1 Camera Interface driver unloaded\n"); return 0; } static struct platform_driver omap1_cam_driver = { .driver = { .name = DRIVER_NAME, }, .probe = omap1_cam_probe, .remove = omap1_cam_remove, }; module_platform_driver(omap1_cam_driver); module_param(sg_mode, bool, 0644); MODULE_PARM_DESC(sg_mode, "videobuf mode, 0: dma-contig (default), 1: dma-sg"); MODULE_DESCRIPTION("OMAP1 Camera Interface driver"); MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRIVER_VERSION); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
EPDCenter/android_kernel_rockchip
net/rxrpc/ar-output.c
2605
18400
/* RxRPC packet transmission * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/net.h> #include <linux/gfp.h> #include <linux/skbuff.h> #include <linux/circ_buf.h> #include <linux/export.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include "ar-internal.h" int rxrpc_resend_timeout = 4; static int rxrpc_send_data(struct kiocb *iocb, struct rxrpc_sock *rx, struct rxrpc_call *call, struct msghdr *msg, size_t len); /* * extract control messages from the sendmsg() control buffer */ static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg, unsigned long *user_call_ID, enum rxrpc_command *command, u32 *abort_code, bool server) { struct cmsghdr *cmsg; int len; *command = RXRPC_CMD_SEND_DATA; if (msg->msg_controllen == 0) return -EINVAL; for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); _debug("CMSG %d, %d, %d", cmsg->cmsg_level, cmsg->cmsg_type, len); if (cmsg->cmsg_level != SOL_RXRPC) continue; switch (cmsg->cmsg_type) { case RXRPC_USER_CALL_ID: if (msg->msg_flags & MSG_CMSG_COMPAT) { if (len != sizeof(u32)) return -EINVAL; *user_call_ID = *(u32 *) CMSG_DATA(cmsg); } else { if (len != sizeof(unsigned long)) return -EINVAL; *user_call_ID = *(unsigned long *) CMSG_DATA(cmsg); } _debug("User Call ID %lx", *user_call_ID); break; case RXRPC_ABORT: if (*command != RXRPC_CMD_SEND_DATA) return -EINVAL; *command = RXRPC_CMD_SEND_ABORT; if (len != sizeof(*abort_code)) return -EINVAL; *abort_code = *(unsigned int *) CMSG_DATA(cmsg); _debug("Abort %x", *abort_code); if (*abort_code == 0) return -EINVAL; break; case 
RXRPC_ACCEPT:
			if (*command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			*command = RXRPC_CMD_ACCEPT;
			/* ACCEPT carries no payload and is server-side only */
			if (len != 0)
				return -EINVAL;
			if (!server)
				return -EISCONN;
			break;

		default:
			return -EINVAL;
		}
	}

	_leave(" = 0");
	return 0;
}

/*
 * abort a call, sending an ABORT packet to the peer
 * - marks the call locally aborted and queues it so that the ABORT
 *   event set here gets processed; only calls not yet complete are
 *   affected
 */
static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);

	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = abort_code;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		/* cancel pending resend and ACK timers and their events */
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_ACK, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		rxrpc_queue_call(call);
	}

	write_unlock_bh(&call->state_lock);
}

/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
			 struct rxrpc_transport *trans, struct msghdr *msg,
			 size_t len)
{
	struct rxrpc_conn_bundle *bundle;
	enum rxrpc_command cmd;
	struct rxrpc_call *call;
	unsigned long user_call_ID = 0;
	struct key *key;
	__be16 service_id;
	u32 abort_code = 0;
	int ret;

	_enter("");

	ASSERT(trans != NULL);

	/* parse the control messages (call ID, abort, accept) */
	ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
				 false);
	if (ret < 0)
		return ret;

	bundle = NULL;
	if (trans) {
		/* an address in msg_name overrides the socket's service ID */
		service_id = rx->service_id;
		if (msg->msg_name) {
			struct sockaddr_rxrpc *srx =
				(struct sockaddr_rxrpc *) msg->msg_name;
			service_id = htons(srx->srx_service);
		}
		/* a key with no payload is treated as no key at all */
		key = rx->key;
		if (key && !rx->key->payload.data)
			key = NULL;
		bundle = rxrpc_get_bundle(rx, trans, key, service_id,
					  GFP_KERNEL);
		if (IS_ERR(bundle))
			return PTR_ERR(bundle);
	}

	/*
	 * NOTE(review): the "abort_code == 0" argument presumably tells
	 * rxrpc_get_client_call() whether a new call may be created (an
	 * abort command must target an existing call) — confirm against
	 * its definition in ar-call.c.
	 */
	call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
				     abort_code == 0, GFP_KERNEL);
	if (trans)
		rxrpc_put_bundle(trans, bundle);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
return PTR_ERR(call); } _debug("CALL %d USR %lx ST %d on CONN %p", call->debug_id, call->user_call_ID, call->state, call->conn); if (call->state >= RXRPC_CALL_COMPLETE) { /* it's too late for this call */ ret = -ESHUTDOWN; } else if (cmd == RXRPC_CMD_SEND_ABORT) { rxrpc_send_abort(call, abort_code); } else if (cmd != RXRPC_CMD_SEND_DATA) { ret = -EINVAL; } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) { /* request phase complete for this client call */ ret = -EPROTO; } else { ret = rxrpc_send_data(iocb, rx, call, msg, len); } rxrpc_put_call(call); _leave(" = %d", ret); return ret; } /** * rxrpc_kernel_send_data - Allow a kernel service to send data on a call * @call: The call to send data through * @msg: The data to send * @len: The amount of data to send * * Allow a kernel service to send data on a call. The call must be in an state * appropriate to sending data. No control data should be supplied in @msg, * nor should an address be supplied. MSG_MORE should be flagged if there's * more data to come, otherwise this data will end the transmission phase. 
*/ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg, size_t len) { int ret; _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]); ASSERTCMP(msg->msg_name, ==, NULL); ASSERTCMP(msg->msg_control, ==, NULL); lock_sock(&call->socket->sk); _debug("CALL %d USR %lx ST %d on CONN %p", call->debug_id, call->user_call_ID, call->state, call->conn); if (call->state >= RXRPC_CALL_COMPLETE) { ret = -ESHUTDOWN; /* it's too late for this call */ } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST && call->state != RXRPC_CALL_SERVER_ACK_REQUEST && call->state != RXRPC_CALL_SERVER_SEND_REPLY) { ret = -EPROTO; /* request phase complete for this client call */ } else { mm_segment_t oldfs = get_fs(); set_fs(KERNEL_DS); ret = rxrpc_send_data(NULL, call->socket, call, msg, len); set_fs(oldfs); } release_sock(&call->socket->sk); _leave(" = %d", ret); return ret; } EXPORT_SYMBOL(rxrpc_kernel_send_data); /** * rxrpc_kernel_abort_call - Allow a kernel service to abort a call * @call: The call to be aborted * @abort_code: The abort code to stick into the ABORT packet * * Allow a kernel service to abort a call, if it's still in an abortable state. 
*/ void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code) { _enter("{%d},%d", call->debug_id, abort_code); lock_sock(&call->socket->sk); _debug("CALL %d USR %lx ST %d on CONN %p", call->debug_id, call->user_call_ID, call->state, call->conn); if (call->state < RXRPC_CALL_COMPLETE) rxrpc_send_abort(call, abort_code); release_sock(&call->socket->sk); _leave(""); } EXPORT_SYMBOL(rxrpc_kernel_abort_call); /* * send a message through a server socket * - caller holds the socket locked */ int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, struct msghdr *msg, size_t len) { enum rxrpc_command cmd; struct rxrpc_call *call; unsigned long user_call_ID = 0; u32 abort_code = 0; int ret; _enter(""); ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code, true); if (ret < 0) return ret; if (cmd == RXRPC_CMD_ACCEPT) { call = rxrpc_accept_call(rx, user_call_ID); if (IS_ERR(call)) return PTR_ERR(call); rxrpc_put_call(call); return 0; } call = rxrpc_find_server_call(rx, user_call_ID); if (!call) return -EBADSLT; if (call->state >= RXRPC_CALL_COMPLETE) { ret = -ESHUTDOWN; goto out; } switch (cmd) { case RXRPC_CMD_SEND_DATA: if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST && call->state != RXRPC_CALL_SERVER_ACK_REQUEST && call->state != RXRPC_CALL_SERVER_SEND_REPLY) { /* Tx phase not yet begun for this call */ ret = -EPROTO; break; } ret = rxrpc_send_data(iocb, rx, call, msg, len); break; case RXRPC_CMD_SEND_ABORT: rxrpc_send_abort(call, abort_code); break; default: BUG(); } out: rxrpc_put_call(call); _leave(" = %d", ret); return ret; } /* * send a packet through the transport endpoint */ int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb) { struct kvec iov[1]; struct msghdr msg; int ret, opt; _enter(",{%d}", skb->len); iov[0].iov_base = skb->head; iov[0].iov_len = skb->len; msg.msg_name = &trans->peer->srx.transport.sin; msg.msg_namelen = sizeof(trans->peer->srx.transport.sin); msg.msg_control = NULL; msg.msg_controllen 
= 0; msg.msg_flags = 0; /* send the packet with the don't fragment bit set if we currently * think it's small enough */ if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) { down_read(&trans->local->defrag_sem); /* send the packet by UDP * - returns -EMSGSIZE if UDP would have to fragment the packet * to go out of the interface * - in which case, we'll have processed the ICMP error * message and update the peer record */ ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, iov[0].iov_len); up_read(&trans->local->defrag_sem); if (ret == -EMSGSIZE) goto send_fragmentable; _leave(" = %d [%u]", ret, trans->peer->maxdata); return ret; } send_fragmentable: /* attempt to send this message with fragmentation enabled */ _debug("send fragment"); down_write(&trans->local->defrag_sem); opt = IP_PMTUDISC_DONT; ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER, (char *) &opt, sizeof(opt)); if (ret == 0) { ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, iov[0].iov_len); opt = IP_PMTUDISC_DO; kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER, (char *) &opt, sizeof(opt)); } up_write(&trans->local->defrag_sem); _leave(" = %d [frag %u]", ret, trans->peer->maxdata); return ret; } /* * wait for space to appear in the transmit/ACK window * - caller holds the socket locked */ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx, struct rxrpc_call *call, long *timeo) { DECLARE_WAITQUEUE(myself, current); int ret; _enter(",{%d},%ld", CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz), *timeo); add_wait_queue(&call->tx_waitq, &myself); for (;;) { set_current_state(TASK_INTERRUPTIBLE); ret = 0; if (CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz) > 0) break; if (signal_pending(current)) { ret = sock_intr_errno(*timeo); break; } release_sock(&rx->sk); *timeo = schedule_timeout(*timeo); lock_sock(&rx->sk); } remove_wait_queue(&call->tx_waitq, &myself); set_current_state(TASK_RUNNING); _leave(" = %d", 
ret); return ret; } /* * attempt to schedule an instant Tx resend */ static inline void rxrpc_instant_resend(struct rxrpc_call *call) { read_lock_bh(&call->state_lock); if (try_to_del_timer_sync(&call->resend_timer) >= 0) { clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); if (call->state < RXRPC_CALL_COMPLETE && !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) rxrpc_queue_call(call); } read_unlock_bh(&call->state_lock); } /* * queue a packet for transmission, set the resend timer and attempt * to send the packet immediately */ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, bool last) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); int ret; _net("queue skb %p [%d]", skb, call->acks_head); ASSERT(call->acks_window != NULL); call->acks_window[call->acks_head] = (unsigned long) skb; smp_wmb(); call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1); if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) { _debug("________awaiting reply/ACK__________"); write_lock_bh(&call->state_lock); switch (call->state) { case RXRPC_CALL_CLIENT_SEND_REQUEST: call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; break; case RXRPC_CALL_SERVER_ACK_REQUEST: call->state = RXRPC_CALL_SERVER_SEND_REPLY; if (!last) break; case RXRPC_CALL_SERVER_SEND_REPLY: call->state = RXRPC_CALL_SERVER_AWAIT_ACK; break; default: break; } write_unlock_bh(&call->state_lock); } _proto("Tx DATA %%%u { #%u }", ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); sp->need_resend = false; sp->resend_at = jiffies + rxrpc_resend_timeout * HZ; if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) { _debug("run timer"); call->resend_timer.expires = sp->resend_at; add_timer(&call->resend_timer); } /* attempt to cancel the rx-ACK timer, deferring reply transmission if * we're ACK'ing the request phase of an incoming call */ ret = -EAGAIN; if (try_to_del_timer_sync(&call->ack_timer) >= 0) { /* the packet may be freed by rxrpc_process_call() before this * returns */ ret = 
rxrpc_send_packet(call->conn->trans, skb); _net("sent skb %p", skb); } else { _debug("failed to delete ACK timer"); } if (ret < 0) { _debug("need instant resend %d", ret); sp->need_resend = true; rxrpc_instant_resend(call); } _leave(""); } /* * send data through a socket * - must be called in process context * - caller holds the socket locked */ static int rxrpc_send_data(struct kiocb *iocb, struct rxrpc_sock *rx, struct rxrpc_call *call, struct msghdr *msg, size_t len) { struct rxrpc_skb_priv *sp; unsigned char __user *from; struct sk_buff *skb; struct iovec *iov; struct sock *sk = &rx->sk; long timeo; bool more; int ret, ioc, segment, copied; _enter(",,,{%zu},%zu", msg->msg_iovlen, len); timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); /* this should be in poll */ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) return -EPIPE; iov = msg->msg_iov; ioc = msg->msg_iovlen - 1; from = iov->iov_base; segment = iov->iov_len; iov++; more = msg->msg_flags & MSG_MORE; skb = call->tx_pending; call->tx_pending = NULL; copied = 0; do { int copy; if (segment > len) segment = len; _debug("SEGMENT %d @%p", segment, from); if (!skb) { size_t size, chunk, max, space; _debug("alloc"); if (CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz) <= 0) { ret = -EAGAIN; if (msg->msg_flags & MSG_DONTWAIT) goto maybe_error; ret = rxrpc_wait_for_tx_window(rx, call, &timeo); if (ret < 0) goto maybe_error; } max = call->conn->trans->peer->maxdata; max -= call->conn->security_size; max &= ~(call->conn->size_align - 1UL); chunk = max; if (chunk > len && !more) chunk = len; space = chunk + call->conn->size_align; space &= ~(call->conn->size_align - 1UL); size = space + call->conn->header_size; _debug("SIZE: %zu/%zu/%zu", chunk, space, size); /* create a buffer that we can retain until it's ACK'd */ skb = sock_alloc_send_skb( sk, size, msg->msg_flags & MSG_DONTWAIT, &ret); if (!skb) goto maybe_error; rxrpc_new_skb(skb); 
_debug("ALLOC SEND %p", skb); ASSERTCMP(skb->mark, ==, 0); _debug("HS: %u", call->conn->header_size); skb_reserve(skb, call->conn->header_size); skb->len += call->conn->header_size; sp = rxrpc_skb(skb); sp->remain = chunk; if (sp->remain > skb_tailroom(skb)) sp->remain = skb_tailroom(skb); _net("skb: hr %d, tr %d, hl %d, rm %d", skb_headroom(skb), skb_tailroom(skb), skb_headlen(skb), sp->remain); skb->ip_summed = CHECKSUM_UNNECESSARY; } _debug("append"); sp = rxrpc_skb(skb); /* append next segment of data to the current buffer */ copy = skb_tailroom(skb); ASSERTCMP(copy, >, 0); if (copy > segment) copy = segment; if (copy > sp->remain) copy = sp->remain; _debug("add"); ret = skb_add_data(skb, from, copy); _debug("added"); if (ret < 0) goto efault; sp->remain -= copy; skb->mark += copy; copied += copy; len -= copy; segment -= copy; from += copy; while (segment == 0 && ioc > 0) { from = iov->iov_base; segment = iov->iov_len; iov++; ioc--; } if (len == 0) { segment = 0; ioc = 0; } /* check for the far side aborting the call or a network error * occurring */ if (call->state > RXRPC_CALL_COMPLETE) goto call_aborted; /* add the packet to the send queue if it's now full */ if (sp->remain <= 0 || (segment == 0 && !more)) { struct rxrpc_connection *conn = call->conn; size_t pad; /* pad out if we're using security */ if (conn->security) { pad = conn->security_size + skb->mark; pad = conn->size_align - pad; pad &= conn->size_align - 1; _debug("pad %zu", pad); if (pad) memset(skb_put(skb, pad), 0, pad); } sp->hdr.epoch = conn->epoch; sp->hdr.cid = call->cid; sp->hdr.callNumber = call->call_id; sp->hdr.seq = htonl(atomic_inc_return(&call->sequence)); sp->hdr.serial = htonl(atomic_inc_return(&conn->serial)); sp->hdr.type = RXRPC_PACKET_TYPE_DATA; sp->hdr.userStatus = 0; sp->hdr.securityIndex = conn->security_ix; sp->hdr._rsvd = 0; sp->hdr.serviceId = conn->service_id; sp->hdr.flags = conn->out_clientflag; if (len == 0 && !more) sp->hdr.flags |= RXRPC_LAST_PACKET; else if 
(CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz) > 1) sp->hdr.flags |= RXRPC_MORE_PACKETS; ret = rxrpc_secure_packet( call, skb, skb->mark, skb->head + sizeof(struct rxrpc_header)); if (ret < 0) goto out; memcpy(skb->head, &sp->hdr, sizeof(struct rxrpc_header)); rxrpc_queue_packet(call, skb, segment == 0 && !more); skb = NULL; } } while (segment > 0); success: ret = copied; out: call->tx_pending = skb; _leave(" = %d", ret); return ret; call_aborted: rxrpc_free_skb(skb); if (call->state == RXRPC_CALL_NETWORK_ERROR) ret = call->conn->trans->peer->net_error; else ret = -ECONNABORTED; _leave(" = %d", ret); return ret; maybe_error: if (copied) goto success; goto out; efault: ret = -EFAULT; goto out; }
gpl-2.0
aatjitra/PR26
arch/sh/boards/mach-hp6xx/pm.c
3117
3174
/* * hp6x0 Power Management Routines * * Copyright (c) 2006 Andriy Skulysh <askulsyh@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License. */ #include <linux/init.h> #include <linux/suspend.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/delay.h> #include <linux/gfp.h> #include <asm/io.h> #include <asm/hd64461.h> #include <mach/hp6xx.h> #include <cpu/dac.h> #include <asm/freq.h> #include <asm/watchdog.h> #define INTR_OFFSET 0x600 #define STBCR 0xffffff82 #define STBCR2 0xffffff88 #define STBCR_STBY 0x80 #define STBCR_MSTP2 0x04 #define MCR 0xffffff68 #define RTCNT 0xffffff70 #define MCR_RMODE 2 #define MCR_RFSH 4 extern u8 wakeup_start; extern u8 wakeup_end; static void pm_enter(void) { u8 stbcr, csr; u16 frqcr, mcr; u32 vbr_new, vbr_old; set_bl_bit(); /* set wdt */ csr = sh_wdt_read_csr(); csr &= ~WTCSR_TME; csr |= WTCSR_CKS_4096; sh_wdt_write_csr(csr); csr = sh_wdt_read_csr(); sh_wdt_write_cnt(0); /* disable PLL1 */ frqcr = __raw_readw(FRQCR); frqcr &= ~(FRQCR_PLLEN | FRQCR_PSTBY); __raw_writew(frqcr, FRQCR); /* enable standby */ stbcr = __raw_readb(STBCR); __raw_writeb(stbcr | STBCR_STBY | STBCR_MSTP2, STBCR); /* set self-refresh */ mcr = __raw_readw(MCR); __raw_writew(mcr & ~MCR_RFSH, MCR); /* set interrupt handler */ asm volatile("stc vbr, %0" : "=r" (vbr_old)); vbr_new = get_zeroed_page(GFP_ATOMIC); udelay(50); memcpy((void*)(vbr_new + INTR_OFFSET), &wakeup_start, &wakeup_end - &wakeup_start); asm volatile("ldc %0, vbr" : : "r" (vbr_new)); __raw_writew(0, RTCNT); __raw_writew(mcr | MCR_RFSH | MCR_RMODE, MCR); cpu_sleep(); asm volatile("ldc %0, vbr" : : "r" (vbr_old)); free_page(vbr_new); /* enable PLL1 */ frqcr = __raw_readw(FRQCR); frqcr |= FRQCR_PSTBY; __raw_writew(frqcr, FRQCR); udelay(50); frqcr |= FRQCR_PLLEN; __raw_writew(frqcr, FRQCR); __raw_writeb(stbcr, STBCR); clear_bl_bit(); } static int hp6x0_pm_enter(suspend_state_t state) { u8 stbcr, stbcr2; 
#ifdef CONFIG_HD64461_ENABLER u8 scr; u16 hd64461_stbcr; #endif #ifdef CONFIG_HD64461_ENABLER outb(0, HD64461_PCC1CSCIER); scr = inb(HD64461_PCC1SCR); scr |= HD64461_PCCSCR_VCC1; outb(scr, HD64461_PCC1SCR); hd64461_stbcr = inw(HD64461_STBCR); hd64461_stbcr |= HD64461_STBCR_SPC1ST; outw(hd64461_stbcr, HD64461_STBCR); #endif __raw_writeb(0x1f, DACR); stbcr = __raw_readb(STBCR); __raw_writeb(0x01, STBCR); stbcr2 = __raw_readb(STBCR2); __raw_writeb(0x7f , STBCR2); outw(0xf07f, HD64461_SCPUCR); pm_enter(); outw(0, HD64461_SCPUCR); __raw_writeb(stbcr, STBCR); __raw_writeb(stbcr2, STBCR2); #ifdef CONFIG_HD64461_ENABLER hd64461_stbcr = inw(HD64461_STBCR); hd64461_stbcr &= ~HD64461_STBCR_SPC1ST; outw(hd64461_stbcr, HD64461_STBCR); outb(0x4c, HD64461_PCC1CSCIER); outb(0x00, HD64461_PCC1CSCR); #endif return 0; } static const struct platform_suspend_ops hp6x0_pm_ops = { .enter = hp6x0_pm_enter, .valid = suspend_valid_only_mem, }; static int __init hp6x0_pm_init(void) { suspend_set_ops(&hp6x0_pm_ops); return 0; } late_initcall(hp6x0_pm_init);
gpl-2.0
sjurbren/modem-ipc
arch/microblaze/pci/xilinx_pci.c
7981
4579
/* * PCI support for Xilinx plbv46_pci soft-core which can be used on * Xilinx Virtex ML410 / ML510 boards. * * Copyright 2009 Roderick Colenbrander * Copyright 2009 Secret Lab Technologies Ltd. * * The pci bridge fixup code was copied from ppc4xx_pci.c and was written * by Benjamin Herrenschmidt. * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/ioport.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/pci.h> #include <asm/io.h> #define XPLB_PCI_ADDR 0x10c #define XPLB_PCI_DATA 0x110 #define XPLB_PCI_BUS 0x114 #define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \ PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY) static struct of_device_id xilinx_pci_match[] = { { .compatible = "xlnx,plbv46-pci-1.03.a", }, {} }; /** * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration. */ static void xilinx_pci_fixup_bridge(struct pci_dev *dev) { struct pci_controller *hose; int i; if (dev->devfn || dev->bus->self) return; hose = pci_bus_to_host(dev->bus); if (!hose) return; if (!of_match_node(xilinx_pci_match, hose->dn)) return; /* Hide the PCI host BARs from the kernel as their content doesn't * fit well in the resource management */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; dev->resource[i].flags = 0; } dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n", pci_name(dev)); } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge); #ifdef DEBUG /** * xilinx_pci_exclude_device - Don't do config access for non-root bus * * This is a hack. Config access to any bus other than bus 0 does not * currently work on the ML510 so we prevent it here. 
*/ static int xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn) { return (bus != 0); } /** * xilinx_early_pci_scan - List pci config space for available devices * * List pci devices in very early phase. */ void __init xilinx_early_pci_scan(struct pci_controller *hose) { u32 bus = 0; u32 val, dev, func, offset; /* Currently we have only 2 device connected - up-to 32 devices */ for (dev = 0; dev < 2; dev++) { /* List only first function number - up-to 8 functions */ for (func = 0; func < 1; func++) { printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func); /* read the first 64 standardized bytes */ /* Up-to 192 bytes can be list of capabilities */ for (offset = 0; offset < 64; offset += 4) { early_read_config_dword(hose, bus, PCI_DEVFN(dev, func), offset, &val); if (offset == 0 && val == 0xFFFFFFFF) { printk(KERN_CONT "\nABSENT"); break; } if (!(offset % 0x10)) printk(KERN_CONT "\n%04x: ", offset); printk(KERN_CONT "%08x ", val); } printk(KERN_INFO "\n"); } } } #else void __init xilinx_early_pci_scan(struct pci_controller *hose) { } #endif /** * xilinx_pci_init - Find and register a Xilinx PCI host bridge */ void __init xilinx_pci_init(void) { struct pci_controller *hose; struct resource r; void __iomem *pci_reg; struct device_node *pci_node; pci_node = of_find_matching_node(NULL, xilinx_pci_match); if (!pci_node) return; if (of_address_to_resource(pci_node, 0, &r)) { pr_err("xilinx-pci: cannot resolve base address\n"); return; } hose = pcibios_alloc_controller(pci_node); if (!hose) { pr_err("xilinx-pci: pcibios_alloc_controller() failed\n"); return; } /* Setup config space */ setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR, r.start + XPLB_PCI_DATA, INDIRECT_TYPE_SET_CFG_TYPE); /* According to the xilinx plbv46_pci documentation the soft-core starts * a self-init when the bus master enable bit is set. Without this bit * set the pci bus can't be scanned. 
*/ early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD); /* Set the max latency timer to 255 */ early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff); /* Set the max bus number to 255, and bus/subbus no's to 0 */ pci_reg = of_iomap(pci_node, 0); out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff); iounmap(pci_reg); /* Register the host bridge with the linux kernel! */ pci_process_bridge_OF_ranges(hose, pci_node, INDIRECT_TYPE_SET_CFG_TYPE); pr_info("xilinx-pci: Registered PCI host bridge\n"); xilinx_early_pci_scan(hose); }
gpl-2.0
xaxaxa/n7102_kernel
fs/partitions/mac.c
8237
3370
/* * fs/partitions/mac.c * * Code extracted from drivers/block/genhd.c * Copyright (C) 1991-1998 Linus Torvalds * Re-organised Feb 1998 Russell King */ #include <linux/ctype.h> #include "check.h" #include "mac.h" #ifdef CONFIG_PPC_PMAC #include <asm/machdep.h> extern void note_bootable_part(dev_t dev, int part, int goodness); #endif /* * Code to understand MacOS partition tables. */ static inline void mac_fix_string(char *stg, int len) { int i; for (i = len - 1; i >= 0 && stg[i] == ' '; i--) stg[i] = 0; } int mac_partition(struct parsed_partitions *state) { Sector sect; unsigned char *data; int slot, blocks_in_map; unsigned secsize; #ifdef CONFIG_PPC_PMAC int found_root = 0; int found_root_goodness = 0; #endif struct mac_partition *part; struct mac_driver_desc *md; /* Get 0th block and look at the first partition map entry. */ md = read_part_sector(state, 0, &sect); if (!md) return -1; if (be16_to_cpu(md->signature) != MAC_DRIVER_MAGIC) { put_dev_sector(sect); return 0; } secsize = be16_to_cpu(md->block_size); put_dev_sector(sect); data = read_part_sector(state, secsize/512, &sect); if (!data) return -1; part = (struct mac_partition *) (data + secsize%512); if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { put_dev_sector(sect); return 0; /* not a MacOS disk */ } blocks_in_map = be32_to_cpu(part->map_count); if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { put_dev_sector(sect); return 0; } strlcat(state->pp_buf, " [mac]", PAGE_SIZE); for (slot = 1; slot <= blocks_in_map; ++slot) { int pos = slot * secsize; put_dev_sector(sect); data = read_part_sector(state, pos/512, &sect); if (!data) return -1; part = (struct mac_partition *) (data + pos%512); if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) break; put_partition(state, slot, be32_to_cpu(part->start_block) * (secsize/512), be32_to_cpu(part->block_count) * (secsize/512)); if (!strnicmp(part->type, "Linux_RAID", 10)) state->parts[slot].flags = ADDPART_FLAG_RAID; #ifdef CONFIG_PPC_PMAC /* * 
If this is the first bootable partition, tell the * setup code, in case it wants to make this the root. */ if (machine_is(powermac)) { int goodness = 0; mac_fix_string(part->processor, 16); mac_fix_string(part->name, 32); mac_fix_string(part->type, 32); if ((be32_to_cpu(part->status) & MAC_STATUS_BOOTABLE) && strcasecmp(part->processor, "powerpc") == 0) goodness++; if (strcasecmp(part->type, "Apple_UNIX_SVR2") == 0 || (strnicmp(part->type, "Linux", 5) == 0 && strcasecmp(part->type, "Linux_swap") != 0)) { int i, l; goodness++; l = strlen(part->name); if (strcmp(part->name, "/") == 0) goodness++; for (i = 0; i <= l - 4; ++i) { if (strnicmp(part->name + i, "root", 4) == 0) { goodness += 2; break; } } if (strnicmp(part->name, "swap", 4) == 0) goodness--; } if (goodness > found_root_goodness) { found_root = slot; found_root_goodness = goodness; } } #endif /* CONFIG_PPC_PMAC */ } #ifdef CONFIG_PPC_PMAC if (found_root_goodness) note_bootable_part(state->bdev->bd_dev, found_root, found_root_goodness); #endif put_dev_sector(sect); strlcat(state->pp_buf, "\n", PAGE_SIZE); return 1; }
gpl-2.0
ubuntustudio-kernel/ubuntu-raring-lowlatency
drivers/isdn/mISDN/l1oip_codec.c
8237
11139
/* * l1oip_codec.c generic codec using lookup table * -> conversion from a-Law to u-Law * -> conversion from u-Law to a-Law * -> compression by reducing the number of sample resolution to 4 * * NOTE: It is not compatible with any standard codec like ADPCM. * * Author Andreas Eversberg (jolly@eversberg.eu) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* How the codec works: -------------------- The volume is increased to increase the dynamic range of the audio signal. Each sample is converted to a-LAW with only 16 steps of level resolution. A pair of two samples are stored in one byte. The first byte is stored in the upper bits, the second byte is stored in the lower bits. To speed up compression and decompression, two lookup tables are formed: - 16 bits index for two samples (law encoded) with 8 bit compressed result. - 8 bits index for one compressed data with 16 bits decompressed result. NOTE: The bytes are handled as they are law-encoded. */ #include <linux/vmalloc.h> #include <linux/mISDNif.h> #include <linux/in.h> #include "core.h" #include "l1oip.h" /* definitions of codec. don't use calculations, code may run slower. 
*/ static u8 *table_com; static u16 *table_dec; /* alaw -> ulaw */ static u8 alaw_to_ulaw[256] = { 0xab, 0x2b, 0xe3, 0x63, 0x8b, 0x0b, 0xc9, 0x49, 0xba, 0x3a, 0xf6, 0x76, 0x9b, 0x1b, 0xd7, 0x57, 0xa3, 0x23, 0xdd, 0x5d, 0x83, 0x03, 0xc1, 0x41, 0xb2, 0x32, 0xeb, 0x6b, 0x93, 0x13, 0xcf, 0x4f, 0xaf, 0x2f, 0xe7, 0x67, 0x8f, 0x0f, 0xcd, 0x4d, 0xbe, 0x3e, 0xfe, 0x7e, 0x9f, 0x1f, 0xdb, 0x5b, 0xa7, 0x27, 0xdf, 0x5f, 0x87, 0x07, 0xc5, 0x45, 0xb6, 0x36, 0xef, 0x6f, 0x97, 0x17, 0xd3, 0x53, 0xa9, 0x29, 0xe1, 0x61, 0x89, 0x09, 0xc7, 0x47, 0xb8, 0x38, 0xf2, 0x72, 0x99, 0x19, 0xd5, 0x55, 0xa1, 0x21, 0xdc, 0x5c, 0x81, 0x01, 0xbf, 0x3f, 0xb0, 0x30, 0xe9, 0x69, 0x91, 0x11, 0xce, 0x4e, 0xad, 0x2d, 0xe5, 0x65, 0x8d, 0x0d, 0xcb, 0x4b, 0xbc, 0x3c, 0xfa, 0x7a, 0x9d, 0x1d, 0xd9, 0x59, 0xa5, 0x25, 0xde, 0x5e, 0x85, 0x05, 0xc3, 0x43, 0xb4, 0x34, 0xed, 0x6d, 0x95, 0x15, 0xd1, 0x51, 0xac, 0x2c, 0xe4, 0x64, 0x8c, 0x0c, 0xca, 0x4a, 0xbb, 0x3b, 0xf8, 0x78, 0x9c, 0x1c, 0xd8, 0x58, 0xa4, 0x24, 0xde, 0x5e, 0x84, 0x04, 0xc2, 0x42, 0xb3, 0x33, 0xec, 0x6c, 0x94, 0x14, 0xd0, 0x50, 0xb0, 0x30, 0xe8, 0x68, 0x90, 0x10, 0xce, 0x4e, 0xbf, 0x3f, 0xfe, 0x7e, 0xa0, 0x20, 0xdc, 0x5c, 0xa8, 0x28, 0xe0, 0x60, 0x88, 0x08, 0xc6, 0x46, 0xb7, 0x37, 0xf0, 0x70, 0x98, 0x18, 0xd4, 0x54, 0xaa, 0x2a, 0xe2, 0x62, 0x8a, 0x0a, 0xc8, 0x48, 0xb9, 0x39, 0xf4, 0x74, 0x9a, 0x1a, 0xd6, 0x56, 0xa2, 0x22, 0xdd, 0x5d, 0x82, 0x02, 0xc0, 0x40, 0xb1, 0x31, 0xea, 0x6a, 0x92, 0x12, 0xcf, 0x4f, 0xae, 0x2e, 0xe6, 0x66, 0x8e, 0x0e, 0xcc, 0x4c, 0xbd, 0x3d, 0xfc, 0x7c, 0x9e, 0x1e, 0xda, 0x5a, 0xa6, 0x26, 0xdf, 0x5f, 0x86, 0x06, 0xc4, 0x44, 0xb5, 0x35, 0xee, 0x6e, 0x96, 0x16, 0xd2, 0x52 }; /* ulaw -> alaw */ static u8 ulaw_to_alaw[256] = { 0xab, 0x55, 0xd5, 0x15, 0x95, 0x75, 0xf5, 0x35, 0xb5, 0x45, 0xc5, 0x05, 0x85, 0x65, 0xe5, 0x25, 0xa5, 0x5d, 0xdd, 0x1d, 0x9d, 0x7d, 0xfd, 0x3d, 0xbd, 0x4d, 0xcd, 0x0d, 0x8d, 0x6d, 0xed, 0x2d, 0xad, 0x51, 0xd1, 0x11, 0x91, 0x71, 0xf1, 0x31, 0xb1, 0x41, 0xc1, 0x01, 0x81, 0x61, 0xe1, 0x21, 0x59, 0xd9, 0x19, 0x99, 
0x79, 0xf9, 0x39, 0xb9, 0x49, 0xc9, 0x09, 0x89, 0x69, 0xe9, 0x29, 0xa9, 0xd7, 0x17, 0x97, 0x77, 0xf7, 0x37, 0xb7, 0x47, 0xc7, 0x07, 0x87, 0x67, 0xe7, 0x27, 0xa7, 0xdf, 0x9f, 0x7f, 0xff, 0x3f, 0xbf, 0x4f, 0xcf, 0x0f, 0x8f, 0x6f, 0xef, 0x2f, 0x53, 0x13, 0x73, 0x33, 0xb3, 0x43, 0xc3, 0x03, 0x83, 0x63, 0xe3, 0x23, 0xa3, 0x5b, 0xdb, 0x1b, 0x9b, 0x7b, 0xfb, 0x3b, 0xbb, 0xbb, 0x4b, 0x4b, 0xcb, 0xcb, 0x0b, 0x0b, 0x8b, 0x8b, 0x6b, 0x6b, 0xeb, 0xeb, 0x2b, 0x2b, 0xab, 0x54, 0xd4, 0x14, 0x94, 0x74, 0xf4, 0x34, 0xb4, 0x44, 0xc4, 0x04, 0x84, 0x64, 0xe4, 0x24, 0xa4, 0x5c, 0xdc, 0x1c, 0x9c, 0x7c, 0xfc, 0x3c, 0xbc, 0x4c, 0xcc, 0x0c, 0x8c, 0x6c, 0xec, 0x2c, 0xac, 0x50, 0xd0, 0x10, 0x90, 0x70, 0xf0, 0x30, 0xb0, 0x40, 0xc0, 0x00, 0x80, 0x60, 0xe0, 0x20, 0x58, 0xd8, 0x18, 0x98, 0x78, 0xf8, 0x38, 0xb8, 0x48, 0xc8, 0x08, 0x88, 0x68, 0xe8, 0x28, 0xa8, 0xd6, 0x16, 0x96, 0x76, 0xf6, 0x36, 0xb6, 0x46, 0xc6, 0x06, 0x86, 0x66, 0xe6, 0x26, 0xa6, 0xde, 0x9e, 0x7e, 0xfe, 0x3e, 0xbe, 0x4e, 0xce, 0x0e, 0x8e, 0x6e, 0xee, 0x2e, 0x52, 0x12, 0x72, 0x32, 0xb2, 0x42, 0xc2, 0x02, 0x82, 0x62, 0xe2, 0x22, 0xa2, 0x5a, 0xda, 0x1a, 0x9a, 0x7a, 0xfa, 0x3a, 0xba, 0xba, 0x4a, 0x4a, 0xca, 0xca, 0x0a, 0x0a, 0x8a, 0x8a, 0x6a, 0x6a, 0xea, 0xea, 0x2a, 0x2a }; /* alaw -> 4bit compression */ static u8 alaw_to_4bit[256] = { 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0d, 0x02, 0x0e, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 
0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x01, 0x0a, 0x05, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04, 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03, 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04, }; /* 4bit -> alaw decompression */ static u8 _4bit_to_alaw[16] = { 0x5d, 0x51, 0xd9, 0xd7, 0x5f, 0x53, 0xa3, 0x4b, 0x2a, 0x3a, 0x22, 0x2e, 0x26, 0x56, 0x20, 0x2c, }; /* ulaw -> 4bit compression */ static u8 ulaw_to_4bit[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x08, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 
0x0f, 0x0f, 0x0f, 0x0f, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, }; /* 4bit -> ulaw decompression */ static u8 _4bit_to_ulaw[16] = { 0x11, 0x21, 0x31, 0x40, 0x4e, 0x5c, 0x68, 0x71, 0xfe, 0xef, 0xe7, 0xdb, 0xcd, 0xbf, 0xaf, 0x9f, }; /* * Compresses data to the result buffer * The result size must be at least half of the input buffer. * The number of samples also must be even! */ int l1oip_law_to_4bit(u8 *data, int len, u8 *result, u32 *state) { int ii, i = 0, o = 0; if (!len) return 0; /* send saved byte and first input byte */ if (*state) { *result++ = table_com[(((*state) << 8) & 0xff00) | (*data++)]; len--; o++; } ii = len >> 1; while (i < ii) { *result++ = table_com[(data[0]<<8) | (data[1])]; data += 2; i++; o++; } /* if len has an odd number, we save byte for next call */ if (len & 1) *state = 0x100 + *data; else *state = 0; return o; } /* Decompress data to the result buffer * The result size must be the number of sample in packet. (2 * input data) * The number of samples in the result are even! 
*/ int l1oip_4bit_to_law(u8 *data, int len, u8 *result) { int i = 0; u16 r; while (i < len) { r = table_dec[*data++]; *result++ = r >> 8; *result++ = r; i++; } return len << 1; } /* * law conversion */ int l1oip_alaw_to_ulaw(u8 *data, int len, u8 *result) { int i = 0; while (i < len) { *result++ = alaw_to_ulaw[*data++]; i++; } return len; } int l1oip_ulaw_to_alaw(u8 *data, int len, u8 *result) { int i = 0; while (i < len) { *result++ = ulaw_to_alaw[*data++]; i++; } return len; } /* * generate/free compression and decompression table */ void l1oip_4bit_free(void) { if (table_dec) vfree(table_dec); if (table_com) vfree(table_com); table_com = NULL; table_dec = NULL; } int l1oip_4bit_alloc(int ulaw) { int i1, i2, c, sample; /* in case, it is called again */ if (table_dec) return 0; /* alloc conversion tables */ table_com = vzalloc(65536); table_dec = vzalloc(512); if (!table_com || !table_dec) { l1oip_4bit_free(); return -ENOMEM; } /* generate compression table */ i1 = 0; while (i1 < 256) { if (ulaw) c = ulaw_to_4bit[i1]; else c = alaw_to_4bit[i1]; i2 = 0; while (i2 < 256) { table_com[(i1 << 8) | i2] |= (c << 4); table_com[(i2 << 8) | i1] |= c; i2++; } i1++; } /* generate decompression table */ i1 = 0; while (i1 < 16) { if (ulaw) sample = _4bit_to_ulaw[i1]; else sample = _4bit_to_alaw[i1]; i2 = 0; while (i2 < 16) { table_dec[(i1 << 4) | i2] |= (sample << 8); table_dec[(i2 << 4) | i1] |= sample; i2++; } i1++; } return 0; }
gpl-2.0
MarvinCorro/linux-cmps107
drivers/staging/dgnc/dgnc_tty.c
46
70762
/* * Copyright 2003 Digi International (www.digi.com) * Scott H Kilau <Scott_Kilau at digi dot com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR * PURPOSE. See the GNU General Public License for more details. */ /************************************************************************ * * This file implements the tty driver functionality for the * Neo and ClassicBoard PCI based product lines. * ************************************************************************ * */ #include <linux/kernel.h> #include <linux/sched.h> /* For jiffies, task states */ #include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */ #include <linux/module.h> #include <linux/ctype.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/types.h> #include <linux/serial_reg.h> #include <linux/slab.h> #include <linux/delay.h> /* For udelay */ #include <linux/uaccess.h> /* For copy_from_user/copy_to_user */ #include <linux/pci.h> #include "dgnc_driver.h" #include "dgnc_tty.h" #include "dgnc_neo.h" #include "dgnc_cls.h" #include "dgnc_sysfs.h" #include "dgnc_utils.h" /* * internal variables */ static struct dgnc_board *dgnc_BoardsByMajor[256]; static unsigned char *dgnc_TmpWriteBuf; /* * Default transparent print information. 
*/ static struct digi_t dgnc_digi_init = { .digi_flags = DIGI_COOK, /* Flags */ .digi_maxcps = 100, /* Max CPS */ .digi_maxchar = 50, /* Max chars in print queue */ .digi_bufsize = 100, /* Printer buffer size */ .digi_onlen = 4, /* size of printer on string */ .digi_offlen = 4, /* size of printer off string */ .digi_onstr = "\033[5i", /* ANSI printer on string ] */ .digi_offstr = "\033[4i", /* ANSI printer off string ] */ .digi_term = "ansi" /* default terminal type */ }; /* * Define a local default termios struct. All ports will be created * with this termios initially. * * This defines a raw port at 9600 baud, 8 data bits, no parity, * 1 stop bit. */ static struct ktermios DgncDefaultTermios = { .c_iflag = (DEFAULT_IFLAGS), /* iflags */ .c_oflag = (DEFAULT_OFLAGS), /* oflags */ .c_cflag = (DEFAULT_CFLAGS), /* cflags */ .c_lflag = (DEFAULT_LFLAGS), /* lflags */ .c_cc = INIT_C_CC, .c_line = 0, }; /* Our function prototypes */ static int dgnc_tty_open(struct tty_struct *tty, struct file *file); static void dgnc_tty_close(struct tty_struct *tty, struct file *file); static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch); static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo); static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info); static int dgnc_tty_write_room(struct tty_struct *tty); static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c); static int dgnc_tty_chars_in_buffer(struct tty_struct *tty); static void dgnc_tty_start(struct tty_struct *tty); static void dgnc_tty_stop(struct tty_struct *tty); static void dgnc_tty_throttle(struct tty_struct *tty); static void dgnc_tty_unthrottle(struct tty_struct *tty); static void dgnc_tty_flush_chars(struct tty_struct *tty); static void dgnc_tty_flush_buffer(struct tty_struct *tty); static void dgnc_tty_hangup(struct 
tty_struct *tty); static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value); static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value); static int dgnc_tty_tiocmget(struct tty_struct *tty); static int dgnc_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static int dgnc_tty_send_break(struct tty_struct *tty, int msec); static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout); static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count); static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios); static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch); static const struct tty_operations dgnc_tty_ops = { .open = dgnc_tty_open, .close = dgnc_tty_close, .write = dgnc_tty_write, .write_room = dgnc_tty_write_room, .flush_buffer = dgnc_tty_flush_buffer, .chars_in_buffer = dgnc_tty_chars_in_buffer, .flush_chars = dgnc_tty_flush_chars, .ioctl = dgnc_tty_ioctl, .set_termios = dgnc_tty_set_termios, .stop = dgnc_tty_stop, .start = dgnc_tty_start, .throttle = dgnc_tty_throttle, .unthrottle = dgnc_tty_unthrottle, .hangup = dgnc_tty_hangup, .put_char = dgnc_tty_put_char, .tiocmget = dgnc_tty_tiocmget, .tiocmset = dgnc_tty_tiocmset, .break_ctl = dgnc_tty_send_break, .wait_until_sent = dgnc_tty_wait_until_sent, .send_xchar = dgnc_tty_send_xchar }; /************************************************************************ * * TTY Initialization/Cleanup Functions * ************************************************************************/ /* * dgnc_tty_preinit() * * Initialize any global tty related data before we download any boards. */ int dgnc_tty_preinit(void) { /* * Allocate a buffer for doing the copy from user space to * kernel space in dgnc_write(). We only use one buffer and * control access to it with a semaphore. If we are paging, we * are already in trouble so one buffer won't hurt much anyway. 
* * We are okay to sleep in the malloc, as this routine * is only called during module load, (not in interrupt context), * and with no locks held. */ dgnc_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_KERNEL); if (!dgnc_TmpWriteBuf) return -ENOMEM; return 0; } /* * dgnc_tty_register() * * Init the tty subsystem for this board. */ int dgnc_tty_register(struct dgnc_board *brd) { int rc = 0; brd->SerialDriver.magic = TTY_DRIVER_MAGIC; snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum); brd->SerialDriver.name = brd->SerialName; brd->SerialDriver.name_base = 0; brd->SerialDriver.major = 0; brd->SerialDriver.minor_start = 0; brd->SerialDriver.num = brd->maxports; brd->SerialDriver.type = TTY_DRIVER_TYPE_SERIAL; brd->SerialDriver.subtype = SERIAL_TYPE_NORMAL; brd->SerialDriver.init_termios = DgncDefaultTermios; brd->SerialDriver.driver_name = DRVSTR; brd->SerialDriver.flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK); /* * The kernel wants space to store pointers to * tty_struct's and termios's. */ brd->SerialDriver.ttys = kcalloc(brd->maxports, sizeof(*brd->SerialDriver.ttys), GFP_KERNEL); if (!brd->SerialDriver.ttys) return -ENOMEM; kref_init(&brd->SerialDriver.kref); brd->SerialDriver.termios = kcalloc(brd->maxports, sizeof(*brd->SerialDriver.termios), GFP_KERNEL); if (!brd->SerialDriver.termios) return -ENOMEM; /* * Entry points for driver. Called by the kernel from * tty_io.c and n_tty.c. */ tty_set_operations(&brd->SerialDriver, &dgnc_tty_ops); if (!brd->dgnc_Major_Serial_Registered) { /* Register tty devices */ rc = tty_register_driver(&brd->SerialDriver); if (rc < 0) { dev_dbg(&brd->pdev->dev, "Can't register tty device (%d)\n", rc); return rc; } brd->dgnc_Major_Serial_Registered = true; } /* * If we're doing transparent print, we have to do all of the above * again, separately so we don't get the LD confused about what major * we are when we get into the dgnc_tty_open() routine. 
*/ brd->PrintDriver.magic = TTY_DRIVER_MAGIC; snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum); brd->PrintDriver.name = brd->PrintName; brd->PrintDriver.name_base = 0; brd->PrintDriver.major = brd->SerialDriver.major; brd->PrintDriver.minor_start = 0x80; brd->PrintDriver.num = brd->maxports; brd->PrintDriver.type = TTY_DRIVER_TYPE_SERIAL; brd->PrintDriver.subtype = SERIAL_TYPE_NORMAL; brd->PrintDriver.init_termios = DgncDefaultTermios; brd->PrintDriver.driver_name = DRVSTR; brd->PrintDriver.flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK); /* * The kernel wants space to store pointers to * tty_struct's and termios's. Must be separated from * the Serial Driver so we don't get confused */ brd->PrintDriver.ttys = kcalloc(brd->maxports, sizeof(*brd->PrintDriver.ttys), GFP_KERNEL); if (!brd->PrintDriver.ttys) return -ENOMEM; kref_init(&brd->PrintDriver.kref); brd->PrintDriver.termios = kcalloc(brd->maxports, sizeof(*brd->PrintDriver.termios), GFP_KERNEL); if (!brd->PrintDriver.termios) return -ENOMEM; /* * Entry points for driver. Called by the kernel from * tty_io.c and n_tty.c. */ tty_set_operations(&brd->PrintDriver, &dgnc_tty_ops); if (!brd->dgnc_Major_TransparentPrint_Registered) { /* Register Transparent Print devices */ rc = tty_register_driver(&brd->PrintDriver); if (rc < 0) { dev_dbg(&brd->pdev->dev, "Can't register Transparent Print device(%d)\n", rc); return rc; } brd->dgnc_Major_TransparentPrint_Registered = true; } dgnc_BoardsByMajor[brd->SerialDriver.major] = brd; brd->dgnc_Serial_Major = brd->SerialDriver.major; brd->dgnc_TransparentPrint_Major = brd->PrintDriver.major; return rc; } /* * dgnc_tty_init() * * Init the tty subsystem. Called once per board after board has been * downloaded and init'ed. */ int dgnc_tty_init(struct dgnc_board *brd) { int i; void __iomem *vaddr; struct channel_t *ch; if (!brd) return -ENXIO; /* * Initialize board structure elements. 
*/ vaddr = brd->re_map_membase; brd->nasync = brd->maxports; for (i = 0; i < brd->nasync; i++) { /* * Okay to malloc with GFP_KERNEL, we are not at * interrupt context, and there are no locks held. */ brd->channels[i] = kzalloc(sizeof(*brd->channels[i]), GFP_KERNEL); if (!brd->channels[i]) goto err_free_channels; } ch = brd->channels[0]; vaddr = brd->re_map_membase; /* Set up channel variables */ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) { spin_lock_init(&ch->ch_lock); /* Store all our magic numbers */ ch->magic = DGNC_CHANNEL_MAGIC; ch->ch_tun.magic = DGNC_UNIT_MAGIC; ch->ch_tun.un_ch = ch; ch->ch_tun.un_type = DGNC_SERIAL; ch->ch_tun.un_dev = i; ch->ch_pun.magic = DGNC_UNIT_MAGIC; ch->ch_pun.un_ch = ch; ch->ch_pun.un_type = DGNC_PRINT; ch->ch_pun.un_dev = i + 128; if (brd->bd_uart_offset == 0x200) ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i); else ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i); ch->ch_bd = brd; ch->ch_portnum = i; ch->ch_digi = dgnc_digi_init; /* .25 second delay */ ch->ch_close_delay = 250; init_waitqueue_head(&ch->ch_flags_wait); init_waitqueue_head(&ch->ch_tun.un_flags_wait); init_waitqueue_head(&ch->ch_pun.un_flags_wait); { struct device *classp; classp = tty_register_device(&brd->SerialDriver, i, &ch->ch_bd->pdev->dev); ch->ch_tun.un_sysfs = classp; dgnc_create_tty_sysfs(&ch->ch_tun, classp); classp = tty_register_device(&brd->PrintDriver, i, &ch->ch_bd->pdev->dev); ch->ch_pun.un_sysfs = classp; dgnc_create_tty_sysfs(&ch->ch_pun, classp); } } return 0; err_free_channels: for (i = i - 1; i >= 0; --i) { kfree(brd->channels[i]); brd->channels[i] = NULL; } return -ENOMEM; } /* * dgnc_tty_post_uninit() * * UnInitialize any global tty related data. */ void dgnc_tty_post_uninit(void) { kfree(dgnc_TmpWriteBuf); dgnc_TmpWriteBuf = NULL; } /* * dgnc_tty_uninit() * * Uninitialize the TTY portion of this driver. Free all memory and * resources. 
*/
void dgnc_tty_uninit(struct dgnc_board *brd)
{
	int i = 0;

	if (brd->dgnc_Major_Serial_Registered) {
		dgnc_BoardsByMajor[brd->SerialDriver.major] = NULL;
		brd->dgnc_Serial_Major = 0;
		for (i = 0; i < brd->nasync; i++) {
			if (brd->channels[i])
				dgnc_remove_tty_sysfs(brd->channels[i]->
						      ch_tun.un_sysfs);
			tty_unregister_device(&brd->SerialDriver, i);
		}
		tty_unregister_driver(&brd->SerialDriver);
		brd->dgnc_Major_Serial_Registered = false;
	}

	if (brd->dgnc_Major_TransparentPrint_Registered) {
		dgnc_BoardsByMajor[brd->PrintDriver.major] = NULL;
		brd->dgnc_TransparentPrint_Major = 0;
		for (i = 0; i < brd->nasync; i++) {
			if (brd->channels[i])
				dgnc_remove_tty_sysfs(brd->channels[i]->
						      ch_pun.un_sysfs);
			tty_unregister_device(&brd->PrintDriver, i);
		}
		tty_unregister_driver(&brd->PrintDriver);
		brd->dgnc_Major_TransparentPrint_Registered = false;
	}

	/* kfree(NULL) is safe, so these need no guards */
	kfree(brd->SerialDriver.ttys);
	brd->SerialDriver.ttys = NULL;
	kfree(brd->SerialDriver.termios);
	brd->SerialDriver.termios = NULL;
	kfree(brd->PrintDriver.ttys);
	brd->PrintDriver.ttys = NULL;
	kfree(brd->PrintDriver.termios);
	brd->PrintDriver.termios = NULL;
}

/*=======================================================================
 *
 *	dgnc_wmove - Write data to transmit queue.
 *
 *		ch	- Pointer to channel structure.
 *		buf	- Pointer to characters to be moved.
 *		n	- Number of characters to move.
 *
 *=======================================================================*/
static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
{
	int remain;
	uint head;

	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
		return;

	head = ch->ch_w_head & WQUEUEMASK;

	/*
	 * If the write wraps over the top of the circular buffer,
	 * move the portion up to the wrap point, and reset the
	 * pointers to the bottom.
	 */
	remain = WQUEUESIZE - head;

	if (n >= remain) {
		n -= remain;
		memcpy(ch->ch_wqueue + head, buf, remain);
		head = 0;
		buf += remain;
	}

	if (n > 0) {
		/*
		 * Move rest of data.
		 */
		remain = n;
		memcpy(ch->ch_wqueue + head, buf, remain);
		head += remain;
	}

	head &= WQUEUEMASK;
	ch->ch_w_head = head;
}

/*=======================================================================
 *
 *      dgnc_input - Process received data.
 *
 *              ch      - Pointer to channel structure.
 *
 *=======================================================================*/
void dgnc_input(struct channel_t *ch)
{
	struct dgnc_board *bd;
	struct tty_struct *tp;
	struct tty_ldisc *ld = NULL;
	uint rmask;
	ushort head;
	ushort tail;
	int data_len;
	unsigned long flags;
	int flip_len;
	int len = 0;
	int n = 0;
	int s = 0;
	int i = 0;

	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
		return;

	tp = ch->ch_tun.un_tty;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&ch->ch_lock, flags);

	/*
	 *      Figure the number of characters in the buffer.
	 *      Exit immediately if none.
	 */
	rmask = RQUEUEMASK;
	head = ch->ch_r_head & rmask;
	tail = ch->ch_r_tail & rmask;
	data_len = (head - tail) & rmask;

	if (data_len == 0)
		goto exit_unlock;

	/*
	 * If the device is not open, or CREAD is off,
	 * flush input data and return immediately.
	 */
	if (!tp || (tp->magic != TTY_MAGIC) ||
	    !(ch->ch_tun.un_flags & UN_ISOPEN) ||
	    !(tp->termios.c_cflag & CREAD) ||
	    (ch->ch_tun.un_flags & UN_CLOSING)) {
		ch->ch_r_head = tail;

		/* Force queue flow control to be released, if needed */
		dgnc_check_queue_flow_control(ch);

		goto exit_unlock;
	}

	/*
	 * If we are throttled, simply don't read any data.
	 */
	if (ch->ch_flags & CH_FORCED_STOPI)
		goto exit_unlock;

	flip_len = TTY_FLIPBUF_SIZE;

	/* Chop down the length, if needed */
	len = min(data_len, flip_len);
	len = min(len, (N_TTY_BUF_SIZE - 1));

	ld = tty_ldisc_ref(tp);

	/*
	 * If we were unable to get a reference to the ld,
	 * don't flush our buffer, and act like the ld doesn't
	 * have any space to put the data right now.
	 */
	if (!ld) {
		len = 0;
	} else {
		/*
		 * If ld doesn't have a pointer to a receive_buf function,
		 * flush the data, then act like the ld doesn't have any
		 * space to put the data right now.
		 */
		if (!ld->ops->receive_buf) {
			ch->ch_r_head = ch->ch_r_tail;
			len = 0;
		}
	}

	if (len <= 0)
		goto exit_unlock;

	/*
	 * The tty layer in the kernel has changed in 2.6.16+.
	 *
	 * The flip buffers in the tty structure are no longer exposed,
	 * and probably will be going away eventually.
	 *
	 * If we are completely raw, we don't need to go through a lot
	 * of the tty layers that exist.
	 * In this case, we take the shortest and fastest route we
	 * can to relay the data to the user.
	 *
	 * On the other hand, if we are not raw, we need to go through
	 * the new 2.6.16+ tty layer, which has its API more well defined.
	 */
	len = tty_buffer_request_room(tp->port, len);
	n = len;

	/*
	 * n now contains the most amount of data we can copy,
	 * bounded either by how much the Linux tty layer can handle,
	 * or the amount of data the card actually has pending...
	 */
	while (n) {
		/* s = contiguous run up to the wrap point of the ring */
		s = ((head >= tail) ? head : RQUEUESIZE) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		/*
		 * If conditions are such that ld needs to see all
		 * UART errors, we will have to walk each character
		 * and error byte and send them to the buffer one at
		 * a time.
		 */
		if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
			for (i = 0; i < s; i++) {
				/* ch_equeue holds the per-byte LSR flags */
				if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
					tty_insert_flip_char(tp->port,
						*(ch->ch_rqueue + tail + i),
						TTY_BREAK);
				else if (*(ch->ch_equeue + tail + i) &
					 UART_LSR_PE)
					tty_insert_flip_char(tp->port,
						*(ch->ch_rqueue + tail + i),
						TTY_PARITY);
				else if (*(ch->ch_equeue + tail + i) &
					 UART_LSR_FE)
					tty_insert_flip_char(tp->port,
						*(ch->ch_rqueue + tail + i),
						TTY_FRAME);
				else
					tty_insert_flip_char(tp->port,
						*(ch->ch_rqueue + tail + i),
						TTY_NORMAL);
			}
		} else {
			tty_insert_flip_string(tp->port,
					       ch->ch_rqueue + tail, s);
		}

		tail += s;
		n -= s;
		/* Flip queue if needed */
		tail &= rmask;
	}

	ch->ch_r_tail = tail & rmask;
	ch->ch_e_tail = tail & rmask;
	dgnc_check_queue_flow_control(ch);
	spin_unlock_irqrestore(&ch->ch_lock, flags);

	/* Tell the tty layer its okay to "eat" the data now */
	tty_flip_buffer_push(tp->port);

	if (ld)
		tty_ldisc_deref(ld);

	return;

exit_unlock:
	spin_unlock_irqrestore(&ch->ch_lock, flags);

	if (ld)
		tty_ldisc_deref(ld);
}

/************************************************************************
 * Determines when CARRIER changes state and takes appropriate
 * action.
 ************************************************************************/
void dgnc_carrier(struct channel_t *ch)
{
	struct dgnc_board *bd;

	int virt_carrier = 0;
	int phys_carrier = 0;

	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
		return;

	bd = ch->ch_bd;

	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
		return;

	if (ch->ch_mistat & UART_MSR_DCD)
		phys_carrier = 1;

	/* "virtual" carrier: DCD is forced or ignored (CLOCAL) */
	if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
		virt_carrier = 1;

	if (ch->ch_c_cflag & CLOCAL)
		virt_carrier = 1;

	/*
	 * Test for a VIRTUAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */
		if (waitqueue_active(&ch->ch_flags_wait))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */
		if (waitqueue_active(&ch->ch_flags_wait))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 *  Test for a PHYSICAL transition to low, so long as we aren't
	 *  currently ignoring physical transitions (which is what "virtual
	 *  carrier" indicates).
	 *
	 *  The transition of the virtual carrier to low really doesn't
	 *  matter... it really only means "ignore carrier state", not
	 *  "make pretend that carrier is there".
	 */
	if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
	    (phys_carrier == 0)) {
		/*
		 *   When carrier drops:
		 *
		 *   Drop carrier on all open units.
		 *
		 *   Flush queues, waking up any task waiting in the
		 *   line discipline.
		 *
		 *   Send a hangup to the control terminal.
		 *
		 *   Enable all select calls.
		 */
		if (waitqueue_active(&ch->ch_flags_wait))
			wake_up_interruptible(&ch->ch_flags_wait);

		if (ch->ch_tun.un_open_count > 0)
			tty_hangup(ch->ch_tun.un_tty);

		if (ch->ch_pun.un_open_count > 0)
			tty_hangup(ch->ch_pun.un_tty);
	}

	/*
	 *  Make sure that our cached values reflect the current reality.
	 */
	if (virt_carrier == 1)
		ch->ch_flags |= CH_FCAR;
	else
		ch->ch_flags &= ~CH_FCAR;

	if (phys_carrier == 1)
		ch->ch_flags |= CH_CD;
	else
		ch->ch_flags &= ~CH_CD;
}

/*
 *  Assign the custom baud rate to the channel structure
 */
static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
{
	int testdiv;
	int testrate_high;
	int testrate_low;
	int deltahigh;
	int deltalow;

	/* NOTE(review): newrate is unsigned, so "<= 0" only matches 0 */
	if (newrate <= 0) {
		ch->ch_custom_speed = 0;
		return;
	}

	/*
	 *  Since the divisor is stored in a 16-bit integer, we make sure
	 *  we don't allow any rates smaller than a 16-bit integer would allow.
	 *  And of course, rates above the dividend won't fly.
	 */
	if (newrate && newrate < ((ch->ch_bd->bd_dividend / 0xFFFF) + 1))
		newrate = ((ch->ch_bd->bd_dividend / 0xFFFF) + 1);

	if (newrate && newrate > ch->ch_bd->bd_dividend)
		newrate = ch->ch_bd->bd_dividend;

	if (newrate > 0) {
		testdiv = ch->ch_bd->bd_dividend / newrate;

		/*
		 *  If we try to figure out what rate the board would use
		 *  with the test divisor, it will be either equal or higher
		 *  than the requested baud rate.  If we then determine the
		 *  rate with a divisor one higher, we will get the next lower
		 *  supported rate below the requested.
		 */
		testrate_high = ch->ch_bd->bd_dividend / testdiv;
		testrate_low = ch->ch_bd->bd_dividend / (testdiv + 1);

		/*
		 *  If the rate for the requested divisor is correct, just
		 *  use it and be done.
		 */
		if (testrate_high != newrate) {
			/*
			 *  Otherwise, pick the rate that is closer
			 *  (i.e. whichever rate has a smaller delta).
			 */
			deltahigh = testrate_high - newrate;
			deltalow = newrate - testrate_low;

			if (deltahigh < deltalow)
				newrate = testrate_high;
			else
				newrate = testrate_low;
		}
	}

	ch->ch_custom_speed = newrate;
}

/*
 * Apply or release input flow control based on how full the receive
 * queue currently is.
 */
void dgnc_check_queue_flow_control(struct channel_t *ch)
{
	int qleft;

	/* Store how much space we have left in the queue */
	qleft = ch->ch_r_tail - ch->ch_r_head - 1;
	if (qleft < 0)
		qleft += RQUEUEMASK + 1;

	/*
	 * Check to see if we should enforce flow control on our queue because
	 * the ld (or user) isn't reading data out of our queue fast enuf.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to back up, and force
	 *	the RTS signal to be dropped.
	 * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
	 *	the other side, in hopes it will stop sending data to us.
	 * 3) NONE - Nothing we can do.  We will simply drop any extra data
	 *	that gets sent into us when the queue fills up.
	 */
	if (qleft < 256) {
		/* HWFLOW */
		if (ch->ch_digi.digi_flags & CTSPACE ||
		    ch->ch_c_cflag & CRTSCTS) {
			if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
				ch->ch_bd->bd_ops->disable_receiver(ch);
				ch->ch_flags |= (CH_RECEIVER_OFF);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF) {
			if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
				ch->ch_bd->bd_ops->send_stop_character(ch);
				ch->ch_stops_sent++;
			}
		}
	}

	/*
	 * Check to see if we should unenforce flow control because
	 * ld (or user) finally read enuf data out of our queue.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to raise RTS back up,
	 *	which will allow the other side to start sending data again.
	 * 2) SWFLOW (IXOFF) - Send a start character to
	 *	the other side, so it will start sending data to us again.
	 * 3) NONE - Do nothing. Since we didn't do anything to turn off the
	 *	other side, we don't need to do anything now.
	 */
	if (qleft > (RQUEUESIZE / 2)) {
		/* HWFLOW */
		if (ch->ch_digi.digi_flags & RTSPACE ||
		    ch->ch_c_cflag & CRTSCTS) {
			if (ch->ch_flags & CH_RECEIVER_OFF) {
				ch->ch_bd->bd_ops->enable_receiver(ch);
				ch->ch_flags &= ~(CH_RECEIVER_OFF);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
			ch->ch_stops_sent = 0;
			ch->ch_bd->bd_ops->send_start_character(ch);
		}
	}
}

/*
 * Wake up any writers blocked on this channel once the transmit queue
 * has drained below its high-water mark; also handles UN_EMPTY and the
 * RTS/DTR toggle modes when everything has drained.
 */
void dgnc_wakeup_writes(struct channel_t *ch)
{
	int qlen = 0;
	unsigned long flags;

	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
		return;

	spin_lock_irqsave(&ch->ch_lock, flags);

	/*
	 * If channel now has space, wake up anyone waiting on the condition.
	 */
	qlen = ch->ch_w_head - ch->ch_w_tail;
	if (qlen < 0)
		qlen += WQUEUESIZE;

	if (qlen >= (WQUEUESIZE - 256)) {
		spin_unlock_irqrestore(&ch->ch_lock, flags);
		return;
	}

	if (ch->ch_tun.un_flags & UN_ISOPEN) {
		if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
		    ch->ch_tun.un_tty->ldisc->ops->write_wakeup) {
			/* drop the lock across the ldisc callback */
			spin_unlock_irqrestore(&ch->ch_lock, flags);
			ch->ch_tun.un_tty->ldisc->ops->write_wakeup(ch->ch_tun.un_tty);
			spin_lock_irqsave(&ch->ch_lock, flags);
		}

		wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);

		/*
		 * If unit is set to wait until empty, check to make sure
		 * the queue AND FIFO are both empty.
		 */
		if (ch->ch_tun.un_flags & UN_EMPTY) {
			if ((qlen == 0) &&
			    (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0)) {
				ch->ch_tun.un_flags &= ~(UN_EMPTY);

				/*
				 * If RTS Toggle mode is on, whenever
				 * the queue and UART is empty, keep RTS low.
				 */
				if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
					ch->ch_mostat &= ~(UART_MCR_RTS);
					ch->ch_bd->bd_ops->assert_modem_signals(ch);
				}

				/*
				 * If DTR Toggle mode is on, whenever
				 * the queue and UART is empty, keep DTR low.
				 */
				if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
					ch->ch_mostat &= ~(UART_MCR_DTR);
					ch->ch_bd->bd_ops->assert_modem_signals(ch);
				}
			}
		}

		wake_up_interruptible(&ch->ch_tun.un_flags_wait);
	}

	if (ch->ch_pun.un_flags & UN_ISOPEN) {
		if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
		    ch->ch_pun.un_tty->ldisc->ops->write_wakeup) {
			/* drop the lock across the ldisc callback */
			spin_unlock_irqrestore(&ch->ch_lock, flags);
			ch->ch_pun.un_tty->ldisc->ops->write_wakeup(ch->ch_pun.un_tty);
			spin_lock_irqsave(&ch->ch_lock, flags);
		}

		wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);

		/*
		 * If unit is set to wait until empty, check to make sure
		 * the queue AND FIFO are both empty.
		 */
		if (ch->ch_pun.un_flags & UN_EMPTY) {
			if ((qlen == 0) &&
			    (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0))
				ch->ch_pun.un_flags &= ~(UN_EMPTY);
		}

		wake_up_interruptible(&ch->ch_pun.un_flags_wait);
	}

	spin_unlock_irqrestore(&ch->ch_lock, flags);
}

/************************************************************************
 *
 * TTY Entry points and helper functions
 *
 ************************************************************************/

/*
 * dgnc_tty_open()
 *
 */
static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
{
	struct dgnc_board *brd;
	struct channel_t *ch;
	struct un_t *un;
	uint major = 0;
	uint minor = 0;
	int rc = 0;
	unsigned long flags;

	rc = 0;

	major = MAJOR(tty_devnum(tty));
	minor = MINOR(tty_devnum(tty));

	/* dgnc_BoardsByMajor is indexed by an 8-bit major */
	if (major > 255)
		return -ENXIO;

	/* Get board pointer from our array of majors we have allocated */
	brd = dgnc_BoardsByMajor[major];
	if (!brd)
		return -ENXIO;

	/*
	 * If board is not yet up to a state of READY, go to
	 * sleep waiting for it to happen or they cancel the open.
	 */
	rc = wait_event_interruptible(brd->state_wait,
				      (brd->state & BOARD_READY));

	if (rc)
		return rc;

	spin_lock_irqsave(&brd->bd_lock, flags);

	/* If opened device is greater than our number of ports, bail. */
	if (PORT_NUM(minor) >= brd->nasync) {
		spin_unlock_irqrestore(&brd->bd_lock, flags);
		return -ENXIO;
	}

	ch = brd->channels[PORT_NUM(minor)];
	if (!ch) {
		spin_unlock_irqrestore(&brd->bd_lock, flags);
		return -ENXIO;
	}

	/* Drop board lock */
	spin_unlock_irqrestore(&brd->bd_lock, flags);

	/* Grab channel lock */
	spin_lock_irqsave(&ch->ch_lock, flags);

	/* Figure out our type */
	if (!IS_PRINT(minor)) {
		un = &brd->channels[PORT_NUM(minor)]->ch_tun;
		un->un_type = DGNC_SERIAL;
	} else if (IS_PRINT(minor)) {
		un = &brd->channels[PORT_NUM(minor)]->ch_pun;
		un->un_type = DGNC_PRINT;
	} else {
		spin_unlock_irqrestore(&ch->ch_lock, flags);
		return -ENXIO;
	}

	/*
	 * If the port is still in a previous open, and in a state
	 * where we simply cannot safely keep going, wait until the
	 * state clears.
	 */
	spin_unlock_irqrestore(&ch->ch_lock, flags);

	rc = wait_event_interruptible(ch->ch_flags_wait,
				      ((ch->ch_flags & CH_OPENING) == 0));

	/* If ret is non-zero, user ctrl-c'ed us */
	if (rc)
		return -EINTR;

	/*
	 * If either unit is in the middle of the fragile part of close,
	 * we just cannot touch the channel safely.
	 * Go to sleep, knowing that when the channel can be
	 * touched safely, the close routine will signal the
	 * ch_flags_wait to wake us back up.
	 */
	rc = wait_event_interruptible(ch->ch_flags_wait,
		(((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
		  UN_CLOSING) == 0));

	/* If ret is non-zero, user ctrl-c'ed us */
	if (rc)
		return -EINTR;

	spin_lock_irqsave(&ch->ch_lock, flags);

	/* Store our unit into driver_data, so we always have it available. */
	tty->driver_data = un;

	/*
	 * Initialize tty's
	 */
	if (!(un->un_flags & UN_ISOPEN)) {
		/* Store important variables. */
		un->un_tty = tty;

		/* Maybe do something here to the TTY struct as well? */
	}

	/*
	 * Allocate channel buffers for read/write/error.
	 * Set flag, so we don't get trounced on.
	 */
	ch->ch_flags |= (CH_OPENING);

	/* Drop locks, as malloc with GFP_KERNEL can sleep */
	spin_unlock_irqrestore(&ch->ch_lock, flags);

	if (!ch->ch_rqueue)
		ch->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
	if (!ch->ch_equeue)
		ch->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
	if (!ch->ch_wqueue)
		ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);

	spin_lock_irqsave(&ch->ch_lock, flags);

	ch->ch_flags &= ~(CH_OPENING);
	wake_up_interruptible(&ch->ch_flags_wait);

	/*
	 * Initialize if neither terminal or printer is open.
	 */
	if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
		/*
		 * Flush input queues.
		 */
		ch->ch_r_head = 0;
		ch->ch_r_tail = 0;
		ch->ch_e_head = 0;
		ch->ch_e_tail = 0;
		ch->ch_w_head = 0;
		ch->ch_w_tail = 0;

		brd->bd_ops->flush_uart_write(ch);
		brd->bd_ops->flush_uart_read(ch);

		ch->ch_flags = 0;
		ch->ch_cached_lsr = 0;
		ch->ch_stop_sending_break = 0;
		ch->ch_stops_sent = 0;

		/* snapshot the termios settings into the channel */
		ch->ch_c_cflag   = tty->termios.c_cflag;
		ch->ch_c_iflag   = tty->termios.c_iflag;
		ch->ch_c_oflag   = tty->termios.c_oflag;
		ch->ch_c_lflag   = tty->termios.c_lflag;
		ch->ch_startc = tty->termios.c_cc[VSTART];
		ch->ch_stopc  = tty->termios.c_cc[VSTOP];

		/*
		 * Bring up RTS and DTR...
		 * Also handle RTS or DTR toggle if set.
		 */
		if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
			ch->ch_mostat |= (UART_MCR_RTS);
		if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
			ch->ch_mostat |= (UART_MCR_DTR);

		/* Tell UART to init itself */
		brd->bd_ops->uart_init(ch);
	}

	/*
	 * Run param in case we changed anything
	 */
	brd->bd_ops->param(tty);

	dgnc_carrier(ch);

	/*
	 * follow protocol for opening port
	 */
	spin_unlock_irqrestore(&ch->ch_lock, flags);

	rc = dgnc_block_til_ready(tty, file, ch);

	/* No going back now, increment our unit and channel counters */
	spin_lock_irqsave(&ch->ch_lock, flags);
	ch->ch_open_count++;
	un->un_open_count++;
	un->un_flags |= (UN_ISOPEN);
	spin_unlock_irqrestore(&ch->ch_lock, flags);

	return rc;
}

/*
 * dgnc_block_til_ready()
 *
 * Wait for DCD, if needed.
 */
static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file,
				struct channel_t *ch)
{
	int retval = 0;
	struct un_t *un = NULL;
	unsigned long flags;
	uint old_flags = 0;
	int sleep_on_un_flags = 0;

	if (!tty || tty->magic != TTY_MAGIC || !file || !ch ||
	    ch->magic != DGNC_CHANNEL_MAGIC)
		return -ENXIO;

	un = tty->driver_data;
	if (!un || un->magic != DGNC_UNIT_MAGIC)
		return -ENXIO;

	spin_lock_irqsave(&ch->ch_lock, flags);

	ch->ch_wopen++;

	/* Loop forever */
	while (1) {
		sleep_on_un_flags = 0;

		/*
		 * If board has failed somehow during our sleep,
		 * bail with error.
		 */
		if (ch->ch_bd->state == BOARD_FAILED) {
			retval = -ENXIO;
			break;
		}

		/* If tty was hung up, break out of loop and set error. */
		if (tty_hung_up_p(file)) {
			retval = -EAGAIN;
			break;
		}

		/*
		 * If either unit is in the middle of the fragile part of close,
		 * we just cannot touch the channel safely.
		 * Go back to sleep, knowing that when the channel can be
		 * touched safely, the close routine will signal the
		 * ch_wait_flags to wake us back up.
		 */
		if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
		      UN_CLOSING)) {
			/*
			 * Our conditions to leave cleanly and happily:
			 * 1) NONBLOCKING on the tty is set.
			 * 2) CLOCAL is set.
			 * 3) DCD (fake or real) is active.
			 */
			if (file->f_flags & O_NONBLOCK)
				break;

			if (tty->flags & (1 << TTY_IO_ERROR)) {
				retval = -EIO;
				break;
			}

			if (ch->ch_flags & CH_CD)
				break;

			if (ch->ch_flags & CH_FCAR)
				break;
		} else {
			sleep_on_un_flags = 1;
		}

		/*
		 * If there is a signal pending, the user probably
		 * interrupted (ctrl-c) us.
		 * Leave loop with error set.
		 */
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		/*
		 * Store the flags before we let go of channel lock
		 */
		if (sleep_on_un_flags)
			old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
		else
			old_flags = ch->ch_flags;

		/*
		 * Let go of channel lock before calling schedule.
		 * Our poller will get any FEP events and wake us up when DCD
		 * eventually goes active.
		 */
		spin_unlock_irqrestore(&ch->ch_lock, flags);

		/*
		 * Wait for something in the flags to change
		 * from the current value.
		 */
		if (sleep_on_un_flags)
			retval = wait_event_interruptible(un->un_flags_wait,
				(old_flags != (ch->ch_tun.un_flags |
					       ch->ch_pun.un_flags)));
		else
			retval = wait_event_interruptible(ch->ch_flags_wait,
				(old_flags != ch->ch_flags));

		/*
		 * We got woken up for some reason.
		 * Before looping around, grab our channel lock.
		 */
		spin_lock_irqsave(&ch->ch_lock, flags);
	}

	ch->ch_wopen--;

	spin_unlock_irqrestore(&ch->ch_lock, flags);

	return retval;
}

/*
 * dgnc_tty_hangup()
 *
 * Hangup the port.  Like a close, but don't wait for output to drain.
 */
static void dgnc_tty_hangup(struct tty_struct *tty)
{
	struct un_t *un;

	if (!tty || tty->magic != TTY_MAGIC)
		return;

	un = tty->driver_data;
	if (!un || un->magic != DGNC_UNIT_MAGIC)
		return;

	/* flush the transmit queues */
	dgnc_tty_flush_buffer(tty);
}

/*
 * dgnc_tty_close()
 *
 * Last-close teardown for a unit: reconcile open counts with the tty
 * layer, drain pending output, optionally drop DTR/RTS (HUPCL), and
 * shut the UART down when neither the terminal nor the printer unit
 * remains open.
 */
static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct dgnc_board *bd;
	struct channel_t *ch;
	struct un_t *un;
	unsigned long flags;

	if (!tty || tty->magic != TTY_MAGIC)
		return;

	un = tty->driver_data;
	if (!un || un->magic != DGNC_UNIT_MAGIC)
		return;

	ch = un->un_ch;
	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
		return;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&ch->ch_lock, flags);

	/*
	 * Determine if this is the last close or not - and if we agree about
	 * which type of close it is with the Line Discipline
	 */
	if ((tty->count == 1) && (un->un_open_count != 1)) {
		/*
		 * Uh, oh.  tty->count is 1, which means that the tty
		 * structure will be freed.  un_open_count should always
		 * be one in these conditions.  If it's greater than
		 * one, we've got real problems, since it means the
		 * serial port won't be shutdown.
		 */
		dev_dbg(tty->dev, "tty->count is 1, un open count is %d\n",
			un->un_open_count);
		un->un_open_count = 1;
	}

	if (un->un_open_count)
		un->un_open_count--;
	else
		dev_dbg(tty->dev, "bad serial port open count of %d\n",
			un->un_open_count);

	ch->ch_open_count--;

	/* Not the last close on either unit: nothing more to tear down. */
	if (ch->ch_open_count && un->un_open_count) {
		spin_unlock_irqrestore(&ch->ch_lock, flags);
		return;
	}

	/* OK, its the last close on the unit */
	un->un_flags |= UN_CLOSING;

	tty->closing = 1;

	/*
	 * Only officially close channel if count is 0 and
	 * DIGI_PRINTER bit is not set.
	 */
	if ((ch->ch_open_count == 0) &&
	    !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
		ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI);

		/* turn off print device when closing print device. */
		if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
			dgnc_wmove(ch, ch->ch_digi.digi_offstr,
				   (int)ch->ch_digi.digi_offlen);
			ch->ch_flags &= ~CH_PRON;
		}

		/* Drop the lock: drain() sleeps waiting for output. */
		spin_unlock_irqrestore(&ch->ch_lock, flags);

		/* wait for output to drain */
		/* This will also return if we take an interrupt */
		bd->bd_ops->drain(tty, 0);

		dgnc_tty_flush_buffer(tty);
		tty_ldisc_flush(tty);

		spin_lock_irqsave(&ch->ch_lock, flags);

		tty->closing = 0;

		/*
		 * If we have HUPCL set, lower DTR and RTS
		 */
		if (ch->ch_c_cflag & HUPCL) {
			/* Drop RTS/DTR */
			ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
			bd->bd_ops->assert_modem_signals(ch);

			/*
			 * Go to sleep to ensure RTS/DTR
			 * have been dropped for modems to see it.
			 */
			if (ch->ch_close_delay) {
				/* Lock dropped again: dgnc_ms_sleep sleeps. */
				spin_unlock_irqrestore(&ch->ch_lock, flags);
				dgnc_ms_sleep(ch->ch_close_delay);
				spin_lock_irqsave(&ch->ch_lock, flags);
			}
		}

		/* Force a re-program of the UART on next open. */
		ch->ch_old_baud = 0;

		/* Turn off UART interrupts for this port */
		ch->ch_bd->bd_ops->uart_off(ch);
	} else {
		/* turn off print device when closing print device. */
		if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
			dgnc_wmove(ch, ch->ch_digi.digi_offstr,
				   (int)ch->ch_digi.digi_offlen);
			ch->ch_flags &= ~CH_PRON;
		}
	}

	un->un_tty = NULL;
	un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);

	/* Wake anyone blocked in open/block_til_ready on this channel. */
	wake_up_interruptible(&ch->ch_flags_wait);
	wake_up_interruptible(&un->un_flags_wait);

	spin_unlock_irqrestore(&ch->ch_lock, flags);
}

/*
 * dgnc_tty_chars_in_buffer()
 *
 * Return number of characters that have not been transmitted yet.
 *
 * This routine is used by the line discipline to determine if there
 * is data waiting to be transmitted/drained/flushed or not.
*/ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty) { struct channel_t *ch = NULL; struct un_t *un = NULL; ushort thead; ushort ttail; uint tmask; uint chars = 0; unsigned long flags; if (!tty) return 0; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return 0; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return 0; spin_lock_irqsave(&ch->ch_lock, flags); tmask = WQUEUEMASK; thead = ch->ch_w_head & tmask; ttail = ch->ch_w_tail & tmask; spin_unlock_irqrestore(&ch->ch_lock, flags); if (ttail == thead) { chars = 0; } else { if (thead >= ttail) chars = thead - ttail; else chars = thead - ttail + WQUEUESIZE; } return chars; } /* * dgnc_maxcps_room * * Reduces bytes_available to the max number of characters * that can be sent currently given the maxcps value, and * returns the new bytes_available. This only affects printer * output. */ static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available) { struct channel_t *ch = NULL; struct un_t *un = NULL; if (!tty) return bytes_available; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return bytes_available; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return bytes_available; /* * If its not the Transparent print device, return * the full data amount. 
*/ if (un->un_type != DGNC_PRINT) return bytes_available; if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) { int cps_limit = 0; unsigned long current_time = jiffies; unsigned long buffer_time = current_time + (HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps; if (ch->ch_cpstime < current_time) { /* buffer is empty */ ch->ch_cpstime = current_time; /* reset ch_cpstime */ cps_limit = ch->ch_digi.digi_bufsize; } else if (ch->ch_cpstime < buffer_time) { /* still room in the buffer */ cps_limit = ((buffer_time - ch->ch_cpstime) * ch->ch_digi.digi_maxcps) / HZ; } else { /* no room in the buffer */ cps_limit = 0; } bytes_available = min(cps_limit, bytes_available); } return bytes_available; } /* * dgnc_tty_write_room() * * Return space available in Tx buffer */ static int dgnc_tty_write_room(struct tty_struct *tty) { struct channel_t *ch = NULL; struct un_t *un = NULL; ushort head; ushort tail; ushort tmask; int ret = 0; unsigned long flags; if (!tty || !dgnc_TmpWriteBuf) return 0; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return 0; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return 0; spin_lock_irqsave(&ch->ch_lock, flags); tmask = WQUEUEMASK; head = (ch->ch_w_head) & tmask; tail = (ch->ch_w_tail) & tmask; ret = tail - head - 1; if (ret < 0) ret += WQUEUESIZE; /* Limit printer to maxcps */ ret = dgnc_maxcps_room(tty, ret); /* * If we are printer device, leave space for * possibly both the on and off strings. */ if (un->un_type == DGNC_PRINT) { if (!(ch->ch_flags & CH_PRON)) ret -= ch->ch_digi.digi_onlen; ret -= ch->ch_digi.digi_offlen; } else { if (ch->ch_flags & CH_PRON) ret -= ch->ch_digi.digi_offlen; } if (ret < 0) ret = 0; spin_unlock_irqrestore(&ch->ch_lock, flags); return ret; } /* * dgnc_tty_put_char() * * Put a character into ch->ch_buf * * - used by the line discipline for OPOST processing */ static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c) { /* * Simply call tty_write. 
*/ dgnc_tty_write(tty, &c, 1); return 1; } /* * dgnc_tty_write() * * Take data from the user or kernel and send it out to the FEP. * In here exists all the Transparent Print magic as well. */ static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct channel_t *ch = NULL; struct un_t *un = NULL; int bufcount = 0, n = 0; unsigned long flags; ushort head; ushort tail; ushort tmask; uint remain; if (!tty || !dgnc_TmpWriteBuf) return 0; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return 0; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return 0; if (!count) return 0; /* * Store original amount of characters passed in. * This helps to figure out if we should ask the FEP * to send us an event when it has more space available. */ spin_lock_irqsave(&ch->ch_lock, flags); /* Get our space available for the channel from the board */ tmask = WQUEUEMASK; head = (ch->ch_w_head) & tmask; tail = (ch->ch_w_tail) & tmask; bufcount = tail - head - 1; if (bufcount < 0) bufcount += WQUEUESIZE; /* * Limit printer output to maxcps overall, with bursts allowed * up to bufsize characters. */ bufcount = dgnc_maxcps_room(tty, bufcount); /* * Take minimum of what the user wants to send, and the * space available in the FEP buffer. */ count = min(count, bufcount); /* * Bail if no space left. */ if (count <= 0) goto exit_retry; /* * Output the printer ON string, if we are in terminal mode, but * need to be in printer mode. */ if ((un->un_type == DGNC_PRINT) && !(ch->ch_flags & CH_PRON)) { dgnc_wmove(ch, ch->ch_digi.digi_onstr, (int)ch->ch_digi.digi_onlen); head = (ch->ch_w_head) & tmask; ch->ch_flags |= CH_PRON; } /* * On the other hand, output the printer OFF string, if we are * currently in printer mode, but need to output to the terminal. 
*/ if ((un->un_type != DGNC_PRINT) && (ch->ch_flags & CH_PRON)) { dgnc_wmove(ch, ch->ch_digi.digi_offstr, (int)ch->ch_digi.digi_offlen); head = (ch->ch_w_head) & tmask; ch->ch_flags &= ~CH_PRON; } n = count; /* * If the write wraps over the top of the circular buffer, * move the portion up to the wrap point, and reset the * pointers to the bottom. */ remain = WQUEUESIZE - head; if (n >= remain) { n -= remain; memcpy(ch->ch_wqueue + head, buf, remain); head = 0; buf += remain; } if (n > 0) { /* * Move rest of data. */ remain = n; memcpy(ch->ch_wqueue + head, buf, remain); head += remain; } if (count) { head &= tmask; ch->ch_w_head = head; } /* Update printer buffer empty time. */ if ((un->un_type == DGNC_PRINT) && (ch->ch_digi.digi_maxcps > 0) && (ch->ch_digi.digi_bufsize > 0)) { ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps; } spin_unlock_irqrestore(&ch->ch_lock, flags); if (count) { /* * Channel lock is grabbed and then released * inside this routine. */ ch->ch_bd->bd_ops->copy_data_from_queue_to_uart(ch); } return count; exit_retry: spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; } /* * Return modem signals to ld. 
*/ static int dgnc_tty_tiocmget(struct tty_struct *tty) { struct channel_t *ch; struct un_t *un; int result = -EIO; unsigned char mstat = 0; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return result; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return result; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return result; spin_lock_irqsave(&ch->ch_lock, flags); mstat = (ch->ch_mostat | ch->ch_mistat); spin_unlock_irqrestore(&ch->ch_lock, flags); result = 0; if (mstat & UART_MCR_DTR) result |= TIOCM_DTR; if (mstat & UART_MCR_RTS) result |= TIOCM_RTS; if (mstat & UART_MSR_CTS) result |= TIOCM_CTS; if (mstat & UART_MSR_DSR) result |= TIOCM_DSR; if (mstat & UART_MSR_RI) result |= TIOCM_RI; if (mstat & UART_MSR_DCD) result |= TIOCM_CD; return result; } /* * dgnc_tty_tiocmset() * * Set modem signals, called by ld. */ static int dgnc_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; int ret = -EIO; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return ret; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return ret; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return ret; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return ret; spin_lock_irqsave(&ch->ch_lock, flags); if (set & TIOCM_RTS) ch->ch_mostat |= UART_MCR_RTS; if (set & TIOCM_DTR) ch->ch_mostat |= UART_MCR_DTR; if (clear & TIOCM_RTS) ch->ch_mostat &= ~(UART_MCR_RTS); if (clear & TIOCM_DTR) ch->ch_mostat &= ~(UART_MCR_DTR); ch->ch_bd->bd_ops->assert_modem_signals(ch); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; } /* * dgnc_tty_send_break() * * Send a Break, called by ld. 
*/ static int dgnc_tty_send_break(struct tty_struct *tty, int msec) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; int ret = -EIO; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return ret; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return ret; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return ret; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return ret; switch (msec) { case -1: msec = 0xFFFF; break; case 0: msec = 0; break; default: break; } spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_bd->bd_ops->send_break(ch, msec); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; } /* * dgnc_tty_wait_until_sent() * * wait until data has been transmitted, called by ld. */ static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; if (!tty || tty->magic != TTY_MAGIC) return; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return; bd->bd_ops->drain(tty, 0); } /* * dgnc_send_xchar() * * send a high priority character, called by ld. */ static void dgnc_tty_send_xchar(struct tty_struct *tty, char c) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return; dev_dbg(tty->dev, "dgnc_tty_send_xchar start\n"); spin_lock_irqsave(&ch->ch_lock, flags); bd->bd_ops->send_immediate_char(ch, c); spin_unlock_irqrestore(&ch->ch_lock, flags); dev_dbg(tty->dev, "dgnc_tty_send_xchar finish\n"); } /* * Return modem signals to ld. 
*/ static inline int dgnc_get_mstat(struct channel_t *ch) { unsigned char mstat; int result = -EIO; unsigned long flags; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return -ENXIO; spin_lock_irqsave(&ch->ch_lock, flags); mstat = (ch->ch_mostat | ch->ch_mistat); spin_unlock_irqrestore(&ch->ch_lock, flags); result = 0; if (mstat & UART_MCR_DTR) result |= TIOCM_DTR; if (mstat & UART_MCR_RTS) result |= TIOCM_RTS; if (mstat & UART_MSR_CTS) result |= TIOCM_CTS; if (mstat & UART_MSR_DSR) result |= TIOCM_DSR; if (mstat & UART_MSR_RI) result |= TIOCM_RI; if (mstat & UART_MSR_DCD) result |= TIOCM_CD; return result; } /* * Return modem signals to ld. */ static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value) { int result; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return -ENXIO; result = dgnc_get_mstat(ch); if (result < 0) return -ENXIO; return put_user(result, value); } /* * dgnc_set_modem_info() * * Set modem signals, called by ld. */ static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; int ret = -ENXIO; unsigned int arg = 0; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return ret; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return ret; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return ret; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return ret; ret = get_user(arg, value); if (ret) return ret; switch (command) { case TIOCMBIS: if (arg & TIOCM_RTS) ch->ch_mostat |= UART_MCR_RTS; if (arg & TIOCM_DTR) ch->ch_mostat |= UART_MCR_DTR; break; case TIOCMBIC: if (arg & TIOCM_RTS) ch->ch_mostat &= ~(UART_MCR_RTS); if (arg & TIOCM_DTR) ch->ch_mostat &= ~(UART_MCR_DTR); break; case TIOCMSET: if (arg & TIOCM_RTS) ch->ch_mostat |= UART_MCR_RTS; else ch->ch_mostat &= ~(UART_MCR_RTS); if (arg & TIOCM_DTR) ch->ch_mostat |= UART_MCR_DTR; else ch->ch_mostat &= ~(UART_MCR_DTR); break; 
default: return -EINVAL; } spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_bd->bd_ops->assert_modem_signals(ch); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; } /* * dgnc_tty_digigeta() * * Ioctl to get the information for ditty. * * * */ static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo) { struct channel_t *ch; struct un_t *un; struct digi_t tmp; unsigned long flags; if (!retinfo) return -EFAULT; if (!tty || tty->magic != TTY_MAGIC) return -EFAULT; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return -EFAULT; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); spin_lock_irqsave(&ch->ch_lock, flags); memcpy(&tmp, &ch->ch_digi, sizeof(tmp)); spin_unlock_irqrestore(&ch->ch_lock, flags); if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) return -EFAULT; return 0; } /* * dgnc_tty_digiseta() * * Ioctl to set the information for ditty. * * * */ static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; struct digi_t new_digi; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return -EFAULT; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return -EFAULT; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return -EFAULT; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return -EFAULT; if (copy_from_user(&new_digi, new_info, sizeof(new_digi))) return -EFAULT; spin_lock_irqsave(&ch->ch_lock, flags); /* * Handle transistions to and from RTS Toggle. */ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && (new_digi.digi_flags & DIGI_RTS_TOGGLE)) ch->ch_mostat &= ~(UART_MCR_RTS); if ((ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && !(new_digi.digi_flags & DIGI_RTS_TOGGLE)) ch->ch_mostat |= (UART_MCR_RTS); /* * Handle transistions to and from DTR Toggle. 
*/ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && (new_digi.digi_flags & DIGI_DTR_TOGGLE)) ch->ch_mostat &= ~(UART_MCR_DTR); if ((ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && !(new_digi.digi_flags & DIGI_DTR_TOGGLE)) ch->ch_mostat |= (UART_MCR_DTR); memcpy(&ch->ch_digi, &new_digi, sizeof(new_digi)); if (ch->ch_digi.digi_maxcps < 1) ch->ch_digi.digi_maxcps = 1; if (ch->ch_digi.digi_maxcps > 10000) ch->ch_digi.digi_maxcps = 10000; if (ch->ch_digi.digi_bufsize < 10) ch->ch_digi.digi_bufsize = 10; if (ch->ch_digi.digi_maxchar < 1) ch->ch_digi.digi_maxchar = 1; if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize) ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize; if (ch->ch_digi.digi_onlen > DIGI_PLEN) ch->ch_digi.digi_onlen = DIGI_PLEN; if (ch->ch_digi.digi_offlen > DIGI_PLEN) ch->ch_digi.digi_offlen = DIGI_PLEN; ch->ch_bd->bd_ops->param(tty); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; } /* * dgnc_set_termios() */ static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return; spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_c_cflag = tty->termios.c_cflag; ch->ch_c_iflag = tty->termios.c_iflag; ch->ch_c_oflag = tty->termios.c_oflag; ch->ch_c_lflag = tty->termios.c_lflag; ch->ch_startc = tty->termios.c_cc[VSTART]; ch->ch_stopc = tty->termios.c_cc[VSTOP]; ch->ch_bd->bd_ops->param(tty); dgnc_carrier(ch); spin_unlock_irqrestore(&ch->ch_lock, flags); } static void dgnc_tty_throttle(struct tty_struct *tty) { struct channel_t *ch; struct un_t *un; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return; ch = un->un_ch; if 
(!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_flags |= (CH_FORCED_STOPI); spin_unlock_irqrestore(&ch->ch_lock, flags); } static void dgnc_tty_unthrottle(struct tty_struct *tty) { struct channel_t *ch; struct un_t *un; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_flags &= ~(CH_FORCED_STOPI); spin_unlock_irqrestore(&ch->ch_lock, flags); } static void dgnc_tty_start(struct tty_struct *tty) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return; spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_flags &= ~(CH_FORCED_STOP); spin_unlock_irqrestore(&ch->ch_lock, flags); } static void dgnc_tty_stop(struct tty_struct *tty) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return; spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_flags |= (CH_FORCED_STOP); spin_unlock_irqrestore(&ch->ch_lock, flags); } /* * dgnc_tty_flush_chars() * * Flush the cook buffer * * Note to self, and any other poor souls who venture here: * * flush in this case DOES NOT mean dispose of the data. * instead, it means "stop buffering and send it if you * haven't already." Just guess how I figured that out... 
SRW 2-Jun-98 * * It is also always called in interrupt context - JAR 8-Sept-99 */ static void dgnc_tty_flush_chars(struct tty_struct *tty) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return; spin_lock_irqsave(&ch->ch_lock, flags); /* Do something maybe here */ spin_unlock_irqrestore(&ch->ch_lock, flags); } /* * dgnc_tty_flush_buffer() * * Flush Tx buffer (make in == out) */ static void dgnc_tty_flush_buffer(struct tty_struct *tty) { struct channel_t *ch; struct un_t *un; unsigned long flags; if (!tty || tty->magic != TTY_MAGIC) return; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return; spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_flags &= ~CH_STOP; /* Flush our write queue */ ch->ch_w_head = ch->ch_w_tail; /* Flush UARTs transmit FIFO */ ch->ch_bd->bd_ops->flush_uart_write(ch); if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) { ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY); wake_up_interruptible(&ch->ch_tun.un_flags_wait); } if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) { ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY); wake_up_interruptible(&ch->ch_pun.un_flags_wait); } spin_unlock_irqrestore(&ch->ch_lock, flags); } /***************************************************************************** * * The IOCTL function and all of its helpers * *****************************************************************************/ /* * dgnc_tty_ioctl() * * The usual assortment of ioctl's */ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct dgnc_board *bd; struct channel_t *ch; struct un_t *un; int rc; unsigned long flags; void __user *uarg = (void __user *)arg; if (!tty || 
tty->magic != TTY_MAGIC) return -ENODEV; un = tty->driver_data; if (!un || un->magic != DGNC_UNIT_MAGIC) return -ENODEV; ch = un->un_ch; if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) return -ENODEV; bd = ch->ch_bd; if (!bd || bd->magic != DGNC_BOARD_MAGIC) return -ENODEV; spin_lock_irqsave(&ch->ch_lock, flags); if (un->un_open_count <= 0) { spin_unlock_irqrestore(&ch->ch_lock, flags); return -EIO; } switch (cmd) { /* Here are all the standard ioctl's that we MUST implement */ case TCSBRK: /* * TCSBRK is SVID version: non-zero arg --> no break * this behaviour is exploited by tcdrain(). * * According to POSIX.1 spec (7.2.2.1.2) breaks should be * between 0.25 and 0.5 seconds so we'll ask for something * in the middle: 0.375 seconds. */ rc = tty_check_change(tty); spin_unlock_irqrestore(&ch->ch_lock, flags); if (rc) return rc; rc = ch->ch_bd->bd_ops->drain(tty, 0); if (rc) return -EINTR; spin_lock_irqsave(&ch->ch_lock, flags); if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP)) ch->ch_bd->bd_ops->send_break(ch, 250); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; case TCSBRKP: /* support for POSIX tcsendbreak() * According to POSIX.1 spec (7.2.2.1.2) breaks should be * between 0.25 and 0.5 seconds so we'll ask for something * in the middle: 0.375 seconds. 
*/ rc = tty_check_change(tty); spin_unlock_irqrestore(&ch->ch_lock, flags); if (rc) return rc; rc = ch->ch_bd->bd_ops->drain(tty, 0); if (rc) return -EINTR; spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_bd->bd_ops->send_break(ch, 250); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; case TIOCSBRK: rc = tty_check_change(tty); spin_unlock_irqrestore(&ch->ch_lock, flags); if (rc) return rc; rc = ch->ch_bd->bd_ops->drain(tty, 0); if (rc) return -EINTR; spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_bd->bd_ops->send_break(ch, 250); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; case TIOCCBRK: /* Do Nothing */ spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; case TIOCGSOFTCAR: spin_unlock_irqrestore(&ch->ch_lock, flags); rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)arg); return rc; case TIOCSSOFTCAR: spin_unlock_irqrestore(&ch->ch_lock, flags); rc = get_user(arg, (unsigned long __user *)arg); if (rc) return rc; spin_lock_irqsave(&ch->ch_lock, flags); tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0)); ch->ch_bd->bd_ops->param(tty); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; case TIOCMGET: spin_unlock_irqrestore(&ch->ch_lock, flags); return dgnc_get_modem_info(ch, uarg); case TIOCMBIS: case TIOCMBIC: case TIOCMSET: spin_unlock_irqrestore(&ch->ch_lock, flags); return dgnc_set_modem_info(tty, cmd, uarg); /* * Here are any additional ioctl's that we want to implement */ case TCFLSH: /* * The linux tty driver doesn't have a flush * input routine for the driver, assuming all backed * up data is in the line disc. buffers. However, * we all know that's not the case. Here, we * act on the ioctl, but then lie and say we didn't * so the line discipline will process the flush * also. 
*/ rc = tty_check_change(tty); if (rc) { spin_unlock_irqrestore(&ch->ch_lock, flags); return rc; } if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) { ch->ch_r_head = ch->ch_r_tail; ch->ch_bd->bd_ops->flush_uart_read(ch); /* Force queue flow control to be released, if needed */ dgnc_check_queue_flow_control(ch); } if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) { if (!(un->un_type == DGNC_PRINT)) { ch->ch_w_head = ch->ch_w_tail; ch->ch_bd->bd_ops->flush_uart_write(ch); if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) { ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY); wake_up_interruptible(&ch->ch_tun.un_flags_wait); } if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) { ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY); wake_up_interruptible(&ch->ch_pun.un_flags_wait); } } } /* pretend we didn't recognize this IOCTL */ spin_unlock_irqrestore(&ch->ch_lock, flags); return -ENOIOCTLCMD; case TCSETSF: case TCSETSW: /* * The linux tty driver doesn't have a flush * input routine for the driver, assuming all backed * up data is in the line disc. buffers. However, * we all know that's not the case. Here, we * act on the ioctl, but then lie and say we didn't * so the line discipline will process the flush * also. 
*/ if (cmd == TCSETSF) { /* flush rx */ ch->ch_flags &= ~CH_STOP; ch->ch_r_head = ch->ch_r_tail; ch->ch_bd->bd_ops->flush_uart_read(ch); /* Force queue flow control to be released, if needed */ dgnc_check_queue_flow_control(ch); } /* now wait for all the output to drain */ spin_unlock_irqrestore(&ch->ch_lock, flags); rc = ch->ch_bd->bd_ops->drain(tty, 0); if (rc) return -EINTR; /* pretend we didn't recognize this */ return -ENOIOCTLCMD; case TCSETAW: spin_unlock_irqrestore(&ch->ch_lock, flags); rc = ch->ch_bd->bd_ops->drain(tty, 0); if (rc) return -EINTR; /* pretend we didn't recognize this */ return -ENOIOCTLCMD; case TCXONC: spin_unlock_irqrestore(&ch->ch_lock, flags); /* Make the ld do it */ return -ENOIOCTLCMD; case DIGI_GETA: /* get information for ditty */ spin_unlock_irqrestore(&ch->ch_lock, flags); return dgnc_tty_digigeta(tty, uarg); case DIGI_SETAW: case DIGI_SETAF: /* set information for ditty */ if (cmd == (DIGI_SETAW)) { spin_unlock_irqrestore(&ch->ch_lock, flags); rc = ch->ch_bd->bd_ops->drain(tty, 0); if (rc) return -EINTR; spin_lock_irqsave(&ch->ch_lock, flags); } else { tty_ldisc_flush(tty); } /* fall thru */ case DIGI_SETA: spin_unlock_irqrestore(&ch->ch_lock, flags); return dgnc_tty_digiseta(tty, uarg); case DIGI_LOOPBACK: { uint loopback = 0; /* Let go of locks when accessing user space, * could sleep */ spin_unlock_irqrestore(&ch->ch_lock, flags); rc = get_user(loopback, (unsigned int __user *)arg); if (rc) return rc; spin_lock_irqsave(&ch->ch_lock, flags); /* Enable/disable internal loopback for this port */ if (loopback) ch->ch_flags |= CH_LOOPBACK; else ch->ch_flags &= ~(CH_LOOPBACK); ch->ch_bd->bd_ops->param(tty); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; } case DIGI_GETCUSTOMBAUD: spin_unlock_irqrestore(&ch->ch_lock, flags); rc = put_user(ch->ch_custom_speed, (unsigned int __user *)arg); return rc; case DIGI_SETCUSTOMBAUD: { int new_rate; /* Let go of locks when accessing user space, could sleep */ 
spin_unlock_irqrestore(&ch->ch_lock, flags); rc = get_user(new_rate, (int __user *)arg); if (rc) return rc; spin_lock_irqsave(&ch->ch_lock, flags); dgnc_set_custom_speed(ch, new_rate); ch->ch_bd->bd_ops->param(tty); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; } /* * This ioctl allows insertion of a character into the front * of any pending data to be transmitted. * * This ioctl is to satify the "Send Character Immediate" * call that the RealPort protocol spec requires. */ case DIGI_REALPORT_SENDIMMEDIATE: { unsigned char c; spin_unlock_irqrestore(&ch->ch_lock, flags); rc = get_user(c, (unsigned char __user *)arg); if (rc) return rc; spin_lock_irqsave(&ch->ch_lock, flags); ch->ch_bd->bd_ops->send_immediate_char(ch, c); spin_unlock_irqrestore(&ch->ch_lock, flags); return 0; } /* * This ioctl returns all the current counts for the port. * * This ioctl is to satify the "Line Error Counters" * call that the RealPort protocol spec requires. */ case DIGI_REALPORT_GETCOUNTERS: { struct digi_getcounter buf; buf.norun = ch->ch_err_overrun; buf.noflow = 0; /* The driver doesn't keep this stat */ buf.nframe = ch->ch_err_frame; buf.nparity = ch->ch_err_parity; buf.nbreak = ch->ch_err_break; buf.rbytes = ch->ch_rxcount; buf.tbytes = ch->ch_txcount; spin_unlock_irqrestore(&ch->ch_lock, flags); if (copy_to_user(uarg, &buf, sizeof(buf))) return -EFAULT; return 0; } /* * This ioctl returns all current events. * * This ioctl is to satify the "Event Reporting" * call that the RealPort protocol spec requires. 
*/ case DIGI_REALPORT_GETEVENTS: { unsigned int events = 0; /* NOTE: MORE EVENTS NEEDS TO BE ADDED HERE */ if (ch->ch_flags & CH_BREAK_SENDING) events |= EV_TXB; if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP)) events |= (EV_OPU | EV_OPS); if ((ch->ch_flags & CH_STOPI) || (ch->ch_flags & CH_FORCED_STOPI)) events |= (EV_IPU | EV_IPS); spin_unlock_irqrestore(&ch->ch_lock, flags); rc = put_user(events, (unsigned int __user *)arg); return rc; } /* * This ioctl returns TOUT and TIN counters based * upon the values passed in by the RealPort Server. * It also passes back whether the UART Transmitter is * empty as well. */ case DIGI_REALPORT_GETBUFFERS: { struct digi_getbuffer buf; int tdist; int count; spin_unlock_irqrestore(&ch->ch_lock, flags); /* * Get data from user first. */ if (copy_from_user(&buf, uarg, sizeof(buf))) return -EFAULT; spin_lock_irqsave(&ch->ch_lock, flags); /* * Figure out how much data is in our RX and TX queues. */ buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK; buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK; /* * Is the UART empty? Add that value to whats in our TX queue. */ count = buf.txbuf + ch->ch_bd->bd_ops->get_uart_bytes_left(ch); /* * Figure out how much data the RealPort Server believes should * be in our TX queue. */ tdist = (buf.tIn - buf.tOut) & 0xffff; /* * If we have more data than the RealPort Server believes we * should have, reduce our count to its amount. * * This count difference CAN happen because the Linux LD can * insert more characters into our queue for OPOST processing * that the RealPort Server doesn't know about. */ if (buf.txbuf > tdist) buf.txbuf = tdist; /* * Report whether our queue and UART TX are completely empty. */ if (count) buf.txdone = 0; else buf.txdone = 1; spin_unlock_irqrestore(&ch->ch_lock, flags); if (copy_to_user(uarg, &buf, sizeof(buf))) return -EFAULT; return 0; } default: spin_unlock_irqrestore(&ch->ch_lock, flags); return -ENOIOCTLCMD; } }
gpl-2.0
namgk/kernel-tut
drivers/net/ibm_newemac/phy.c
46
10694
/* * drivers/net/ibm_newemac/phy.c * * Driver for PowerPC 4xx on-chip ethernet controller, PHY support. * Borrowed from sungem_phy.c, though I only kept the generic MII * driver for now. * * This file should be shared with other drivers or eventually * merged as the "low level" part of miilib * * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * Based on the arch/ppc version of the driver: * * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org) * (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/netdevice.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/delay.h> #include "emac.h" #include "phy.h" static inline int phy_read(struct mii_phy *phy, int reg) { return phy->mdio_read(phy->dev, phy->address, reg); } static inline void phy_write(struct mii_phy *phy, int reg, int val) { phy->mdio_write(phy->dev, phy->address, reg, val); } int emac_mii_reset_phy(struct mii_phy *phy) { int val; int limit = 10000; val = phy_read(phy, MII_BMCR); val &= ~(BMCR_ISOLATE | BMCR_ANENABLE); val |= BMCR_RESET; phy_write(phy, MII_BMCR, val); udelay(300); while (limit--) { val = phy_read(phy, MII_BMCR); if (val >= 0 && (val & BMCR_RESET) == 0) break; udelay(10); } if ((val & BMCR_ISOLATE) && limit > 0) phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE); return limit <= 0; } static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) { int ctl, adv; phy->autoneg = AUTONEG_ENABLE; phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = phy->asym_pause = 0; phy->advertising = advertise; ctl = phy_read(phy, MII_BMCR); if (ctl < 0) return ctl; ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); /* First clear the PHY */ phy_write(phy, MII_BMCR, ctl); /* Setup standard advertise */ adv = phy_read(phy, MII_ADVERTISE); if (adv < 0) return adv; adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | 
ADVERTISE_PAUSE_ASYM); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; if (advertise & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; if (advertise & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; if (advertise & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (advertise & ADVERTISED_Pause) adv |= ADVERTISE_PAUSE_CAP; if (advertise & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; phy_write(phy, MII_ADVERTISE, adv); if (phy->features & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) { adv = phy_read(phy, MII_CTRL1000); if (adv < 0) return adv; adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); if (advertise & ADVERTISED_1000baseT_Full) adv |= ADVERTISE_1000FULL; if (advertise & ADVERTISED_1000baseT_Half) adv |= ADVERTISE_1000HALF; phy_write(phy, MII_CTRL1000, adv); } /* Start/Restart aneg */ ctl = phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); phy_write(phy, MII_BMCR, ctl); return 0; } static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) { int ctl; phy->autoneg = AUTONEG_DISABLE; phy->speed = speed; phy->duplex = fd; phy->pause = phy->asym_pause = 0; ctl = phy_read(phy, MII_BMCR); if (ctl < 0) return ctl; ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); /* First clear the PHY */ phy_write(phy, MII_BMCR, ctl | BMCR_RESET); /* Select speed & duplex */ switch (speed) { case SPEED_10: break; case SPEED_100: ctl |= BMCR_SPEED100; break; case SPEED_1000: ctl |= BMCR_SPEED1000; break; default: return -EINVAL; } if (fd == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; phy_write(phy, MII_BMCR, ctl); return 0; } static int genmii_poll_link(struct mii_phy *phy) { int status; /* Clear latched value with dummy read */ phy_read(phy, MII_BMSR); status = phy_read(phy, MII_BMSR); if (status < 0 || (status & BMSR_LSTATUS) == 0) return 0; if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE)) return 0; return 1; } static int genmii_read_link(struct mii_phy *phy) { if 
(phy->autoneg == AUTONEG_ENABLE) { int glpa = 0; int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE); if (lpa < 0) return lpa; if (phy->features & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) { int adv = phy_read(phy, MII_CTRL1000); glpa = phy_read(phy, MII_STAT1000); if (glpa < 0 || adv < 0) return adv; glpa &= adv << 2; } phy->speed = SPEED_10; phy->duplex = DUPLEX_HALF; phy->pause = phy->asym_pause = 0; if (glpa & (LPA_1000FULL | LPA_1000HALF)) { phy->speed = SPEED_1000; if (glpa & LPA_1000FULL) phy->duplex = DUPLEX_FULL; } else if (lpa & (LPA_100FULL | LPA_100HALF)) { phy->speed = SPEED_100; if (lpa & LPA_100FULL) phy->duplex = DUPLEX_FULL; } else if (lpa & LPA_10FULL) phy->duplex = DUPLEX_FULL; if (phy->duplex == DUPLEX_FULL) { phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0; } } else { int bmcr = phy_read(phy, MII_BMCR); if (bmcr < 0) return bmcr; if (bmcr & BMCR_FULLDPLX) phy->duplex = DUPLEX_FULL; else phy->duplex = DUPLEX_HALF; if (bmcr & BMCR_SPEED1000) phy->speed = SPEED_1000; else if (bmcr & BMCR_SPEED100) phy->speed = SPEED_100; else phy->speed = SPEED_10; phy->pause = phy->asym_pause = 0; } return 0; } /* Generic implementation for most 10/100/1000 PHYs */ static struct mii_phy_ops generic_phy_ops = { .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link }; static struct mii_phy_def genmii_phy_def = { .phy_id = 0x00000000, .phy_id_mask = 0x00000000, .name = "Generic MII", .ops = &generic_phy_ops }; /* CIS8201 */ #define MII_CIS8201_10BTCSR 0x16 #define TENBTCSR_ECHO_DISABLE 0x2000 #define MII_CIS8201_EPCR 0x17 #define EPCR_MODE_MASK 0x3000 #define EPCR_GMII_MODE 0x0000 #define EPCR_RGMII_MODE 0x1000 #define EPCR_TBI_MODE 0x2000 #define EPCR_RTBI_MODE 0x3000 #define MII_CIS8201_ACSR 0x1c #define ACSR_PIN_PRIO_SELECT 0x0004 static int cis8201_init(struct mii_phy *phy) { int epcr; epcr = phy_read(phy, 
MII_CIS8201_EPCR); if (epcr < 0) return epcr; epcr &= ~EPCR_MODE_MASK; switch (phy->mode) { case PHY_MODE_TBI: epcr |= EPCR_TBI_MODE; break; case PHY_MODE_RTBI: epcr |= EPCR_RTBI_MODE; break; case PHY_MODE_GMII: epcr |= EPCR_GMII_MODE; break; case PHY_MODE_RGMII: default: epcr |= EPCR_RGMII_MODE; } phy_write(phy, MII_CIS8201_EPCR, epcr); /* MII regs override strap pins */ phy_write(phy, MII_CIS8201_ACSR, phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT); /* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */ phy_write(phy, MII_CIS8201_10BTCSR, phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE); return 0; } static struct mii_phy_ops cis8201_phy_ops = { .init = cis8201_init, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link }; static struct mii_phy_def cis8201_phy_def = { .phy_id = 0x000fc410, .phy_id_mask = 0x000ffff0, .name = "CIS8201 Gigabit Ethernet", .ops = &cis8201_phy_ops }; static struct mii_phy_def bcm5248_phy_def = { .phy_id = 0x0143bc00, .phy_id_mask = 0x0ffffff0, .name = "BCM5248 10/100 SMII Ethernet", .ops = &generic_phy_ops }; static int m88e1111_init(struct mii_phy *phy) { pr_debug("%s: Marvell 88E1111 Ethernet\n", __FUNCTION__); phy_write(phy, 0x14, 0x0ce3); phy_write(phy, 0x18, 0x4101); phy_write(phy, 0x09, 0x0e00); phy_write(phy, 0x04, 0x01e1); phy_write(phy, 0x00, 0x9140); phy_write(phy, 0x00, 0x1140); return 0; } static int et1011c_init(struct mii_phy *phy) { u16 reg_short; reg_short = (u16)(phy_read(phy, 0x16)); reg_short &= ~(0x7); reg_short |= 0x6; /* RGMII Trace Delay*/ phy_write(phy, 0x16, reg_short); reg_short = (u16)(phy_read(phy, 0x17)); reg_short &= ~(0x40); phy_write(phy, 0x17, reg_short); phy_write(phy, 0x1c, 0x74f0); return 0; } static struct mii_phy_ops et1011c_phy_ops = { .init = et1011c_init, .setup_aneg = genmii_setup_aneg, .setup_forced = genmii_setup_forced, .poll_link = genmii_poll_link, .read_link = genmii_read_link }; 
static struct mii_phy_def et1011c_phy_def = {
	.phy_id		= 0x0282f000,
	.phy_id_mask	= 0x0fffff00,
	.name		= "ET1011C Gigabit Ethernet",
	.ops		= &et1011c_phy_ops
};

static struct mii_phy_ops m88e1111_phy_ops = {
	.init		= m88e1111_init,
	.setup_aneg	= genmii_setup_aneg,
	.setup_forced	= genmii_setup_forced,
	.poll_link	= genmii_poll_link,
	.read_link	= genmii_read_link
};

static struct mii_phy_def m88e1111_phy_def = {
	.phy_id		= 0x01410CC0,
	.phy_id_mask	= 0x0ffffff0,
	.name		= "Marvell 88E1111 Ethernet",
	.ops		= &m88e1111_phy_ops,
};

/*
 * PHY match table, scanned in order by emac_mii_phy_probe().  The
 * all-zero-mask generic entry is last so it matches any ID that no
 * specific entry claimed.
 */
static struct mii_phy_def *mii_phy_table[] = {
	&et1011c_phy_def,
	&cis8201_phy_def,
	&bcm5248_phy_def,
	&m88e1111_phy_def,
	&genmii_phy_def,
	NULL
};

/*
 * Probe the PHY at @address: reset it, read its ID, bind the matching
 * mii_phy_def from mii_phy_table, and fill in the supported-features
 * and default-advertising masks.
 *
 * Returns 0 on success or -ENODEV if the PHY fails to reset.
 */
int emac_mii_phy_probe(struct mii_phy *phy, int address)
{
	struct mii_phy_def *def;
	int i;
	u32 id;

	/* Start from a known state: no autoneg, 10/half, no pause. */
	phy->autoneg = AUTONEG_DISABLE;
	phy->advertising = 0;
	phy->address = address;
	phy->speed = SPEED_10;
	phy->duplex = DUPLEX_HALF;
	phy->pause = phy->asym_pause = 0;

	/* Take PHY out of isolate mode and reset it. */
	if (emac_mii_reset_phy(phy))
		return -ENODEV;

	/* Read ID and find matching entry */
	id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2);
	for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
		if ((id & def->phy_id_mask) == def->phy_id)
			break;
	/* Should never be NULL (we have a generic entry), but... */
	if (!def)
		return -ENODEV;

	phy->def = def;

	/* Determine PHY features if needed: a zero feature mask in the
	 * table entry means "discover from BMSR/ESTATUS". */
	phy->features = def->features;
	if (!phy->features) {
		u16 bmsr = phy_read(phy, MII_BMSR);
		if (bmsr & BMSR_ANEGCAPABLE)
			phy->features |= SUPPORTED_Autoneg;
		if (bmsr & BMSR_10HALF)
			phy->features |= SUPPORTED_10baseT_Half;
		if (bmsr & BMSR_10FULL)
			phy->features |= SUPPORTED_10baseT_Full;
		if (bmsr & BMSR_100HALF)
			phy->features |= SUPPORTED_100baseT_Half;
		if (bmsr & BMSR_100FULL)
			phy->features |= SUPPORTED_100baseT_Full;
		if (bmsr & BMSR_ESTATEN) {
			/* Extended status register holds gigabit bits */
			u16 esr = phy_read(phy, MII_ESTATUS);
			if (esr & ESTATUS_1000_TFULL)
				phy->features |= SUPPORTED_1000baseT_Full;
			if (esr & ESTATUS_1000_THALF)
				phy->features |= SUPPORTED_1000baseT_Half;
		}
		phy->features |= SUPPORTED_MII;
	}

	/* Setup default advertising */
	phy->advertising = phy->features;

	return 0;
}

MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/htc-kernel-msm7227
sound/usb/usbaudio.c
302
109924
/* * (Tentative) USB Audio Driver for ALSA * * Main and PCM part * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * Many codes borrowed from audio.c by * Alan Cox (alan@lxorguk.ukuu.org.uk) * Thomas Sailer (sailer@ife.ee.ethz.ch) * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * NOTES: * * - async unlink should be used for avoiding the sleep inside lock. * 2.4.22 usb-uhci seems buggy for async unlinking and results in * oops. in such a cse, pass async_unlink=0 option. * - the linked URBs would be preferred but not used so far because of * the instability of unlinking. * - type II is not supported properly. there is no device which supports * this type *correctly*. SB extigy looks as if it supports, but it's * indeed an AC3 stream packed in SPDIF frames (i.e. no real AC3 stream). 
*/ #include <linux/bitops.h> #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/vmalloc.h> #include <linux/moduleparam.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/info.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include "usbaudio.h" MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("USB Audio"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Generic,USB Audio}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;/* Enable this card */ /* Vendor/product IDs for this card */ static int vid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 }; static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 }; static int nrpacks = 8; /* max. number of packets per urb */ static int async_unlink = 1; static int device_setup[SNDRV_CARDS]; /* device parameter for this card*/ static int ignore_ctl_error; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the USB audio adapter."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the USB audio adapter."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable USB audio adapter."); module_param_array(vid, int, NULL, 0444); MODULE_PARM_DESC(vid, "Vendor ID for the USB audio device."); module_param_array(pid, int, NULL, 0444); MODULE_PARM_DESC(pid, "Product ID for the USB audio device."); module_param(nrpacks, int, 0644); MODULE_PARM_DESC(nrpacks, "Max. 
number of packets per URB."); module_param(async_unlink, bool, 0444); MODULE_PARM_DESC(async_unlink, "Use async unlink mode."); module_param_array(device_setup, int, NULL, 0444); MODULE_PARM_DESC(device_setup, "Specific device setup (if needed)."); module_param(ignore_ctl_error, bool, 0444); MODULE_PARM_DESC(ignore_ctl_error, "Ignore errors from USB controller for mixer interfaces."); /* * debug the h/w constraints */ /* #define HW_CONST_DEBUG */ /* * */ #define MAX_PACKS 20 #define MAX_PACKS_HS (MAX_PACKS * 8) /* in high speed mode */ #define MAX_URBS 8 #define SYNC_URBS 4 /* always four urbs for sync */ #define MAX_QUEUE 24 /* try not to exceed this queue length, in ms */ struct audioformat { struct list_head list; snd_pcm_format_t format; /* format type */ unsigned int channels; /* # channels */ unsigned int fmt_type; /* USB audio format type (1-3) */ unsigned int frame_size; /* samples per frame for non-audio */ int iface; /* interface number */ unsigned char altsetting; /* corresponding alternate setting */ unsigned char altset_idx; /* array index of altenate setting */ unsigned char attributes; /* corresponding attributes of cs endpoint */ unsigned char endpoint; /* endpoint */ unsigned char ep_attr; /* endpoint attributes */ unsigned char datainterval; /* log_2 of data packet interval */ unsigned int maxpacksize; /* max. 
packet size */ unsigned int rates; /* rate bitmasks */ unsigned int rate_min, rate_max; /* min/max rates */ unsigned int nr_rates; /* number of rate table entries */ unsigned int *rate_table; /* rate table */ }; struct snd_usb_substream; struct snd_urb_ctx { struct urb *urb; unsigned int buffer_size; /* size of data buffer, if data URB */ struct snd_usb_substream *subs; int index; /* index for urb array */ int packets; /* number of packets per urb */ }; struct snd_urb_ops { int (*prepare)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u); int (*retire)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u); int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u); int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u); }; struct snd_usb_substream { struct snd_usb_stream *stream; struct usb_device *dev; struct snd_pcm_substream *pcm_substream; int direction; /* playback or capture */ int interface; /* current interface */ int endpoint; /* assigned endpoint */ struct audioformat *cur_audiofmt; /* current audioformat pointer (for hw_params callback) */ unsigned int cur_rate; /* current rate (for hw_params callback) */ unsigned int period_bytes; /* current period bytes (for hw_params callback) */ unsigned int format; /* USB data format */ unsigned int datapipe; /* the data i/o pipe */ unsigned int syncpipe; /* 1 - async out or adaptive in */ unsigned int datainterval; /* log_2 of data packet interval */ unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */ unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */ unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */ unsigned int freqmax; /* maximum sampling rate, used for buffer management */ unsigned int phase; /* phase accumulator */ unsigned int maxpacksize; /* max packet size in bytes */ unsigned int maxframesize; /* max 
packet size in frames */ unsigned int curpacksize; /* current packet size in bytes (for capture) */ unsigned int curframesize; /* current packet size in frames (for capture) */ unsigned int fill_max: 1; /* fill max packet size always */ unsigned int fmt_type; /* USB audio format type (1-3) */ unsigned int running: 1; /* running status */ unsigned int hwptr_done; /* processed frame position in the buffer */ unsigned int transfer_done; /* processed frames since last period update */ unsigned long active_mask; /* bitmask of active urbs */ unsigned long unlink_mask; /* bitmask of unlinked urbs */ unsigned int nurbs; /* # urbs */ struct snd_urb_ctx dataurb[MAX_URBS]; /* data urb table */ struct snd_urb_ctx syncurb[SYNC_URBS]; /* sync urb table */ char *syncbuf; /* sync buffer for all sync URBs */ dma_addr_t sync_dma; /* DMA address of syncbuf */ u64 formats; /* format bitmasks (all or'ed) */ unsigned int num_formats; /* number of supported audio formats (list) */ struct list_head fmt_list; /* format list */ struct snd_pcm_hw_constraint_list rate_list; /* limited rates */ spinlock_t lock; struct snd_urb_ops ops; /* callbacks (must be filled at init) */ }; struct snd_usb_stream { struct snd_usb_audio *chip; struct snd_pcm *pcm; int pcm_index; unsigned int fmt_type; /* USB audio format type (1-3) */ struct snd_usb_substream substream[2]; struct list_head list; }; /* * we keep the snd_usb_audio_t instances by ourselves for merging * the all interfaces on the same card as one sound device. 
*/ static DEFINE_MUTEX(register_mutex); static struct snd_usb_audio *usb_chip[SNDRV_CARDS]; /* * convert a sampling rate into our full speed format (fs/1000 in Q16.16) * this will overflow at approx 524 kHz */ static inline unsigned get_usb_full_speed_rate(unsigned int rate) { return ((rate << 13) + 62) / 125; } /* * convert a sampling rate into USB high speed format (fs/8000 in Q16.16) * this will overflow at approx 4 MHz */ static inline unsigned get_usb_high_speed_rate(unsigned int rate) { return ((rate << 10) + 62) / 125; } /* convert our full speed USB rate into sampling rate in Hz */ static inline unsigned get_full_speed_hz(unsigned int usb_rate) { return (usb_rate * 125 + (1 << 12)) >> 13; } /* convert our high speed USB rate into sampling rate in Hz */ static inline unsigned get_high_speed_hz(unsigned int usb_rate) { return (usb_rate * 125 + (1 << 9)) >> 10; } /* * prepare urb for full speed capture sync pipe * * fill the length and offset of each urb descriptor. * the fixed 10.14 frequency is passed through the pipe. */ static int prepare_capture_sync_urb(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { unsigned char *cp = urb->transfer_buffer; struct snd_urb_ctx *ctx = urb->context; urb->dev = ctx->subs->dev; /* we need to set this at each time */ urb->iso_frame_desc[0].length = 3; urb->iso_frame_desc[0].offset = 0; cp[0] = subs->freqn >> 2; cp[1] = subs->freqn >> 10; cp[2] = subs->freqn >> 18; return 0; } /* * prepare urb for high speed capture sync pipe * * fill the length and offset of each urb descriptor. * the fixed 12.13 frequency is passed as 16.16 through the pipe. 
*/ static int prepare_capture_sync_urb_hs(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { unsigned char *cp = urb->transfer_buffer; struct snd_urb_ctx *ctx = urb->context; urb->dev = ctx->subs->dev; /* we need to set this at each time */ urb->iso_frame_desc[0].length = 4; urb->iso_frame_desc[0].offset = 0; cp[0] = subs->freqn; cp[1] = subs->freqn >> 8; cp[2] = subs->freqn >> 16; cp[3] = subs->freqn >> 24; return 0; } /* * process after capture sync complete * - nothing to do */ static int retire_capture_sync_urb(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { return 0; } /* * prepare urb for capture data pipe * * fill the offset and length of each descriptor. * * we use a temporary buffer to write the captured data. * since the length of written data is determined by host, we cannot * write onto the pcm buffer directly... the data is thus copied * later at complete callback to the global buffer. */ static int prepare_capture_urb(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { int i, offs; struct snd_urb_ctx *ctx = urb->context; offs = 0; urb->dev = ctx->subs->dev; /* we need to set this at each time */ for (i = 0; i < ctx->packets; i++) { urb->iso_frame_desc[i].offset = offs; urb->iso_frame_desc[i].length = subs->curpacksize; offs += subs->curpacksize; } urb->transfer_buffer_length = offs; urb->number_of_packets = ctx->packets; return 0; } /* * process after capture complete * * copy the data from each desctiptor to the pcm buffer, and * update the current position. 
*/ static int retire_capture_urb(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { unsigned long flags; unsigned char *cp; int i; unsigned int stride, len, oldptr; int period_elapsed = 0; stride = runtime->frame_bits >> 3; for (i = 0; i < urb->number_of_packets; i++) { cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset; if (urb->iso_frame_desc[i].status) { snd_printd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status); // continue; } len = urb->iso_frame_desc[i].actual_length / stride; if (! len) continue; /* update the current pointer */ spin_lock_irqsave(&subs->lock, flags); oldptr = subs->hwptr_done; subs->hwptr_done += len; if (subs->hwptr_done >= runtime->buffer_size) subs->hwptr_done -= runtime->buffer_size; subs->transfer_done += len; if (subs->transfer_done >= runtime->period_size) { subs->transfer_done -= runtime->period_size; period_elapsed = 1; } spin_unlock_irqrestore(&subs->lock, flags); /* copy a data chunk */ if (oldptr + len > runtime->buffer_size) { unsigned int cnt = runtime->buffer_size - oldptr; unsigned int blen = cnt * stride; memcpy(runtime->dma_area + oldptr * stride, cp, blen); memcpy(runtime->dma_area, cp + blen, len * stride - blen); } else { memcpy(runtime->dma_area + oldptr * stride, cp, len * stride); } } if (period_elapsed) snd_pcm_period_elapsed(subs->pcm_substream); return 0; } /* * Process after capture complete when paused. Nothing to do. */ static int retire_paused_capture_urb(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { return 0; } /* * prepare urb for full speed playback sync pipe * * set up the offset and length to receive the current frequency. 
*/ static int prepare_playback_sync_urb(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { struct snd_urb_ctx *ctx = urb->context; urb->dev = ctx->subs->dev; /* we need to set this at each time */ urb->iso_frame_desc[0].length = 3; urb->iso_frame_desc[0].offset = 0; return 0; } /* * prepare urb for high speed playback sync pipe * * set up the offset and length to receive the current frequency. */ static int prepare_playback_sync_urb_hs(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { struct snd_urb_ctx *ctx = urb->context; urb->dev = ctx->subs->dev; /* we need to set this at each time */ urb->iso_frame_desc[0].length = 4; urb->iso_frame_desc[0].offset = 0; return 0; } /* * process after full speed playback sync complete * * retrieve the current 10.14 frequency from pipe, and set it. * the value is referred in prepare_playback_urb(). */ static int retire_playback_sync_urb(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { unsigned int f; unsigned long flags; if (urb->iso_frame_desc[0].status == 0 && urb->iso_frame_desc[0].actual_length == 3) { f = combine_triple((u8*)urb->transfer_buffer) << 2; if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) { spin_lock_irqsave(&subs->lock, flags); subs->freqm = f; spin_unlock_irqrestore(&subs->lock, flags); } } return 0; } /* * process after high speed playback sync complete * * retrieve the current 12.13 frequency from pipe, and set it. * the value is referred in prepare_playback_urb(). 
*/ static int retire_playback_sync_urb_hs(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { unsigned int f; unsigned long flags; if (urb->iso_frame_desc[0].status == 0 && urb->iso_frame_desc[0].actual_length == 4) { f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff; if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) { spin_lock_irqsave(&subs->lock, flags); subs->freqm = f; spin_unlock_irqrestore(&subs->lock, flags); } } return 0; } /* * process after E-Mu 0202/0404/Tracker Pre high speed playback sync complete * * These devices return the number of samples per packet instead of the number * of samples per microframe. */ static int retire_playback_sync_urb_hs_emu(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *urb) { unsigned int f; unsigned long flags; if (urb->iso_frame_desc[0].status == 0 && urb->iso_frame_desc[0].actual_length == 4) { f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff; f >>= subs->datainterval; if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) { spin_lock_irqsave(&subs->lock, flags); subs->freqm = f; spin_unlock_irqrestore(&subs->lock, flags); } } return 0; } /* determine the number of frames in the next packet */ static int snd_usb_audio_next_packet_size(struct snd_usb_substream *subs) { if (subs->fill_max) return subs->maxframesize; else { subs->phase = (subs->phase & 0xffff) + (subs->freqm << subs->datainterval); return min(subs->phase >> 16, subs->maxframesize); } } /* * Prepare urb for streaming before playback starts or when paused. * * We don't have any data, so we send silence. 
*/
/*
 * Prepare a playback URB that carries no PCM data (silence).
 * Used while the stream is prepared/paused so the isochronous pipe keeps
 * running at the nominal rate.  Fills every packet with the format's
 * silence byte (0x80 for unsigned 8-bit, 0 otherwise).
 */
static int prepare_nodata_playback_urb(struct snd_usb_substream *subs,
				       struct snd_pcm_runtime *runtime,
				       struct urb *urb)
{
	unsigned int i, offs, counts;
	struct snd_urb_ctx *ctx = urb->context;
	int stride = runtime->frame_bits >> 3;	/* bytes per frame */

	offs = 0;
	urb->dev = ctx->subs->dev;
	for (i = 0; i < ctx->packets; ++i) {
		/* packet size follows the rate feedback / nominal frequency */
		counts = snd_usb_audio_next_packet_size(subs);
		urb->iso_frame_desc[i].offset = offs * stride;
		urb->iso_frame_desc[i].length = counts * stride;
		offs += counts;
	}
	urb->number_of_packets = ctx->packets;
	urb->transfer_buffer_length = offs * stride;
	memset(urb->transfer_buffer,
	       subs->cur_audiofmt->format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0,
	       offs * stride);
	return 0;
}

/*
 * prepare urb for playback data pipe
 *
 * Since a URB can handle only a single linear buffer, we must use double
 * buffering when the data to be transferred overflows the buffer boundary.
 * To avoid inconsistencies when updating hwptr_done, we use double buffering
 * for all URBs.
 */
static int prepare_playback_urb(struct snd_usb_substream *subs,
				struct snd_pcm_runtime *runtime,
				struct urb *urb)
{
	int i, stride, offs;
	unsigned int counts;
	unsigned long flags;
	int period_elapsed = 0;
	struct snd_urb_ctx *ctx = urb->context;

	stride = runtime->frame_bits >> 3;

	offs = 0;
	urb->dev = ctx->subs->dev; /* we need to set this at each time */
	urb->number_of_packets = 0;
	spin_lock_irqsave(&subs->lock, flags);
	for (i = 0; i < ctx->packets; i++) {
		counts = snd_usb_audio_next_packet_size(subs);
		/* set up descriptor */
		urb->iso_frame_desc[i].offset = offs * stride;
		urb->iso_frame_desc[i].length = counts * stride;
		offs += counts;
		urb->number_of_packets++;
		subs->transfer_done += counts;
		if (subs->transfer_done >= runtime->period_size) {
			subs->transfer_done -= runtime->period_size;
			period_elapsed = 1;
			if (subs->fmt_type == USB_FORMAT_TYPE_II) {
				if (subs->transfer_done > 0) {
					/* FIXME: fill-max mode is not
					 * supported yet */
					/* trim the packet so it ends exactly
					 * at the period boundary */
					offs -= subs->transfer_done;
					counts -= subs->transfer_done;
					urb->iso_frame_desc[i].length =
						counts * stride;
					subs->transfer_done = 0;
				}
				i++;
				if (i < ctx->packets) {
					/* add a transfer delimiter */
					urb->iso_frame_desc[i].offset =
						offs * stride;
					urb->iso_frame_desc[i].length = 0;
					urb->number_of_packets++;
				}
				break;
			}
		}
		if (period_elapsed) /* finish at the period boundary */
			break;
	}
	if (subs->hwptr_done + offs > runtime->buffer_size) {
		/* err, the transferred area goes over buffer boundary. */
		unsigned int len = runtime->buffer_size - subs->hwptr_done;
		memcpy(urb->transfer_buffer,
		       runtime->dma_area + subs->hwptr_done * stride,
		       len * stride);
		memcpy(urb->transfer_buffer + len * stride,
		       runtime->dma_area, (offs - len) * stride);
	} else {
		memcpy(urb->transfer_buffer,
		       runtime->dma_area + subs->hwptr_done * stride,
		       offs * stride);
	}
	subs->hwptr_done += offs;
	if (subs->hwptr_done >= runtime->buffer_size)
		subs->hwptr_done -= runtime->buffer_size;
	/* queued frames are counted in the delay until the URB completes */
	runtime->delay += offs;
	spin_unlock_irqrestore(&subs->lock, flags);
	urb->transfer_buffer_length = offs * stride;
	if (period_elapsed)
		snd_pcm_period_elapsed(subs->pcm_substream);
	return 0;
}

/*
 * process after playback data complete
 * - decrease the delay count again
 */
static int retire_playback_urb(struct snd_usb_substream *subs,
			       struct snd_pcm_runtime *runtime,
			       struct urb *urb)
{
	unsigned long flags;
	int stride = runtime->frame_bits >> 3;
	int processed = urb->transfer_buffer_length / stride;

	spin_lock_irqsave(&subs->lock, flags);
	/* clamp at zero; the URB may be larger than the remaining delay */
	if (processed > runtime->delay)
		runtime->delay = 0;
	else
		runtime->delay -= processed;
	spin_unlock_irqrestore(&subs->lock, flags);
	return 0;
}

/*
 * URB callback tables, indexed by stream direction
 * ([0] = playback, [1] = capture); full-speed variant.
 */
static struct snd_urb_ops audio_urb_ops[2] = {
	{
		.prepare =	prepare_nodata_playback_urb,
		.retire =	retire_playback_urb,
		.prepare_sync =	prepare_playback_sync_urb,
		.retire_sync =	retire_playback_sync_urb,
	},
	{
		.prepare =	prepare_capture_urb,
		.retire =	retire_capture_urb,
		.prepare_sync =	prepare_capture_sync_urb,
		.retire_sync =	retire_capture_sync_urb,
	},
};

/* same table for high-speed devices (different sync-URB handlers) */
static struct snd_urb_ops audio_urb_ops_high_speed[2] = {
	{
		.prepare =	prepare_nodata_playback_urb,
		.retire =	retire_playback_urb,
		.prepare_sync =	prepare_playback_sync_urb_hs,
		.retire_sync =	retire_playback_sync_urb_hs,
	},
	{
		.prepare =	prepare_capture_urb,
		.retire =	retire_capture_urb,
		.prepare_sync =	prepare_capture_sync_urb_hs,
		.retire_sync =	retire_capture_sync_urb,
	},
};

/*
 * complete callback from data urb
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_substream *subs = ctx->subs;
	struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
	int err = 0;

	/* retire, re-prepare and resubmit; on any failure (or when the
	 * stream stopped), drop the URB from the active set */
	if ((subs->running && subs->ops.retire(subs, substream->runtime, urb)) ||
	    !subs->running || /* can be stopped during retire callback */
	    (err = subs->ops.prepare(subs, substream->runtime, urb)) < 0 ||
	    (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
		clear_bit(ctx->index, &subs->active_mask);
		if (err < 0) {
			snd_printd(KERN_ERR "cannot submit urb (err = %d)\n", err);
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		}
	}
}

/*
 * complete callback from sync urb
 */
static void snd_complete_sync_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_substream *subs = ctx->subs;
	struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
	int err = 0;

	if ((subs->running && subs->ops.retire_sync(subs, substream->runtime, urb)) ||
	    !subs->running || /* can be stopped during retire callback */
	    (err = subs->ops.prepare_sync(subs, substream->runtime, urb)) < 0 ||
	    (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
		/* sync URBs occupy bits 16.. of active_mask */
		clear_bit(ctx->index + 16, &subs->active_mask);
		if (err < 0) {
			snd_printd(KERN_ERR "cannot submit sync urb (err = %d)\n", err);
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		}
	}
}

/* get the physical page pointer at the given offset */
static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
					     unsigned long offset)
{
	void *pageptr = subs->runtime->dma_area + offset;
	return vmalloc_to_page(pageptr);
}

/* allocate virtual buffer; may be called more than once */
static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t size)
{
	struct snd_pcm_runtime *runtime = subs->runtime;

	if (runtime->dma_area) {
		if (runtime->dma_bytes >= size)
			return 0; /* already large enough */
		vfree(runtime->dma_area);
	}
	runtime->dma_area = vmalloc_user(size);
	if (!runtime->dma_area)
		return -ENOMEM;
	runtime->dma_bytes = size;
	return 0;
}

/* free virtual buffer; may be called more than once */
static int snd_pcm_free_vmalloc_buffer(struct snd_pcm_substream *subs)
{
	struct snd_pcm_runtime *runtime = subs->runtime;

	vfree(runtime->dma_area);
	runtime->dma_area = NULL;
	return 0;
}

/*
 * unlink active urbs.
 */
static int deactivate_urbs(struct snd_usb_substream *subs, int force, int can_sleep)
{
	unsigned int i;
	int async;

	subs->running = 0;

	if (!force && subs->stream->chip->shutdown) /* to be sure... */
		return -EBADFD;

	/* async (usb_unlink_urb) only when we must not sleep and the
	 * module-level async_unlink option allows it */
	async = !can_sleep && async_unlink;

	if (!async && in_interrupt())
		return 0;

	for (i = 0; i < subs->nurbs; i++) {
		if (test_bit(i, &subs->active_mask)) {
			/* unlink_mask guards against unlinking twice */
			if (!test_and_set_bit(i, &subs->unlink_mask)) {
				struct urb *u = subs->dataurb[i].urb;
				if (async)
					usb_unlink_urb(u);
				else
					usb_kill_urb(u);
			}
		}
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			if (test_bit(i+16, &subs->active_mask)) {
				if (!test_and_set_bit(i+16, &subs->unlink_mask)) {
					struct urb *u = subs->syncurb[i].urb;
					if (async)
						usb_unlink_urb(u);
					else
						usb_kill_urb(u);
				}
			}
		}
	}
	return 0;
}

/* translate a USB submission error code to a human-readable string */
static const char *usb_error_string(int err)
{
	switch (err) {
	case -ENODEV:
		return "no device";
	case -ENOENT:
		return "endpoint not enabled";
	case -EPIPE:
		return "endpoint stalled";
	case -ENOSPC:
		return "not enough bandwidth";
	case -ESHUTDOWN:
		return "device disabled";
	case -EHOSTUNREACH:
		return "device suspended";
	case -EINVAL:
	case -EAGAIN:
	case -EFBIG:
	case -EMSGSIZE:
		return "internal error";
	default:
		return "unknown error";
	}
}

/*
 * set up and start data/sync urbs
 */
static int start_urbs(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime)
{
	unsigned int i;
	int err;

	if (subs->stream->chip->shutdown)
		return -EBADFD;

	/* prepare everything first, so submission failures can't leave
	 * half-initialized URBs in flight */
	for (i = 0; i < subs->nurbs; i++) {
		if (snd_BUG_ON(!subs->dataurb[i].urb))
			return -EINVAL;
		if (subs->ops.prepare(subs, runtime, subs->dataurb[i].urb) < 0) {
			snd_printk(KERN_ERR "cannot prepare datapipe for urb %d\n", i);
			goto __error;
		}
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			if (snd_BUG_ON(!subs->syncurb[i].urb))
				return -EINVAL;
			if (subs->ops.prepare_sync(subs, runtime, subs->syncurb[i].urb) < 0) {
				snd_printk(KERN_ERR "cannot prepare syncpipe for urb %d\n", i);
				goto __error;
			}
		}
	}

	subs->active_mask = 0;
	subs->unlink_mask = 0;
	subs->running = 1;
	for (i = 0; i < subs->nurbs; i++) {
		err = usb_submit_urb(subs->dataurb[i].urb, GFP_ATOMIC);
		if (err < 0) {
			snd_printk(KERN_ERR "cannot submit datapipe "
				   "for urb %d, error %d: %s\n",
				   i, err, usb_error_string(err));
			goto __error;
		}
		set_bit(i, &subs->active_mask);
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			err = usb_submit_urb(subs->syncurb[i].urb, GFP_ATOMIC);
			if (err < 0) {
				snd_printk(KERN_ERR "cannot submit syncpipe "
					   "for urb %d, error %d: %s\n",
					   i, err, usb_error_string(err));
				goto __error;
			}
			set_bit(i + 16, &subs->active_mask);
		}
	}
	return 0;

 __error:
	// snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
	deactivate_urbs(subs, 0, 0);
	return -EPIPE;
}

/*
 * wait until all urbs are processed.
 * Polls active_mask for up to 1 second; returns 0 regardless, logging
 * a warning if some URBs are still active at timeout.
 */
static int wait_clear_urbs(struct snd_usb_substream *subs)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	unsigned int i;
	int alive;

	do {
		alive = 0;
		for (i = 0; i < subs->nurbs; i++) {
			if (test_bit(i, &subs->active_mask))
				alive++;
		}
		if (subs->syncpipe) {
			for (i = 0; i < SYNC_URBS; i++) {
				if (test_bit(i + 16, &subs->active_mask))
					alive++;
			}
		}
		if (! alive)
			break;
		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));
	if (alive)
		snd_printk(KERN_ERR "timeout: still %d active urbs..\n", alive);
	return 0;
}

/*
 * return the current pcm pointer.  just return the hwptr_done value.
 */
static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_usb_substream *subs;
	snd_pcm_uframes_t hwptr_done;

	subs = (struct snd_usb_substream *)substream->runtime->private_data;
	spin_lock(&subs->lock);
	hwptr_done = subs->hwptr_done;
	spin_unlock(&subs->lock);
	return hwptr_done;
}

/*
 * start/stop playback substream
 * Playback URBs are already in flight (submitted in prepare), so
 * start/stop only swaps the prepare callback between data and silence.
 */
static int snd_usb_pcm_playback_trigger(struct snd_pcm_substream *substream,
					int cmd)
{
	struct snd_usb_substream *subs = substream->runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		subs->ops.prepare = prepare_playback_urb;
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
		return deactivate_urbs(subs, 0, 0);
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/* keep the pipe alive with silence while paused */
		subs->ops.prepare = prepare_nodata_playback_urb;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * start/stop capture substream
 */
static int snd_usb_pcm_capture_trigger(struct snd_pcm_substream *substream,
				       int cmd)
{
	struct snd_usb_substream *subs = substream->runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		subs->ops.retire = retire_capture_urb;
		return start_urbs(subs, substream->runtime);
	case SNDRV_PCM_TRIGGER_STOP:
		return deactivate_urbs(subs, 0, 0);
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/* keep URBs running but discard captured data */
		subs->ops.retire = retire_paused_capture_urb;
		return 0;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		subs->ops.retire = retire_capture_urb;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * release a urb data
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (u->urb) {
		if (u->buffer_size)
			usb_buffer_free(u->subs->dev, u->buffer_size,
					u->urb->transfer_buffer,
					u->urb->transfer_dma);
		usb_free_urb(u->urb);
		u->urb = NULL;
	}
}

/*
 * release a substream
 */
static void release_substream_urbs(struct snd_usb_substream *subs, int force)
{
	int i;

	/* stop urbs (to be sure) */
	deactivate_urbs(subs, force, 1);
	wait_clear_urbs(subs);

	for (i = 0; i < MAX_URBS; i++)
		release_urb_ctx(&subs->dataurb[i]);
	for (i = 0; i < SYNC_URBS; i++)
		release_urb_ctx(&subs->syncurb[i]);
	usb_buffer_free(subs->dev, SYNC_URBS * 4, subs->syncbuf,
			subs->sync_dma);
	subs->syncbuf = NULL;
	subs->nurbs = 0;
}

/*
 * initialize a substream for playback/capture
 */
static int init_substream_urbs(struct snd_usb_substream *subs,
			       unsigned int period_bytes,
			       unsigned int rate, unsigned int frame_bits)
{
	unsigned int maxsize, i;
	int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
	unsigned int urb_packs, total_packs, packs_per_ms;

	/* calculate the frequency in 16.16 format */
	if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
		subs->freqn = get_usb_full_speed_rate(rate);
	else
		subs->freqn = get_usb_high_speed_rate(rate);
	subs->freqm = subs->freqn;
	/* calculate max. frequency */
	if (subs->maxpacksize) {
		/* whatever fits into a max. size packet */
		maxsize = subs->maxpacksize;
		subs->freqmax = (maxsize / (frame_bits >> 3))
				<< (16 - subs->datainterval);
	} else {
		/* no max. packet size: just take 25% higher than nominal */
		subs->freqmax = subs->freqn + (subs->freqn >> 2);
		maxsize = ((subs->freqmax + 0xffff) * (frame_bits >> 3))
				>> (16 - subs->datainterval);
	}
	subs->phase = 0;

	if (subs->fill_max)
		subs->curpacksize = subs->maxpacksize;
	else
		subs->curpacksize = maxsize;

	if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH)
		packs_per_ms = 8 >> subs->datainterval;
	else
		packs_per_ms = 1;

	if (is_playback) {
		/* nrpacks is the module parameter; clamp to a sane range */
		urb_packs = max(nrpacks, 1);
		urb_packs = min(urb_packs, (unsigned int)MAX_PACKS);
	} else
		urb_packs = 1;
	urb_packs *= packs_per_ms;
	if (subs->syncpipe)
		urb_packs = min(urb_packs, 1U << subs->syncinterval);

	/* decide how many packets to be used */
	if (is_playback) {
		unsigned int minsize, maxpacks;
		/* determine how small a packet can be */
		minsize = (subs->freqn >> (16 - subs->datainterval))
			  * (frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (subs->syncpipe)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);
		total_packs = (period_bytes + minsize - 1) / minsize;
		/* we need at least two URBs for queueing */
		if (total_packs < 2) {
			total_packs = 2;
		} else {
			/* and we don't want too long a queue either */
			maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
			total_packs = min(total_packs, maxpacks);
		}
	} else {
		/* shrink URBs so one URB doesn't span a whole period */
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		total_packs = MAX_URBS * urb_packs;
	}
	subs->nurbs = (total_packs + urb_packs - 1) / urb_packs;
	if (subs->nurbs > MAX_URBS) {
		/* too much... */
		subs->nurbs = MAX_URBS;
		total_packs = MAX_URBS * urb_packs;
	} else if (subs->nurbs < 2) {
		/* too little - we need at least two packets
		 * to ensure contiguous playback/capture
		 */
		subs->nurbs = 2;
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < subs->nurbs; i++) {
		struct snd_urb_ctx *u = &subs->dataurb[i];
		u->index = i;
		u->subs = subs;
		/* distribute total_packs evenly over the URBs */
		u->packets = (i + 1) * total_packs / subs->nurbs
			- i * total_packs / subs->nurbs;
		u->buffer_size = maxsize * u->packets;
		if (subs->fmt_type == USB_FORMAT_TYPE_II)
			u->packets++;		/* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		u->urb->transfer_buffer =
			usb_buffer_alloc(subs->dev, u->buffer_size, GFP_KERNEL,
					 &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = subs->datapipe;
		u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << subs->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	if (subs->syncpipe) {
		/* allocate and initialize sync urbs */
		subs->syncbuf = usb_buffer_alloc(subs->dev, SYNC_URBS * 4,
						 GFP_KERNEL, &subs->sync_dma);
		if (!subs->syncbuf)
			goto out_of_memory;
		for (i = 0; i < SYNC_URBS; i++) {
			struct snd_urb_ctx *u = &subs->syncurb[i];
			u->index = i;
			u->subs = subs;
			u->packets = 1;
			u->urb = usb_alloc_urb(1, GFP_KERNEL);
			if (!u->urb)
				goto out_of_memory;
			/* each sync URB uses a 4-byte slot in syncbuf */
			u->urb->transfer_buffer = subs->syncbuf + i * 4;
			u->urb->transfer_dma = subs->sync_dma + i * 4;
			u->urb->transfer_buffer_length = 4;
			u->urb->pipe = subs->syncpipe;
			u->urb->transfer_flags = URB_ISO_ASAP |
						 URB_NO_TRANSFER_DMA_MAP;
			u->urb->number_of_packets = 1;
			u->urb->interval = 1 << subs->syncinterval;
			u->urb->context = u;
			u->urb->complete = snd_complete_sync_urb;
		}
	}
	return 0;

out_of_memory:
	release_substream_urbs(subs, 0);
	return -ENOMEM;
}

/*
 * find a matching audio format
 */
static struct audioformat *find_format(struct snd_usb_substream *subs,
				       unsigned int format,
				       unsigned int rate,
				       unsigned int channels)
{
	struct list_head *p;
	struct audioformat *found = NULL;
	int cur_attr = 0, attr;

	list_for_each(p, &subs->fmt_list) {
		struct audioformat *fp;
		fp = list_entry(p, struct audioformat, list);
		if (fp->format != format || fp->channels != channels)
			continue;
		if (rate < fp->rate_min || rate > fp->rate_max)
			continue;
		if (! (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)) {
			/* discrete rates: the exact rate must be listed */
			unsigned int i;
			for (i = 0; i < fp->nr_rates; i++)
				if (fp->rate_table[i] == rate)
					break;
			if (i >= fp->nr_rates)
				continue;
		}
		attr = fp->ep_attr & EP_ATTR_MASK;
		if (! found) {
			found = fp;
			cur_attr = attr;
			continue;
		}
		/* avoid async out and adaptive in if the other method
		 * supports the same format.
		 * this is a workaround for the case like
		 * M-audio audiophile USB.
		 */
		if (attr != cur_attr) {
			if ((attr == EP_ATTR_ASYNC &&
			     subs->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
			    (attr == EP_ATTR_ADAPTIVE &&
			     subs->direction == SNDRV_PCM_STREAM_CAPTURE))
				continue;
			if ((cur_attr == EP_ATTR_ASYNC &&
			     subs->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
			    (cur_attr == EP_ATTR_ADAPTIVE &&
			     subs->direction == SNDRV_PCM_STREAM_CAPTURE)) {
				found = fp;
				cur_attr = attr;
				continue;
			}
		}
		/* find the format with the largest max. packet size */
		if (fp->maxpacksize > found->maxpacksize) {
			found = fp;
			cur_attr = attr;
		}
	}
	return found;
}

/*
 * initialize the pitch control and sample rate
 */
static int init_usb_pitch(struct usb_device *dev, int iface,
			  struct usb_host_interface *alts,
			  struct audioformat *fmt)
{
	unsigned int ep;
	unsigned char data[1];
	int err;

	ep = get_endpoint(alts, 0)->bEndpointAddress;
	/* if endpoint has pitch control, enable it */
	if (fmt->attributes & EP_CS_ATTR_PITCH_CONTROL) {
		data[0] = 1;
		if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), SET_CUR,
					   USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT,
					   PITCH_CONTROL << 8, ep, data, 1, 1000)) < 0) {
			snd_printk(KERN_ERR "%d:%d:%d: cannot set enable PITCH\n",
				   dev->devnum, iface, ep);
			return err;
		}
	}
	return 0;
}

/* set the endpoint's sample rate (little-endian 3-byte value) and
 * verify it by reading back where supported */
static int init_usb_sample_rate(struct usb_device *dev, int iface,
				struct usb_host_interface *alts,
				struct audioformat *fmt, int rate)
{
	unsigned int ep;
	unsigned char data[3];
	int err;

	ep = get_endpoint(alts, 0)->bEndpointAddress;
	/* if endpoint has sampling rate control, set it */
	if (fmt->attributes & EP_CS_ATTR_SAMPLE_RATE) {
		int crate;
		data[0] = rate;
		data[1] = rate >> 8;
		data[2] = rate >> 16;
		if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), SET_CUR,
					   USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT,
					   SAMPLING_FREQ_CONTROL << 8, ep, data, 3, 1000)) < 0) {
			snd_printk(KERN_ERR "%d:%d:%d: cannot set freq %d to ep %#x\n",
				   dev->devnum, iface, fmt->altsetting, rate, ep);
			return err;
		}
		if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), GET_CUR,
					   USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_IN,
					   SAMPLING_FREQ_CONTROL << 8, ep, data, 3, 1000)) < 0) {
			snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq at ep %#x\n",
				   dev->devnum, iface, fmt->altsetting, ep);
			return 0; /* some devices don't support reading */
		}
		crate = data[0] | (data[1] << 8) | (data[2] << 16);
		if (crate != rate) {
			snd_printd(KERN_WARNING "current rate %d is different from the runtime rate %d\n", crate, rate);
			// runtime->rate = crate;
		}
	}
	return 0;
}

/*
 *
find a matching format and set up the interface
 */
static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
{
	struct usb_device *dev = subs->dev;
	struct usb_host_interface *alts;
	struct usb_interface_descriptor *altsd;
	struct usb_interface *iface;
	unsigned int ep, attr;
	int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
	int err;

	iface = usb_ifnum_to_if(dev, fmt->iface);
	if (WARN_ON(!iface))
		return -EINVAL;
	alts = &iface->altsetting[fmt->altset_idx];
	altsd = get_iface_desc(alts);
	if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting))
		return -EINVAL;

	if (fmt == subs->cur_audiofmt)
		return 0;	/* nothing to do */

	/* close the old interface */
	if (subs->interface >= 0 && subs->interface != fmt->iface) {
		if (usb_set_interface(subs->dev, subs->interface, 0) < 0) {
			snd_printk(KERN_ERR "%d:%d:%d: return to setting 0 failed\n",
				   dev->devnum, fmt->iface, fmt->altsetting);
			return -EIO;
		}
		subs->interface = -1;
		subs->format = 0;
	}

	/* set interface */
	if (subs->interface != fmt->iface || subs->format != fmt->altset_idx) {
		if (usb_set_interface(dev, fmt->iface, fmt->altsetting) < 0) {
			snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed\n",
				   dev->devnum, fmt->iface, fmt->altsetting);
			return -EIO;
		}
		snd_printdd(KERN_INFO "setting usb interface %d:%d\n",
			    fmt->iface, fmt->altsetting);
		subs->interface = fmt->iface;
		subs->format = fmt->altset_idx;
	}

	/* create a data pipe */
	ep = fmt->endpoint & USB_ENDPOINT_NUMBER_MASK;
	if (is_playback)
		subs->datapipe = usb_sndisocpipe(dev, ep);
	else
		subs->datapipe = usb_rcvisocpipe(dev, ep);
	subs->datainterval = fmt->datainterval;
	subs->syncpipe = subs->syncinterval = 0;
	subs->maxpacksize = fmt->maxpacksize;
	subs->fill_max = 0;

	/* we need a sync pipe in async OUT or adaptive IN mode */
	/* check the number of EP, since some devices have broken
	 * descriptors which fool us.  if it has only one EP,
	 * assume it as adaptive-out or sync-in.
	 */
	attr = fmt->ep_attr & EP_ATTR_MASK;
	if (((is_playback && attr == EP_ATTR_ASYNC) ||
	     (!is_playback && attr == EP_ATTR_ADAPTIVE)) &&
	    altsd->bNumEndpoints >= 2) {
		/* check sync-pipe endpoint */
		/* ... and check descriptor size before accessing bSynchAddress
		   because there is a version of the SB Audigy 2 NX firmware lacking
		   the audio fields in the endpoint descriptors */
		if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != 0x01 ||
		    (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
		     get_endpoint(alts, 1)->bSynchAddress != 0)) {
			snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
				   dev->devnum, fmt->iface, fmt->altsetting);
			return -EINVAL;
		}
		ep = get_endpoint(alts, 1)->bEndpointAddress;
		/* the sync endpoint must point back at the data endpoint
		 * (opposite direction) */
		if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
		    (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
		     (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
			snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n",
				   dev->devnum, fmt->iface, fmt->altsetting);
			return -EINVAL;
		}
		ep &= USB_ENDPOINT_NUMBER_MASK;
		if (is_playback)
			subs->syncpipe = usb_rcvisocpipe(dev, ep);
		else
			subs->syncpipe = usb_sndisocpipe(dev, ep);
		/* derive the sync interval: bRefresh if valid, else a
		 * speed-dependent default or bInterval - 1 */
		if (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
		    get_endpoint(alts, 1)->bRefresh >= 1 &&
		    get_endpoint(alts, 1)->bRefresh <= 9)
			subs->syncinterval = get_endpoint(alts, 1)->bRefresh;
		else if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
			subs->syncinterval = 1;
		else if (get_endpoint(alts, 1)->bInterval >= 1 &&
			 get_endpoint(alts, 1)->bInterval <= 16)
			subs->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
		else
			subs->syncinterval = 3;
	}

	/* always fill max packet size */
	if (fmt->attributes & EP_CS_ATTR_FILL_MAX)
		subs->fill_max = 1;

	if ((err = init_usb_pitch(dev, subs->interface, alts, fmt)) < 0)
		return err;

	subs->cur_audiofmt = fmt;

#if 0
	printk(KERN_DEBUG "setting done: format = %d, rate = %d..%d, channels = %d\n",
	       fmt->format, fmt->rate_min, fmt->rate_max, fmt->channels);
	printk(KERN_DEBUG "  datapipe = 0x%0x, syncpipe = 0x%0x\n",
	       subs->datapipe, subs->syncpipe);
#endif

	return 0;
}

/*
 * hw_params callback
 *
 * allocate a buffer and set the given audio format.
 *
 * so far we use a physically linear buffer although packetize transfer
 * doesn't need a continuous area.
 * if sg buffer is supported on the later version of alsa, we'll follow
 * that.
 */
static int snd_usb_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *hw_params)
{
	struct snd_usb_substream *subs = substream->runtime->private_data;
	struct audioformat *fmt;
	unsigned int channels, rate, format;
	int ret, changed;

	ret = snd_pcm_alloc_vmalloc_buffer(substream,
					   params_buffer_bytes(hw_params));
	if (ret < 0)
		return ret;

	format = params_format(hw_params);
	rate = params_rate(hw_params);
	channels = params_channels(hw_params);
	fmt = find_format(subs, format, rate, channels);
	if (!fmt) {
		snd_printd(KERN_DEBUG "cannot set format: format = %#x, rate = %d, channels = %d\n",
			   format, rate, channels);
		return -EINVAL;
	}

	/* detect whether the URB setup must be rebuilt */
	changed = subs->cur_audiofmt != fmt ||
		subs->period_bytes != params_period_bytes(hw_params) ||
		subs->cur_rate != rate;
	if ((ret = set_format(subs, fmt)) < 0)
		return ret;

	if (subs->cur_rate != rate) {
		struct usb_host_interface *alts;
		struct usb_interface *iface;
		iface = usb_ifnum_to_if(subs->dev, fmt->iface);
		alts = &iface->altsetting[fmt->altset_idx];
		ret = init_usb_sample_rate(subs->dev, subs->interface, alts,
					   fmt, rate);
		if (ret < 0)
			return ret;
		subs->cur_rate = rate;
	}

	if (changed) {
		/* format changed */
		release_substream_urbs(subs, 0);
		/* influenced: period_bytes, channels, rate, format, */
		ret = init_substream_urbs(subs, params_period_bytes(hw_params),
					  params_rate(hw_params),
					  snd_pcm_format_physical_width(params_format(hw_params)) *
						params_channels(hw_params));
	}

	return ret;
}

/*
 * hw_free callback
 *
 * reset the audio format and release the buffer
 */
static int snd_usb_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_usb_substream *subs = substream->runtime->private_data;

	subs->cur_audiofmt = NULL;
	subs->cur_rate = 0;
	subs->period_bytes = 0;
	if (!subs->stream->chip->shutdown)
		release_substream_urbs(subs, 0);
	return snd_pcm_free_vmalloc_buffer(substream);
}

/*
 * prepare callback
 *
 * only a few subtle things...
 */
static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_usb_substream *subs = runtime->private_data;

	if (! subs->cur_audiofmt) {
		snd_printk(KERN_ERR "usbaudio: no format is specified!\n");
		return -ENXIO;
	}

	/* some unit conversions in runtime */
	subs->maxframesize = bytes_to_frames(runtime, subs->maxpacksize);
	subs->curframesize = bytes_to_frames(runtime, subs->curpacksize);

	/* reset the pointer */
	subs->hwptr_done = 0;
	subs->transfer_done = 0;
	subs->phase = 0;
	runtime->delay = 0;

	/* clear urbs (to be sure) */
	deactivate_urbs(subs, 0, 1);
	wait_clear_urbs(subs);

	/* for playback, submit the URBs now; otherwise, the first hwptr_done
	 * updates for all URBs would happen at the same time when starting */
	if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
		subs->ops.prepare = prepare_nodata_playback_urb;
		return start_urbs(subs, runtime);
	} else
		return 0;
}

/* baseline hardware caps; rate/channel limits are refined per-device
 * in setup_hw_info() */
static struct snd_pcm_hardware snd_usb_hardware =
{
	.info =			SNDRV_PCM_INFO_MMAP |
				SNDRV_PCM_INFO_MMAP_VALID |
				SNDRV_PCM_INFO_BATCH |
				SNDRV_PCM_INFO_INTERLEAVED |
				SNDRV_PCM_INFO_BLOCK_TRANSFER |
				SNDRV_PCM_INFO_PAUSE,
	.buffer_bytes_max =	1024 * 1024,
	.period_bytes_min =	64,
	.period_bytes_max =	512 * 1024,
	.periods_min =		2,
	.periods_max =		1024,
};

/*
 * h/w constraints
 */

#ifdef HW_CONST_DEBUG
#define hwc_debug(fmt, args...) printk(KERN_DEBUG fmt, ##args)
#else
#define hwc_debug(fmt, args...)
/**/
#endif

/* return 1 if the given audioformat is compatible with the current
 * hw_params constraint space (format mask, channels, rate range and,
 * for high-speed devices, the period-time lower bound) */
static int hw_check_valid_format(struct snd_usb_substream *subs,
				 struct snd_pcm_hw_params *params,
				 struct audioformat *fp)
{
	struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *ct = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	struct snd_mask *fmts = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
	struct snd_interval *pt = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME);
	unsigned int ptime;

	/* check the format */
	if (!snd_mask_test(fmts, fp->format)) {
		hwc_debug("   > check: no supported format %d\n", fp->format);
		return 0;
	}
	/* check the channels */
	if (fp->channels < ct->min || fp->channels > ct->max) {
		hwc_debug("   > check: no valid channels %d (%d/%d)\n", fp->channels, ct->min, ct->max);
		return 0;
	}
	/* check the rate is within the range */
	if (fp->rate_min > it->max || (fp->rate_min == it->max && it->openmax)) {
		hwc_debug("   > check: rate_min %d > max %d\n", fp->rate_min, it->max);
		return 0;
	}
	if (fp->rate_max < it->min || (fp->rate_max == it->min && it->openmin)) {
		hwc_debug("   > check: rate_max %d < min %d\n", fp->rate_max, it->min);
		return 0;
	}
	/* check whether the period time is >= the data packet interval */
	if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH) {
		ptime = 125 * (1 << fp->datainterval);
		if (ptime > pt->max || (ptime == pt->max && pt->openmax)) {
			hwc_debug("   > check: ptime %u > max %u\n", ptime, pt->max);
			return 0;
		}
	}
	return 1;
}

/* hw_params rule: restrict the rate interval to the union of the rate
 * ranges of all still-valid audioformats */
static int hw_rule_rate(struct snd_pcm_hw_params *params,
			struct snd_pcm_hw_rule *rule)
{
	struct snd_usb_substream *subs = rule->private;
	struct list_head *p;
	struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	unsigned int rmin, rmax;
	int changed;

	hwc_debug("hw_rule_rate: (%d,%d)\n", it->min, it->max);
	changed = 0;
	rmin = rmax = 0;
	list_for_each(p, &subs->fmt_list) {
		struct audioformat *fp;
		fp = list_entry(p, struct audioformat, list);
		if (!hw_check_valid_format(subs, params, fp))
			continue;
		if (changed++) {
			if (rmin > fp->rate_min)
				rmin = fp->rate_min;
			if (rmax < fp->rate_max)
				rmax = fp->rate_max;
		} else {
			rmin = fp->rate_min;
			rmax = fp->rate_max;
		}
	}

	if (!changed) {
		hwc_debug("  --> get empty\n");
		it->empty = 1;
		return -EINVAL;
	}

	changed = 0;
	if (it->min < rmin) {
		it->min = rmin;
		it->openmin = 0;
		changed = 1;
	}
	if (it->max > rmax) {
		it->max = rmax;
		it->openmax = 0;
		changed = 1;
	}
	if (snd_interval_checkempty(it)) {
		it->empty = 1;
		return -EINVAL;
	}
	hwc_debug("  --> (%d, %d) (changed = %d)\n", it->min, it->max, changed);
	return changed;
}

/* hw_params rule: restrict the channels interval to the channel counts
 * of all still-valid audioformats */
static int hw_rule_channels(struct snd_pcm_hw_params *params,
			    struct snd_pcm_hw_rule *rule)
{
	struct snd_usb_substream *subs = rule->private;
	struct list_head *p;
	struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	unsigned int rmin, rmax;
	int changed;

	hwc_debug("hw_rule_channels: (%d,%d)\n", it->min, it->max);
	changed = 0;
	rmin = rmax = 0;
	list_for_each(p, &subs->fmt_list) {
		struct audioformat *fp;
		fp = list_entry(p, struct audioformat, list);
		if (!hw_check_valid_format(subs, params, fp))
			continue;
		if (changed++) {
			if (rmin > fp->channels)
				rmin = fp->channels;
			if (rmax < fp->channels)
				rmax = fp->channels;
		} else {
			rmin = fp->channels;
			rmax = fp->channels;
		}
	}

	if (!changed) {
		hwc_debug("  --> get empty\n");
		it->empty = 1;
		return -EINVAL;
	}

	changed = 0;
	if (it->min < rmin) {
		it->min = rmin;
		it->openmin = 0;
		changed = 1;
	}
	if (it->max > rmax) {
		it->max = rmax;
		it->openmax = 0;
		changed = 1;
	}
	if (snd_interval_checkempty(it)) {
		it->empty = 1;
		return -EINVAL;
	}
	hwc_debug("  --> (%d, %d) (changed = %d)\n", it->min, it->max, changed);
	return changed;
}

/* hw_params rule: mask out sample formats not provided by any
 * still-valid audioformat */
static int hw_rule_format(struct snd_pcm_hw_params *params,
			  struct snd_pcm_hw_rule *rule)
{
	struct snd_usb_substream *subs = rule->private;
	struct list_head *p;
	struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
	u64 fbits;
	u32 oldbits[2];
	int changed;

	hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
	fbits = 0;
	list_for_each(p, &subs->fmt_list) {
		struct audioformat *fp;
		fp = list_entry(p, struct audioformat, list);
		if (!hw_check_valid_format(subs, params, fp))
			continue;
		fbits |= (1ULL << fp->format);
	}

	oldbits[0] = fmt->bits[0];
	oldbits[1] = fmt->bits[1];
	fmt->bits[0] &= (u32)fbits;
	fmt->bits[1] &= (u32)(fbits >> 32);
	if (!fmt->bits[0] && !fmt->bits[1]) {
		hwc_debug("  --> get empty\n");
		return -EINVAL;
	}
	changed = (oldbits[0] != fmt->bits[0] || oldbits[1] != fmt->bits[1]);
	hwc_debug("  --> %x:%x (changed = %d)\n", fmt->bits[0], fmt->bits[1], changed);
	return changed;
}

/* hw_params rule: raise the period-time minimum to the smallest data
 * packet interval among the still-valid audioformats */
static int hw_rule_period_time(struct snd_pcm_hw_params *params,
			       struct snd_pcm_hw_rule *rule)
{
	struct snd_usb_substream *subs = rule->private;
	struct audioformat *fp;
	struct snd_interval *it;
	unsigned char min_datainterval;
	unsigned int pmin;
	int changed;

	it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME);
	hwc_debug("hw_rule_period_time: (%u,%u)\n", it->min, it->max);
	min_datainterval = 0xff;
	list_for_each_entry(fp, &subs->fmt_list, list) {
		if (!hw_check_valid_format(subs, params, fp))
			continue;
		min_datainterval = min(min_datainterval, fp->datainterval);
	}
	if (min_datainterval == 0xff) {
		hwc_debug("  --> get emtpy\n");
		it->empty = 1;
		return -EINVAL;
	}
	pmin = 125 * (1 << min_datainterval);
	changed = 0;
	if (it->min < pmin) {
		it->min = pmin;
		it->openmin = 0;
		changed = 1;
	}
	if (snd_interval_checkempty(it)) {
		it->empty = 1;
		return -EINVAL;
	}
	hwc_debug("  --> (%u,%u) (changed = %d)\n", it->min, it->max, changed);
	return changed;
}

/*
 * If the device supports unusual bit rates, does the request meet these?
*/ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs) { struct audioformat *fp; int count = 0, needs_knot = 0; int err; list_for_each_entry(fp, &subs->fmt_list, list) { if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) return 0; count += fp->nr_rates; if (fp->rates & SNDRV_PCM_RATE_KNOT) needs_knot = 1; } if (!needs_knot) return 0; subs->rate_list.count = count; subs->rate_list.list = kmalloc(sizeof(int) * count, GFP_KERNEL); subs->rate_list.mask = 0; count = 0; list_for_each_entry(fp, &subs->fmt_list, list) { int i; for (i = 0; i < fp->nr_rates; i++) subs->rate_list.list[count++] = fp->rate_table[i]; } err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &subs->rate_list); if (err < 0) return err; return 0; } /* * set up the runtime hardware information. */ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs) { struct list_head *p; unsigned int pt, ptmin; int param_period_time_if_needed; int err; runtime->hw.formats = subs->formats; runtime->hw.rate_min = 0x7fffffff; runtime->hw.rate_max = 0; runtime->hw.channels_min = 256; runtime->hw.channels_max = 0; runtime->hw.rates = 0; ptmin = UINT_MAX; /* check min/max rates and channels */ list_for_each(p, &subs->fmt_list) { struct audioformat *fp; fp = list_entry(p, struct audioformat, list); runtime->hw.rates |= fp->rates; if (runtime->hw.rate_min > fp->rate_min) runtime->hw.rate_min = fp->rate_min; if (runtime->hw.rate_max < fp->rate_max) runtime->hw.rate_max = fp->rate_max; if (runtime->hw.channels_min > fp->channels) runtime->hw.channels_min = fp->channels; if (runtime->hw.channels_max < fp->channels) runtime->hw.channels_max = fp->channels; if (fp->fmt_type == USB_FORMAT_TYPE_II && fp->frame_size > 0) { /* FIXME: there might be more than one audio formats... 
*/ runtime->hw.period_bytes_min = runtime->hw.period_bytes_max = fp->frame_size; } pt = 125 * (1 << fp->datainterval); ptmin = min(ptmin, pt); } param_period_time_if_needed = SNDRV_PCM_HW_PARAM_PERIOD_TIME; if (snd_usb_get_speed(subs->dev) != USB_SPEED_HIGH) /* full speed devices have fixed data packet interval */ ptmin = 1000; if (ptmin == 1000) /* if period time doesn't go below 1 ms, no rules needed */ param_period_time_if_needed = -1; snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, ptmin, UINT_MAX); if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, hw_rule_rate, subs, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_CHANNELS, param_period_time_if_needed, -1)) < 0) return err; if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, hw_rule_channels, subs, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_RATE, param_period_time_if_needed, -1)) < 0) return err; if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, hw_rule_format, subs, SNDRV_PCM_HW_PARAM_RATE, SNDRV_PCM_HW_PARAM_CHANNELS, param_period_time_if_needed, -1)) < 0) return err; if (param_period_time_if_needed >= 0) { err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME, hw_rule_period_time, subs, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_CHANNELS, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) return err; } if ((err = snd_usb_pcm_check_knot(runtime, subs)) < 0) return err; return 0; } static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction) { struct snd_usb_stream *as = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usb_substream *subs = &as->substream[direction]; subs->interface = -1; subs->format = 0; runtime->hw = snd_usb_hardware; runtime->private_data = subs; subs->pcm_substream = substream; return setup_hw_info(runtime, subs); } static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction) { struct snd_usb_stream *as = 
	snd_pcm_substream_chip(substream);
	struct snd_usb_substream *subs = &as->substream[direction];

	if (!as->chip->shutdown && subs->interface >= 0) {
		/* return the interface to altsetting 0, but only while the
		 * device is still present */
		usb_set_interface(subs->dev, subs->interface, 0);
		subs->interface = -1;
	}
	subs->pcm_substream = NULL;
	return 0;
}

/* PCM open/close wrappers binding the direction constant */

static int snd_usb_playback_open(struct snd_pcm_substream *substream)
{
	return snd_usb_pcm_open(substream, SNDRV_PCM_STREAM_PLAYBACK);
}

static int snd_usb_playback_close(struct snd_pcm_substream *substream)
{
	return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_PLAYBACK);
}

static int snd_usb_capture_open(struct snd_pcm_substream *substream)
{
	return snd_usb_pcm_open(substream, SNDRV_PCM_STREAM_CAPTURE);
}

static int snd_usb_capture_close(struct snd_pcm_substream *substream)
{
	return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_CAPTURE);
}

/* PCM operation tables; playback and capture differ only in open/close
 * wrappers and the trigger callback */
static struct snd_pcm_ops snd_usb_playback_ops = {
	.open =		snd_usb_playback_open,
	.close =	snd_usb_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_usb_hw_params,
	.hw_free =	snd_usb_hw_free,
	.prepare =	snd_usb_pcm_prepare,
	.trigger =	snd_usb_pcm_playback_trigger,
	.pointer =	snd_usb_pcm_pointer,
	.page =		snd_pcm_get_vmalloc_page,
};

static struct snd_pcm_ops snd_usb_capture_ops = {
	.open =		snd_usb_capture_open,
	.close =	snd_usb_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_usb_hw_params,
	.hw_free =	snd_usb_hw_free,
	.prepare =	snd_usb_pcm_prepare,
	.trigger =	snd_usb_pcm_capture_trigger,
	.pointer =	snd_usb_pcm_pointer,
	.page =		snd_pcm_get_vmalloc_page,
};

/*
 * helper functions
 */

/*
 * combine bytes and get an integer value
 */
/* Little-endian combine of 1-4 bytes into an unsigned int; returns 0 for
 * any other size (combine_word/triple/quad are project helpers) */
unsigned int snd_usb_combine_bytes(unsigned char *bytes, int size)
{
	switch (size) {
	case 1:  return *bytes;
	case 2:  return combine_word(bytes);
	case 3:  return combine_triple(bytes);
	case 4:  return combine_quad(bytes);
	default: return 0;
	}
}

/*
 * parse descriptor buffer and return the pointer starting the given
 * descriptor type.
*/ void *snd_usb_find_desc(void *descstart, int desclen, void *after, u8 dtype) { u8 *p, *end, *next; p = descstart; end = p + desclen; for (; p < end;) { if (p[0] < 2) return NULL; next = p + p[0]; if (next > end) return NULL; if (p[1] == dtype && (!after || (void *)p > after)) { return p; } p = next; } return NULL; } /* * find a class-specified interface descriptor with the given subtype. */ void *snd_usb_find_csint_desc(void *buffer, int buflen, void *after, u8 dsubtype) { unsigned char *p = after; while ((p = snd_usb_find_desc(buffer, buflen, p, USB_DT_CS_INTERFACE)) != NULL) { if (p[0] >= 3 && p[2] == dsubtype) return p; } return NULL; } /* * Wrapper for usb_control_msg(). * Allocates a temp buffer to prevent dmaing from/to the stack. */ int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { int err; void *buf = NULL; if (size > 0) { buf = kmemdup(data, size, GFP_KERNEL); if (!buf) return -ENOMEM; } err = usb_control_msg(dev, pipe, request, requesttype, value, index, buf, size, timeout); if (size > 0) { memcpy(data, buf, size); kfree(buf); } return err; } /* * entry point for linux usb interface */ static int usb_audio_probe(struct usb_interface *intf, const struct usb_device_id *id); static void usb_audio_disconnect(struct usb_interface *intf); #ifdef CONFIG_PM static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message); static int usb_audio_resume(struct usb_interface *intf); #else #define usb_audio_suspend NULL #define usb_audio_resume NULL #endif static struct usb_device_id usb_audio_ids [] = { #include "usbquirks.h" { .match_flags = (USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS), .bInterfaceClass = USB_CLASS_AUDIO, .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, usb_audio_ids); static struct usb_driver usb_audio_driver = { .name = "snd-usb-audio", .probe 
= usb_audio_probe, .disconnect = usb_audio_disconnect, .suspend = usb_audio_suspend, .resume = usb_audio_resume, .id_table = usb_audio_ids, }; #if defined(CONFIG_PROC_FS) && defined(CONFIG_SND_VERBOSE_PROCFS) /* * proc interface for list the supported pcm formats */ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct snd_info_buffer *buffer) { struct list_head *p; static char *sync_types[4] = { "NONE", "ASYNC", "ADAPTIVE", "SYNC" }; list_for_each(p, &subs->fmt_list) { struct audioformat *fp; fp = list_entry(p, struct audioformat, list); snd_iprintf(buffer, " Interface %d\n", fp->iface); snd_iprintf(buffer, " Altset %d\n", fp->altsetting); snd_iprintf(buffer, " Format: %s\n", snd_pcm_format_name(fp->format)); snd_iprintf(buffer, " Channels: %d\n", fp->channels); snd_iprintf(buffer, " Endpoint: %d %s (%s)\n", fp->endpoint & USB_ENDPOINT_NUMBER_MASK, fp->endpoint & USB_DIR_IN ? "IN" : "OUT", sync_types[(fp->ep_attr & EP_ATTR_MASK) >> 2]); if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) { snd_iprintf(buffer, " Rates: %d - %d (continuous)\n", fp->rate_min, fp->rate_max); } else { unsigned int i; snd_iprintf(buffer, " Rates: "); for (i = 0; i < fp->nr_rates; i++) { if (i > 0) snd_iprintf(buffer, ", "); snd_iprintf(buffer, "%d", fp->rate_table[i]); } snd_iprintf(buffer, "\n"); } if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH) snd_iprintf(buffer, " Data packet interval: %d us\n", 125 * (1 << fp->datainterval)); // snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize); // snd_iprintf(buffer, " EP Attribute = %#x\n", fp->attributes); } } static void proc_dump_substream_status(struct snd_usb_substream *subs, struct snd_info_buffer *buffer) { if (subs->running) { unsigned int i; snd_iprintf(buffer, " Status: Running\n"); snd_iprintf(buffer, " Interface = %d\n", subs->interface); snd_iprintf(buffer, " Altset = %d\n", subs->format); snd_iprintf(buffer, " URBs = %d [ ", subs->nurbs); for (i = 0; i < subs->nurbs; i++) snd_iprintf(buffer, "%d ", 
subs->dataurb[i].packets); snd_iprintf(buffer, "]\n"); snd_iprintf(buffer, " Packet Size = %d\n", subs->curpacksize); snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n", snd_usb_get_speed(subs->dev) == USB_SPEED_FULL ? get_full_speed_hz(subs->freqm) : get_high_speed_hz(subs->freqm), subs->freqm >> 16, subs->freqm & 0xffff); } else { snd_iprintf(buffer, " Status: Stop\n"); } } static void proc_pcm_format_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_stream *stream = entry->private_data; snd_iprintf(buffer, "%s : %s\n", stream->chip->card->longname, stream->pcm->name); if (stream->substream[SNDRV_PCM_STREAM_PLAYBACK].num_formats) { snd_iprintf(buffer, "\nPlayback:\n"); proc_dump_substream_status(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer); proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer); } if (stream->substream[SNDRV_PCM_STREAM_CAPTURE].num_formats) { snd_iprintf(buffer, "\nCapture:\n"); proc_dump_substream_status(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer); proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer); } } static void proc_pcm_format_add(struct snd_usb_stream *stream) { struct snd_info_entry *entry; char name[32]; struct snd_card *card = stream->chip->card; sprintf(name, "stream%d", stream->pcm_index); if (!snd_card_proc_new(card, name, &entry)) snd_info_set_text_ops(entry, stream, proc_pcm_format_read); } #else static inline void proc_pcm_format_add(struct snd_usb_stream *stream) { } #endif /* * initialize the substream instance. 
*/
/*
 * Initialize one substream (playback or capture) of a USB stream and
 * attach the first audio format entry to it.  Chooses the URB op table
 * by bus speed, with a high-speed override for some E-Mu devices.
 */
static void init_substream(struct snd_usb_stream *as, int stream, struct audioformat *fp)
{
	struct snd_usb_substream *subs = &as->substream[stream];

	INIT_LIST_HEAD(&subs->fmt_list);
	spin_lock_init(&subs->lock);

	subs->stream = as;
	subs->direction = stream;
	subs->dev = as->chip->dev;
	if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
		subs->ops = audio_urb_ops[stream];
	} else {
		subs->ops = audio_urb_ops_high_speed[stream];
		switch (as->chip->usb_id) {
		case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
		case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
		case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
			subs->ops.retire_sync = retire_playback_sync_urb_hs_emu;
			break;
		}
	}
	snd_pcm_set_ops(as->pcm, stream,
			stream == SNDRV_PCM_STREAM_PLAYBACK ?
			&snd_usb_playback_ops : &snd_usb_capture_ops);

	/* take ownership of fp: it is freed via free_substream() from now on */
	list_add_tail(&fp->list, &subs->fmt_list);
	subs->formats |= 1ULL << fp->format;
	subs->endpoint = fp->endpoint;
	subs->num_formats++;
	subs->fmt_type = fp->fmt_type;
}

/*
 * free a substream
 */
/* Releases every audioformat owned by the substream plus the knot rate
 * list; a substream with num_formats == 0 was never initialized */
static void free_substream(struct snd_usb_substream *subs)
{
	struct list_head *p, *n;

	if (!subs->num_formats)
		return; /* not initialized */
	list_for_each_safe(p, n, &subs->fmt_list) {
		struct audioformat *fp = list_entry(p, struct audioformat, list);
		kfree(fp->rate_table);
		kfree(fp);
	}
	kfree(subs->rate_list.list);
}

/*
 * free a usb stream instance
 */
static void snd_usb_audio_stream_free(struct snd_usb_stream *stream)
{
	free_substream(&stream->substream[0]);
	free_substream(&stream->substream[1]);
	list_del(&stream->list);
	kfree(stream);
}

/* snd_pcm private_free callback: tears down the owning stream when the
 * PCM device goes away */
static void snd_usb_audio_pcm_free(struct snd_pcm *pcm)
{
	struct snd_usb_stream *stream = pcm->private_data;
	if (stream) {
		stream->pcm = NULL;
		snd_usb_audio_stream_free(stream);
	}
}

/*
 * add this endpoint to the chip instance.
 * if a stream with the same endpoint already exists, append to it.
 * if not, create a new pcm stream.
*/ static int add_audio_endpoint(struct snd_usb_audio *chip, int stream, struct audioformat *fp) { struct list_head *p; struct snd_usb_stream *as; struct snd_usb_substream *subs; struct snd_pcm *pcm; int err; list_for_each(p, &chip->pcm_list) { as = list_entry(p, struct snd_usb_stream, list); if (as->fmt_type != fp->fmt_type) continue; subs = &as->substream[stream]; if (!subs->endpoint) continue; if (subs->endpoint == fp->endpoint) { list_add_tail(&fp->list, &subs->fmt_list); subs->num_formats++; subs->formats |= 1ULL << fp->format; return 0; } } /* look for an empty stream */ list_for_each(p, &chip->pcm_list) { as = list_entry(p, struct snd_usb_stream, list); if (as->fmt_type != fp->fmt_type) continue; subs = &as->substream[stream]; if (subs->endpoint) continue; err = snd_pcm_new_stream(as->pcm, stream, 1); if (err < 0) return err; init_substream(as, stream, fp); return 0; } /* create a new pcm */ as = kzalloc(sizeof(*as), GFP_KERNEL); if (!as) return -ENOMEM; as->pcm_index = chip->pcm_devs; as->chip = chip; as->fmt_type = fp->fmt_type; err = snd_pcm_new(chip->card, "USB Audio", chip->pcm_devs, stream == SNDRV_PCM_STREAM_PLAYBACK ? 1 : 0, stream == SNDRV_PCM_STREAM_PLAYBACK ? 
0 : 1, &pcm); if (err < 0) { kfree(as); return err; } as->pcm = pcm; pcm->private_data = as; pcm->private_free = snd_usb_audio_pcm_free; pcm->info_flags = 0; if (chip->pcm_devs > 0) sprintf(pcm->name, "USB Audio #%d", chip->pcm_devs); else strcpy(pcm->name, "USB Audio"); init_substream(as, stream, fp); list_add(&as->list, &chip->pcm_list); chip->pcm_devs++; proc_pcm_format_add(as); return 0; } /* * check if the device uses big-endian samples */ static int is_big_endian_format(struct snd_usb_audio *chip, struct audioformat *fp) { switch (chip->usb_id) { case USB_ID(0x0763, 0x2001): /* M-Audio Quattro: captured data only */ if (fp->endpoint & USB_DIR_IN) return 1; break; case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */ if (device_setup[chip->index] == 0x00 || fp->altsetting==1 || fp->altsetting==2 || fp->altsetting==3) return 1; } return 0; } /* * parse the audio format type I descriptor * and returns the corresponding pcm format * * @dev: usb device * @fp: audioformat record * @format: the format tag (wFormatTag) * @fmt: the format type descriptor */ static int parse_audio_format_i_type(struct snd_usb_audio *chip, struct audioformat *fp, int format, unsigned char *fmt) { int pcm_format; int sample_width, sample_bytes; /* FIXME: correct endianess and sign? */ pcm_format = -1; sample_width = fmt[6]; sample_bytes = fmt[5]; switch (format) { case 0: /* some devices don't define this correctly... 
*/ snd_printdd(KERN_INFO "%d:%u:%d : format type 0 is detected, processed as PCM\n", chip->dev->devnum, fp->iface, fp->altsetting); /* fall-through */ case USB_AUDIO_FORMAT_PCM: if (sample_width > sample_bytes * 8) { snd_printk(KERN_INFO "%d:%u:%d : sample bitwidth %d in over sample bytes %d\n", chip->dev->devnum, fp->iface, fp->altsetting, sample_width, sample_bytes); } /* check the format byte size */ switch (fmt[5]) { case 1: pcm_format = SNDRV_PCM_FORMAT_S8; break; case 2: if (is_big_endian_format(chip, fp)) pcm_format = SNDRV_PCM_FORMAT_S16_BE; /* grrr, big endian!! */ else pcm_format = SNDRV_PCM_FORMAT_S16_LE; break; case 3: if (is_big_endian_format(chip, fp)) pcm_format = SNDRV_PCM_FORMAT_S24_3BE; /* grrr, big endian!! */ else pcm_format = SNDRV_PCM_FORMAT_S24_3LE; break; case 4: pcm_format = SNDRV_PCM_FORMAT_S32_LE; break; default: snd_printk(KERN_INFO "%d:%u:%d : unsupported sample bitwidth %d in %d bytes\n", chip->dev->devnum, fp->iface, fp->altsetting, sample_width, sample_bytes); break; } break; case USB_AUDIO_FORMAT_PCM8: pcm_format = SNDRV_PCM_FORMAT_U8; /* Dallas DS4201 workaround: it advertises U8 format, but really supports S8. */ if (chip->usb_id == USB_ID(0x04fa, 0x4201)) pcm_format = SNDRV_PCM_FORMAT_S8; break; case USB_AUDIO_FORMAT_IEEE_FLOAT: pcm_format = SNDRV_PCM_FORMAT_FLOAT_LE; break; case USB_AUDIO_FORMAT_ALAW: pcm_format = SNDRV_PCM_FORMAT_A_LAW; break; case USB_AUDIO_FORMAT_MU_LAW: pcm_format = SNDRV_PCM_FORMAT_MU_LAW; break; default: snd_printk(KERN_INFO "%d:%u:%d : unsupported format type %d\n", chip->dev->devnum, fp->iface, fp->altsetting, format); break; } return pcm_format; } /* * parse the format descriptor and stores the possible sample rates * on the audioformat table. 
* * @dev: usb device * @fp: audioformat record * @fmt: the format descriptor * @offset: the start offset of descriptor pointing the rate type * (7 for type I and II, 8 for type II) */ static int parse_audio_format_rates(struct snd_usb_audio *chip, struct audioformat *fp, unsigned char *fmt, int offset) { int nr_rates = fmt[offset]; if (fmt[0] < offset + 1 + 3 * (nr_rates ? nr_rates : 2)) { snd_printk(KERN_ERR "%d:%u:%d : invalid FORMAT_TYPE desc\n", chip->dev->devnum, fp->iface, fp->altsetting); return -1; } if (nr_rates) { /* * build the rate table and bitmap flags */ int r, idx; fp->rate_table = kmalloc(sizeof(int) * nr_rates, GFP_KERNEL); if (fp->rate_table == NULL) { snd_printk(KERN_ERR "cannot malloc\n"); return -1; } fp->nr_rates = 0; fp->rate_min = fp->rate_max = 0; for (r = 0, idx = offset + 1; r < nr_rates; r++, idx += 3) { unsigned int rate = combine_triple(&fmt[idx]); if (!rate) continue; /* C-Media CM6501 mislabels its 96 kHz altsetting */ if (rate == 48000 && nr_rates == 1 && (chip->usb_id == USB_ID(0x0d8c, 0x0201) || chip->usb_id == USB_ID(0x0d8c, 0x0102)) && fp->altsetting == 5 && fp->maxpacksize == 392) rate = 96000; fp->rate_table[fp->nr_rates] = rate; if (!fp->rate_min || rate < fp->rate_min) fp->rate_min = rate; if (!fp->rate_max || rate > fp->rate_max) fp->rate_max = rate; fp->rates |= snd_pcm_rate_to_rate_bit(rate); fp->nr_rates++; } if (!fp->nr_rates) { hwc_debug("All rates were zero. Skipping format!\n"); return -1; } } else { /* continuous rates */ fp->rates = SNDRV_PCM_RATE_CONTINUOUS; fp->rate_min = combine_triple(&fmt[offset + 1]); fp->rate_max = combine_triple(&fmt[offset + 4]); } return 0; } /* * parse the format type I and III descriptors */ static int parse_audio_format_i(struct snd_usb_audio *chip, struct audioformat *fp, int format, unsigned char *fmt) { int pcm_format; if (fmt[3] == USB_FORMAT_TYPE_III) { /* FIXME: the format type is really IECxxx * but we give normal PCM format to get the existing * apps working... 
*/ switch (chip->usb_id) { case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */ if (device_setup[chip->index] == 0x00 && fp->altsetting == 6) pcm_format = SNDRV_PCM_FORMAT_S16_BE; else pcm_format = SNDRV_PCM_FORMAT_S16_LE; break; default: pcm_format = SNDRV_PCM_FORMAT_S16_LE; } } else { pcm_format = parse_audio_format_i_type(chip, fp, format, fmt); if (pcm_format < 0) return -1; } fp->format = pcm_format; fp->channels = fmt[4]; if (fp->channels < 1) { snd_printk(KERN_ERR "%d:%u:%d : invalid channels %d\n", chip->dev->devnum, fp->iface, fp->altsetting, fp->channels); return -1; } return parse_audio_format_rates(chip, fp, fmt, 7); } /* * prase the format type II descriptor */ static int parse_audio_format_ii(struct snd_usb_audio *chip, struct audioformat *fp, int format, unsigned char *fmt) { int brate, framesize; switch (format) { case USB_AUDIO_FORMAT_AC3: /* FIXME: there is no AC3 format defined yet */ // fp->format = SNDRV_PCM_FORMAT_AC3; fp->format = SNDRV_PCM_FORMAT_U8; /* temporarily hack to receive byte streams */ break; case USB_AUDIO_FORMAT_MPEG: fp->format = SNDRV_PCM_FORMAT_MPEG; break; default: snd_printd(KERN_INFO "%d:%u:%d : unknown format tag %#x is detected. processed as MPEG.\n", chip->dev->devnum, fp->iface, fp->altsetting, format); fp->format = SNDRV_PCM_FORMAT_MPEG; break; } fp->channels = 1; brate = combine_word(&fmt[4]); /* fmt[4,5] : wMaxBitRate (in kbps) */ framesize = combine_word(&fmt[6]); /* fmt[6,7]: wSamplesPerFrame */ snd_printd(KERN_INFO "found format II with max.bitrate = %d, frame size=%d\n", brate, framesize); fp->frame_size = framesize; return parse_audio_format_rates(chip, fp, fmt, 8); /* fmt[8..] 
sample rates */ } static int parse_audio_format(struct snd_usb_audio *chip, struct audioformat *fp, int format, unsigned char *fmt, int stream) { int err; switch (fmt[3]) { case USB_FORMAT_TYPE_I: case USB_FORMAT_TYPE_III: err = parse_audio_format_i(chip, fp, format, fmt); break; case USB_FORMAT_TYPE_II: err = parse_audio_format_ii(chip, fp, format, fmt); break; default: snd_printd(KERN_INFO "%d:%u:%d : format type %d is not supported yet\n", chip->dev->devnum, fp->iface, fp->altsetting, fmt[3]); return -1; } fp->fmt_type = fmt[3]; if (err < 0) return err; #if 1 /* FIXME: temporary hack for extigy/audigy 2 nx/zs */ /* extigy apparently supports sample rates other than 48k * but not in ordinary way. so we enable only 48k atm. */ if (chip->usb_id == USB_ID(0x041e, 0x3000) || chip->usb_id == USB_ID(0x041e, 0x3020) || chip->usb_id == USB_ID(0x041e, 0x3061)) { if (fmt[3] == USB_FORMAT_TYPE_I && fp->rates != SNDRV_PCM_RATE_48000 && fp->rates != SNDRV_PCM_RATE_96000) return -1; } #endif return 0; } static unsigned char parse_datainterval(struct snd_usb_audio *chip, struct usb_host_interface *alts) { if (snd_usb_get_speed(chip->dev) == USB_SPEED_HIGH && get_endpoint(alts, 0)->bInterval >= 1 && get_endpoint(alts, 0)->bInterval <= 4) return get_endpoint(alts, 0)->bInterval - 1; else return 0; } static int audiophile_skip_setting_quirk(struct snd_usb_audio *chip, int iface, int altno); static int parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) { struct usb_device *dev; struct usb_interface *iface; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; int i, altno, err, stream; int format; struct audioformat *fp = NULL; unsigned char *fmt, *csep; int num; dev = chip->dev; /* parse the interface's altsettings */ iface = usb_ifnum_to_if(dev, iface_no); num = iface->num_altsetting; /* * Dallas DS4201 workaround: It presents 5 altsettings, but the last * one misses syncpipe, and does not produce any sound. 
*/ if (chip->usb_id == USB_ID(0x04fa, 0x4201)) num = 4; for (i = 0; i < num; i++) { alts = &iface->altsetting[i]; altsd = get_iface_desc(alts); /* skip invalid one */ if ((altsd->bInterfaceClass != USB_CLASS_AUDIO && altsd->bInterfaceClass != USB_CLASS_VENDOR_SPEC) || (altsd->bInterfaceSubClass != USB_SUBCLASS_AUDIO_STREAMING && altsd->bInterfaceSubClass != USB_SUBCLASS_VENDOR_SPEC) || altsd->bNumEndpoints < 1 || le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) == 0) continue; /* must be isochronous */ if ((get_endpoint(alts, 0)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC) continue; /* check direction */ stream = (get_endpoint(alts, 0)->bEndpointAddress & USB_DIR_IN) ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; altno = altsd->bAlternateSetting; /* audiophile usb: skip altsets incompatible with device_setup */ if (chip->usb_id == USB_ID(0x0763, 0x2003) && audiophile_skip_setting_quirk(chip, iface_no, altno)) continue; /* get audio formats */ fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, AS_GENERAL); if (!fmt) { snd_printk(KERN_ERR "%d:%u:%d : AS_GENERAL descriptor not found\n", dev->devnum, iface_no, altno); continue; } if (fmt[0] < 7) { snd_printk(KERN_ERR "%d:%u:%d : invalid AS_GENERAL desc\n", dev->devnum, iface_no, altno); continue; } format = (fmt[6] << 8) | fmt[5]; /* remember the format value */ /* get format type */ fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, FORMAT_TYPE); if (!fmt) { snd_printk(KERN_ERR "%d:%u:%d : no FORMAT_TYPE desc\n", dev->devnum, iface_no, altno); continue; } if (fmt[0] < 8) { snd_printk(KERN_ERR "%d:%u:%d : invalid FORMAT_TYPE desc\n", dev->devnum, iface_no, altno); continue; } /* * Blue Microphones workaround: The last altsetting is identical * with the previous one, except for a larger packet size, but * is actually a mislabeled two-channel setting; ignore it. 
*/ if (fmt[4] == 1 && fmt[5] == 2 && altno == 2 && num == 3 && fp && fp->altsetting == 1 && fp->channels == 1 && fp->format == SNDRV_PCM_FORMAT_S16_LE && le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) == fp->maxpacksize * 2) continue; csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT); /* Creamware Noah has this descriptor after the 2nd endpoint */ if (!csep && altsd->bNumEndpoints >= 2) csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT); if (!csep || csep[0] < 7 || csep[2] != EP_GENERAL) { snd_printk(KERN_WARNING "%d:%u:%d : no or invalid" " class specific endpoint descriptor\n", dev->devnum, iface_no, altno); csep = NULL; } fp = kzalloc(sizeof(*fp), GFP_KERNEL); if (! fp) { snd_printk(KERN_ERR "cannot malloc\n"); return -ENOMEM; } fp->iface = iface_no; fp->altsetting = altno; fp->altset_idx = i; fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress; fp->ep_attr = get_endpoint(alts, 0)->bmAttributes; fp->datainterval = parse_datainterval(chip, alts); fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); if (snd_usb_get_speed(dev) == USB_SPEED_HIGH) fp->maxpacksize = (((fp->maxpacksize >> 11) & 3) + 1) * (fp->maxpacksize & 0x7ff); fp->attributes = csep ? csep[3] : 0; /* some quirks for attributes here */ switch (chip->usb_id) { case USB_ID(0x0a92, 0x0053): /* AudioTrak Optoplay */ /* Optoplay sets the sample rate attribute although * it seems not supporting it in fact. 
*/ fp->attributes &= ~EP_CS_ATTR_SAMPLE_RATE; break; case USB_ID(0x041e, 0x3020): /* Creative SB Audigy 2 NX */ case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */ /* doesn't set the sample rate attribute, but supports it */ fp->attributes |= EP_CS_ATTR_SAMPLE_RATE; break; case USB_ID(0x047f, 0x0ca1): /* plantronics headset */ case USB_ID(0x077d, 0x07af): /* Griffin iMic (note that there is an older model 77d:223) */ /* * plantronics headset and Griffin iMic have set adaptive-in * although it's really not... */ fp->ep_attr &= ~EP_ATTR_MASK; if (stream == SNDRV_PCM_STREAM_PLAYBACK) fp->ep_attr |= EP_ATTR_ADAPTIVE; else fp->ep_attr |= EP_ATTR_SYNC; break; } /* ok, let's parse further... */ if (parse_audio_format(chip, fp, format, fmt, stream) < 0) { kfree(fp->rate_table); kfree(fp); continue; } snd_printdd(KERN_INFO "%d:%u:%d: add audio endpoint %#x\n", dev->devnum, iface_no, altno, fp->endpoint); err = add_audio_endpoint(chip, stream, fp); if (err < 0) { kfree(fp->rate_table); kfree(fp); return err; } /* try to set the interface... 
*/ usb_set_interface(chip->dev, iface_no, altno); init_usb_pitch(chip->dev, iface_no, alts, fp); init_usb_sample_rate(chip->dev, iface_no, alts, fp, fp->rate_max); } return 0; } /* * disconnect streams * called from snd_usb_audio_disconnect() */ static void snd_usb_stream_disconnect(struct list_head *head) { int idx; struct snd_usb_stream *as; struct snd_usb_substream *subs; as = list_entry(head, struct snd_usb_stream, list); for (idx = 0; idx < 2; idx++) { subs = &as->substream[idx]; if (!subs->num_formats) return; release_substream_urbs(subs, 1); subs->interface = -1; } } /* * parse audio control descriptor and create pcm/midi streams */ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) { struct usb_device *dev = chip->dev; struct usb_host_interface *host_iface; struct usb_interface *iface; unsigned char *p1; int i, j; /* find audiocontrol interface */ host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0]; if (!(p1 = snd_usb_find_csint_desc(host_iface->extra, host_iface->extralen, NULL, HEADER))) { snd_printk(KERN_ERR "cannot find HEADER\n"); return -EINVAL; } if (! 
p1[7] || p1[0] < 8 + p1[7]) { snd_printk(KERN_ERR "invalid HEADER\n"); return -EINVAL; } /* * parse all USB audio streaming interfaces */ for (i = 0; i < p1[7]; i++) { struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; j = p1[8 + i]; iface = usb_ifnum_to_if(dev, j); if (!iface) { snd_printk(KERN_ERR "%d:%u:%d : does not exist\n", dev->devnum, ctrlif, j); continue; } if (usb_interface_claimed(iface)) { snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n", dev->devnum, ctrlif, j); continue; } alts = &iface->altsetting[0]; altsd = get_iface_desc(alts); if ((altsd->bInterfaceClass == USB_CLASS_AUDIO || altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) && altsd->bInterfaceSubClass == USB_SUBCLASS_MIDI_STREAMING) { if (snd_usb_create_midi_interface(chip, iface, NULL) < 0) { snd_printk(KERN_ERR "%d:%u:%d: cannot create sequencer device\n", dev->devnum, ctrlif, j); continue; } usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); continue; } if ((altsd->bInterfaceClass != USB_CLASS_AUDIO && altsd->bInterfaceClass != USB_CLASS_VENDOR_SPEC) || altsd->bInterfaceSubClass != USB_SUBCLASS_AUDIO_STREAMING) { snd_printdd(KERN_ERR "%d:%u:%d: skipping non-supported interface %d\n", dev->devnum, ctrlif, j, altsd->bInterfaceClass); /* skip non-supported classes */ continue; } if (snd_usb_get_speed(dev) == USB_SPEED_LOW) { snd_printk(KERN_ERR "low speed audio streaming not supported\n"); continue; } if (! 
parse_audio_endpoints(chip, j)) { usb_set_interface(dev, j, 0); /* reset the current interface */ usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); } } return 0; } /* * create a stream for an endpoint/altsetting without proper descriptors */ static int create_fixed_stream_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, const struct snd_usb_audio_quirk *quirk) { struct audioformat *fp; struct usb_host_interface *alts; int stream, err; unsigned *rate_table = NULL; fp = kmemdup(quirk->data, sizeof(*fp), GFP_KERNEL); if (! fp) { snd_printk(KERN_ERR "cannot memdup\n"); return -ENOMEM; } if (fp->nr_rates > 0) { rate_table = kmalloc(sizeof(int) * fp->nr_rates, GFP_KERNEL); if (!rate_table) { kfree(fp); return -ENOMEM; } memcpy(rate_table, fp->rate_table, sizeof(int) * fp->nr_rates); fp->rate_table = rate_table; } stream = (fp->endpoint & USB_DIR_IN) ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; err = add_audio_endpoint(chip, stream, fp); if (err < 0) { kfree(fp); kfree(rate_table); return err; } if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber || fp->altset_idx >= iface->num_altsetting) { kfree(fp); kfree(rate_table); return -EINVAL; } alts = &iface->altsetting[fp->altset_idx]; fp->datainterval = parse_datainterval(chip, alts); fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); usb_set_interface(chip->dev, fp->iface, 0); init_usb_pitch(chip->dev, fp->iface, alts, fp); init_usb_sample_rate(chip->dev, fp->iface, alts, fp, fp->rate_max); return 0; } /* * create a stream for an interface with proper descriptors */ static int create_standard_audio_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, const struct snd_usb_audio_quirk *quirk) { struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; int err; alts = &iface->altsetting[0]; altsd = get_iface_desc(alts); err = parse_audio_endpoints(chip, altsd->bInterfaceNumber); if (err < 0) { snd_printk(KERN_ERR "cannot 
setup if %d: error %d\n", altsd->bInterfaceNumber, err); return err; } /* reset the current interface */ usb_set_interface(chip->dev, altsd->bInterfaceNumber, 0); return 0; } /* * Create a stream for an Edirol UA-700/UA-25/UA-4FX interface. * The only way to detect the sample rate is by looking at wMaxPacketSize. */ static int create_uaxx_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, const struct snd_usb_audio_quirk *quirk) { static const struct audioformat ua_format = { .format = SNDRV_PCM_FORMAT_S24_3LE, .channels = 2, .fmt_type = USB_FORMAT_TYPE_I, .altsetting = 1, .altset_idx = 1, .rates = SNDRV_PCM_RATE_CONTINUOUS, }; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; struct audioformat *fp; int stream, err; /* both PCM and MIDI interfaces have 2 or more altsettings */ if (iface->num_altsetting < 2) return -ENXIO; alts = &iface->altsetting[1]; altsd = get_iface_desc(alts); if (altsd->bNumEndpoints == 2) { static const struct snd_usb_midi_endpoint_info ua700_ep = { .out_cables = 0x0003, .in_cables = 0x0003 }; static const struct snd_usb_audio_quirk ua700_quirk = { .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &ua700_ep }; static const struct snd_usb_midi_endpoint_info uaxx_ep = { .out_cables = 0x0001, .in_cables = 0x0001 }; static const struct snd_usb_audio_quirk uaxx_quirk = { .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &uaxx_ep }; if (chip->usb_id == USB_ID(0x0582, 0x002b)) return snd_usb_create_midi_interface(chip, iface, &ua700_quirk); else return snd_usb_create_midi_interface(chip, iface, &uaxx_quirk); } if (altsd->bNumEndpoints != 1) return -ENXIO; fp = kmalloc(sizeof(*fp), GFP_KERNEL); if (!fp) return -ENOMEM; memcpy(fp, &ua_format, sizeof(*fp)); fp->iface = altsd->bInterfaceNumber; fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress; fp->ep_attr = get_endpoint(alts, 0)->bmAttributes; fp->datainterval = 0; fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); switch (fp->maxpacksize) { case 0x120: 
fp->rate_max = fp->rate_min = 44100; break; case 0x138: case 0x140: fp->rate_max = fp->rate_min = 48000; break; case 0x258: case 0x260: fp->rate_max = fp->rate_min = 96000; break; default: snd_printk(KERN_ERR "unknown sample rate\n"); kfree(fp); return -ENXIO; } stream = (fp->endpoint & USB_DIR_IN) ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; err = add_audio_endpoint(chip, stream, fp); if (err < 0) { kfree(fp); return err; } usb_set_interface(chip->dev, fp->iface, 0); return 0; } /* * Create a stream for an Edirol UA-1000 interface. */ static int create_ua1000_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, const struct snd_usb_audio_quirk *quirk) { static const struct audioformat ua1000_format = { .format = SNDRV_PCM_FORMAT_S32_LE, .fmt_type = USB_FORMAT_TYPE_I, .altsetting = 1, .altset_idx = 1, .attributes = 0, .rates = SNDRV_PCM_RATE_CONTINUOUS, }; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; struct audioformat *fp; int stream, err; if (iface->num_altsetting != 2) return -ENXIO; alts = &iface->altsetting[1]; altsd = get_iface_desc(alts); if (alts->extralen != 11 || alts->extra[1] != USB_DT_CS_INTERFACE || altsd->bNumEndpoints != 1) return -ENXIO; fp = kmemdup(&ua1000_format, sizeof(*fp), GFP_KERNEL); if (!fp) return -ENOMEM; fp->channels = alts->extra[4]; fp->iface = altsd->bInterfaceNumber; fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress; fp->ep_attr = get_endpoint(alts, 0)->bmAttributes; fp->datainterval = parse_datainterval(chip, alts); fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); fp->rate_max = fp->rate_min = combine_triple(&alts->extra[8]); stream = (fp->endpoint & USB_DIR_IN) ? 
SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; err = add_audio_endpoint(chip, stream, fp); if (err < 0) { kfree(fp); return err; } /* FIXME: playback must be synchronized to capture */ usb_set_interface(chip->dev, fp->iface, 0); return 0; } /* * Create a stream for an Edirol UA-101 interface. * Copy, paste and modify from Edirol UA-1000 */ static int create_ua101_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, const struct snd_usb_audio_quirk *quirk) { static const struct audioformat ua101_format = { .format = SNDRV_PCM_FORMAT_S32_LE, .fmt_type = USB_FORMAT_TYPE_I, .altsetting = 1, .altset_idx = 1, .attributes = 0, .rates = SNDRV_PCM_RATE_CONTINUOUS, }; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; struct audioformat *fp; int stream, err; if (iface->num_altsetting != 2) return -ENXIO; alts = &iface->altsetting[1]; altsd = get_iface_desc(alts); if (alts->extralen != 18 || alts->extra[1] != USB_DT_CS_INTERFACE || altsd->bNumEndpoints != 1) return -ENXIO; fp = kmemdup(&ua101_format, sizeof(*fp), GFP_KERNEL); if (!fp) return -ENOMEM; fp->channels = alts->extra[11]; fp->iface = altsd->bInterfaceNumber; fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress; fp->ep_attr = get_endpoint(alts, 0)->bmAttributes; fp->datainterval = parse_datainterval(chip, alts); fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); fp->rate_max = fp->rate_min = combine_triple(&alts->extra[15]); stream = (fp->endpoint & USB_DIR_IN) ? 
SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; err = add_audio_endpoint(chip, stream, fp); if (err < 0) { kfree(fp); return err; } /* FIXME: playback must be synchronized to capture */ usb_set_interface(chip->dev, fp->iface, 0); return 0; } static int snd_usb_create_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, const struct snd_usb_audio_quirk *quirk); /* * handle the quirks for the contained interfaces */ static int create_composite_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, const struct snd_usb_audio_quirk *quirk) { int probed_ifnum = get_iface_desc(iface->altsetting)->bInterfaceNumber; int err; for (quirk = quirk->data; quirk->ifnum >= 0; ++quirk) { iface = usb_ifnum_to_if(chip->dev, quirk->ifnum); if (!iface) continue; if (quirk->ifnum != probed_ifnum && usb_interface_claimed(iface)) continue; err = snd_usb_create_quirk(chip, iface, quirk); if (err < 0) return err; if (quirk->ifnum != probed_ifnum) usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); } return 0; } static int ignore_interface_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, const struct snd_usb_audio_quirk *quirk) { return 0; } /* * boot quirks */ #define EXTIGY_FIRMWARE_SIZE_OLD 794 #define EXTIGY_FIRMWARE_SIZE_NEW 483 static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interface *intf) { struct usb_host_config *config = dev->actconfig; int err; if (le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_OLD || le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_NEW) { snd_printdd("sending Extigy boot sequence...\n"); /* Send message to force it to reconnect with full interface. 
*/ err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev,0), 0x10, 0x43, 0x0001, 0x000a, NULL, 0, 1000); if (err < 0) snd_printdd("error sending boot message: %d\n", err); err = usb_get_descriptor(dev, USB_DT_DEVICE, 0, &dev->descriptor, sizeof(dev->descriptor)); config = dev->actconfig; if (err < 0) snd_printdd("error usb_get_descriptor: %d\n", err); err = usb_reset_configuration(dev); if (err < 0) snd_printdd("error usb_reset_configuration: %d\n", err); snd_printdd("extigy_boot: new boot length = %d\n", le16_to_cpu(get_cfg_desc(config)->wTotalLength)); return -ENODEV; /* quit this anyway */ } return 0; } static int snd_usb_audigy2nx_boot_quirk(struct usb_device *dev) { u8 buf = 1; snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), 0x2a, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, 0, 0, &buf, 1, 1000); if (buf == 0) { snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0x29, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, 1, 2000, NULL, 0, 1000); return -ENODEV; } return 0; } /* * C-Media CM106/CM106+ have four 16-bit internal registers that are nicely * documented in the device's data sheet. */ static int snd_usb_cm106_write_int_reg(struct usb_device *dev, int reg, u16 value) { u8 buf[4]; buf[0] = 0x20; buf[1] = value & 0xff; buf[2] = (value >> 8) & 0xff; buf[3] = reg; return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), USB_REQ_SET_CONFIGURATION, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT, 0, 0, &buf, 4, 1000); } static int snd_usb_cm106_boot_quirk(struct usb_device *dev) { /* * Enable line-out driver mode, set headphone source to front * channels, enable stereo mic. */ return snd_usb_cm106_write_int_reg(dev, 2, 0x8004); } /* * C-Media CM6206 is based on CM106 with two additional * registers that are not documented in the data sheet. * Values here are chosen based on sniffing USB traffic * under Windows. 
*/ static int snd_usb_cm6206_boot_quirk(struct usb_device *dev) { int err, reg; int val[] = {0x200c, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000}; for (reg = 0; reg < ARRAY_SIZE(val); reg++) { err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]); if (err < 0) return err; } return err; } /* * Setup quirks */ #define AUDIOPHILE_SET 0x01 /* if set, parse device_setup */ #define AUDIOPHILE_SET_DTS 0x02 /* if set, enable DTS Digital Output */ #define AUDIOPHILE_SET_96K 0x04 /* 48-96KHz rate if set, 8-48KHz otherwise */ #define AUDIOPHILE_SET_24B 0x08 /* 24bits sample if set, 16bits otherwise */ #define AUDIOPHILE_SET_DI 0x10 /* if set, enable Digital Input */ #define AUDIOPHILE_SET_MASK 0x1F /* bit mask for setup value */ #define AUDIOPHILE_SET_24B_48K_DI 0x19 /* value for 24bits+48KHz+Digital Input */ #define AUDIOPHILE_SET_24B_48K_NOTDI 0x09 /* value for 24bits+48KHz+No Digital Input */ #define AUDIOPHILE_SET_16B_48K_DI 0x11 /* value for 16bits+48KHz+Digital Input */ #define AUDIOPHILE_SET_16B_48K_NOTDI 0x01 /* value for 16bits+48KHz+No Digital Input */ static int audiophile_skip_setting_quirk(struct snd_usb_audio *chip, int iface, int altno) { /* Reset ALL ifaces to 0 altsetting. * Call it for every possible altsetting of every interface. 
*/ usb_set_interface(chip->dev, iface, 0); if (device_setup[chip->index] & AUDIOPHILE_SET) { if ((device_setup[chip->index] & AUDIOPHILE_SET_DTS) && altno != 6) return 1; /* skip this altsetting */ if ((device_setup[chip->index] & AUDIOPHILE_SET_96K) && altno != 1) return 1; /* skip this altsetting */ if ((device_setup[chip->index] & AUDIOPHILE_SET_MASK) == AUDIOPHILE_SET_24B_48K_DI && altno != 2) return 1; /* skip this altsetting */ if ((device_setup[chip->index] & AUDIOPHILE_SET_MASK) == AUDIOPHILE_SET_24B_48K_NOTDI && altno != 3) return 1; /* skip this altsetting */ if ((device_setup[chip->index] & AUDIOPHILE_SET_MASK) == AUDIOPHILE_SET_16B_48K_DI && altno != 4) return 1; /* skip this altsetting */ if ((device_setup[chip->index] & AUDIOPHILE_SET_MASK) == AUDIOPHILE_SET_16B_48K_NOTDI && altno != 5) return 1; /* skip this altsetting */ } return 0; /* keep this altsetting */ } /* * audio-interface quirks * * returns zero if no standard audio/MIDI parsing is needed. * returns a postive value if standard audio/midi interfaces are parsed * after this. * returns a negative value at error. 
*/ static int snd_usb_create_quirk(struct snd_usb_audio *chip, struct usb_interface *iface, const struct snd_usb_audio_quirk *quirk) { typedef int (*quirk_func_t)(struct snd_usb_audio *, struct usb_interface *, const struct snd_usb_audio_quirk *); static const quirk_func_t quirk_funcs[] = { [QUIRK_IGNORE_INTERFACE] = ignore_interface_quirk, [QUIRK_COMPOSITE] = create_composite_quirk, [QUIRK_MIDI_STANDARD_INTERFACE] = snd_usb_create_midi_interface, [QUIRK_MIDI_FIXED_ENDPOINT] = snd_usb_create_midi_interface, [QUIRK_MIDI_YAMAHA] = snd_usb_create_midi_interface, [QUIRK_MIDI_MIDIMAN] = snd_usb_create_midi_interface, [QUIRK_MIDI_NOVATION] = snd_usb_create_midi_interface, [QUIRK_MIDI_FASTLANE] = snd_usb_create_midi_interface, [QUIRK_MIDI_EMAGIC] = snd_usb_create_midi_interface, [QUIRK_MIDI_CME] = snd_usb_create_midi_interface, [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk, [QUIRK_AUDIO_EDIROL_UA1000] = create_ua1000_quirk, [QUIRK_AUDIO_EDIROL_UA101] = create_ua101_quirk, [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk }; if (quirk->type < QUIRK_TYPE_COUNT) { return quirk_funcs[quirk->type](chip, iface, quirk); } else { snd_printd(KERN_ERR "invalid quirk type %d\n", quirk->type); return -ENXIO; } } /* * common proc files to show the usb device info */ static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_audio *chip = entry->private_data; if (!chip->shutdown) snd_iprintf(buffer, "%03d/%03d\n", chip->dev->bus->busnum, chip->dev->devnum); } static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_audio *chip = entry->private_data; if (!chip->shutdown) snd_iprintf(buffer, "%04x:%04x\n", USB_ID_VENDOR(chip->usb_id), USB_ID_PRODUCT(chip->usb_id)); } static void snd_usb_audio_create_proc(struct snd_usb_audio *chip) { struct snd_info_entry *entry; if (!snd_card_proc_new(chip->card, "usbbus", 
&entry)) snd_info_set_text_ops(entry, chip, proc_audio_usbbus_read); if (!snd_card_proc_new(chip->card, "usbid", &entry)) snd_info_set_text_ops(entry, chip, proc_audio_usbid_read); } /* * free the chip instance * * here we have to do not much, since pcm and controls are already freed * */ static int snd_usb_audio_free(struct snd_usb_audio *chip) { kfree(chip); return 0; } static int snd_usb_audio_dev_free(struct snd_device *device) { struct snd_usb_audio *chip = device->device_data; return snd_usb_audio_free(chip); } /* * create a chip instance and set its names. */ static int snd_usb_audio_create(struct usb_device *dev, int idx, const struct snd_usb_audio_quirk *quirk, struct snd_usb_audio **rchip) { struct snd_card *card; struct snd_usb_audio *chip; int err, len; char component[14]; static struct snd_device_ops ops = { .dev_free = snd_usb_audio_dev_free, }; *rchip = NULL; if (snd_usb_get_speed(dev) != USB_SPEED_LOW && snd_usb_get_speed(dev) != USB_SPEED_FULL && snd_usb_get_speed(dev) != USB_SPEED_HIGH) { snd_printk(KERN_ERR "unknown device speed %d\n", snd_usb_get_speed(dev)); return -ENXIO; } err = snd_card_create(index[idx], id[idx], THIS_MODULE, 0, &card); if (err < 0) { snd_printk(KERN_ERR "cannot create card instance %d\n", idx); return err; } chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (! 
chip) { snd_card_free(card); return -ENOMEM; } chip->index = idx; chip->dev = dev; chip->card = card; chip->usb_id = USB_ID(le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); INIT_LIST_HEAD(&chip->pcm_list); INIT_LIST_HEAD(&chip->midi_list); INIT_LIST_HEAD(&chip->mixer_list); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_usb_audio_free(chip); snd_card_free(card); return err; } strcpy(card->driver, "USB-Audio"); sprintf(component, "USB%04x:%04x", USB_ID_VENDOR(chip->usb_id), USB_ID_PRODUCT(chip->usb_id)); snd_component_add(card, component); /* retrieve the device string as shortname */ if (quirk && quirk->product_name) { strlcpy(card->shortname, quirk->product_name, sizeof(card->shortname)); } else { if (!dev->descriptor.iProduct || usb_string(dev, dev->descriptor.iProduct, card->shortname, sizeof(card->shortname)) <= 0) { /* no name available from anywhere, so use ID */ sprintf(card->shortname, "USB Device %#04x:%#04x", USB_ID_VENDOR(chip->usb_id), USB_ID_PRODUCT(chip->usb_id)); } } /* retrieve the vendor and device strings as longname */ if (quirk && quirk->vendor_name) { len = strlcpy(card->longname, quirk->vendor_name, sizeof(card->longname)); } else { if (dev->descriptor.iManufacturer) len = usb_string(dev, dev->descriptor.iManufacturer, card->longname, sizeof(card->longname)); else len = 0; /* we don't really care if there isn't any vendor string */ } if (len > 0) strlcat(card->longname, " ", sizeof(card->longname)); strlcat(card->longname, card->shortname, sizeof(card->longname)); len = strlcat(card->longname, " at ", sizeof(card->longname)); if (len < sizeof(card->longname)) usb_make_path(dev, card->longname + len, sizeof(card->longname) - len); strlcat(card->longname, snd_usb_get_speed(dev) == USB_SPEED_LOW ? ", low speed" : snd_usb_get_speed(dev) == USB_SPEED_FULL ? 
", full speed" : ", high speed", sizeof(card->longname)); snd_usb_audio_create_proc(chip); *rchip = chip; return 0; } /* * probe the active usb device * * note that this can be called multiple times per a device, when it * includes multiple audio control interfaces. * * thus we check the usb device pointer and creates the card instance * only at the first time. the successive calls of this function will * append the pcm interface to the corresponding card. */ static void *snd_usb_audio_probe(struct usb_device *dev, struct usb_interface *intf, const struct usb_device_id *usb_id) { const struct snd_usb_audio_quirk *quirk = (const struct snd_usb_audio_quirk *)usb_id->driver_info; int i, err; struct snd_usb_audio *chip; struct usb_host_interface *alts; int ifnum; u32 id; alts = &intf->altsetting[0]; ifnum = get_iface_desc(alts)->bInterfaceNumber; id = USB_ID(le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); if (quirk && quirk->ifnum >= 0 && ifnum != quirk->ifnum) goto __err_val; /* SB Extigy needs special boot-up sequence */ /* if more models come, this will go to the quirk list. */ if (id == USB_ID(0x041e, 0x3000)) { if (snd_usb_extigy_boot_quirk(dev, intf) < 0) goto __err_val; } /* SB Audigy 2 NX needs its own boot-up magic, too */ if (id == USB_ID(0x041e, 0x3020)) { if (snd_usb_audigy2nx_boot_quirk(dev) < 0) goto __err_val; } /* C-Media CM106 / Turtle Beach Audio Advantage Roadie */ if (id == USB_ID(0x10f5, 0x0200)) { if (snd_usb_cm106_boot_quirk(dev) < 0) goto __err_val; } /* C-Media CM6206 / CM106-Like Sound Device */ if (id == USB_ID(0x0d8c, 0x0102)) { if (snd_usb_cm6206_boot_quirk(dev) < 0) goto __err_val; } /* * found a config. 
now register to ALSA */ /* check whether it's already registered */ chip = NULL; mutex_lock(&register_mutex); for (i = 0; i < SNDRV_CARDS; i++) { if (usb_chip[i] && usb_chip[i]->dev == dev) { if (usb_chip[i]->shutdown) { snd_printk(KERN_ERR "USB device is in the shutdown state, cannot create a card instance\n"); goto __error; } chip = usb_chip[i]; break; } } if (! chip) { /* it's a fresh one. * now look for an empty slot and create a new card instance */ for (i = 0; i < SNDRV_CARDS; i++) if (enable[i] && ! usb_chip[i] && (vid[i] == -1 || vid[i] == USB_ID_VENDOR(id)) && (pid[i] == -1 || pid[i] == USB_ID_PRODUCT(id))) { if (snd_usb_audio_create(dev, i, quirk, &chip) < 0) { goto __error; } snd_card_set_dev(chip->card, &intf->dev); break; } if (!chip) { printk(KERN_ERR "no available usb audio device\n"); goto __error; } } err = 1; /* continue */ if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) { /* need some special handlings */ if ((err = snd_usb_create_quirk(chip, intf, quirk)) < 0) goto __error; } if (err > 0) { /* create normal USB audio interfaces */ if (snd_usb_create_streams(chip, ifnum) < 0 || snd_usb_create_mixer(chip, ifnum, ignore_ctl_error) < 0) { goto __error; } } /* we are allowed to call snd_card_register() many times */ if (snd_card_register(chip->card) < 0) { goto __error; } usb_chip[chip->index] = chip; chip->num_interfaces++; mutex_unlock(&register_mutex); return chip; __error: if (chip && !chip->num_interfaces) snd_card_free(chip->card); mutex_unlock(&register_mutex); __err_val: return NULL; } /* * we need to take care of counter, since disconnection can be called also * many times as well as usb_audio_probe(). 
*/ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) { struct snd_usb_audio *chip; struct snd_card *card; struct list_head *p; if (ptr == (void *)-1L) return; chip = ptr; card = chip->card; mutex_lock(&register_mutex); chip->shutdown = 1; chip->num_interfaces--; if (chip->num_interfaces <= 0) { snd_card_disconnect(card); /* release the pcm resources */ list_for_each(p, &chip->pcm_list) { snd_usb_stream_disconnect(p); } /* release the midi resources */ list_for_each(p, &chip->midi_list) { snd_usbmidi_disconnect(p); } /* release mixer resources */ list_for_each(p, &chip->mixer_list) { snd_usb_mixer_disconnect(p); } usb_chip[chip->index] = NULL; mutex_unlock(&register_mutex); snd_card_free_when_closed(card); } else { mutex_unlock(&register_mutex); } } /* * new 2.5 USB kernel API */ static int usb_audio_probe(struct usb_interface *intf, const struct usb_device_id *id) { void *chip; chip = snd_usb_audio_probe(interface_to_usbdev(intf), intf, id); if (chip) { usb_set_intfdata(intf, chip); return 0; } else return -EIO; } static void usb_audio_disconnect(struct usb_interface *intf) { snd_usb_audio_disconnect(interface_to_usbdev(intf), usb_get_intfdata(intf)); } #ifdef CONFIG_PM static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) { struct snd_usb_audio *chip = usb_get_intfdata(intf); struct list_head *p; struct snd_usb_stream *as; if (chip == (void *)-1L) return 0; snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); if (!chip->num_suspended_intf++) { list_for_each(p, &chip->pcm_list) { as = list_entry(p, struct snd_usb_stream, list); snd_pcm_suspend_all(as->pcm); } } return 0; } static int usb_audio_resume(struct usb_interface *intf) { struct snd_usb_audio *chip = usb_get_intfdata(intf); if (chip == (void *)-1L) return 0; if (--chip->num_suspended_intf) return 0; /* * ALSA leaves material resumption to user space * we just notify */ snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM 
*/ static int __init snd_usb_audio_init(void) { if (nrpacks < 1 || nrpacks > MAX_PACKS) { printk(KERN_WARNING "invalid nrpacks value.\n"); return -EINVAL; } return usb_register(&usb_audio_driver); } static void __exit snd_usb_audio_cleanup(void) { usb_deregister(&usb_audio_driver); } module_init(snd_usb_audio_init); module_exit(snd_usb_audio_cleanup);
gpl-2.0
Edgar86/boston-2.6.32.x
drivers/media/dvb/frontends/lgdt3305.c
558
26416
/* * Support for LGDT3305 - VSB/QAM * * Copyright (C) 2008, 2009 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <asm/div64.h> #include <linux/dvb/frontend.h> #include "dvb_math.h" #include "lgdt3305.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debug level (info=1, reg=2 (or-able))"); #define DBG_INFO 1 #define DBG_REG 2 #define lg_printk(kern, fmt, arg...) \ printk(kern "%s: " fmt, __func__, ##arg) #define lg_info(fmt, arg...) printk(KERN_INFO "lgdt3305: " fmt, ##arg) #define lg_warn(fmt, arg...) lg_printk(KERN_WARNING, fmt, ##arg) #define lg_err(fmt, arg...) lg_printk(KERN_ERR, fmt, ##arg) #define lg_dbg(fmt, arg...) if (debug & DBG_INFO) \ lg_printk(KERN_DEBUG, fmt, ##arg) #define lg_reg(fmt, arg...) 
if (debug & DBG_REG) \ lg_printk(KERN_DEBUG, fmt, ##arg) #define lg_fail(ret) \ ({ \ int __ret; \ __ret = (ret < 0); \ if (__ret) \ lg_err("error %d on line %d\n", ret, __LINE__); \ __ret; \ }) struct lgdt3305_state { struct i2c_adapter *i2c_adap; const struct lgdt3305_config *cfg; struct dvb_frontend frontend; fe_modulation_t current_modulation; u32 current_frequency; u32 snr; }; /* ------------------------------------------------------------------------ */ #define LGDT3305_GEN_CTRL_1 0x0000 #define LGDT3305_GEN_CTRL_2 0x0001 #define LGDT3305_GEN_CTRL_3 0x0002 #define LGDT3305_GEN_STATUS 0x0003 #define LGDT3305_GEN_CONTROL 0x0007 #define LGDT3305_GEN_CTRL_4 0x000a #define LGDT3305_DGTL_AGC_REF_1 0x0012 #define LGDT3305_DGTL_AGC_REF_2 0x0013 #define LGDT3305_CR_CTR_FREQ_1 0x0106 #define LGDT3305_CR_CTR_FREQ_2 0x0107 #define LGDT3305_CR_CTR_FREQ_3 0x0108 #define LGDT3305_CR_CTR_FREQ_4 0x0109 #define LGDT3305_CR_MSE_1 0x011b #define LGDT3305_CR_MSE_2 0x011c #define LGDT3305_CR_LOCK_STATUS 0x011d #define LGDT3305_CR_CTRL_7 0x0126 #define LGDT3305_AGC_POWER_REF_1 0x0300 #define LGDT3305_AGC_POWER_REF_2 0x0301 #define LGDT3305_AGC_DELAY_PT_1 0x0302 #define LGDT3305_AGC_DELAY_PT_2 0x0303 #define LGDT3305_RFAGC_LOOP_FLTR_BW_1 0x0306 #define LGDT3305_RFAGC_LOOP_FLTR_BW_2 0x0307 #define LGDT3305_IFBW_1 0x0308 #define LGDT3305_IFBW_2 0x0309 #define LGDT3305_AGC_CTRL_1 0x030c #define LGDT3305_AGC_CTRL_4 0x0314 #define LGDT3305_EQ_MSE_1 0x0413 #define LGDT3305_EQ_MSE_2 0x0414 #define LGDT3305_EQ_MSE_3 0x0415 #define LGDT3305_PT_MSE_1 0x0417 #define LGDT3305_PT_MSE_2 0x0418 #define LGDT3305_PT_MSE_3 0x0419 #define LGDT3305_FEC_BLOCK_CTRL 0x0504 #define LGDT3305_FEC_LOCK_STATUS 0x050a #define LGDT3305_FEC_PKT_ERR_1 0x050c #define LGDT3305_FEC_PKT_ERR_2 0x050d #define LGDT3305_TP_CTRL_1 0x050e #define LGDT3305_BERT_PERIOD 0x0801 #define LGDT3305_BERT_ERROR_COUNT_1 0x080a #define LGDT3305_BERT_ERROR_COUNT_2 0x080b #define LGDT3305_BERT_ERROR_COUNT_3 0x080c #define 
LGDT3305_BERT_ERROR_COUNT_4 0x080d static int lgdt3305_write_reg(struct lgdt3305_state *state, u16 reg, u8 val) { int ret; u8 buf[] = { reg >> 8, reg & 0xff, val }; struct i2c_msg msg = { .addr = state->cfg->i2c_addr, .flags = 0, .buf = buf, .len = 3, }; lg_reg("reg: 0x%04x, val: 0x%02x\n", reg, val); ret = i2c_transfer(state->i2c_adap, &msg, 1); if (ret != 1) { lg_err("error (addr %02x %02x <- %02x, err = %i)\n", msg.buf[0], msg.buf[1], msg.buf[2], ret); if (ret < 0) return ret; else return -EREMOTEIO; } return 0; } static int lgdt3305_read_reg(struct lgdt3305_state *state, u16 reg, u8 *val) { int ret; u8 reg_buf[] = { reg >> 8, reg & 0xff }; struct i2c_msg msg[] = { { .addr = state->cfg->i2c_addr, .flags = 0, .buf = reg_buf, .len = 2 }, { .addr = state->cfg->i2c_addr, .flags = I2C_M_RD, .buf = val, .len = 1 }, }; lg_reg("reg: 0x%04x\n", reg); ret = i2c_transfer(state->i2c_adap, msg, 2); if (ret != 2) { lg_err("error (addr %02x reg %04x error (ret == %i)\n", state->cfg->i2c_addr, reg, ret); if (ret < 0) return ret; else return -EREMOTEIO; } return 0; } #define read_reg(state, reg) \ ({ \ u8 __val; \ int ret = lgdt3305_read_reg(state, reg, &__val); \ if (lg_fail(ret)) \ __val = 0; \ __val; \ }) static int lgdt3305_set_reg_bit(struct lgdt3305_state *state, u16 reg, int bit, int onoff) { u8 val; int ret; lg_reg("reg: 0x%04x, bit: %d, level: %d\n", reg, bit, onoff); ret = lgdt3305_read_reg(state, reg, &val); if (lg_fail(ret)) goto fail; val &= ~(1 << bit); val |= (onoff & 1) << bit; ret = lgdt3305_write_reg(state, reg, val); fail: return ret; } struct lgdt3305_reg { u16 reg; u8 val; }; static int lgdt3305_write_regs(struct lgdt3305_state *state, struct lgdt3305_reg *regs, int len) { int i, ret; lg_reg("writing %d registers...\n", len); for (i = 0; i < len - 1; i++) { ret = lgdt3305_write_reg(state, regs[i].reg, regs[i].val); if (lg_fail(ret)) return ret; } return 0; } /* ------------------------------------------------------------------------ */ static int 
lgdt3305_soft_reset(struct lgdt3305_state *state) { int ret; lg_dbg("\n"); ret = lgdt3305_set_reg_bit(state, LGDT3305_GEN_CTRL_3, 0, 0); if (lg_fail(ret)) goto fail; msleep(20); ret = lgdt3305_set_reg_bit(state, LGDT3305_GEN_CTRL_3, 0, 1); fail: return ret; } static inline int lgdt3305_mpeg_mode(struct lgdt3305_state *state, enum lgdt3305_mpeg_mode mode) { lg_dbg("(%d)\n", mode); return lgdt3305_set_reg_bit(state, LGDT3305_TP_CTRL_1, 5, mode); } static int lgdt3305_mpeg_mode_polarity(struct lgdt3305_state *state, enum lgdt3305_tp_clock_edge edge, enum lgdt3305_tp_valid_polarity valid) { u8 val; int ret; lg_dbg("edge = %d, valid = %d\n", edge, valid); ret = lgdt3305_read_reg(state, LGDT3305_TP_CTRL_1, &val); if (lg_fail(ret)) goto fail; val &= ~0x09; if (edge) val |= 0x08; if (valid) val |= 0x01; ret = lgdt3305_write_reg(state, LGDT3305_TP_CTRL_1, val); if (lg_fail(ret)) goto fail; ret = lgdt3305_soft_reset(state); fail: return ret; } static int lgdt3305_set_modulation(struct lgdt3305_state *state, struct dvb_frontend_parameters *param) { u8 opermode; int ret; lg_dbg("\n"); ret = lgdt3305_read_reg(state, LGDT3305_GEN_CTRL_1, &opermode); if (lg_fail(ret)) goto fail; opermode &= ~0x03; switch (param->u.vsb.modulation) { case VSB_8: opermode |= 0x03; break; case QAM_64: opermode |= 0x00; break; case QAM_256: opermode |= 0x01; break; default: return -EINVAL; } ret = lgdt3305_write_reg(state, LGDT3305_GEN_CTRL_1, opermode); fail: return ret; } static int lgdt3305_set_filter_extension(struct lgdt3305_state *state, struct dvb_frontend_parameters *param) { int val; switch (param->u.vsb.modulation) { case VSB_8: val = 0; break; case QAM_64: case QAM_256: val = 1; break; default: return -EINVAL; } lg_dbg("val = %d\n", val); return lgdt3305_set_reg_bit(state, 0x043f, 2, val); } /* ------------------------------------------------------------------------ */ static int lgdt3305_passband_digital_agc(struct lgdt3305_state *state, struct dvb_frontend_parameters *param) { u16 
agc_ref; switch (param->u.vsb.modulation) { case VSB_8: agc_ref = 0x32c4; break; case QAM_64: agc_ref = 0x2a00; break; case QAM_256: agc_ref = 0x2a80; break; default: return -EINVAL; } lg_dbg("agc ref: 0x%04x\n", agc_ref); lgdt3305_write_reg(state, LGDT3305_DGTL_AGC_REF_1, agc_ref >> 8); lgdt3305_write_reg(state, LGDT3305_DGTL_AGC_REF_2, agc_ref & 0xff); return 0; } static int lgdt3305_rfagc_loop(struct lgdt3305_state *state, struct dvb_frontend_parameters *param) { u16 ifbw, rfbw, agcdelay; switch (param->u.vsb.modulation) { case VSB_8: agcdelay = 0x04c0; rfbw = 0x8000; ifbw = 0x8000; break; case QAM_64: case QAM_256: agcdelay = 0x046b; rfbw = 0x8889; ifbw = 0x8888; break; default: return -EINVAL; } if (state->cfg->rf_agc_loop) { lg_dbg("agcdelay: 0x%04x, rfbw: 0x%04x\n", agcdelay, rfbw); /* rf agc loop filter bandwidth */ lgdt3305_write_reg(state, LGDT3305_AGC_DELAY_PT_1, agcdelay >> 8); lgdt3305_write_reg(state, LGDT3305_AGC_DELAY_PT_2, agcdelay & 0xff); lgdt3305_write_reg(state, LGDT3305_RFAGC_LOOP_FLTR_BW_1, rfbw >> 8); lgdt3305_write_reg(state, LGDT3305_RFAGC_LOOP_FLTR_BW_2, rfbw & 0xff); } else { lg_dbg("ifbw: 0x%04x\n", ifbw); /* if agc loop filter bandwidth */ lgdt3305_write_reg(state, LGDT3305_IFBW_1, ifbw >> 8); lgdt3305_write_reg(state, LGDT3305_IFBW_2, ifbw & 0xff); } return 0; } static int lgdt3305_agc_setup(struct lgdt3305_state *state, struct dvb_frontend_parameters *param) { int lockdten, acqen; switch (param->u.vsb.modulation) { case VSB_8: lockdten = 0; acqen = 0; break; case QAM_64: case QAM_256: lockdten = 1; acqen = 1; break; default: return -EINVAL; } lg_dbg("lockdten = %d, acqen = %d\n", lockdten, acqen); /* control agc function */ lgdt3305_write_reg(state, LGDT3305_AGC_CTRL_4, 0xe1 | lockdten << 1); lgdt3305_set_reg_bit(state, LGDT3305_AGC_CTRL_1, 2, acqen); return lgdt3305_rfagc_loop(state, param); } static int lgdt3305_set_agc_power_ref(struct lgdt3305_state *state, struct dvb_frontend_parameters *param) { u16 usref = 0; switch 
(param->u.vsb.modulation) { case VSB_8: if (state->cfg->usref_8vsb) usref = state->cfg->usref_8vsb; break; case QAM_64: if (state->cfg->usref_qam64) usref = state->cfg->usref_qam64; break; case QAM_256: if (state->cfg->usref_qam256) usref = state->cfg->usref_qam256; break; default: return -EINVAL; } if (usref) { lg_dbg("set manual mode: 0x%04x\n", usref); lgdt3305_set_reg_bit(state, LGDT3305_AGC_CTRL_1, 3, 1); lgdt3305_write_reg(state, LGDT3305_AGC_POWER_REF_1, 0xff & (usref >> 8)); lgdt3305_write_reg(state, LGDT3305_AGC_POWER_REF_2, 0xff & (usref >> 0)); } return 0; } /* ------------------------------------------------------------------------ */ static int lgdt3305_spectral_inversion(struct lgdt3305_state *state, struct dvb_frontend_parameters *param, int inversion) { int ret; lg_dbg("(%d)\n", inversion); switch (param->u.vsb.modulation) { case VSB_8: ret = lgdt3305_write_reg(state, LGDT3305_CR_CTRL_7, inversion ? 0xf9 : 0x79); break; case QAM_64: case QAM_256: ret = lgdt3305_write_reg(state, LGDT3305_FEC_BLOCK_CTRL, inversion ? 
0xfd : 0xff); break; default: ret = -EINVAL; } return ret; } static int lgdt3305_set_if(struct lgdt3305_state *state, struct dvb_frontend_parameters *param) { u16 if_freq_khz; u8 nco1, nco2, nco3, nco4; u64 nco; switch (param->u.vsb.modulation) { case VSB_8: if_freq_khz = state->cfg->vsb_if_khz; break; case QAM_64: case QAM_256: if_freq_khz = state->cfg->qam_if_khz; break; default: return -EINVAL; } nco = if_freq_khz / 10; switch (param->u.vsb.modulation) { case VSB_8: nco <<= 24; do_div(nco, 625); break; case QAM_64: case QAM_256: nco <<= 28; do_div(nco, 625); break; default: return -EINVAL; } nco1 = (nco >> 24) & 0x3f; nco1 |= 0x40; nco2 = (nco >> 16) & 0xff; nco3 = (nco >> 8) & 0xff; nco4 = nco & 0xff; lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_1, nco1); lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_2, nco2); lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_3, nco3); lgdt3305_write_reg(state, LGDT3305_CR_CTR_FREQ_4, nco4); lg_dbg("%d KHz -> [%02x%02x%02x%02x]\n", if_freq_khz, nco1, nco2, nco3, nco4); return 0; } /* ------------------------------------------------------------------------ */ static int lgdt3305_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct lgdt3305_state *state = fe->demodulator_priv; if (state->cfg->deny_i2c_rptr) return 0; lg_dbg("(%d)\n", enable); return lgdt3305_set_reg_bit(state, LGDT3305_GEN_CTRL_2, 5, enable ? 
0 : 1); } static int lgdt3305_sleep(struct dvb_frontend *fe) { struct lgdt3305_state *state = fe->demodulator_priv; u8 gen_ctrl_3, gen_ctrl_4; lg_dbg("\n"); gen_ctrl_3 = read_reg(state, LGDT3305_GEN_CTRL_3); gen_ctrl_4 = read_reg(state, LGDT3305_GEN_CTRL_4); /* hold in software reset while sleeping */ gen_ctrl_3 &= ~0x01; /* tristate the IF-AGC pin */ gen_ctrl_3 |= 0x02; /* tristate the RF-AGC pin */ gen_ctrl_3 |= 0x04; /* disable vsb/qam module */ gen_ctrl_4 &= ~0x01; /* disable adc module */ gen_ctrl_4 &= ~0x02; lgdt3305_write_reg(state, LGDT3305_GEN_CTRL_3, gen_ctrl_3); lgdt3305_write_reg(state, LGDT3305_GEN_CTRL_4, gen_ctrl_4); return 0; } static int lgdt3305_init(struct dvb_frontend *fe) { struct lgdt3305_state *state = fe->demodulator_priv; int ret; static struct lgdt3305_reg lgdt3305_init_data[] = { { .reg = LGDT3305_GEN_CTRL_1, .val = 0x03, }, { .reg = LGDT3305_GEN_CTRL_2, .val = 0xb0, }, { .reg = LGDT3305_GEN_CTRL_3, .val = 0x01, }, { .reg = LGDT3305_GEN_CONTROL, .val = 0x6f, }, { .reg = LGDT3305_GEN_CTRL_4, .val = 0x03, }, { .reg = LGDT3305_DGTL_AGC_REF_1, .val = 0x32, }, { .reg = LGDT3305_DGTL_AGC_REF_2, .val = 0xc4, }, { .reg = LGDT3305_CR_CTR_FREQ_1, .val = 0x00, }, { .reg = LGDT3305_CR_CTR_FREQ_2, .val = 0x00, }, { .reg = LGDT3305_CR_CTR_FREQ_3, .val = 0x00, }, { .reg = LGDT3305_CR_CTR_FREQ_4, .val = 0x00, }, { .reg = LGDT3305_CR_CTRL_7, .val = 0x79, }, { .reg = LGDT3305_AGC_POWER_REF_1, .val = 0x32, }, { .reg = LGDT3305_AGC_POWER_REF_2, .val = 0xc4, }, { .reg = LGDT3305_AGC_DELAY_PT_1, .val = 0x0d, }, { .reg = LGDT3305_AGC_DELAY_PT_2, .val = 0x30, }, { .reg = LGDT3305_RFAGC_LOOP_FLTR_BW_1, .val = 0x80, }, { .reg = LGDT3305_RFAGC_LOOP_FLTR_BW_2, .val = 0x00, }, { .reg = LGDT3305_IFBW_1, .val = 0x80, }, { .reg = LGDT3305_IFBW_2, .val = 0x00, }, { .reg = LGDT3305_AGC_CTRL_1, .val = 0x30, }, { .reg = LGDT3305_AGC_CTRL_4, .val = 0x61, }, { .reg = LGDT3305_FEC_BLOCK_CTRL, .val = 0xff, }, { .reg = LGDT3305_TP_CTRL_1, .val = 0x1b, }, }; lg_dbg("\n"); ret = 
lgdt3305_write_regs(state, lgdt3305_init_data, ARRAY_SIZE(lgdt3305_init_data)); if (lg_fail(ret)) goto fail; ret = lgdt3305_soft_reset(state); fail: return ret; } static int lgdt3305_set_parameters(struct dvb_frontend *fe, struct dvb_frontend_parameters *param) { struct lgdt3305_state *state = fe->demodulator_priv; int ret; lg_dbg("(%d, %d)\n", param->frequency, param->u.vsb.modulation); if (fe->ops.tuner_ops.set_params) { ret = fe->ops.tuner_ops.set_params(fe, param); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); if (lg_fail(ret)) goto fail; state->current_frequency = param->frequency; } ret = lgdt3305_set_modulation(state, param); if (lg_fail(ret)) goto fail; ret = lgdt3305_passband_digital_agc(state, param); if (lg_fail(ret)) goto fail; ret = lgdt3305_set_agc_power_ref(state, param); if (lg_fail(ret)) goto fail; ret = lgdt3305_agc_setup(state, param); if (lg_fail(ret)) goto fail; /* low if */ ret = lgdt3305_write_reg(state, LGDT3305_GEN_CONTROL, 0x2f); if (lg_fail(ret)) goto fail; ret = lgdt3305_set_reg_bit(state, LGDT3305_CR_CTR_FREQ_1, 6, 1); if (lg_fail(ret)) goto fail; ret = lgdt3305_set_if(state, param); if (lg_fail(ret)) goto fail; ret = lgdt3305_spectral_inversion(state, param, state->cfg->spectral_inversion ? 
1 : 0); if (lg_fail(ret)) goto fail; ret = lgdt3305_set_filter_extension(state, param); if (lg_fail(ret)) goto fail; state->current_modulation = param->u.vsb.modulation; ret = lgdt3305_mpeg_mode(state, state->cfg->mpeg_mode); if (lg_fail(ret)) goto fail; /* lgdt3305_mpeg_mode_polarity calls lgdt3305_soft_reset */ ret = lgdt3305_mpeg_mode_polarity(state, state->cfg->tpclk_edge, state->cfg->tpvalid_polarity); fail: return ret; } static int lgdt3305_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *param) { struct lgdt3305_state *state = fe->demodulator_priv; lg_dbg("\n"); param->u.vsb.modulation = state->current_modulation; param->frequency = state->current_frequency; return 0; } /* ------------------------------------------------------------------------ */ static int lgdt3305_read_cr_lock_status(struct lgdt3305_state *state, int *locked) { u8 val; int ret; char *cr_lock_state = ""; *locked = 0; ret = lgdt3305_read_reg(state, LGDT3305_CR_LOCK_STATUS, &val); if (lg_fail(ret)) goto fail; switch (state->current_modulation) { case QAM_256: case QAM_64: if (val & (1 << 1)) *locked = 1; switch (val & 0x07) { case 0: cr_lock_state = "QAM UNLOCK"; break; case 4: cr_lock_state = "QAM 1stLock"; break; case 6: cr_lock_state = "QAM 2ndLock"; break; case 7: cr_lock_state = "QAM FinalLock"; break; default: cr_lock_state = "CLOCKQAM-INVALID!"; break; } break; case VSB_8: if (val & (1 << 7)) { *locked = 1; cr_lock_state = "CLOCKVSB"; } break; default: ret = -EINVAL; } lg_dbg("(%d) %s\n", *locked, cr_lock_state); fail: return ret; } static int lgdt3305_read_fec_lock_status(struct lgdt3305_state *state, int *locked) { u8 val; int ret, mpeg_lock, fec_lock, viterbi_lock; *locked = 0; switch (state->current_modulation) { case QAM_256: case QAM_64: ret = lgdt3305_read_reg(state, LGDT3305_FEC_LOCK_STATUS, &val); if (lg_fail(ret)) goto fail; mpeg_lock = (val & (1 << 0)) ? 1 : 0; fec_lock = (val & (1 << 2)) ? 1 : 0; viterbi_lock = (val & (1 << 3)) ? 
1 : 0; *locked = mpeg_lock && fec_lock && viterbi_lock; lg_dbg("(%d) %s%s%s\n", *locked, mpeg_lock ? "mpeg lock " : "", fec_lock ? "fec lock " : "", viterbi_lock ? "viterbi lock" : ""); break; case VSB_8: default: ret = -EINVAL; } fail: return ret; } static int lgdt3305_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct lgdt3305_state *state = fe->demodulator_priv; u8 val; int ret, signal, inlock, nofecerr, snrgood, cr_lock, fec_lock, sync_lock; *status = 0; ret = lgdt3305_read_reg(state, LGDT3305_GEN_STATUS, &val); if (lg_fail(ret)) goto fail; signal = (val & (1 << 4)) ? 1 : 0; inlock = (val & (1 << 3)) ? 0 : 1; sync_lock = (val & (1 << 2)) ? 1 : 0; nofecerr = (val & (1 << 1)) ? 1 : 0; snrgood = (val & (1 << 0)) ? 1 : 0; lg_dbg("%s%s%s%s%s\n", signal ? "SIGNALEXIST " : "", inlock ? "INLOCK " : "", sync_lock ? "SYNCLOCK " : "", nofecerr ? "NOFECERR " : "", snrgood ? "SNRGOOD " : ""); ret = lgdt3305_read_cr_lock_status(state, &cr_lock); if (lg_fail(ret)) goto fail; if (signal) *status |= FE_HAS_SIGNAL; if (cr_lock) *status |= FE_HAS_CARRIER; if (nofecerr) *status |= FE_HAS_VITERBI; if (sync_lock) *status |= FE_HAS_SYNC; switch (state->current_modulation) { case QAM_256: case QAM_64: ret = lgdt3305_read_fec_lock_status(state, &fec_lock); if (lg_fail(ret)) goto fail; if (fec_lock) *status |= FE_HAS_LOCK; break; case VSB_8: if (inlock) *status |= FE_HAS_LOCK; break; default: ret = -EINVAL; } fail: return ret; } /* ------------------------------------------------------------------------ */ /* borrowed from lgdt330x.c */ static u32 calculate_snr(u32 mse, u32 c) { if (mse == 0) /* no signal */ return 0; mse = intlog10(mse); if (mse > c) { /* Negative SNR, which is possible, but realisticly the demod will lose lock before the signal gets this bad. 
The API only allows for unsigned values, so just return 0 */ return 0; } return 10*(c - mse); } static int lgdt3305_read_snr(struct dvb_frontend *fe, u16 *snr) { struct lgdt3305_state *state = fe->demodulator_priv; u32 noise; /* noise value */ u32 c; /* per-modulation SNR calculation constant */ switch (state->current_modulation) { case VSB_8: #ifdef USE_PTMSE /* Use Phase Tracker Mean-Square Error Register */ /* SNR for ranges from -13.11 to +44.08 */ noise = ((read_reg(state, LGDT3305_PT_MSE_1) & 0x07) << 16) | (read_reg(state, LGDT3305_PT_MSE_2) << 8) | (read_reg(state, LGDT3305_PT_MSE_3) & 0xff); c = 73957994; /* log10(25*32^2)*2^24 */ #else /* Use Equalizer Mean-Square Error Register */ /* SNR for ranges from -16.12 to +44.08 */ noise = ((read_reg(state, LGDT3305_EQ_MSE_1) & 0x0f) << 16) | (read_reg(state, LGDT3305_EQ_MSE_2) << 8) | (read_reg(state, LGDT3305_EQ_MSE_3) & 0xff); c = 73957994; /* log10(25*32^2)*2^24 */ #endif break; case QAM_64: case QAM_256: noise = (read_reg(state, LGDT3305_CR_MSE_1) << 8) | (read_reg(state, LGDT3305_CR_MSE_2) & 0xff); c = (state->current_modulation == QAM_64) ? 
97939837 : 98026066; /* log10(688128)*2^24 and log10(696320)*2^24 */ break; default: return -EINVAL; } state->snr = calculate_snr(noise, c); /* report SNR in dB * 10 */ *snr = (state->snr / ((1 << 24) / 10)); lg_dbg("noise = 0x%08x, snr = %d.%02d dB\n", noise, state->snr >> 24, (((state->snr >> 8) & 0xffff) * 100) >> 16); return 0; } static int lgdt3305_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { /* borrowed from lgdt330x.c * * Calculate strength from SNR up to 35dB * Even though the SNR can go higher than 35dB, * there is some comfort factor in having a range of * strong signals that can show at 100% */ struct lgdt3305_state *state = fe->demodulator_priv; u16 snr; int ret; *strength = 0; ret = fe->ops.read_snr(fe, &snr); if (lg_fail(ret)) goto fail; /* Rather than use the 8.8 value snr, use state->snr which is 8.24 */ /* scale the range 0 - 35*2^24 into 0 - 65535 */ if (state->snr >= 8960 * 0x10000) *strength = 0xffff; else *strength = state->snr / 8960; fail: return ret; } /* ------------------------------------------------------------------------ */ static int lgdt3305_read_ber(struct dvb_frontend *fe, u32 *ber) { *ber = 0; return 0; } static int lgdt3305_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct lgdt3305_state *state = fe->demodulator_priv; *ucblocks = (read_reg(state, LGDT3305_FEC_PKT_ERR_1) << 8) | (read_reg(state, LGDT3305_FEC_PKT_ERR_2) & 0xff); return 0; } static int lgdt3305_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *fe_tune_settings) { fe_tune_settings->min_delay_ms = 500; lg_dbg("\n"); return 0; } static void lgdt3305_release(struct dvb_frontend *fe) { struct lgdt3305_state *state = fe->demodulator_priv; lg_dbg("\n"); kfree(state); } static struct dvb_frontend_ops lgdt3305_ops; struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config, struct i2c_adapter *i2c_adap) { struct lgdt3305_state *state = NULL; int ret; u8 val; lg_dbg("(%d-%04x)\n", i2c_adap ? 
i2c_adapter_id(i2c_adap) : 0, config ? config->i2c_addr : 0); state = kzalloc(sizeof(struct lgdt3305_state), GFP_KERNEL); if (state == NULL) goto fail; state->cfg = config; state->i2c_adap = i2c_adap; memcpy(&state->frontend.ops, &lgdt3305_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; /* verify that we're talking to a lg dt3305 */ ret = lgdt3305_read_reg(state, LGDT3305_GEN_CTRL_2, &val); if ((lg_fail(ret)) | (val == 0)) goto fail; ret = lgdt3305_write_reg(state, 0x0808, 0x80); if (lg_fail(ret)) goto fail; ret = lgdt3305_read_reg(state, 0x0808, &val); if ((lg_fail(ret)) | (val != 0x80)) goto fail; ret = lgdt3305_write_reg(state, 0x0808, 0x00); if (lg_fail(ret)) goto fail; state->current_frequency = -1; state->current_modulation = -1; return &state->frontend; fail: lg_warn("unable to detect LGDT3305 hardware\n"); kfree(state); return NULL; } EXPORT_SYMBOL(lgdt3305_attach); static struct dvb_frontend_ops lgdt3305_ops = { .info = { .name = "LG Electronics LGDT3305 VSB/QAM Frontend", .type = FE_ATSC, .frequency_min = 54000000, .frequency_max = 858000000, .frequency_stepsize = 62500, .caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB }, .i2c_gate_ctrl = lgdt3305_i2c_gate_ctrl, .init = lgdt3305_init, .sleep = lgdt3305_sleep, .set_frontend = lgdt3305_set_parameters, .get_frontend = lgdt3305_get_frontend, .get_tune_settings = lgdt3305_get_tune_settings, .read_status = lgdt3305_read_status, .read_ber = lgdt3305_read_ber, .read_signal_strength = lgdt3305_read_signal_strength, .read_snr = lgdt3305_read_snr, .read_ucblocks = lgdt3305_read_ucblocks, .release = lgdt3305_release, }; MODULE_DESCRIPTION("LG Electronics LGDT3305 ATSC/QAM-B Demodulator Driver"); MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1"); /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
C-Aniruddh/kernel_vortex
arch/x86/kernel/cpu/intel.c
814
21037
#include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/smp.h> #include <linux/sched.h> #include <linux/thread_info.h> #include <linux/module.h> #include <linux/uaccess.h> #include <asm/processor.h> #include <asm/pgtable.h> #include <asm/msr.h> #include <asm/bugs.h> #include <asm/cpu.h> #ifdef CONFIG_X86_64 #include <linux/topology.h> #endif #include "cpu.h" #ifdef CONFIG_X86_LOCAL_APIC #include <asm/mpspec.h> #include <asm/apic.h> #endif static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable; /* Unmask CPUID levels if masked: */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); c->cpuid_level = cpuid_eax(0); get_cpu_cap(c); } } if ((c->x86 == 0xf && c->x86_model >= 0x03) || (c->x86 == 0x6 && c->x86_model >= 0x0e)) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) { unsigned lower_word; wrmsr(MSR_IA32_UCODE_REV, 0, 0); /* Required by the SDM */ sync_core(); rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode); } /* * Atom erratum AAE44/AAF40/AAG38/AAH41: * * A race condition between speculative fetches and invalidating * a large page. This is worked around in microcode, but we * need the microcode to have already been loaded... so if it is * not, recommend a BIOS update and disable large pages. 
*/ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && c->microcode < 0x20e) { printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n"); clear_cpu_cap(c, X86_FEATURE_PSE); } #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #else /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ if (c->x86 == 15 && c->x86_cache_alignment == 64) c->x86_cache_alignment = 128; #endif /* CPUID workaround for 0F33/0F34 CPU */ if (c->x86 == 0xF && c->x86_model == 0x3 && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) c->x86_phys_bits = 36; /* * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate * with P/T states and does not stop in deep C-states. * * It is also reliable across cores and sockets. (but not across * cabinets - we turn it off in that case explicitly.) */ if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); if (!check_tsc_unstable()) sched_clock_stable = 1; } /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ if (c->x86 == 6) { switch (c->x86_model) { case 0x27: /* Penwell */ case 0x35: /* Cloverview */ set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); break; default: break; } } /* * There is a known erratum on Pentium III and Core Solo * and Core Duo CPUs. * " Page with PAT set to WC while associated MTRR is UC * may consolidate to UC " * Because of this erratum, it is better to stick with * setting WC in MTRR rather than using PAT on these CPUs. * * Enable PAT WC only on P4, Core 2 or later CPUs. */ if (c->x86 == 6 && c->x86_model < 15) clear_cpu_cap(c, X86_FEATURE_PAT); #ifdef CONFIG_KMEMCHECK /* * P4s have a "fast strings" feature which causes single- * stepping REP instructions to only generate a #DB on * cache-line boundaries. * * Ingo Molnar reported a Pentium D (model 6) and a Xeon * (model 2) with the same problem. 
*/ if (c->x86 == 15) { rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { printk(KERN_INFO "kmemcheck: Disabling fast string operations\n"); misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING; wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); } } #endif /* * If fast string is not enabled in IA32_MISC_ENABLE for any reason, * clear the fast string and enhanced fast string CPU capabilities. */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { printk(KERN_INFO "Disabled fast string operations\n"); setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); setup_clear_cpu_cap(X86_FEATURE_ERMS); } } } #ifdef CONFIG_X86_32 /* * Early probe support logic for ppro memory erratum #50 * * This is called before we do cpu ident work */ int __cpuinit ppro_with_ram_bug(void) { /* Uses data from early_cpu_detect now */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && boot_cpu_data.x86_mask < 8) { printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n"); return 1; } return 0; } static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) { /* calling is from identify_secondary_cpu() ? */ if (!c->cpu_index) return; /* * Mask B, Pentium, but not Pentium MMX */ if (c->x86 == 5 && c->x86_mask >= 1 && c->x86_mask <= 4 && c->x86_model <= 3) { /* * Remember we have B step Pentia with bugs */ WARN_ONCE(1, "WARNING: SMP operation may be unreliable" "with B stepping processors.\n"); } } static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) { unsigned long lo, hi; #ifdef CONFIG_X86_F00F_BUG /* * All current models of Pentium and Pentium with MMX technology CPUs * have the F0 0F bug, which lets nonprivileged users lock up the * system. Announce that the fault handler will be checking for it. 
*/ clear_cpu_bug(c, X86_BUG_F00F); if (!paravirt_enabled() && c->x86 == 5) { static int f00f_workaround_enabled; set_cpu_bug(c, X86_BUG_F00F); if (!f00f_workaround_enabled) { printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); f00f_workaround_enabled = 1; } } #endif /* * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until * model 3 mask 3 */ if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) clear_cpu_cap(c, X86_FEATURE_SEP); /* * P4 Xeon errata 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. */ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) { printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; wrmsr(MSR_IA32_MISC_ENABLE, lo, hi); } } /* * See if we have a good local APIC by checking for buggy Pentia, * i.e. all B steppings and the C2 stepping of P54C when using their * integrated APIC (see 11AP erratum in "Pentium Processor * Specification Update"). 
*/ if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && (c->x86_mask < 0x6 || c->x86_mask == 0xb)) set_cpu_cap(c, X86_FEATURE_11AP); #ifdef CONFIG_X86_INTEL_USERCOPY /* * Set up the preferred alignment for movsl bulk memory moves */ switch (c->x86) { case 4: /* 486: untested */ break; case 5: /* Old Pentia: untested */ break; case 6: /* PII/PIII only like movsl with 8-byte alignment */ movsl_mask.mask = 7; break; case 15: /* P4 is OK down to 8-byte alignment */ movsl_mask.mask = 7; break; } #endif #ifdef CONFIG_X86_NUMAQ numaq_tsc_disable(); #endif intel_smp_check(c); } #else static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) { } #endif static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) { #ifdef CONFIG_NUMA unsigned node; int cpu = smp_processor_id(); /* Don't do the funky fallback heuristics the AMD version employs for now. */ node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE || !node_online(node)) { /* reuse the value from init_cpu_to_node() */ node = cpu_to_node(cpu); } numa_set_node(cpu, node); #endif } /* * find out the number of processor cores on the die */ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) { unsigned int eax, ebx, ecx, edx; if (c->cpuid_level < 4) return 1; /* Intel has a non-standard dependency on %ecx for this CPUID level. 
*/ cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); if (eax & 0x1f) return (eax >> 26) + 1; else return 1; } static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) { /* Intel VMX MSR indicated features */ #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 #define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 #define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 #define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 #define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW); clear_cpu_cap(c, X86_FEATURE_VNMI); clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); clear_cpu_cap(c, X86_FEATURE_EPT); clear_cpu_cap(c, X86_FEATURE_VPID); rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); msr_ctl = vmx_msr_high | vmx_msr_low; if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) set_cpu_cap(c, X86_FEATURE_VNMI); if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, vmx_msr_low, vmx_msr_high); msr_ctl2 = vmx_msr_high | vmx_msr_low; if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) set_cpu_cap(c, X86_FEATURE_EPT); if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) set_cpu_cap(c, X86_FEATURE_VPID); } } static void __cpuinit init_intel(struct cpuinfo_x86 *c) { unsigned int l2 = 0; early_init_intel(c); intel_workarounds(c); /* * Detect the extended topology information if available. 
This * will reinitialise the initial_apicid which will be used * in init_intel_cacheinfo() */ detect_extended_topology(c); l2 = init_intel_cacheinfo(c); if (c->cpuid_level > 9) { unsigned eax = cpuid_eax(10); /* Check for version and the number of counters */ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); } if (cpu_has_xmm2) set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); if (cpu_has_ds) { unsigned int l1; rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); if (!(l1 & (1<<11))) set_cpu_cap(c, X86_FEATURE_BTS); if (!(l1 & (1<<12))) set_cpu_cap(c, X86_FEATURE_PEBS); } if (c->x86 == 6 && cpu_has_clflush && (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); #ifdef CONFIG_X86_64 if (c->x86 == 15) c->x86_cache_alignment = c->x86_clflush_size * 2; if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_REP_GOOD); #else /* * Names for the Pentium II/Celeron processors * detectable only by also checking the cache size. * Dixon is NOT a Celeron. */ if (c->x86 == 6) { char *p = NULL; switch (c->x86_model) { case 5: if (l2 == 0) p = "Celeron (Covington)"; else if (l2 == 256) p = "Mobile Pentium II (Dixon)"; break; case 6: if (l2 == 128) p = "Celeron (Mendocino)"; else if (c->x86_mask == 0 || c->x86_mask == 5) p = "Celeron-A"; break; case 8: if (l2 == 128) p = "Celeron (Coppermine)"; break; } if (p) strcpy(c->x86_model_id, p); } if (c->x86 == 15) set_cpu_cap(c, X86_FEATURE_P4); if (c->x86 == 6) set_cpu_cap(c, X86_FEATURE_P3); #endif if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { /* * let's use the legacy cpuid vector 0x1 and 0x4 for topology * detection. */ c->x86_max_cores = intel_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif } /* Work around errata */ srat_detect_node(c); if (cpu_has(c, X86_FEATURE_VMX)) detect_vmx_virtcap(c); /* * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not. 
* x86_energy_perf_policy(8) is available to change it at run-time */ if (cpu_has(c, X86_FEATURE_EPB)) { u64 epb; rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) { printk_once(KERN_WARNING "ENERGY_PERF_BIAS:" " Set to 'normal', was 'performance'\n" "ENERGY_PERF_BIAS: View and update with" " x86_energy_perf_policy(8)\n"); epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); } } } #ifdef CONFIG_X86_32 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* * Intel PIII Tualatin. This comes in two flavours. * One has 256kb of cache, the other 512. We have no way * to determine which, so we use a boottime override * for the 512kb model, and assume 256 otherwise. */ if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) size = 256; return size; } #endif #define TLB_INST_4K 0x01 #define TLB_INST_4M 0x02 #define TLB_INST_2M_4M 0x03 #define TLB_INST_ALL 0x05 #define TLB_INST_1G 0x06 #define TLB_DATA_4K 0x11 #define TLB_DATA_4M 0x12 #define TLB_DATA_2M_4M 0x13 #define TLB_DATA_4K_4M 0x14 #define TLB_DATA_1G 0x16 #define TLB_DATA0_4K 0x21 #define TLB_DATA0_4M 0x22 #define TLB_DATA0_2M_4M 0x23 #define STLB_4K 0x41 static const struct _tlb_table intel_tlb_table[] __cpuinitconst = { { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" }, { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" }, { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" }, { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" }, { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" }, { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages */" }, { 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, { 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, { 0x52, TLB_INST_ALL, 256, " 
TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, { 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" }, { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" }, { 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" }, { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" }, { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" }, { 0x00, 0, 0 } }; static void __cpuinit intel_tlb_lookup(const unsigned char desc) { unsigned char k; if (desc == 0) return; /* look up this descriptor in the table */ for (k = 0; intel_tlb_table[k].descriptor != desc && \ intel_tlb_table[k].descriptor != 0; k++) ; if (intel_tlb_table[k].tlb_type == 0) return; switch (intel_tlb_table[k].tlb_type) { case STLB_4K: if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_INST_ALL: if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; 
if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_INST_4K: if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_INST_4M: if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_INST_2M_4M: if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_DATA_4K: case TLB_DATA0_4K: if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_DATA_4M: case TLB_DATA0_4M: if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_DATA_2M_4M: case TLB_DATA0_2M_4M: if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; case TLB_DATA_4K_4M: if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; } } static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) { switch ((c->x86 << 8) + c->x86_model) { case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ case 0x61d: /* six-core 45 nm xeon "Dunnington" */ tlb_flushall_shift = -1; break; case 0x61a: /* 45 nm nehalem, "Bloomfield" */ case 0x61e: /* 45 nm 
nehalem, "Lynnfield" */ case 0x625: /* 32 nm nehalem, "Clarkdale" */ case 0x62c: /* 32 nm nehalem, "Gulftown" */ case 0x62e: /* 45 nm nehalem-ex, "Beckton" */ case 0x62f: /* 32 nm Xeon E7 */ tlb_flushall_shift = 6; break; case 0x62a: /* SandyBridge */ case 0x62d: /* SandyBridge, "Romely-EP" */ tlb_flushall_shift = 5; break; case 0x63a: /* Ivybridge */ tlb_flushall_shift = 2; break; default: tlb_flushall_shift = 6; } } static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c) { int i, j, n; unsigned int regs[4]; unsigned char *desc = (unsigned char *)regs; if (c->cpuid_level < 2) return; /* Number of times to iterate */ n = cpuid_eax(2) & 0xFF; for (i = 0 ; i < n ; i++) { cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); /* If bit 31 is set, this is an unknown format */ for (j = 0 ; j < 3 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; /* Byte 0 is level count, not a descriptor */ for (j = 1 ; j < 16 ; j++) intel_tlb_lookup(desc[j]); } intel_tlb_flushall_shift_set(c); } static const struct cpu_dev __cpuinitconst intel_cpu_dev = { .c_vendor = "Intel", .c_ident = { "GenuineIntel" }, #ifdef CONFIG_X86_32 .c_models = { { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = { [0] = "486 DX-25/33", [1] = "486 DX-50", [2] = "486 SX", [3] = "486 DX/2", [4] = "486 SL", [5] = "486 SX/2", [7] = "486 DX/2-WB", [8] = "486 DX/4", [9] = "486 DX/4-WB" } }, { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = { [0] = "Pentium 60/66 A-step", [1] = "Pentium 60/66", [2] = "Pentium 75 - 200", [3] = "OverDrive PODP5V83", [4] = "Pentium MMX", [7] = "Mobile Pentium 75 - 200", [8] = "Mobile Pentium MMX" } }, { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = { [0] = "Pentium Pro A-step", [1] = "Pentium Pro", [3] = "Pentium II (Klamath)", [4] = "Pentium II (Deschutes)", [5] = "Pentium II (Deschutes)", [6] = "Mobile Pentium II", [7] = "Pentium III (Katmai)", [8] = "Pentium III (Coppermine)", [10] = "Pentium III (Cascades)", [11] = "Pentium III (Tualatin)", } }, { .vendor = 
X86_VENDOR_INTEL, .family = 15, .model_names = { [0] = "Pentium 4 (Unknown)", [1] = "Pentium 4 (Willamette)", [2] = "Pentium 4 (Northwood)", [4] = "Pentium 4 (Foster)", [5] = "Pentium 4 (Foster)", } }, }, .c_size_cache = intel_size_cache, #endif .c_detect_tlb = intel_detect_tlb, .c_early_init = early_init_intel, .c_init = init_intel, .c_x86_vendor = X86_VENDOR_INTEL, }; cpu_dev_register(intel_cpu_dev);
gpl-2.0
estiko/android_kernel_lenovo_a706_xtremeuv
drivers/media/rc/ene_ir.c
2862
31826
/* * driver for ENE KB3926 B/C/D/E/F CIR (pnp id: ENE0XXX) * * Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * * Special thanks to: * Sami R. <maesesami@gmail.com> for lot of help in debugging and therefore * bringing to life support for transmission & learning mode. * * Charlie Andrews <charliethepilot@googlemail.com> for lots of help in * bringing up the support of new firmware buffer that is popular * on latest notebooks * * ENE for partial device documentation * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/pnp.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/slab.h> #include <media/rc-core.h> #include "ene_ir.h" static int sample_period; static bool learning_mode_force; static int debug; static bool txsim; static void ene_set_reg_addr(struct ene_device *dev, u16 reg) { outb(reg >> 8, dev->hw_io + ENE_ADDR_HI); outb(reg & 0xFF, dev->hw_io + ENE_ADDR_LO); } /* read a hardware register */ static u8 ene_read_reg(struct ene_device *dev, u16 reg) { u8 retval; ene_set_reg_addr(dev, reg); retval = inb(dev->hw_io + ENE_IO); dbg_regs("reg %04x == %02x", reg, retval); return retval; } /* write a hardware register */ static void ene_write_reg(struct 
ene_device *dev, u16 reg, u8 value) { dbg_regs("reg %04x <- %02x", reg, value); ene_set_reg_addr(dev, reg); outb(value, dev->hw_io + ENE_IO); } /* Set bits in hardware register */ static void ene_set_reg_mask(struct ene_device *dev, u16 reg, u8 mask) { dbg_regs("reg %04x |= %02x", reg, mask); ene_set_reg_addr(dev, reg); outb(inb(dev->hw_io + ENE_IO) | mask, dev->hw_io + ENE_IO); } /* Clear bits in hardware register */ static void ene_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask) { dbg_regs("reg %04x &= ~%02x ", reg, mask); ene_set_reg_addr(dev, reg); outb(inb(dev->hw_io + ENE_IO) & ~mask, dev->hw_io + ENE_IO); } /* A helper to set/clear a bit in register according to boolean variable */ static void ene_set_clear_reg_mask(struct ene_device *dev, u16 reg, u8 mask, bool set) { if (set) ene_set_reg_mask(dev, reg, mask); else ene_clear_reg_mask(dev, reg, mask); } /* detect hardware features */ static int ene_hw_detect(struct ene_device *dev) { u8 chip_major, chip_minor; u8 hw_revision, old_ver; u8 fw_reg2, fw_reg1; ene_clear_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD); chip_major = ene_read_reg(dev, ENE_ECVER_MAJOR); chip_minor = ene_read_reg(dev, ENE_ECVER_MINOR); ene_set_reg_mask(dev, ENE_ECSTS, ENE_ECSTS_RSRVD); hw_revision = ene_read_reg(dev, ENE_ECHV); old_ver = ene_read_reg(dev, ENE_HW_VER_OLD); dev->pll_freq = (ene_read_reg(dev, ENE_PLLFRH) << 4) + (ene_read_reg(dev, ENE_PLLFRL) >> 4); if (sample_period != ENE_DEFAULT_SAMPLE_PERIOD) dev->rx_period_adjust = dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 
2 : 4; if (hw_revision == 0xFF) { pr_warn("device seems to be disabled\n"); pr_warn("send a mail to lirc-list@lists.sourceforge.net\n"); pr_warn("please attach output of acpidump and dmidecode\n"); return -ENODEV; } pr_notice("chip is 0x%02x%02x - kbver = 0x%02x, rev = 0x%02x\n", chip_major, chip_minor, old_ver, hw_revision); pr_notice("PLL freq = %d\n", dev->pll_freq); if (chip_major == 0x33) { pr_warn("chips 0x33xx aren't supported\n"); return -ENODEV; } if (chip_major == 0x39 && chip_minor == 0x26 && hw_revision == 0xC0) { dev->hw_revision = ENE_HW_C; pr_notice("KB3926C detected\n"); } else if (old_ver == 0x24 && hw_revision == 0xC0) { dev->hw_revision = ENE_HW_B; pr_notice("KB3926B detected\n"); } else { dev->hw_revision = ENE_HW_D; pr_notice("KB3926D or higher detected\n"); } /* detect features hardware supports */ if (dev->hw_revision < ENE_HW_C) return 0; fw_reg1 = ene_read_reg(dev, ENE_FW1); fw_reg2 = ene_read_reg(dev, ENE_FW2); pr_notice("Firmware regs: %02x %02x\n", fw_reg1, fw_reg2); dev->hw_use_gpio_0a = !!(fw_reg2 & ENE_FW2_GP0A); dev->hw_learning_and_tx_capable = !!(fw_reg2 & ENE_FW2_LEARNING); dev->hw_extra_buffer = !!(fw_reg1 & ENE_FW1_HAS_EXTRA_BUF); if (dev->hw_learning_and_tx_capable) dev->hw_fan_input = !!(fw_reg2 & ENE_FW2_FAN_INPUT); pr_notice("Hardware features:\n"); if (dev->hw_learning_and_tx_capable) { pr_notice("* Supports transmitting & learning mode\n"); pr_notice(" This feature is rare and therefore,\n"); pr_notice(" you are welcome to test it,\n"); pr_notice(" and/or contact the author via:\n"); pr_notice(" lirc-list@lists.sourceforge.net\n"); pr_notice(" or maximlevitsky@gmail.com\n"); pr_notice("* Uses GPIO %s for IR raw input\n", dev->hw_use_gpio_0a ? "40" : "0A"); if (dev->hw_fan_input) pr_notice("* Uses unused fan feedback input as source of demodulated IR data\n"); } if (!dev->hw_fan_input) pr_notice("* Uses GPIO %s for IR demodulated input\n", dev->hw_use_gpio_0a ? 
"0A" : "40"); if (dev->hw_extra_buffer) pr_notice("* Uses new style input buffer\n"); return 0; } /* Read properities of hw sample buffer */ static void ene_rx_setup_hw_buffer(struct ene_device *dev) { u16 tmp; ene_rx_read_hw_pointer(dev); dev->r_pointer = dev->w_pointer; if (!dev->hw_extra_buffer) { dev->buffer_len = ENE_FW_PACKET_SIZE * 2; return; } tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER); tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER+1) << 8; dev->extra_buf1_address = tmp; dev->extra_buf1_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 2); tmp = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 3); tmp |= ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 4) << 8; dev->extra_buf2_address = tmp; dev->extra_buf2_len = ene_read_reg(dev, ENE_FW_SAMPLE_BUFFER + 5); dev->buffer_len = dev->extra_buf1_len + dev->extra_buf2_len + 8; pr_notice("Hardware uses 2 extended buffers:\n"); pr_notice(" 0x%04x - len : %d\n", dev->extra_buf1_address, dev->extra_buf1_len); pr_notice(" 0x%04x - len : %d\n", dev->extra_buf2_address, dev->extra_buf2_len); pr_notice("Total buffer len = %d\n", dev->buffer_len); if (dev->buffer_len > 64 || dev->buffer_len < 16) goto error; if (dev->extra_buf1_address > 0xFBFC || dev->extra_buf1_address < 0xEC00) goto error; if (dev->extra_buf2_address > 0xFBFC || dev->extra_buf2_address < 0xEC00) goto error; if (dev->r_pointer > dev->buffer_len) goto error; ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); return; error: pr_warn("Error validating extra buffers, device probably won't work\n"); dev->hw_extra_buffer = false; ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); } /* Restore the pointers to extra buffers - to make module reload work*/ static void ene_rx_restore_hw_buffer(struct ene_device *dev) { if (!dev->hw_extra_buffer) return; ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 0, dev->extra_buf1_address & 0xFF); ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 1, dev->extra_buf1_address >> 8); ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 2, dev->extra_buf1_len); 
ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 3, dev->extra_buf2_address & 0xFF); ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 4, dev->extra_buf2_address >> 8); ene_write_reg(dev, ENE_FW_SAMPLE_BUFFER + 5, dev->extra_buf2_len); ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_EXTRA_BUF_HND); } /* Read hardware write pointer */ static void ene_rx_read_hw_pointer(struct ene_device *dev) { if (dev->hw_extra_buffer) dev->w_pointer = ene_read_reg(dev, ENE_FW_RX_POINTER); else dev->w_pointer = ene_read_reg(dev, ENE_FW2) & ENE_FW2_BUF_WPTR ? 0 : ENE_FW_PACKET_SIZE; dbg_verbose("RB: HW write pointer: %02x, driver read pointer: %02x", dev->w_pointer, dev->r_pointer); } /* Gets address of next sample from HW ring buffer */ static int ene_rx_get_sample_reg(struct ene_device *dev) { int r_pointer; if (dev->r_pointer == dev->w_pointer) { dbg_verbose("RB: hit end, try update w_pointer"); ene_rx_read_hw_pointer(dev); } if (dev->r_pointer == dev->w_pointer) { dbg_verbose("RB: end of data at %d", dev->r_pointer); return 0; } dbg_verbose("RB: reading at offset %d", dev->r_pointer); r_pointer = dev->r_pointer; dev->r_pointer++; if (dev->r_pointer == dev->buffer_len) dev->r_pointer = 0; dbg_verbose("RB: next read will be from offset %d", dev->r_pointer); if (r_pointer < 8) { dbg_verbose("RB: read at main buffer at %d", r_pointer); return ENE_FW_SAMPLE_BUFFER + r_pointer; } r_pointer -= 8; if (r_pointer < dev->extra_buf1_len) { dbg_verbose("RB: read at 1st extra buffer at %d", r_pointer); return dev->extra_buf1_address + r_pointer; } r_pointer -= dev->extra_buf1_len; if (r_pointer < dev->extra_buf2_len) { dbg_verbose("RB: read at 2nd extra buffer at %d", r_pointer); return dev->extra_buf2_address + r_pointer; } dbg("attempt to read beyond ring buffer end"); return 0; } /* Sense current received carrier */ void ene_rx_sense_carrier(struct ene_device *dev) { DEFINE_IR_RAW_EVENT(ev); int carrier, duty_cycle; int period = ene_read_reg(dev, ENE_CIRCAR_PRD); int hperiod = ene_read_reg(dev, 
ENE_CIRCAR_HPRD); if (!(period & ENE_CIRCAR_PRD_VALID)) return; period &= ~ENE_CIRCAR_PRD_VALID; if (!period) return; dbg("RX: hardware carrier period = %02x", period); dbg("RX: hardware carrier pulse period = %02x", hperiod); carrier = 2000000 / period; duty_cycle = (hperiod * 100) / period; dbg("RX: sensed carrier = %d Hz, duty cycle %d%%", carrier, duty_cycle); if (dev->carrier_detect_enabled) { ev.carrier_report = true; ev.carrier = carrier; ev.duty_cycle = duty_cycle; ir_raw_event_store(dev->rdev, &ev); } } /* this enables/disables the CIR RX engine */ static void ene_rx_enable_cir_engine(struct ene_device *dev, bool enable) { ene_set_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN | ENE_CIRCFG_RX_IRQ, enable); } /* this selects input for CIR engine. Ether GPIO 0A or GPIO40*/ static void ene_rx_select_input(struct ene_device *dev, bool gpio_0a) { ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_GPIO0A, gpio_0a); } /* * this enables alternative input via fan tachometer sensor and bypasses * the hw CIR engine */ static void ene_rx_enable_fan_input(struct ene_device *dev, bool enable) { if (!dev->hw_fan_input) return; if (!enable) ene_write_reg(dev, ENE_FAN_AS_IN1, 0); else { ene_write_reg(dev, ENE_FAN_AS_IN1, ENE_FAN_AS_IN1_EN); ene_write_reg(dev, ENE_FAN_AS_IN2, ENE_FAN_AS_IN2_EN); } } /* setup the receiver for RX*/ static void ene_rx_setup(struct ene_device *dev) { bool learning_mode = dev->learning_mode_enabled || dev->carrier_detect_enabled; int sample_period_adjust = 0; dbg("RX: setup receiver, learning mode = %d", learning_mode); /* This selects RLC input and clears CFG2 settings */ ene_write_reg(dev, ENE_CIRCFG2, 0x00); /* set sample period*/ if (sample_period == ENE_DEFAULT_SAMPLE_PERIOD) sample_period_adjust = dev->pll_freq == ENE_DEFAULT_PLL_FREQ ? 
1 : 2; ene_write_reg(dev, ENE_CIRRLC_CFG, (sample_period + sample_period_adjust) | ENE_CIRRLC_CFG_OVERFLOW); /* revB doesn't support inputs */ if (dev->hw_revision < ENE_HW_C) goto select_timeout; if (learning_mode) { WARN_ON(!dev->hw_learning_and_tx_capable); /* Enable the opposite of the normal input That means that if GPIO40 is normally used, use GPIO0A and vice versa. This input will carry non demodulated signal, and we will tell the hw to demodulate it itself */ ene_rx_select_input(dev, !dev->hw_use_gpio_0a); dev->rx_fan_input_inuse = false; /* Enable carrier demodulation */ ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD); /* Enable carrier detection */ ene_write_reg(dev, ENE_CIRCAR_PULS, 0x63); ene_set_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT, dev->carrier_detect_enabled || debug); } else { if (dev->hw_fan_input) dev->rx_fan_input_inuse = true; else ene_rx_select_input(dev, dev->hw_use_gpio_0a); /* Disable carrier detection & demodulation */ ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_CARR_DEMOD); ene_clear_reg_mask(dev, ENE_CIRCFG2, ENE_CIRCFG2_CARR_DETECT); } select_timeout: if (dev->rx_fan_input_inuse) { dev->rdev->rx_resolution = US_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN); /* Fan input doesn't support timeouts, it just ends the input with a maximum sample */ dev->rdev->min_timeout = dev->rdev->max_timeout = US_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK * ENE_FW_SAMPLE_PERIOD_FAN); } else { dev->rdev->rx_resolution = US_TO_NS(sample_period); /* Theoreticly timeout is unlimited, but we cap it * because it was seen that on one device, it * would stop sending spaces after around 250 msec. * Besides, this is close to 2^32 anyway and timeout is u32. 
*/ dev->rdev->min_timeout = US_TO_NS(127 * sample_period); dev->rdev->max_timeout = US_TO_NS(200000); } if (dev->hw_learning_and_tx_capable) dev->rdev->tx_resolution = US_TO_NS(sample_period); if (dev->rdev->timeout > dev->rdev->max_timeout) dev->rdev->timeout = dev->rdev->max_timeout; if (dev->rdev->timeout < dev->rdev->min_timeout) dev->rdev->timeout = dev->rdev->min_timeout; } /* Enable the device for receive */ static void ene_rx_enable(struct ene_device *dev) { u8 reg_value; /* Enable system interrupt */ if (dev->hw_revision < ENE_HW_C) { ene_write_reg(dev, ENEB_IRQ, dev->irq << 1); ene_write_reg(dev, ENEB_IRQ_UNK1, 0x01); } else { reg_value = ene_read_reg(dev, ENE_IRQ) & 0xF0; reg_value |= ENE_IRQ_UNK_EN; reg_value &= ~ENE_IRQ_STATUS; reg_value |= (dev->irq & ENE_IRQ_MASK); ene_write_reg(dev, ENE_IRQ, reg_value); } /* Enable inputs */ ene_rx_enable_fan_input(dev, dev->rx_fan_input_inuse); ene_rx_enable_cir_engine(dev, !dev->rx_fan_input_inuse); /* ack any pending irqs - just in case */ ene_irq_status(dev); /* enable firmware bits */ ene_set_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ); /* enter idle mode */ ir_raw_event_set_idle(dev->rdev, true); dev->rx_enabled = true; } /* Disable the device receiver */ static void ene_rx_disable(struct ene_device *dev) { /* disable inputs */ ene_rx_enable_cir_engine(dev, false); ene_rx_enable_fan_input(dev, false); /* disable hardware IRQ and firmware flag */ ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ); ir_raw_event_set_idle(dev->rdev, true); dev->rx_enabled = false; } /* This resets the receiver. 
Useful to stop stream of spaces at end of * transmission */ static void ene_rx_reset(struct ene_device *dev) { ene_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN); ene_set_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_RX_EN); } /* Set up the TX carrier frequency and duty cycle */ static void ene_tx_set_carrier(struct ene_device *dev) { u8 tx_puls_width; unsigned long flags; spin_lock_irqsave(&dev->hw_lock, flags); ene_set_clear_reg_mask(dev, ENE_CIRCFG, ENE_CIRCFG_TX_CARR, dev->tx_period > 0); if (!dev->tx_period) goto unlock; BUG_ON(dev->tx_duty_cycle >= 100 || dev->tx_duty_cycle <= 0); tx_puls_width = dev->tx_period / (100 / dev->tx_duty_cycle); if (!tx_puls_width) tx_puls_width = 1; dbg("TX: pulse distance = %d * 500 ns", dev->tx_period); dbg("TX: pulse width = %d * 500 ns", tx_puls_width); ene_write_reg(dev, ENE_CIRMOD_PRD, dev->tx_period | ENE_CIRMOD_PRD_POL); ene_write_reg(dev, ENE_CIRMOD_HPRD, tx_puls_width); unlock: spin_unlock_irqrestore(&dev->hw_lock, flags); } /* Enable/disable transmitters */ static void ene_tx_set_transmitters(struct ene_device *dev) { unsigned long flags; spin_lock_irqsave(&dev->hw_lock, flags); ene_set_clear_reg_mask(dev, ENE_GPIOFS8, ENE_GPIOFS8_GPIO41, !!(dev->transmitter_mask & 0x01)); ene_set_clear_reg_mask(dev, ENE_GPIOFS1, ENE_GPIOFS1_GPIO0D, !!(dev->transmitter_mask & 0x02)); spin_unlock_irqrestore(&dev->hw_lock, flags); } /* prepare transmission */ static void ene_tx_enable(struct ene_device *dev) { u8 conf1 = ene_read_reg(dev, ENE_CIRCFG); u8 fwreg2 = ene_read_reg(dev, ENE_FW2); dev->saved_conf1 = conf1; /* Show information about currently connected transmitter jacks */ if (fwreg2 & ENE_FW2_EMMITER1_CONN) dbg("TX: Transmitter #1 is connected"); if (fwreg2 & ENE_FW2_EMMITER2_CONN) dbg("TX: Transmitter #2 is connected"); if (!(fwreg2 & (ENE_FW2_EMMITER1_CONN | ENE_FW2_EMMITER2_CONN))) pr_warn("TX: transmitter cable isn't connected!\n"); /* disable receive on revc */ if (dev->hw_revision == ENE_HW_C) conf1 &= ~ENE_CIRCFG_RX_EN; /* 
Enable TX engine */ conf1 |= ENE_CIRCFG_TX_EN | ENE_CIRCFG_TX_IRQ; ene_write_reg(dev, ENE_CIRCFG, conf1); } /* end transmission */ static void ene_tx_disable(struct ene_device *dev) { ene_write_reg(dev, ENE_CIRCFG, dev->saved_conf1); dev->tx_buffer = NULL; } /* TX one sample - must be called with dev->hw_lock*/ static void ene_tx_sample(struct ene_device *dev) { u8 raw_tx; u32 sample; bool pulse = dev->tx_sample_pulse; if (!dev->tx_buffer) { pr_warn("TX: BUG: attempt to transmit NULL buffer\n"); return; } /* Grab next TX sample */ if (!dev->tx_sample) { if (dev->tx_pos == dev->tx_len) { if (!dev->tx_done) { dbg("TX: no more data to send"); dev->tx_done = true; goto exit; } else { dbg("TX: last sample sent by hardware"); ene_tx_disable(dev); complete(&dev->tx_complete); return; } } sample = dev->tx_buffer[dev->tx_pos++]; dev->tx_sample_pulse = !dev->tx_sample_pulse; dev->tx_sample = DIV_ROUND_CLOSEST(sample, sample_period); if (!dev->tx_sample) dev->tx_sample = 1; } raw_tx = min(dev->tx_sample , (unsigned int)ENE_CIRRLC_OUT_MASK); dev->tx_sample -= raw_tx; dbg("TX: sample %8d (%s)", raw_tx * sample_period, pulse ? "pulse" : "space"); if (pulse) raw_tx |= ENE_CIRRLC_OUT_PULSE; ene_write_reg(dev, dev->tx_reg ? 
ENE_CIRRLC_OUT1 : ENE_CIRRLC_OUT0, raw_tx); dev->tx_reg = !dev->tx_reg; exit: /* simulate TX done interrupt */ if (txsim) mod_timer(&dev->tx_sim_timer, jiffies + HZ / 500); } /* timer to simulate tx done interrupt */ static void ene_tx_irqsim(unsigned long data) { struct ene_device *dev = (struct ene_device *)data; unsigned long flags; spin_lock_irqsave(&dev->hw_lock, flags); ene_tx_sample(dev); spin_unlock_irqrestore(&dev->hw_lock, flags); } /* read irq status and ack it */ static int ene_irq_status(struct ene_device *dev) { u8 irq_status; u8 fw_flags1, fw_flags2; int retval = 0; fw_flags2 = ene_read_reg(dev, ENE_FW2); if (dev->hw_revision < ENE_HW_C) { irq_status = ene_read_reg(dev, ENEB_IRQ_STATUS); if (!(irq_status & ENEB_IRQ_STATUS_IR)) return 0; ene_clear_reg_mask(dev, ENEB_IRQ_STATUS, ENEB_IRQ_STATUS_IR); return ENE_IRQ_RX; } irq_status = ene_read_reg(dev, ENE_IRQ); if (!(irq_status & ENE_IRQ_STATUS)) return 0; /* original driver does that twice - a workaround ? */ ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS); ene_write_reg(dev, ENE_IRQ, irq_status & ~ENE_IRQ_STATUS); /* check RX interrupt */ if (fw_flags2 & ENE_FW2_RXIRQ) { retval |= ENE_IRQ_RX; ene_write_reg(dev, ENE_FW2, fw_flags2 & ~ENE_FW2_RXIRQ); } /* check TX interrupt */ fw_flags1 = ene_read_reg(dev, ENE_FW1); if (fw_flags1 & ENE_FW1_TXIRQ) { ene_write_reg(dev, ENE_FW1, fw_flags1 & ~ENE_FW1_TXIRQ); retval |= ENE_IRQ_TX; } return retval; } /* interrupt handler */ static irqreturn_t ene_isr(int irq, void *data) { u16 hw_value, reg; int hw_sample, irq_status; bool pulse; unsigned long flags; irqreturn_t retval = IRQ_NONE; struct ene_device *dev = (struct ene_device *)data; DEFINE_IR_RAW_EVENT(ev); spin_lock_irqsave(&dev->hw_lock, flags); dbg_verbose("ISR called"); ene_rx_read_hw_pointer(dev); irq_status = ene_irq_status(dev); if (!irq_status) goto unlock; retval = IRQ_HANDLED; if (irq_status & ENE_IRQ_TX) { dbg_verbose("TX interrupt"); if (!dev->hw_learning_and_tx_capable) { dbg("TX 
interrupt on unsupported device!"); goto unlock; } ene_tx_sample(dev); } if (!(irq_status & ENE_IRQ_RX)) goto unlock; dbg_verbose("RX interrupt"); if (dev->hw_learning_and_tx_capable) ene_rx_sense_carrier(dev); /* On hardware that don't support extra buffer we need to trust the interrupt and not track the read pointer */ if (!dev->hw_extra_buffer) dev->r_pointer = dev->w_pointer == 0 ? ENE_FW_PACKET_SIZE : 0; while (1) { reg = ene_rx_get_sample_reg(dev); dbg_verbose("next sample to read at: %04x", reg); if (!reg) break; hw_value = ene_read_reg(dev, reg); if (dev->rx_fan_input_inuse) { int offset = ENE_FW_SMPL_BUF_FAN - ENE_FW_SAMPLE_BUFFER; /* read high part of the sample */ hw_value |= ene_read_reg(dev, reg + offset) << 8; pulse = hw_value & ENE_FW_SMPL_BUF_FAN_PLS; /* clear space bit, and other unused bits */ hw_value &= ENE_FW_SMPL_BUF_FAN_MSK; hw_sample = hw_value * ENE_FW_SAMPLE_PERIOD_FAN; } else { pulse = !(hw_value & ENE_FW_SAMPLE_SPACE); hw_value &= ~ENE_FW_SAMPLE_SPACE; hw_sample = hw_value * sample_period; if (dev->rx_period_adjust) { hw_sample *= 100; hw_sample /= (100 + dev->rx_period_adjust); } } if (!dev->hw_extra_buffer && !hw_sample) { dev->r_pointer = dev->w_pointer; continue; } dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space"); ev.duration = US_TO_NS(hw_sample); ev.pulse = pulse; ir_raw_event_store_with_filter(dev->rdev, &ev); } ir_raw_event_handle(dev->rdev); unlock: spin_unlock_irqrestore(&dev->hw_lock, flags); return retval; } /* Initialize default settings */ static void ene_setup_default_settings(struct ene_device *dev) { dev->tx_period = 32; dev->tx_duty_cycle = 50; /*%*/ dev->transmitter_mask = 0x03; dev->learning_mode_enabled = learning_mode_force; /* Set reasonable default timeout */ dev->rdev->timeout = US_TO_NS(150000); } /* Upload all hardware settings at once. 
Used at load and resume time */ static void ene_setup_hw_settings(struct ene_device *dev) { if (dev->hw_learning_and_tx_capable) { ene_tx_set_carrier(dev); ene_tx_set_transmitters(dev); } ene_rx_setup(dev); } /* outside interface: called on first open*/ static int ene_open(struct rc_dev *rdev) { struct ene_device *dev = rdev->priv; unsigned long flags; spin_lock_irqsave(&dev->hw_lock, flags); ene_rx_enable(dev); spin_unlock_irqrestore(&dev->hw_lock, flags); return 0; } /* outside interface: called on device close*/ static void ene_close(struct rc_dev *rdev) { struct ene_device *dev = rdev->priv; unsigned long flags; spin_lock_irqsave(&dev->hw_lock, flags); ene_rx_disable(dev); spin_unlock_irqrestore(&dev->hw_lock, flags); } /* outside interface: set transmitter mask */ static int ene_set_tx_mask(struct rc_dev *rdev, u32 tx_mask) { struct ene_device *dev = rdev->priv; dbg("TX: attempt to set transmitter mask %02x", tx_mask); /* invalid txmask */ if (!tx_mask || tx_mask & ~0x03) { dbg("TX: invalid mask"); /* return count of transmitters */ return 2; } dev->transmitter_mask = tx_mask; ene_tx_set_transmitters(dev); return 0; } /* outside interface : set tx carrier */ static int ene_set_tx_carrier(struct rc_dev *rdev, u32 carrier) { struct ene_device *dev = rdev->priv; u32 period = 2000000 / carrier; dbg("TX: attempt to set tx carrier to %d kHz", carrier); if (period && (period > ENE_CIRMOD_PRD_MAX || period < ENE_CIRMOD_PRD_MIN)) { dbg("TX: out of range %d-%d kHz carrier", 2000 / ENE_CIRMOD_PRD_MIN, 2000 / ENE_CIRMOD_PRD_MAX); return -1; } dev->tx_period = period; ene_tx_set_carrier(dev); return 0; } /*outside interface : set tx duty cycle */ static int ene_set_tx_duty_cycle(struct rc_dev *rdev, u32 duty_cycle) { struct ene_device *dev = rdev->priv; dbg("TX: setting duty cycle to %d%%", duty_cycle); dev->tx_duty_cycle = duty_cycle; ene_tx_set_carrier(dev); return 0; } /* outside interface: enable learning mode */ static int ene_set_learning_mode(struct rc_dev *rdev, 
int enable) { struct ene_device *dev = rdev->priv; unsigned long flags; if (enable == dev->learning_mode_enabled) return 0; spin_lock_irqsave(&dev->hw_lock, flags); dev->learning_mode_enabled = enable; ene_rx_disable(dev); ene_rx_setup(dev); ene_rx_enable(dev); spin_unlock_irqrestore(&dev->hw_lock, flags); return 0; } static int ene_set_carrier_report(struct rc_dev *rdev, int enable) { struct ene_device *dev = rdev->priv; unsigned long flags; if (enable == dev->carrier_detect_enabled) return 0; spin_lock_irqsave(&dev->hw_lock, flags); dev->carrier_detect_enabled = enable; ene_rx_disable(dev); ene_rx_setup(dev); ene_rx_enable(dev); spin_unlock_irqrestore(&dev->hw_lock, flags); return 0; } /* outside interface: enable or disable idle mode */ static void ene_set_idle(struct rc_dev *rdev, bool idle) { struct ene_device *dev = rdev->priv; if (idle) { ene_rx_reset(dev); dbg("RX: end of data"); } } /* outside interface: transmit */ static int ene_transmit(struct rc_dev *rdev, unsigned *buf, unsigned n) { struct ene_device *dev = rdev->priv; unsigned long flags; dev->tx_buffer = buf; dev->tx_len = n; dev->tx_pos = 0; dev->tx_reg = 0; dev->tx_done = 0; dev->tx_sample = 0; dev->tx_sample_pulse = 0; dbg("TX: %d samples", dev->tx_len); spin_lock_irqsave(&dev->hw_lock, flags); ene_tx_enable(dev); /* Transmit first two samples */ ene_tx_sample(dev); ene_tx_sample(dev); spin_unlock_irqrestore(&dev->hw_lock, flags); if (wait_for_completion_timeout(&dev->tx_complete, 2 * HZ) == 0) { dbg("TX: timeout"); spin_lock_irqsave(&dev->hw_lock, flags); ene_tx_disable(dev); spin_unlock_irqrestore(&dev->hw_lock, flags); } else dbg("TX: done"); return n; } /* probe entry */ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id) { int error = -ENOMEM; struct rc_dev *rdev; struct ene_device *dev; /* allocate memory */ dev = kzalloc(sizeof(struct ene_device), GFP_KERNEL); rdev = rc_allocate_device(); if (!dev || !rdev) goto error1; /* validate resources */ error = -ENODEV; 
/* init these to -1, as 0 is valid for both */ dev->hw_io = -1; dev->irq = -1; if (!pnp_port_valid(pnp_dev, 0) || pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE) goto error; if (!pnp_irq_valid(pnp_dev, 0)) goto error; spin_lock_init(&dev->hw_lock); pnp_set_drvdata(pnp_dev, dev); dev->pnp_dev = pnp_dev; /* don't allow too short/long sample periods */ if (sample_period < 5 || sample_period > 0x7F) sample_period = ENE_DEFAULT_SAMPLE_PERIOD; /* detect hardware version and features */ error = ene_hw_detect(dev); if (error) goto error; if (!dev->hw_learning_and_tx_capable && txsim) { dev->hw_learning_and_tx_capable = true; setup_timer(&dev->tx_sim_timer, ene_tx_irqsim, (long unsigned int)dev); pr_warn("Simulation of TX activated\n"); } if (!dev->hw_learning_and_tx_capable) learning_mode_force = false; rdev->driver_type = RC_DRIVER_IR_RAW; rdev->allowed_protos = RC_TYPE_ALL; rdev->priv = dev; rdev->open = ene_open; rdev->close = ene_close; rdev->s_idle = ene_set_idle; rdev->driver_name = ENE_DRIVER_NAME; rdev->map_name = RC_MAP_RC6_MCE; rdev->input_name = "ENE eHome Infrared Remote Receiver"; if (dev->hw_learning_and_tx_capable) { rdev->s_learning_mode = ene_set_learning_mode; init_completion(&dev->tx_complete); rdev->tx_ir = ene_transmit; rdev->s_tx_mask = ene_set_tx_mask; rdev->s_tx_carrier = ene_set_tx_carrier; rdev->s_tx_duty_cycle = ene_set_tx_duty_cycle; rdev->s_carrier_report = ene_set_carrier_report; rdev->input_name = "ENE eHome Infrared Remote Transceiver"; } dev->rdev = rdev; ene_rx_setup_hw_buffer(dev); ene_setup_default_settings(dev); ene_setup_hw_settings(dev); device_set_wakeup_capable(&pnp_dev->dev, true); device_set_wakeup_enable(&pnp_dev->dev, true); /* claim the resources */ error = -EBUSY; dev->hw_io = pnp_port_start(pnp_dev, 0); if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) { dev->hw_io = -1; dev->irq = -1; goto error; } dev->irq = pnp_irq(pnp_dev, 0); if (request_irq(dev->irq, ene_isr, IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) { dev->irq = 
-1; goto error; } error = rc_register_device(rdev); if (error < 0) goto error; pr_notice("driver has been successfully loaded\n"); return 0; error: if (dev && dev->irq >= 0) free_irq(dev->irq, dev); if (dev && dev->hw_io >= 0) release_region(dev->hw_io, ENE_IO_SIZE); error1: rc_free_device(rdev); kfree(dev); return error; } /* main unload function */ static void ene_remove(struct pnp_dev *pnp_dev) { struct ene_device *dev = pnp_get_drvdata(pnp_dev); unsigned long flags; spin_lock_irqsave(&dev->hw_lock, flags); ene_rx_disable(dev); ene_rx_restore_hw_buffer(dev); spin_unlock_irqrestore(&dev->hw_lock, flags); free_irq(dev->irq, dev); release_region(dev->hw_io, ENE_IO_SIZE); rc_unregister_device(dev->rdev); kfree(dev); } /* enable wake on IR (wakes on specific button on original remote) */ static void ene_enable_wake(struct ene_device *dev, int enable) { enable = enable && device_may_wakeup(&dev->pnp_dev->dev); dbg("wake on IR %s", enable ? "enabled" : "disabled"); ene_set_clear_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, enable); } #ifdef CONFIG_PM static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state) { struct ene_device *dev = pnp_get_drvdata(pnp_dev); ene_enable_wake(dev, true); /* TODO: add support for wake pattern */ return 0; } static int ene_resume(struct pnp_dev *pnp_dev) { struct ene_device *dev = pnp_get_drvdata(pnp_dev); ene_setup_hw_settings(dev); if (dev->rx_enabled) ene_rx_enable(dev); ene_enable_wake(dev, false); return 0; } #endif static void ene_shutdown(struct pnp_dev *pnp_dev) { struct ene_device *dev = pnp_get_drvdata(pnp_dev); ene_enable_wake(dev, true); } static const struct pnp_device_id ene_ids[] = { {.id = "ENE0100",}, {.id = "ENE0200",}, {.id = "ENE0201",}, {.id = "ENE0202",}, {}, }; static struct pnp_driver ene_driver = { .name = ENE_DRIVER_NAME, .id_table = ene_ids, .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, .probe = ene_probe, .remove = __devexit_p(ene_remove), #ifdef CONFIG_PM .suspend = ene_suspend, .resume = ene_resume, #endif 
.shutdown = ene_shutdown, }; static int __init ene_init(void) { return pnp_register_driver(&ene_driver); } static void ene_exit(void) { pnp_unregister_driver(&ene_driver); } module_param(sample_period, int, S_IRUGO); MODULE_PARM_DESC(sample_period, "Hardware sample period (50 us default)"); module_param(learning_mode_force, bool, S_IRUGO); MODULE_PARM_DESC(learning_mode_force, "Enable learning mode by default"); module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug level"); module_param(txsim, bool, S_IRUGO); MODULE_PARM_DESC(txsim, "Simulate TX features on unsupported hardware (dangerous)"); MODULE_DEVICE_TABLE(pnp, ene_ids); MODULE_DESCRIPTION ("Infrared input driver for KB3926B/C/D/E/F " "(aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port"); MODULE_AUTHOR("Maxim Levitsky"); MODULE_LICENSE("GPL"); module_init(ene_init); module_exit(ene_exit);
gpl-2.0
randomblame/a500_2.6
drivers/block/paride/pg.c
3118
16808
/* pg.c (c) 1998 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License. The pg driver provides a simple character device interface for sending ATAPI commands to a device. With the exception of the ATAPI reset operation, all operations are performed by a pair of read and write operations to the appropriate /dev/pgN device. A write operation delivers a command and any outbound data in a single buffer. Normally, the write will succeed unless the device is offline or malfunctioning, or there is already another command pending. If the write succeeds, it should be followed immediately by a read operation, to obtain any returned data and status information. A read will fail if there is no operation in progress. As a special case, the device can be reset with a write operation, and in this case, no following read is expected, or permitted. There are no ioctl() operations. Any single operation may transfer at most PG_MAX_DATA bytes. Note that the driver must copy the data through an internal buffer. In keeping with all current ATAPI devices, command packets are assumed to be exactly 12 bytes in length. To permit future changes to this interface, the headers in the read and write buffers contain a single character "magic" flag. Currently this flag must be the character "P". By default, the driver will autoprobe for a single parallel port ATAPI device, but if their individual parameters are specified, the driver can handle up to 4 devices. To use this device, you must have the following device special files defined: /dev/pg0 c 97 0 /dev/pg1 c 97 1 /dev/pg2 c 97 2 /dev/pg3 c 97 3 (You'll need to change the 97 to something else if you use the 'major' parameter to install the driver on a different major number.) The behaviour of the pg driver can be altered by setting some parameters from the insmod command line. 
The following parameters are adjustable: drive0 These four arguments can be arrays of drive1 1-6 integers as follows: drive2 drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly> Where, <prt> is the base of the parallel port address for the corresponding drive. (required) <pro> is the protocol number for the adapter that supports this drive. These numbers are logged by 'paride' when the protocol modules are initialised. (0 if not given) <uni> for those adapters that support chained devices, this is the unit selector for the chain of devices on the given port. It should be zero for devices that don't support chaining. (0 if not given) <mod> this can be -1 to choose the best mode, or one of the mode numbers supported by the adapter. (-1 if not given) <slv> ATAPI devices can be jumpered to master or slave. Set this to 0 to choose the master drive, 1 to choose the slave, -1 (the default) to choose the first drive found. <dly> some parallel ports require the driver to go more slowly. -1 sets a default value that should work with the chosen protocol. Otherwise, set this to a small integer, the larger it is the slower the port i/o. In some cases, setting this to zero will speed up the device. (default -1) major You may use this parameter to overide the default major number (97) that this driver will use. Be sure to change the device name as well. name This parameter is a character string that contains the name the kernel will use for this device (in /proc output, for instance). (default "pg"). verbose This parameter controls the amount of logging that is done by the driver. Set it to 0 for quiet operation, to 1 to enable progress messages while the driver probes for devices, or to 2 for full debug logging. 
(default 0) If this driver is built into the kernel, you can use the following command line parameters, with the same values as the corresponding module parameters listed above: pg.drive0 pg.drive1 pg.drive2 pg.drive3 In addition, you can use the parameter pg.disable to disable the driver entirely. */ /* Changes: 1.01 GRG 1998.06.16 Bug fixes 1.02 GRG 1998.09.24 Added jumbo support */ #define PG_VERSION "1.02" #define PG_MAJOR 97 #define PG_NAME "pg" #define PG_UNITS 4 #ifndef PI_PG #define PI_PG 4 #endif /* Here are things one can override from the insmod command. Most are autoprobed by paride unless set here. Verbose is 0 by default. */ static int verbose = 0; static int major = PG_MAJOR; static char *name = PG_NAME; static int disable = 0; static int drive0[6] = { 0, 0, 0, -1, -1, -1 }; static int drive1[6] = { 0, 0, 0, -1, -1, -1 }; static int drive2[6] = { 0, 0, 0, -1, -1, -1 }; static int drive3[6] = { 0, 0, 0, -1, -1, -1 }; static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3}; static int pg_drive_count; enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY}; /* end of parameters */ #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mtio.h> #include <linux/pg.h> #include <linux/device.h> #include <linux/sched.h> /* current, TASK_* */ #include <linux/mutex.h> #include <linux/jiffies.h> #include <asm/uaccess.h> module_param(verbose, bool, 0644); module_param(major, int, 0); module_param(name, charp, 0); module_param_array(drive0, int, NULL, 0); module_param_array(drive1, int, NULL, 0); module_param_array(drive2, int, NULL, 0); module_param_array(drive3, int, NULL, 0); #include "paride.h" #define PG_SPIN_DEL 50 /* spin delay in micro-seconds */ #define PG_SPIN 200 #define PG_TMO HZ #define PG_RESET_TMO 10*HZ #define STAT_ERR 0x01 #define STAT_INDEX 0x02 #define STAT_ECC 0x04 #define STAT_DRQ 0x08 #define STAT_SEEK 0x10 #define STAT_WRERR 0x20 #define STAT_READY 0x40 
#define STAT_BUSY 0x80 #define ATAPI_IDENTIFY 0x12 static DEFINE_MUTEX(pg_mutex); static int pg_open(struct inode *inode, struct file *file); static int pg_release(struct inode *inode, struct file *file); static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos); static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos); static int pg_detect(void); #define PG_NAMELEN 8 struct pg { struct pi_adapter pia; /* interface to paride layer */ struct pi_adapter *pi; int busy; /* write done, read expected */ int start; /* jiffies at command start */ int dlen; /* transfer size requested */ unsigned long timeout; /* timeout requested */ int status; /* last sense key */ int drive; /* drive */ unsigned long access; /* count of active opens ... */ int present; /* device present ? */ char *bufptr; char name[PG_NAMELEN]; /* pg0, pg1, ... */ }; static struct pg devices[PG_UNITS]; static int pg_identify(struct pg *dev, int log); static char pg_scratch[512]; /* scratch block buffer */ static struct class *pg_class; /* kernel glue structures */ static const struct file_operations pg_fops = { .owner = THIS_MODULE, .read = pg_read, .write = pg_write, .open = pg_open, .release = pg_release, .llseek = noop_llseek, }; static void pg_init_units(void) { int unit; pg_drive_count = 0; for (unit = 0; unit < PG_UNITS; unit++) { int *parm = *drives[unit]; struct pg *dev = &devices[unit]; dev->pi = &dev->pia; clear_bit(0, &dev->access); dev->busy = 0; dev->present = 0; dev->bufptr = NULL; dev->drive = parm[D_SLV]; snprintf(dev->name, PG_NAMELEN, "%s%c", name, 'a'+unit); if (parm[D_PRT]) pg_drive_count++; } } static inline int status_reg(struct pg *dev) { return pi_read_regr(dev->pi, 1, 6); } static inline int read_reg(struct pg *dev, int reg) { return pi_read_regr(dev->pi, 0, reg); } static inline void write_reg(struct pg *dev, int reg, int val) { pi_write_regr(dev->pi, 0, reg, val); } static inline u8 DRIVE(struct pg *dev) { 
return 0xa0+0x10*dev->drive; } static void pg_sleep(int cs) { schedule_timeout_interruptible(cs); } static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg) { int j, r, e, s, p, to; dev->status = 0; j = 0; while ((((r = status_reg(dev)) & go) || (stop && (!(r & stop)))) && time_before(jiffies, tmo)) { if (j++ < PG_SPIN) udelay(PG_SPIN_DEL); else pg_sleep(1); } to = time_after_eq(jiffies, tmo); if ((r & (STAT_ERR & stop)) || to) { s = read_reg(dev, 7); e = read_reg(dev, 1); p = read_reg(dev, 2); if (verbose > 1) printk("%s: %s: stat=0x%x err=0x%x phase=%d%s\n", dev->name, msg, s, e, p, to ? " timeout" : ""); if (to) e |= 0x100; dev->status = (e >> 4) & 0xff; return -1; } return 0; } static int pg_command(struct pg *dev, char *cmd, int dlen, unsigned long tmo) { int k; pi_connect(dev->pi); write_reg(dev, 6, DRIVE(dev)); if (pg_wait(dev, STAT_BUSY | STAT_DRQ, 0, tmo, "before command")) goto fail; write_reg(dev, 4, dlen % 256); write_reg(dev, 5, dlen / 256); write_reg(dev, 7, 0xa0); /* ATAPI packet command */ if (pg_wait(dev, STAT_BUSY, STAT_DRQ, tmo, "command DRQ")) goto fail; if (read_reg(dev, 2) != 1) { printk("%s: command phase error\n", dev->name); goto fail; } pi_write_block(dev->pi, cmd, 12); if (verbose > 1) { printk("%s: Command sent, dlen=%d packet= ", dev->name, dlen); for (k = 0; k < 12; k++) printk("%02x ", cmd[k] & 0xff); printk("\n"); } return 0; fail: pi_disconnect(dev->pi); return -1; } static int pg_completion(struct pg *dev, char *buf, unsigned long tmo) { int r, d, n, p; r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR, tmo, "completion"); dev->dlen = 0; while (read_reg(dev, 7) & STAT_DRQ) { d = (read_reg(dev, 4) + 256 * read_reg(dev, 5)); n = ((d + 3) & 0xfffc); p = read_reg(dev, 2) & 3; if (p == 0) pi_write_block(dev->pi, buf, n); if (p == 2) pi_read_block(dev->pi, buf, n); if (verbose > 1) printk("%s: %s %d bytes\n", dev->name, p ? 
"Read" : "Write", n); dev->dlen += (1 - p) * d; buf += d; r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR, tmo, "completion"); } pi_disconnect(dev->pi); return r; } static int pg_reset(struct pg *dev) { int i, k, err; int expect[5] = { 1, 1, 1, 0x14, 0xeb }; int got[5]; pi_connect(dev->pi); write_reg(dev, 6, DRIVE(dev)); write_reg(dev, 7, 8); pg_sleep(20 * HZ / 1000); k = 0; while ((k++ < PG_RESET_TMO) && (status_reg(dev) & STAT_BUSY)) pg_sleep(1); for (i = 0; i < 5; i++) got[i] = read_reg(dev, i + 1); err = memcmp(expect, got, sizeof(got)) ? -1 : 0; if (verbose) { printk("%s: Reset (%d) signature = ", dev->name, k); for (i = 0; i < 5; i++) printk("%3x", got[i]); if (err) printk(" (incorrect)"); printk("\n"); } pi_disconnect(dev->pi); return err; } static void xs(char *buf, char *targ, int len) { char l = '\0'; int k; for (k = 0; k < len; k++) { char c = *buf++; if (c != ' ' && c != l) l = *targ++ = c; } if (l == ' ') targ--; *targ = '\0'; } static int pg_identify(struct pg *dev, int log) { int s; char *ms[2] = { "master", "slave" }; char mf[10], id[18]; char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 }; char buf[36]; s = pg_command(dev, id_cmd, 36, jiffies + PG_TMO); if (s) return -1; s = pg_completion(dev, buf, jiffies + PG_TMO); if (s) return -1; if (log) { xs(buf + 8, mf, 8); xs(buf + 16, id, 16); printk("%s: %s %s, %s\n", dev->name, mf, id, ms[dev->drive]); } return 0; } /* * returns 0, with id set if drive is detected * -1, if drive detection failed */ static int pg_probe(struct pg *dev) { if (dev->drive == -1) { for (dev->drive = 0; dev->drive <= 1; dev->drive++) if (!pg_reset(dev)) return pg_identify(dev, 1); } else { if (!pg_reset(dev)) return pg_identify(dev, 1); } return -1; } static int pg_detect(void) { struct pg *dev = &devices[0]; int k, unit; printk("%s: %s version %s, major %d\n", name, name, PG_VERSION, major); k = 0; if (pg_drive_count == 0) { if (pi_init(dev->pi, 1, -1, -1, -1, -1, -1, pg_scratch, PI_PG, 
verbose, dev->name)) { if (!pg_probe(dev)) { dev->present = 1; k++; } else pi_release(dev->pi); } } else for (unit = 0; unit < PG_UNITS; unit++, dev++) { int *parm = *drives[unit]; if (!parm[D_PRT]) continue; if (pi_init(dev->pi, 0, parm[D_PRT], parm[D_MOD], parm[D_UNI], parm[D_PRO], parm[D_DLY], pg_scratch, PI_PG, verbose, dev->name)) { if (!pg_probe(dev)) { dev->present = 1; k++; } else pi_release(dev->pi); } } if (k) return 0; printk("%s: No ATAPI device detected\n", name); return -1; } static int pg_open(struct inode *inode, struct file *file) { int unit = iminor(inode) & 0x7f; struct pg *dev = &devices[unit]; int ret = 0; mutex_lock(&pg_mutex); if ((unit >= PG_UNITS) || (!dev->present)) { ret = -ENODEV; goto out; } if (test_and_set_bit(0, &dev->access)) { ret = -EBUSY; goto out; } if (dev->busy) { pg_reset(dev); dev->busy = 0; } pg_identify(dev, (verbose > 1)); dev->bufptr = kmalloc(PG_MAX_DATA, GFP_KERNEL); if (dev->bufptr == NULL) { clear_bit(0, &dev->access); printk("%s: buffer allocation failed\n", dev->name); ret = -ENOMEM; goto out; } file->private_data = dev; out: mutex_unlock(&pg_mutex); return ret; } static int pg_release(struct inode *inode, struct file *file) { struct pg *dev = file->private_data; kfree(dev->bufptr); dev->bufptr = NULL; clear_bit(0, &dev->access); return 0; } static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct pg *dev = filp->private_data; struct pg_write_hdr hdr; int hs = sizeof (hdr); if (dev->busy) return -EBUSY; if (count < hs) return -EINVAL; if (copy_from_user(&hdr, buf, hs)) return -EFAULT; if (hdr.magic != PG_MAGIC) return -EINVAL; if (hdr.dlen > PG_MAX_DATA) return -EINVAL; if ((count - hs) > PG_MAX_DATA) return -EINVAL; if (hdr.func == PG_RESET) { if (count != hs) return -EINVAL; if (pg_reset(dev)) return -EIO; return count; } if (hdr.func != PG_COMMAND) return -EINVAL; dev->start = jiffies; dev->timeout = hdr.timeout * HZ + HZ / 2 + jiffies; if (pg_command(dev, 
hdr.packet, hdr.dlen, jiffies + PG_TMO)) { if (dev->status & 0x10) return -ETIME; return -EIO; } dev->busy = 1; if (copy_from_user(dev->bufptr, buf + hs, count - hs)) return -EFAULT; return count; } static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct pg *dev = filp->private_data; struct pg_read_hdr hdr; int hs = sizeof (hdr); int copy; if (!dev->busy) return -EINVAL; if (count < hs) return -EINVAL; dev->busy = 0; if (pg_completion(dev, dev->bufptr, dev->timeout)) if (dev->status & 0x10) return -ETIME; hdr.magic = PG_MAGIC; hdr.dlen = dev->dlen; copy = 0; if (hdr.dlen < 0) { hdr.dlen = -1 * hdr.dlen; copy = hdr.dlen; if (copy > (count - hs)) copy = count - hs; } hdr.duration = (jiffies - dev->start + HZ / 2) / HZ; hdr.scsi = dev->status & 0x0f; if (copy_to_user(buf, &hdr, hs)) return -EFAULT; if (copy > 0) if (copy_to_user(buf + hs, dev->bufptr, copy)) return -EFAULT; return copy + hs; } static int __init pg_init(void) { int unit; int err; if (disable){ err = -EINVAL; goto out; } pg_init_units(); if (pg_detect()) { err = -ENODEV; goto out; } err = register_chrdev(major, name, &pg_fops); if (err < 0) { printk("pg_init: unable to get major number %d\n", major); for (unit = 0; unit < PG_UNITS; unit++) { struct pg *dev = &devices[unit]; if (dev->present) pi_release(dev->pi); } goto out; } major = err; /* In case the user specified `major=0' (dynamic) */ pg_class = class_create(THIS_MODULE, "pg"); if (IS_ERR(pg_class)) { err = PTR_ERR(pg_class); goto out_chrdev; } for (unit = 0; unit < PG_UNITS; unit++) { struct pg *dev = &devices[unit]; if (dev->present) device_create(pg_class, NULL, MKDEV(major, unit), NULL, "pg%u", unit); } err = 0; goto out; out_chrdev: unregister_chrdev(major, "pg"); out: return err; } static void __exit pg_exit(void) { int unit; for (unit = 0; unit < PG_UNITS; unit++) { struct pg *dev = &devices[unit]; if (dev->present) device_destroy(pg_class, MKDEV(major, unit)); } class_destroy(pg_class); 
unregister_chrdev(major, name); for (unit = 0; unit < PG_UNITS; unit++) { struct pg *dev = &devices[unit]; if (dev->present) pi_release(dev->pi); } } MODULE_LICENSE("GPL"); module_init(pg_init) module_exit(pg_exit)
gpl-2.0
barome/AK-onePone
drivers/video/msm/lcdc_nt35582_wvga.c
3630
13582
/* Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/delay.h> #include <linux/module.h> #ifdef CONFIG_SPI_QUP #include <linux/spi/spi.h> #endif #include <mach/gpio.h> #include <mach/pmic.h> #include "msm_fb.h" #define LCDC_NT35582_PANEL_NAME "lcdc_nt35582_wvga" #define WRITE_FIRST_TRANS 0x20 #define WRITE_SECOND_TRANS 0x00 #define WRITE_THIRD_TRANS 0x40 #define READ_FIRST_TRANS 0x20 #define READ_SECOND_TRANS 0x00 #define READ_THIRD_TRANS 0xC0 #ifdef CONFIG_SPI_QUP #define LCDC_NT35582_SPI_DEVICE_NAME "lcdc_nt35582_spi" static struct spi_device *spi_client; #endif struct nt35582_state_type { boolean display_on; int bl_level; }; static struct nt35582_state_type nt35582_state = { 0 }; static int gpio_backlight_en; static struct msm_panel_common_pdata *lcdc_nt35582_pdata; static int spi_write_2bytes(struct spi_device *spi, unsigned char reg_high_addr, unsigned char reg_low_addr) { char tx_buf[4]; int rc; struct spi_message m; struct spi_transfer t; memset(&t, 0, sizeof t); t.tx_buf = tx_buf; spi_setup(spi); spi_message_init(&m); spi_message_add_tail(&t, &m); tx_buf[0] = WRITE_FIRST_TRANS; tx_buf[1] = reg_high_addr; tx_buf[2] = WRITE_SECOND_TRANS; tx_buf[3] = reg_low_addr; t.rx_buf = NULL; t.len = 4; t.bits_per_word = 16; rc = spi_sync(spi, &m); if (rc) pr_err("write spi command failed!\n"); return rc; } static int spi_write_3bytes(struct spi_device *spi, unsigned char reg_high_addr, unsigned char reg_low_addr, unsigned char write_data) { char tx_buf[6]; 
int rc; struct spi_message m; struct spi_transfer t; memset(&t, 0, sizeof t); t.tx_buf = tx_buf; spi_setup(spi); spi_message_init(&m); spi_message_add_tail(&t, &m); tx_buf[0] = WRITE_FIRST_TRANS; tx_buf[1] = reg_high_addr; tx_buf[2] = WRITE_SECOND_TRANS; tx_buf[3] = reg_low_addr; tx_buf[4] = WRITE_THIRD_TRANS; tx_buf[5] = write_data; t.rx_buf = NULL; t.len = 6; t.bits_per_word = 16; rc = spi_sync(spi, &m); if (rc) pr_err("write spi command failed!\n"); return rc; } static int spi_read_bytes(struct spi_device *spi, unsigned char reg_high_addr, unsigned char reg_low_addr, unsigned char *read_value) { char tx_buf[6]; char rx_buf[6]; int rc; struct spi_message m; struct spi_transfer t; memset(&t, 0, sizeof t); t.tx_buf = tx_buf; spi_setup(spi); spi_message_init(&m); spi_message_add_tail(&t, &m); tx_buf[0] = READ_FIRST_TRANS; tx_buf[1] = reg_high_addr; tx_buf[2] = READ_SECOND_TRANS; tx_buf[3] = reg_low_addr; tx_buf[4] = READ_THIRD_TRANS; tx_buf[5] = 0x00; t.rx_buf = rx_buf; t.len = 6; t.bits_per_word = 16; rc = spi_sync(spi, &m); if (rc) pr_err("write spi command failed!\n"); else *read_value = rx_buf[5]; return rc; } static void nt35582_disp_on(void) { uint32 panel_id1 = 0, panel_id2 = 0; if (!nt35582_state.display_on) { /* GVDD setting */ spi_write_3bytes(spi_client, 0xC0, 0x00, 0xC0); spi_write_3bytes(spi_client, 0xC0, 0x01, 0x00); spi_write_3bytes(spi_client, 0xC0, 0x02, 0xC0); spi_write_3bytes(spi_client, 0xC0, 0x03, 0x00); /* Power setting */ spi_write_3bytes(spi_client, 0xC1, 0x00, 0x40); spi_write_3bytes(spi_client, 0xC2, 0x00, 0x21); spi_write_3bytes(spi_client, 0xC2, 0x02, 0x02); /* Gamma setting */ spi_write_3bytes(spi_client, 0xE0, 0x00, 0x0E); spi_write_3bytes(spi_client, 0xE0, 0x01, 0x54); spi_write_3bytes(spi_client, 0xE0, 0x02, 0x63); spi_write_3bytes(spi_client, 0xE0, 0x03, 0x76); spi_write_3bytes(spi_client, 0xE0, 0x04, 0x1F); spi_write_3bytes(spi_client, 0xE0, 0x05, 0x31); spi_write_3bytes(spi_client, 0xE0, 0x06, 0x62); spi_write_3bytes(spi_client, 
0xE0, 0x07, 0x78); spi_write_3bytes(spi_client, 0xE0, 0x08, 0x1F); spi_write_3bytes(spi_client, 0xE0, 0x09, 0x25); spi_write_3bytes(spi_client, 0xE0, 0x0A, 0xB3); spi_write_3bytes(spi_client, 0xE0, 0x0B, 0x17); spi_write_3bytes(spi_client, 0xE0, 0x0C, 0x38); spi_write_3bytes(spi_client, 0xE0, 0x0D, 0x5A); spi_write_3bytes(spi_client, 0xE0, 0x0E, 0xA2); spi_write_3bytes(spi_client, 0xE0, 0x0F, 0xA2); spi_write_3bytes(spi_client, 0xE0, 0x10, 0x24); spi_write_3bytes(spi_client, 0xE0, 0x11, 0x57); spi_write_3bytes(spi_client, 0xE1, 0x00, 0x0E); spi_write_3bytes(spi_client, 0xE1, 0x01, 0x54); spi_write_3bytes(spi_client, 0xE1, 0x02, 0x63); spi_write_3bytes(spi_client, 0xE1, 0x03, 0x76); spi_write_3bytes(spi_client, 0xE1, 0x04, 0x1F); spi_write_3bytes(spi_client, 0xE1, 0x05, 0x31); spi_write_3bytes(spi_client, 0xE1, 0x06, 0X62); spi_write_3bytes(spi_client, 0xE1, 0x07, 0x78); spi_write_3bytes(spi_client, 0xE1, 0x08, 0x1F); spi_write_3bytes(spi_client, 0xE1, 0x09, 0x25); spi_write_3bytes(spi_client, 0xE1, 0x0A, 0xB3); spi_write_3bytes(spi_client, 0xE1, 0x0B, 0x17); spi_write_3bytes(spi_client, 0xE1, 0x0C, 0x38); spi_write_3bytes(spi_client, 0xE1, 0x0D, 0x5A); spi_write_3bytes(spi_client, 0xE1, 0x0E, 0xA2); spi_write_3bytes(spi_client, 0xE1, 0x0F, 0xA2); spi_write_3bytes(spi_client, 0xE1, 0x10, 0x24); spi_write_3bytes(spi_client, 0xE1, 0x11, 0x57); spi_write_3bytes(spi_client, 0xE2, 0x00, 0x0E); spi_write_3bytes(spi_client, 0xE2, 0x01, 0x54); spi_write_3bytes(spi_client, 0xE2, 0x02, 0x63); spi_write_3bytes(spi_client, 0xE2, 0x03, 0x76); spi_write_3bytes(spi_client, 0xE2, 0x04, 0x1F); spi_write_3bytes(spi_client, 0xE2, 0x05, 0x31); spi_write_3bytes(spi_client, 0xE2, 0x06, 0x62); spi_write_3bytes(spi_client, 0xE2, 0x07, 0x78); spi_write_3bytes(spi_client, 0xE2, 0x08, 0x1F); spi_write_3bytes(spi_client, 0xE2, 0x09, 0x25); spi_write_3bytes(spi_client, 0xE2, 0x0A, 0xB3); spi_write_3bytes(spi_client, 0xE2, 0x0B, 0x17); spi_write_3bytes(spi_client, 0xE2, 0x0C, 0x38); 
spi_write_3bytes(spi_client, 0xE2, 0x0D, 0x5A); spi_write_3bytes(spi_client, 0xE2, 0x0E, 0xA2); spi_write_3bytes(spi_client, 0xE2, 0x0F, 0xA2); spi_write_3bytes(spi_client, 0xE2, 0x10, 0x24); spi_write_3bytes(spi_client, 0xE2, 0x11, 0x57); spi_write_3bytes(spi_client, 0xE3, 0x00, 0x0E); spi_write_3bytes(spi_client, 0xE3, 0x01, 0x54); spi_write_3bytes(spi_client, 0xE3, 0x02, 0x63); spi_write_3bytes(spi_client, 0xE3, 0x03, 0x76); spi_write_3bytes(spi_client, 0xE3, 0x04, 0x1F); spi_write_3bytes(spi_client, 0xE3, 0x05, 0x31); spi_write_3bytes(spi_client, 0xE3, 0x06, 0x62); spi_write_3bytes(spi_client, 0xE3, 0x07, 0x78); spi_write_3bytes(spi_client, 0xE3, 0x08, 0x1F); spi_write_3bytes(spi_client, 0xE3, 0x09, 0x25); spi_write_3bytes(spi_client, 0xE3, 0x0A, 0xB3); spi_write_3bytes(spi_client, 0xE3, 0x0B, 0x17); spi_write_3bytes(spi_client, 0xE3, 0x0C, 0x38); spi_write_3bytes(spi_client, 0xE3, 0x0D, 0x5A); spi_write_3bytes(spi_client, 0xE3, 0x0E, 0xA2); spi_write_3bytes(spi_client, 0xE3, 0x0F, 0xA2); spi_write_3bytes(spi_client, 0xE3, 0x10, 0x24); spi_write_3bytes(spi_client, 0xE3, 0x11, 0x57); spi_write_3bytes(spi_client, 0xE4, 0x00, 0x48); spi_write_3bytes(spi_client, 0xE4, 0x01, 0x6B); spi_write_3bytes(spi_client, 0xE4, 0x02, 0x84); spi_write_3bytes(spi_client, 0xE4, 0x03, 0x9B); spi_write_3bytes(spi_client, 0xE4, 0x04, 0x1F); spi_write_3bytes(spi_client, 0xE4, 0x05, 0x31); spi_write_3bytes(spi_client, 0xE4, 0x06, 0x62); spi_write_3bytes(spi_client, 0xE4, 0x07, 0x78); spi_write_3bytes(spi_client, 0xE4, 0x08, 0x1F); spi_write_3bytes(spi_client, 0xE4, 0x09, 0x25); spi_write_3bytes(spi_client, 0xE4, 0x0A, 0xB3); spi_write_3bytes(spi_client, 0xE4, 0x0B, 0x17); spi_write_3bytes(spi_client, 0xE4, 0x0C, 0x38); spi_write_3bytes(spi_client, 0xE4, 0x0D, 0x5A); spi_write_3bytes(spi_client, 0xE4, 0x0E, 0xA2); spi_write_3bytes(spi_client, 0xE4, 0x0F, 0xA2); spi_write_3bytes(spi_client, 0xE4, 0x10, 0x24); spi_write_3bytes(spi_client, 0xE4, 0x11, 0x57); spi_write_3bytes(spi_client, 
0xE5, 0x00, 0x48); spi_write_3bytes(spi_client, 0xE5, 0x01, 0x6B); spi_write_3bytes(spi_client, 0xE5, 0x02, 0x84); spi_write_3bytes(spi_client, 0xE5, 0x03, 0x9B); spi_write_3bytes(spi_client, 0xE5, 0x04, 0x1F); spi_write_3bytes(spi_client, 0xE5, 0x05, 0x31); spi_write_3bytes(spi_client, 0xE5, 0x06, 0x62); spi_write_3bytes(spi_client, 0xE5, 0x07, 0x78); spi_write_3bytes(spi_client, 0xE5, 0x08, 0x1F); spi_write_3bytes(spi_client, 0xE5, 0x09, 0x25); spi_write_3bytes(spi_client, 0xE5, 0x0A, 0xB3); spi_write_3bytes(spi_client, 0xE5, 0x0B, 0x17); spi_write_3bytes(spi_client, 0xE5, 0x0C, 0x38); spi_write_3bytes(spi_client, 0xE5, 0x0D, 0x5A); spi_write_3bytes(spi_client, 0xE5, 0x0E, 0xA2); spi_write_3bytes(spi_client, 0xE5, 0x0F, 0xA2); spi_write_3bytes(spi_client, 0xE5, 0x10, 0x24); spi_write_3bytes(spi_client, 0xE5, 0x11, 0x57); /* Data format setting */ spi_write_3bytes(spi_client, 0x3A, 0x00, 0x70); /* Reverse PCLK signal of LCM to meet Qualcomm's platform */ spi_write_3bytes(spi_client, 0x3B, 0x00, 0x2B); /* Scan direstion setting */ spi_write_3bytes(spi_client, 0x36, 0x00, 0x00); /* Sleep out */ spi_write_2bytes(spi_client, 0x11, 0x00); msleep(120); /* Display on */ spi_write_2bytes(spi_client, 0x29, 0x00); pr_info("%s: LCM SPI display on CMD finished...\n", __func__); msleep(200); nt35582_state.display_on = TRUE; } /* Test to read RDDID. 
It should be 0x0055h and 0x0082h */ spi_read_bytes(spi_client, 0x10, 0x80, (unsigned char *)&panel_id1); spi_read_bytes(spi_client, 0x11, 0x80, (unsigned char *)&panel_id2); pr_info(KERN_INFO "nt35582_disp_on: LCM_ID=[0x%x, 0x%x]\n", panel_id1, panel_id2); } static int lcdc_nt35582_panel_on(struct platform_device *pdev) { nt35582_disp_on(); return 0; } static int lcdc_nt35582_panel_off(struct platform_device *pdev) { nt35582_state.display_on = FALSE; return 0; } static void lcdc_nt35582_set_backlight(struct msm_fb_data_type *mfd) { int bl_level; int i = 0, step = 0; bl_level = mfd->bl_level; if (bl_level == nt35582_state.bl_level) return; else nt35582_state.bl_level = bl_level; if (bl_level == 0) { gpio_set_value_cansleep(gpio_backlight_en, 0); return; } /* Level:0~31 mapping to step 32~1 */ step = 32 - bl_level; for (i = 0; i < step; i++) { gpio_set_value_cansleep(gpio_backlight_en, 0); ndelay(5); gpio_set_value_cansleep(gpio_backlight_en, 1); ndelay(5); } } static int __devinit nt35582_probe(struct platform_device *pdev) { if (pdev->id == 0) { lcdc_nt35582_pdata = pdev->dev.platform_data; return 0; } gpio_backlight_en = *(lcdc_nt35582_pdata->gpio_num); msm_fb_add_device(pdev); return 0; } #ifdef CONFIG_SPI_QUP static int __devinit lcdc_nt35582_spi_probe(struct spi_device *spi) { spi_client = spi; spi_client->bits_per_word = 16; spi_client->chip_select = 0; spi_client->max_speed_hz = 1100000; spi_client->mode = SPI_MODE_0; spi_setup(spi_client); return 0; } static int __devexit lcdc_nt35582_spi_remove(struct spi_device *spi) { spi_client = NULL; return 0; } static struct spi_driver lcdc_nt35582_spi_driver = { .driver = { .name = LCDC_NT35582_SPI_DEVICE_NAME, .owner = THIS_MODULE, }, .probe = lcdc_nt35582_spi_probe, .remove = __devexit_p(lcdc_nt35582_spi_remove), }; #endif static struct platform_driver this_driver = { .probe = nt35582_probe, .driver = { .name = LCDC_NT35582_PANEL_NAME, }, }; static struct msm_fb_panel_data nt35582_panel_data = { .on = 
lcdc_nt35582_panel_on, .off = lcdc_nt35582_panel_off, .set_backlight = lcdc_nt35582_set_backlight, }; static struct platform_device this_device = { .name = LCDC_NT35582_PANEL_NAME, .id = 1, .dev = { .platform_data = &nt35582_panel_data, } }; static int __init lcdc_nt35582_panel_init(void) { int ret; struct msm_panel_info *pinfo; #ifdef CONFIG_FB_MSM_LCDC_AUTO_DETECT if (msm_fb_detect_client(LCDC_NT35582_PANEL_NAME)) { pr_err("detect failed\n"); return 0; } #endif ret = platform_driver_register(&this_driver); if (ret) { pr_err("Fails to platform_driver_register...\n"); return ret; } pinfo = &nt35582_panel_data.panel_info; pinfo->xres = 480; pinfo->yres = 800; MSM_FB_SINGLE_MODE_PANEL(pinfo); pinfo->type = LCDC_PANEL; pinfo->pdest = DISPLAY_1; pinfo->wait_cycle = 0; pinfo->bpp = 24; pinfo->fb_num = 2; pinfo->clk_rate = 25600000; pinfo->bl_max = 31; pinfo->bl_min = 1; pinfo->lcdc.h_back_porch = 10; /* hsw = 8 + hbp=184 */ pinfo->lcdc.h_front_porch = 10; pinfo->lcdc.h_pulse_width = 2; pinfo->lcdc.v_back_porch = 4; /* vsw=1 + vbp = 2 */ pinfo->lcdc.v_front_porch = 10; pinfo->lcdc.v_pulse_width = 2; pinfo->lcdc.border_clr = 0; /* blk */ pinfo->lcdc.underflow_clr = 0xff; /* blue */ pinfo->lcdc.hsync_skew = 0; ret = platform_device_register(&this_device); if (ret) { pr_err("not able to register the device\n"); goto fail_driver; } #ifdef CONFIG_SPI_QUP ret = spi_register_driver(&lcdc_nt35582_spi_driver); if (ret) { pr_err("not able to register spi\n"); goto fail_device; } #endif return ret; #ifdef CONFIG_SPI_QUP fail_device: platform_device_unregister(&this_device); #endif fail_driver: platform_driver_unregister(&this_driver); return ret; } device_initcall(lcdc_nt35582_panel_init);
gpl-2.0
svtronics/kernel-pandaboard-ES-RevB3
drivers/net/wireless/rtl818x/rtl8180/max2820.c
8494
4102
/* * Radio tuning for Maxim max2820 on RTL8180 * * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> * * Code from the BSD driver and the rtl8181 project have been * very useful to understand certain things * * I want to thanks the Authors of such projects and the Ndiswrapper * project Authors. * * A special Big Thanks also is for all people who donated me cards, * making possible the creation of the original rtl8180 driver * from which this code is derived! * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/delay.h> #include <net/mac80211.h> #include "rtl8180.h" #include "max2820.h" static const u32 max2820_chan[] = { 12, /* CH 1 */ 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 84, /* CH 14 */ }; static void write_max2820(struct ieee80211_hw *dev, u8 addr, u32 data) { struct rtl8180_priv *priv = dev->priv; u32 phy_config; phy_config = 0x90 + (data & 0xf); phy_config <<= 16; phy_config += addr; phy_config <<= 8; phy_config += (data >> 4) & 0xff; rtl818x_iowrite32(priv, (__le32 __iomem *) &priv->map->RFPinsOutput, phy_config); msleep(1); } static void max2820_write_phy_antenna(struct ieee80211_hw *dev, short chan) { struct rtl8180_priv *priv = dev->priv; u8 ant; ant = MAXIM_ANTENNA; if (priv->rfparam & RF_PARAM_ANTBDEFAULT) ant |= BB_ANTENNA_B; if (chan == 14) ant |= BB_ANTATTEN_CHAN14; rtl8180_write_phy(dev, 0x10, ant); } static u8 max2820_rf_calc_rssi(u8 agc, u8 sq) { bool odd; odd = !!(agc & 1); agc >>= 1; if (odd) agc += 76; else agc += 66; /* TODO: change addends above to avoid mult / div below */ return 65 * agc / 100; } static void max2820_rf_set_channel(struct ieee80211_hw *dev, struct ieee80211_conf *conf) { struct rtl8180_priv *priv = dev->priv; int channel = conf ? 
ieee80211_frequency_to_channel(conf->channel->center_freq) : 1; unsigned int chan_idx = channel - 1; u32 txpw = priv->channels[chan_idx].hw_value & 0xFF; u32 chan = max2820_chan[chan_idx]; /* While philips SA2400 drive the PA bias from * sa2400, for MAXIM we do this directly from BB */ rtl8180_write_phy(dev, 3, txpw); max2820_write_phy_antenna(dev, channel); write_max2820(dev, 3, chan); } static void max2820_rf_stop(struct ieee80211_hw *dev) { rtl8180_write_phy(dev, 3, 0x8); write_max2820(dev, 1, 0); } static void max2820_rf_init(struct ieee80211_hw *dev) { struct rtl8180_priv *priv = dev->priv; /* MAXIM from netbsd driver */ write_max2820(dev, 0, 0x007); /* test mode as indicated in datasheet */ write_max2820(dev, 1, 0x01e); /* enable register */ write_max2820(dev, 2, 0x001); /* synt register */ max2820_rf_set_channel(dev, NULL); write_max2820(dev, 4, 0x313); /* rx register */ /* PA is driven directly by the BB, we keep the MAXIM bias * at the highest value in case that setting it to lower * values may introduce some further attenuation somewhere.. 
*/ write_max2820(dev, 5, 0x00f); /* baseband configuration */ rtl8180_write_phy(dev, 0, 0x88); /* sys1 */ rtl8180_write_phy(dev, 3, 0x08); /* txagc */ rtl8180_write_phy(dev, 4, 0xf8); /* lnadet */ rtl8180_write_phy(dev, 5, 0x90); /* ifagcinit */ rtl8180_write_phy(dev, 6, 0x1a); /* ifagclimit */ rtl8180_write_phy(dev, 7, 0x64); /* ifagcdet */ max2820_write_phy_antenna(dev, 1); rtl8180_write_phy(dev, 0x11, 0x88); /* trl */ if (rtl818x_ioread8(priv, &priv->map->CONFIG2) & RTL818X_CONFIG2_ANTENNA_DIV) rtl8180_write_phy(dev, 0x12, 0xc7); else rtl8180_write_phy(dev, 0x12, 0x47); rtl8180_write_phy(dev, 0x13, 0x9b); rtl8180_write_phy(dev, 0x19, 0x0); /* CHESTLIM */ rtl8180_write_phy(dev, 0x1a, 0x9f); /* CHSQLIM */ max2820_rf_set_channel(dev, NULL); } const struct rtl818x_rf_ops max2820_rf_ops = { .name = "Maxim", .init = max2820_rf_init, .stop = max2820_rf_stop, .set_chan = max2820_rf_set_channel, .calc_rssi = max2820_rf_calc_rssi, };
gpl-2.0
Litew/android_kernel_motorola_olympus
drivers/net/wireless/rtl818x/rtl8180/max2820.c
8494
4102
/* * Radio tuning for Maxim max2820 on RTL8180 * * Copyright 2007 Andrea Merello <andreamrl@tiscali.it> * * Code from the BSD driver and the rtl8181 project have been * very useful to understand certain things * * I want to thanks the Authors of such projects and the Ndiswrapper * project Authors. * * A special Big Thanks also is for all people who donated me cards, * making possible the creation of the original rtl8180 driver * from which this code is derived! * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/delay.h> #include <net/mac80211.h> #include "rtl8180.h" #include "max2820.h" static const u32 max2820_chan[] = { 12, /* CH 1 */ 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 84, /* CH 14 */ }; static void write_max2820(struct ieee80211_hw *dev, u8 addr, u32 data) { struct rtl8180_priv *priv = dev->priv; u32 phy_config; phy_config = 0x90 + (data & 0xf); phy_config <<= 16; phy_config += addr; phy_config <<= 8; phy_config += (data >> 4) & 0xff; rtl818x_iowrite32(priv, (__le32 __iomem *) &priv->map->RFPinsOutput, phy_config); msleep(1); } static void max2820_write_phy_antenna(struct ieee80211_hw *dev, short chan) { struct rtl8180_priv *priv = dev->priv; u8 ant; ant = MAXIM_ANTENNA; if (priv->rfparam & RF_PARAM_ANTBDEFAULT) ant |= BB_ANTENNA_B; if (chan == 14) ant |= BB_ANTATTEN_CHAN14; rtl8180_write_phy(dev, 0x10, ant); } static u8 max2820_rf_calc_rssi(u8 agc, u8 sq) { bool odd; odd = !!(agc & 1); agc >>= 1; if (odd) agc += 76; else agc += 66; /* TODO: change addends above to avoid mult / div below */ return 65 * agc / 100; } static void max2820_rf_set_channel(struct ieee80211_hw *dev, struct ieee80211_conf *conf) { struct rtl8180_priv *priv = dev->priv; int channel = conf ? 
ieee80211_frequency_to_channel(conf->channel->center_freq) : 1; unsigned int chan_idx = channel - 1; u32 txpw = priv->channels[chan_idx].hw_value & 0xFF; u32 chan = max2820_chan[chan_idx]; /* While philips SA2400 drive the PA bias from * sa2400, for MAXIM we do this directly from BB */ rtl8180_write_phy(dev, 3, txpw); max2820_write_phy_antenna(dev, channel); write_max2820(dev, 3, chan); } static void max2820_rf_stop(struct ieee80211_hw *dev) { rtl8180_write_phy(dev, 3, 0x8); write_max2820(dev, 1, 0); } static void max2820_rf_init(struct ieee80211_hw *dev) { struct rtl8180_priv *priv = dev->priv; /* MAXIM from netbsd driver */ write_max2820(dev, 0, 0x007); /* test mode as indicated in datasheet */ write_max2820(dev, 1, 0x01e); /* enable register */ write_max2820(dev, 2, 0x001); /* synt register */ max2820_rf_set_channel(dev, NULL); write_max2820(dev, 4, 0x313); /* rx register */ /* PA is driven directly by the BB, we keep the MAXIM bias * at the highest value in case that setting it to lower * values may introduce some further attenuation somewhere.. 
*/ write_max2820(dev, 5, 0x00f); /* baseband configuration */ rtl8180_write_phy(dev, 0, 0x88); /* sys1 */ rtl8180_write_phy(dev, 3, 0x08); /* txagc */ rtl8180_write_phy(dev, 4, 0xf8); /* lnadet */ rtl8180_write_phy(dev, 5, 0x90); /* ifagcinit */ rtl8180_write_phy(dev, 6, 0x1a); /* ifagclimit */ rtl8180_write_phy(dev, 7, 0x64); /* ifagcdet */ max2820_write_phy_antenna(dev, 1); rtl8180_write_phy(dev, 0x11, 0x88); /* trl */ if (rtl818x_ioread8(priv, &priv->map->CONFIG2) & RTL818X_CONFIG2_ANTENNA_DIV) rtl8180_write_phy(dev, 0x12, 0xc7); else rtl8180_write_phy(dev, 0x12, 0x47); rtl8180_write_phy(dev, 0x13, 0x9b); rtl8180_write_phy(dev, 0x19, 0x0); /* CHESTLIM */ rtl8180_write_phy(dev, 0x1a, 0x9f); /* CHSQLIM */ max2820_rf_set_channel(dev, NULL); } const struct rtl818x_rf_ops max2820_rf_ops = { .name = "Maxim", .init = max2820_rf_init, .stop = max2820_rf_stop, .set_chan = max2820_rf_set_channel, .calc_rssi = max2820_rf_calc_rssi, };
gpl-2.0
fear130986/GT-I9195_EUR_KK_Opensource_kernel
drivers/net/ethernet/i825xx/lasi_82596.c
9262
6883
/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as munged into HPPA boxen . This driver is based upon 82596.c, original credits are below... but there were too many hoops which HP wants jumped through to keep this code in there in a sane manner. 3 primary sources of the mess -- 1) hppa needs *lots* of cacheline flushing to keep this kind of MMIO running. 2) The 82596 needs to see all of its pointers as their physical address. Thus virt_to_bus/bus_to_virt are *everywhere*. 3) The implementation HP is using seems to be significantly pickier about when and how the command and RX units are started. some command ordering was changed. Examination of the mach driver leads one to believe that there might be a saner way to pull this off... anyone who feels like a full rewrite can be my guest. Split 02/13/2000 Sam Creasey (sammy@oh.verio.com) 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de) 03/02/2000 changes for better/correct(?) cache-flushing (deller) */ /* 82596.c: A generic 82596 ethernet driver for linux. */ /* Based on Apricot.c Written 1994 by Mark Evans. This driver is for the Apricot 82596 bus-master interface Modularised 12/94 Mark Evans Modified to support the 82596 ethernet chips on 680x0 VME boards. by Richard Hirst <richard@sleepie.demon.co.uk> Renamed to be 82596.c 980825: Changed to receive directly in to sk_buffs which are allocated at open() time. Eliminates copy on incoming frames (small ones are still copied). Shared data now held in a non-cached page, so we can run on 68060 in copyback mode. TBD: * look at deferring rx frames rather than discarding (as per tulip) * handle tx ring full as per tulip * performance test to tune rx_copybreak Most of my modifications relate to the braindead big-endian implementation by Intel. When the i596 is operating in 'big-endian' mode, it thinks a 32 bit value of 0x12345678 should be stored as 0x56781234. 
This is a real pain, when you have linked lists which are shared by the 680x0 and the i596. Driver skeleton Written 1993 by Donald Becker. Copyright 1993 United States Government as represented by the Director, National Security Agency. This software may only be used and distributed according to the terms of the GNU General Public License as modified by SRC, incorporated herein by reference. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/types.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/pdc.h> #include <asm/parisc-device.h> #define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30" #define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/ #define PA_CPU_PORT_L_ACCESS 4 #define PA_CHANNEL_ATTENTION 8 #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */ #define DMA_ALLOC dma_alloc_noncoherent #define DMA_FREE dma_free_noncoherent #define DMA_WBACK(ndev, addr, len) \ do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0) #define DMA_INV(ndev, addr, len) \ do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0) #define DMA_WBACK_INV(ndev, addr, len) \ do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0) #define SYSBUS 0x0000006c; /* big endian CPU, 82596 "big" endian mode */ #define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16)) #define SWAP16(x) (x) #include "lib82596.c" MODULE_AUTHOR("Richard Hirst"); MODULE_DESCRIPTION("i82596 driver"); 
MODULE_LICENSE("GPL"); module_param(i596_debug, int, 0); MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask"); static inline void ca(struct net_device *dev) { gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION); } static void mpu_port(struct net_device *dev, int c, dma_addr_t x) { struct i596_private *lp = netdev_priv(dev); u32 v = (u32) (c) | (u32) (x); u16 a, b; if (lp->options & OPT_SWAP_PORT) { a = v >> 16; b = v & 0xffff; } else { a = v & 0xffff; b = v >> 16; } gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS); udelay(1); gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS); } #define LAN_PROM_ADDR 0xF0810000 static int __devinit lan_init_chip(struct parisc_device *dev) { struct net_device *netdevice; struct i596_private *lp; int retval; int i; if (!dev->irq) { printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", __FILE__, (unsigned long)dev->hpa.start); return -ENODEV; } printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", (unsigned long)dev->hpa.start, dev->irq); netdevice = alloc_etherdev(sizeof(struct i596_private)); if (!netdevice) return -ENOMEM; SET_NETDEV_DEV(netdevice, &dev->dev); parisc_set_drvdata (dev, netdevice); netdevice->base_addr = dev->hpa.start; netdevice->irq = dev->irq; if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) { for (i = 0; i < 6; i++) { netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i); } printk(KERN_INFO "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__); } lp = netdev_priv(netdevice); lp->options = dev->id.sversion == 0x72 ? 
OPT_SWAP_PORT : 0; retval = i82596_probe(netdevice); if (retval) { free_netdev(netdevice); return -ENODEV; } return retval; } static int __devexit lan_remove_chip (struct parisc_device *pdev) { struct net_device *dev = parisc_get_drvdata(pdev); struct i596_private *lp = netdev_priv(dev); unregister_netdev (dev); DMA_FREE(&pdev->dev, sizeof(struct i596_private), (void *)lp->dma, lp->dma_addr); free_netdev (dev); return 0; } static struct parisc_device_id lan_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a }, { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 }, { 0, } }; MODULE_DEVICE_TABLE(parisc, lan_tbl); static struct parisc_driver lan_driver = { .name = "lasi_82596", .id_table = lan_tbl, .probe = lan_init_chip, .remove = __devexit_p(lan_remove_chip), }; static int __devinit lasi_82596_init(void) { printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n"); return register_parisc_driver(&lan_driver); } module_init(lasi_82596_init); static void __exit lasi_82596_exit(void) { unregister_parisc_driver(&lan_driver); } module_exit(lasi_82596_exit);
gpl-2.0
motley-git/TF201-Kernel-Lite
drivers/ide/ns87415.c
9774
9196
/* * Copyright (C) 1997-1998 Mark Lord <mlord@pobox.com> * Copyright (C) 1998 Eddie C. Dost <ecd@skynet.be> * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2004 Grant Grundler <grundler at parisc-linux.org> * * Inspired by an earlier effort from David S. Miller <davem@redhat.com> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "ns87415" #ifdef CONFIG_SUPERIO /* SUPERIO 87560 is a PoS chip that NatSem denies exists. * Unfortunately, it's built-in on all Astro-based PA-RISC workstations * which use the integrated NS87514 cell for CD-ROM support. * i.e we have to support for CD-ROM installs. * See drivers/parisc/superio.c for more gory details. */ #include <asm/superio.h> #define SUPERIO_IDE_MAX_RETRIES 25 /* Because of a defect in Super I/O, all reads of the PCI DMA status * registers, IDE status register and the IDE select register need to be * retried */ static u8 superio_ide_inb (unsigned long port) { u8 tmp; int retries = SUPERIO_IDE_MAX_RETRIES; /* printk(" [ reading port 0x%x with retry ] ", port); */ do { tmp = inb(port); if (tmp == 0) udelay(50); } while (tmp == 0 && retries-- > 0); return tmp; } static u8 superio_read_status(ide_hwif_t *hwif) { return superio_ide_inb(hwif->io_ports.status_addr); } static u8 superio_dma_sff_read_status(ide_hwif_t *hwif) { return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS); } static void superio_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) { struct ide_io_ports *io_ports = &drive->hwif->io_ports; if (valid & IDE_VALID_ERROR) tf->error = inb(io_ports->feature_addr); if (valid & IDE_VALID_NSECT) tf->nsect = inb(io_ports->nsect_addr); if (valid & IDE_VALID_LBAL) tf->lbal = inb(io_ports->lbal_addr); if (valid & IDE_VALID_LBAM) tf->lbam = inb(io_ports->lbam_addr); if (valid & 
IDE_VALID_LBAH) tf->lbah = inb(io_ports->lbah_addr); if (valid & IDE_VALID_DEVICE) tf->device = superio_ide_inb(io_ports->device_addr); } static void ns87415_dev_select(ide_drive_t *drive); static const struct ide_tp_ops superio_tp_ops = { .exec_command = ide_exec_command, .read_status = superio_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = ide_write_devctl, .dev_select = ns87415_dev_select, .tf_load = ide_tf_load, .tf_read = superio_tf_read, .input_data = ide_input_data, .output_data = ide_output_data, }; static void __devinit superio_init_iops(struct hwif_s *hwif) { struct pci_dev *pdev = to_pci_dev(hwif->dev); u32 dma_stat; u8 port = hwif->channel, tmp; dma_stat = (pci_resource_start(pdev, 4) & ~3) + (!port ? 2 : 0xa); /* Clear error/interrupt, enable dma */ tmp = superio_ide_inb(dma_stat); outb(tmp | 0x66, dma_stat); } #else #define superio_dma_sff_read_status ide_dma_sff_read_status #endif static unsigned int ns87415_count = 0, ns87415_control[MAX_HWIFS] = { 0 }; /* * This routine either enables/disables (according to IDE_DFLAG_PRESENT) * the IRQ associated with the port, * and selects either PIO or DMA handshaking for the next I/O operation. */ static void ns87415_prepare_drive (ide_drive_t *drive, unsigned int use_dma) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned int bit, other, new, *old = (unsigned int *) hwif->select_data; unsigned long flags; local_irq_save(flags); new = *old; /* Adjust IRQ enable bit */ bit = 1 << (8 + hwif->channel); if (drive->dev_flags & IDE_DFLAG_PRESENT) new &= ~bit; else new |= bit; /* Select PIO or DMA, DMA may only be selected for one drive/channel. */ bit = 1 << (20 + (drive->dn & 1) + (hwif->channel << 1)); other = 1 << (20 + (1 - (drive->dn & 1)) + (hwif->channel << 1)); new = use_dma ? ((new & ~other) | bit) : (new & ~bit); if (new != *old) { unsigned char stat; /* * Don't change DMA engine settings while Write Buffers * are busy. 
*/ (void) pci_read_config_byte(dev, 0x43, &stat); while (stat & 0x03) { udelay(1); (void) pci_read_config_byte(dev, 0x43, &stat); } *old = new; (void) pci_write_config_dword(dev, 0x40, new); /* * And let things settle... */ udelay(10); } local_irq_restore(flags); } static void ns87415_dev_select(ide_drive_t *drive) { ns87415_prepare_drive(drive, !!(drive->dev_flags & IDE_DFLAG_USING_DMA)); outb(drive->select | ATA_DEVICE_OBS, drive->hwif->io_ports.device_addr); } static void ns87415_dma_start(ide_drive_t *drive) { ns87415_prepare_drive(drive, 1); ide_dma_start(drive); } static int ns87415_dma_end(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 dma_stat = 0, dma_cmd = 0; dma_stat = hwif->dma_ops->dma_sff_read_status(hwif); /* get DMA command mode */ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); /* stop DMA */ outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); /* from ERRATA: clear the INTR & ERROR bits */ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); outb(dma_cmd | 6, hwif->dma_base + ATA_DMA_CMD); ns87415_prepare_drive(drive, 0); /* verify good DMA status */ return (dma_stat & 7) != 4; } static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned int ctrl, using_inta; u8 progif; #ifdef __sparc_v9__ int timeout; u8 stat; #endif /* * We cannot probe for IRQ: both ports share common IRQ on INTA. * Also, leave IRQ masked during drive probing, to prevent infinite * interrupts from a potentially floating INTA.. * * IRQs get unmasked in dev_select() when drive is first used. */ (void) pci_read_config_dword(dev, 0x40, &ctrl); (void) pci_read_config_byte(dev, 0x09, &progif); /* is irq in "native" mode? 
*/ using_inta = progif & (1 << (hwif->channel << 1)); if (!using_inta) using_inta = ctrl & (1 << (4 + hwif->channel)); if (hwif->mate) { hwif->select_data = hwif->mate->select_data; } else { hwif->select_data = (unsigned long) &ns87415_control[ns87415_count++]; ctrl |= (1 << 8) | (1 << 9); /* mask both IRQs */ if (using_inta) ctrl &= ~(1 << 6); /* unmask INTA */ *((unsigned int *)hwif->select_data) = ctrl; (void) pci_write_config_dword(dev, 0x40, ctrl); /* * Set prefetch size to 512 bytes for both ports, * but don't turn on/off prefetching here. */ pci_write_config_byte(dev, 0x55, 0xee); #ifdef __sparc_v9__ /* * XXX: Reset the device, if we don't it will not respond to * dev_select() properly during first ide_probe_port(). */ timeout = 10000; outb(12, hwif->io_ports.ctl_addr); udelay(10); outb(8, hwif->io_ports.ctl_addr); do { udelay(50); stat = hwif->tp_ops->read_status(hwif); if (stat == 0xff) break; } while ((stat & ATA_BUSY) && --timeout); #endif } if (!using_inta) hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel); if (!hwif->dma_base) return; outb(0x60, hwif->dma_base + ATA_DMA_STATUS); } static const struct ide_tp_ops ns87415_tp_ops = { .exec_command = ide_exec_command, .read_status = ide_read_status, .read_altstatus = ide_read_altstatus, .write_devctl = ide_write_devctl, .dev_select = ns87415_dev_select, .tf_load = ide_tf_load, .tf_read = ide_tf_read, .input_data = ide_input_data, .output_data = ide_output_data, }; static const struct ide_dma_ops ns87415_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = ns87415_dma_start, .dma_end = ns87415_dma_end, .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = superio_dma_sff_read_status, }; static const struct ide_port_info ns87415_chipset __devinitdata = { .name = DRV_NAME, .init_hwif = init_hwif_ns87415, .tp_ops = &ns87415_tp_ops, .dma_ops = &ns87415_dma_ops, .host_flags = 
IDE_HFLAG_TRUST_BIOS_FOR_DMA | IDE_HFLAG_NO_ATAPI_DMA, }; static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id) { struct ide_port_info d = ns87415_chipset; #ifdef CONFIG_SUPERIO if (PCI_SLOT(dev->devfn) == 0xE) { /* Built-in - assume it's under superio. */ d.init_iops = superio_init_iops; d.tp_ops = &superio_tp_ops; } #endif return ide_pci_init_one(dev, &d, NULL); } static const struct pci_device_id ns87415_pci_tbl[] = { { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87415), 0 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl); static struct pci_driver ns87415_pci_driver = { .name = "NS87415_IDE", .id_table = ns87415_pci_tbl, .probe = ns87415_init_one, .remove = ide_pci_remove, .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init ns87415_ide_init(void) { return ide_pci_register_driver(&ns87415_pci_driver); } static void __exit ns87415_ide_exit(void) { pci_unregister_driver(&ns87415_pci_driver); } module_init(ns87415_ide_init); module_exit(ns87415_ide_exit); MODULE_AUTHOR("Mark Lord, Eddie Dost, Andre Hedrick"); MODULE_DESCRIPTION("PCI driver module for NS87415 IDE"); MODULE_LICENSE("GPL");
gpl-2.0
multipath-tcp/mptcp-rpi
drivers/mtd/maps/sc520cdp.c
10798
9142
/* sc520cdp.c -- MTD map driver for AMD SC520 Customer Development Platform * * Copyright (C) 2001 Sysgo Real-Time Solutions GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * * The SC520CDP is an evaluation board for the Elan SC520 processor available * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size, * and up to 512 KiB of 8-bit DIL Flash ROM. * For details see http://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/concat.h> /* ** The Embedded Systems BIOS decodes the first FLASH starting at ** 0x8400000. This is a *terrible* place for it because accessing ** the flash at this location causes the A22 address line to be high ** (that's what 0x8400000 binary's ought to be). But this is the highest ** order address line on the raw flash devices themselves!! ** This causes the top HALF of the flash to be accessed first. Beyond ** the physical limits of the flash, the flash chip aliases over (to ** 0x880000 which causes the bottom half to be accessed. This splits the ** flash into two and inverts it! 
If you then try to access this from another ** program that does NOT do this insanity, then you *will* access the ** first half of the flash, but not find what you expect there. That ** stuff is in the *second* half! Similarly, the address used by the ** BIOS for the second FLASH bank is also quite a bad choice. ** If REPROGRAM_PAR is defined below (the default), then this driver will ** choose more useful addresses for the FLASH banks by reprogramming the ** responsible PARxx registers in the SC520's MMCR region. This will ** cause the settings to be incompatible with the BIOS's settings, which ** shouldn't be a problem since you are running Linux, (i.e. the BIOS is ** not much use anyway). However, if you need to be compatible with ** the BIOS for some reason, just undefine REPROGRAM_PAR. */ #define REPROGRAM_PAR #ifdef REPROGRAM_PAR /* These are the addresses we want.. */ #define WINDOW_ADDR_0 0x08800000 #define WINDOW_ADDR_1 0x09000000 #define WINDOW_ADDR_2 0x09800000 /* .. and these are the addresses the BIOS gives us */ #define WINDOW_ADDR_0_BIOS 0x08400000 #define WINDOW_ADDR_1_BIOS 0x08c00000 #define WINDOW_ADDR_2_BIOS 0x09400000 #else #define WINDOW_ADDR_0 0x08400000 #define WINDOW_ADDR_1 0x08C00000 #define WINDOW_ADDR_2 0x09400000 #endif #define WINDOW_SIZE_0 0x00800000 #define WINDOW_SIZE_1 0x00800000 #define WINDOW_SIZE_2 0x00080000 static struct map_info sc520cdp_map[] = { { .name = "SC520CDP Flash Bank #0", .size = WINDOW_SIZE_0, .bankwidth = 4, .phys = WINDOW_ADDR_0 }, { .name = "SC520CDP Flash Bank #1", .size = WINDOW_SIZE_1, .bankwidth = 4, .phys = WINDOW_ADDR_1 }, { .name = "SC520CDP DIL Flash", .size = WINDOW_SIZE_2, .bankwidth = 1, .phys = WINDOW_ADDR_2 }, }; #define NUM_FLASH_BANKS ARRAY_SIZE(sc520cdp_map) static struct mtd_info *mymtd[NUM_FLASH_BANKS]; static struct mtd_info *merged_mtd; #ifdef REPROGRAM_PAR /* ** The SC520 MMCR (memory mapped control register) region resides ** at 0xFFFEF000. 
The 16 Programmable Address Region (PAR) registers ** are at offset 0x88 in the MMCR: */ #define SC520_MMCR_BASE 0xFFFEF000 #define SC520_MMCR_EXTENT 0x1000 #define SC520_PAR(x) ((0x88/sizeof(unsigned long)) + (x)) #define NUM_SC520_PAR 16 /* total number of PAR registers */ /* ** The highest three bits in a PAR register determine what target ** device is controlled by this PAR. Here, only ROMCS? and BOOTCS ** devices are of interest. */ #define SC520_PAR_BOOTCS (0x4<<29) #define SC520_PAR_ROMCS0 (0x5<<29) #define SC520_PAR_ROMCS1 (0x6<<29) #define SC520_PAR_TRGDEV (0x7<<29) /* ** Bits 28 thru 26 determine some attributes for the ** region controlled by the PAR. (We only use non-cacheable) */ #define SC520_PAR_WRPROT (1<<26) /* write protected */ #define SC520_PAR_NOCACHE (1<<27) /* non-cacheable */ #define SC520_PAR_NOEXEC (1<<28) /* code execution denied */ /* ** Bit 25 determines the granularity: 4K or 64K */ #define SC520_PAR_PG_SIZ4 (0<<25) #define SC520_PAR_PG_SIZ64 (1<<25) /* ** Build a value to be written into a PAR register. 
** We only need ROM entries, 64K page size: */ #define SC520_PAR_ENTRY(trgdev, address, size) \ ((trgdev) | SC520_PAR_NOCACHE | SC520_PAR_PG_SIZ64 | \ (address) >> 16 | (((size) >> 16) - 1) << 14) struct sc520_par_table { unsigned long trgdev; unsigned long new_par; unsigned long default_address; }; static const struct sc520_par_table par_table[NUM_FLASH_BANKS] = { { /* Flash Bank #0: selected by ROMCS0 */ SC520_PAR_ROMCS0, SC520_PAR_ENTRY(SC520_PAR_ROMCS0, WINDOW_ADDR_0, WINDOW_SIZE_0), WINDOW_ADDR_0_BIOS }, { /* Flash Bank #1: selected by ROMCS1 */ SC520_PAR_ROMCS1, SC520_PAR_ENTRY(SC520_PAR_ROMCS1, WINDOW_ADDR_1, WINDOW_SIZE_1), WINDOW_ADDR_1_BIOS }, { /* DIL (BIOS) Flash: selected by BOOTCS */ SC520_PAR_BOOTCS, SC520_PAR_ENTRY(SC520_PAR_BOOTCS, WINDOW_ADDR_2, WINDOW_SIZE_2), WINDOW_ADDR_2_BIOS } }; static void sc520cdp_setup_par(void) { volatile unsigned long __iomem *mmcr; unsigned long mmcr_val; int i, j; /* map in SC520's MMCR area */ mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT); if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */ /* force physical address fields to BIOS defaults: */ for(i = 0; i < NUM_FLASH_BANKS; i++) sc520cdp_map[i].phys = par_table[i].default_address; return; } /* ** Find the PARxx registers that are responsible for activating ** ROMCS0, ROMCS1 and BOOTCS. Reprogram each of these with a ** new value from the table. 
*/ for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */ for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */ mmcr_val = mmcr[SC520_PAR(j)]; /* if target device field matches, reprogram the PAR */ if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev) { mmcr[SC520_PAR(j)] = par_table[i].new_par; break; } } if(j == NUM_SC520_PAR) { /* no matching PAR found: try default BIOS address */ printk(KERN_NOTICE "Could not find PAR responsible for %s\n", sc520cdp_map[i].name); printk(KERN_NOTICE "Trying default address 0x%lx\n", par_table[i].default_address); sc520cdp_map[i].phys = par_table[i].default_address; } } iounmap(mmcr); } #endif static int __init init_sc520cdp(void) { int i, devices_found = 0; #ifdef REPROGRAM_PAR /* reprogram PAR registers so flash appears at the desired addresses */ sc520cdp_setup_par(); #endif for (i = 0; i < NUM_FLASH_BANKS; i++) { printk(KERN_NOTICE "SC520 CDP flash device: 0x%Lx at 0x%Lx\n", (unsigned long long)sc520cdp_map[i].size, (unsigned long long)sc520cdp_map[i].phys); sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size); if (!sc520cdp_map[i].virt) { printk("Failed to ioremap_nocache\n"); return -EIO; } simple_map_init(&sc520cdp_map[i]); mymtd[i] = do_map_probe("cfi_probe", &sc520cdp_map[i]); if(!mymtd[i]) mymtd[i] = do_map_probe("jedec_probe", &sc520cdp_map[i]); if(!mymtd[i]) mymtd[i] = do_map_probe("map_rom", &sc520cdp_map[i]); if (mymtd[i]) { mymtd[i]->owner = THIS_MODULE; ++devices_found; } else { iounmap(sc520cdp_map[i].virt); } } if(devices_found >= 2) { /* Combine the two flash banks into a single MTD device & register it: */ merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1"); if(merged_mtd) mtd_device_register(merged_mtd, NULL, 0); } if(devices_found == 3) /* register the third (DIL-Flash) device */ mtd_device_register(mymtd[2], NULL, 0); return(devices_found ? 
0 : -ENXIO); } static void __exit cleanup_sc520cdp(void) { int i; if (merged_mtd) { mtd_device_unregister(merged_mtd); mtd_concat_destroy(merged_mtd); } if (mymtd[2]) mtd_device_unregister(mymtd[2]); for (i = 0; i < NUM_FLASH_BANKS; i++) { if (mymtd[i]) map_destroy(mymtd[i]); if (sc520cdp_map[i].virt) { iounmap(sc520cdp_map[i].virt); sc520cdp_map[i].virt = NULL; } } } module_init(init_sc520cdp); module_exit(cleanup_sc520cdp); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH"); MODULE_DESCRIPTION("MTD map driver for AMD SC520 Customer Development Platform");
gpl-2.0
actnextgendev/android_kernel_samsung_expressatt
arch/mips/pnx8550/common/pci.c
11822
3832
/* * * BRIEF MODULE DESCRIPTION * * Author: source@mvista.com * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <pci.h> #include <glb.h> #include <nand.h> static struct resource pci_io_resource = { .start = PNX8550_PCIIO + 0x1000, /* reserve regacy I/O space */ .end = PNX8550_PCIIO + PNX8550_PCIIO_SIZE, .name = "pci IO space", .flags = IORESOURCE_IO }; static struct resource pci_mem_resource = { .start = PNX8550_PCIMEM, .end = PNX8550_PCIMEM + PNX8550_PCIMEM_SIZE - 1, .name = "pci memory space", .flags = IORESOURCE_MEM }; extern struct pci_ops pnx8550_pci_ops; static struct pci_controller pnx8550_controller = { .pci_ops = &pnx8550_pci_ops, .io_map_base = PNX8550_PORT_BASE, .io_resource = &pci_io_resource, .mem_resource = &pci_mem_resource, }; /* Return the total size of DRAM-memory, (RANK0 + RANK1) */ static inline unsigned long get_system_mem_size(void) { /* Read IP2031_RANK0_ADDR_LO */ unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010); /* Read IP2031_RANK1_ADDR_HI */ unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018); return dram_r1_hi - dram_r0_lo + 1; } static int __init pnx8550_pci_setup(void) { int pci_mem_code; int mem_size = get_system_mem_size() >> 20; /* Clear the Global 2 Register, PCI Inta Output Enable Registers Bit 1:Enable DAC Powerdown -> 0:DACs are enabled and are 
working normally 1:DACs are powerdown Bit 0:Enable of PCI inta output -> 0 = Disable PCI inta output 1 = Enable PCI inta output */ PNX8550_GLB2_ENAB_INTA_O = 0; /* Calc the PCI mem size code */ if (mem_size >= 128) pci_mem_code = SIZE_128M; else if (mem_size >= 64) pci_mem_code = SIZE_64M; else if (mem_size >= 32) pci_mem_code = SIZE_32M; else pci_mem_code = SIZE_16M; /* Set PCI_XIO registers */ outl(pci_mem_resource.start, PCI_BASE | PCI_BASE1_LO); outl(pci_mem_resource.end + 1, PCI_BASE | PCI_BASE1_HI); outl(pci_io_resource.start, PCI_BASE | PCI_BASE2_LO); outl(pci_io_resource.end, PCI_BASE | PCI_BASE2_HI); /* Send memory transaction via PCI_BASE2 */ outl(0x00000001, PCI_BASE | PCI_IO); /* Unlock the setup register */ outl(0xca, PCI_BASE | PCI_UNLOCKREG); /* * BAR0 of PNX8550 (pci base 10) must be zero in order for ide * to work, and in order for bus_to_baddr to work without any * hacks. */ outl(0x00000000, PCI_BASE | PCI_BASE10); /* *These two bars are set by default or the boot code. * However, it's safer to set them here so we're not boot * code dependent. */ outl(0x1be00000, PCI_BASE | PCI_BASE14); /* PNX MMIO */ outl(PNX8550_NAND_BASE_ADDR, PCI_BASE | PCI_BASE18); /* XIO */ outl(PCI_EN_TA | PCI_EN_PCI2MMI | PCI_EN_XIO | PCI_SETUP_BASE18_SIZE(SIZE_32M) | PCI_SETUP_BASE18_EN | PCI_SETUP_BASE14_EN | PCI_SETUP_BASE10_PREF | PCI_SETUP_BASE10_SIZE(pci_mem_code) | PCI_SETUP_CFGMANAGE_EN | PCI_SETUP_PCIARB_EN, PCI_BASE | PCI_SETUP); /* PCI_SETUP */ outl(0x00000000, PCI_BASE | PCI_CTRL); /* PCI_CONTROL */ register_pci_controller(&pnx8550_controller); return 0; } arch_initcall(pnx8550_pci_setup);
gpl-2.0
LeonardKoenig/android_kernel_coolpad_8860U
fs/ocfs2/cluster/ver.c
12590
1222
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * ver.c * * version string * * Copyright (C) 2002, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include "ver.h" #define CLUSTER_BUILD_VERSION "1.5.0" #define VERSION_STR "OCFS2 Node Manager " CLUSTER_BUILD_VERSION void cluster_print_version(void) { printk(KERN_INFO "%s\n", VERSION_STR); } MODULE_DESCRIPTION(VERSION_STR); MODULE_VERSION(CLUSTER_BUILD_VERSION);
gpl-2.0
RootWizard/AeroKernel
fs/efs/file.c
12846
1190
/*
 * file.c
 *
 * Copyright (c) 1999 Al Smith
 *
 * Portions derived from work (c) 1995,1996 Christian Vogelgsang.
 */

#include <linux/buffer_head.h>
#include "efs.h"

/*
 * efs_get_block - map a file-relative block to an on-disk block.
 * @inode:	inode of the file being accessed
 * @iblock:	file-relative block number to map
 * @bh_result:	buffer head to fill in with the mapping
 * @create:	non-zero when the caller wants the block allocated
 *
 * EFS is read-only, so any allocation request (@create != 0) fails with
 * -EROFS.  Out-of-range blocks and holes leave @bh_result unmapped and
 * return 0.
 */
int efs_get_block(struct inode *inode, sector_t iblock,
		  struct buffer_head *bh_result, int create)
{
	int error = -EROFS;
	long phys;

	if (create)
		return error;
	if (iblock >= inode->i_blocks) {
#ifdef DEBUG
		/*
		 * i have no idea why this happens as often as it does
		 */
		/*
		 * Bug fix: the message previously referenced "block",
		 * an identifier that does not exist in this function
		 * (the parameter is "iblock"), so this failed to compile
		 * with DEBUG defined.  The casts keep the format
		 * specifiers valid for sector_t/blkcnt_t/loff_t on all
		 * configurations.
		 */
		printk(KERN_WARNING "EFS: bmap(): block %llu >= %llu (filesize %lld)\n",
			(unsigned long long)iblock,
			(unsigned long long)inode->i_blocks,
			(long long)inode->i_size);
#endif
		return 0;
	}
	phys = efs_map_block(inode, iblock);
	if (phys)
		map_bh(bh_result, inode->i_sb, phys);
	return 0;
}

/*
 * efs_bmap - resolve a file-relative block number to a physical block.
 * @inode:	inode of the file
 * @block:	file-relative block number
 *
 * Returns the physical block number, or 0 for negative or
 * past-end-of-file requests.
 */
int efs_bmap(struct inode *inode, efs_block_t block)
{
	if (block < 0) {
		printk(KERN_WARNING "EFS: bmap(): block < 0\n");
		return 0;
	}

	/* are we about to read past the end of a file ? */
	if (!(block < inode->i_blocks)) {
#ifdef DEBUG
		/*
		 * i have no idea why this happens as often as it does
		 */
		printk(KERN_WARNING "EFS: bmap(): block %d >= %llu (filesize %lld)\n",
			block,
			(unsigned long long)inode->i_blocks,
			(long long)inode->i_size);
#endif
		return 0;
	}
	return efs_map_block(inode, block);
}
gpl-2.0
ktraghavendra/linux
drivers/tty/serial/8250/8250_mtk.c
47
7802
/* * Mediatek 8250 driver. * * Copyright (c) 2014 MundoReader S.L. * Author: Matthias Brugger <matthias.bgg@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/io.h> #include <linux/init.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/serial_8250.h> #include <linux/serial_reg.h> #include "8250.h" #define UART_MTK_HIGHS 0x09 /* Highspeed register */ #define UART_MTK_SAMPLE_COUNT 0x0a /* Sample count register */ #define UART_MTK_SAMPLE_POINT 0x0b /* Sample point register */ #define MTK_UART_RATE_FIX 0x0d /* UART Rate Fix Register */ struct mtk8250_data { int line; struct clk *uart_clk; struct clk *bus_clk; }; static void mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { unsigned long flags; unsigned int baud, quot; struct uart_8250_port *up = container_of(port, struct uart_8250_port, port); serial8250_do_set_termios(port, termios, old); /* * Mediatek UARTs use an extra highspeed register (UART_MTK_HIGHS) * * We need to recalcualte the quot register, as the claculation depends * on the vaule in the highspeed register. * * Some baudrates are not supported by the chip, so we use the next * lower rate supported and update termios c_flag. * * If highspeed register is set to 3, we need to specify sample count * and sample point to increase accuracy. If not, we reset the * registers to their default values. 
*/ baud = uart_get_baud_rate(port, termios, old, port->uartclk / 16 / 0xffff, port->uartclk / 16); if (baud <= 115200) { serial_port_out(port, UART_MTK_HIGHS, 0x0); quot = uart_get_divisor(port, baud); } else if (baud <= 576000) { serial_port_out(port, UART_MTK_HIGHS, 0x2); /* Set to next lower baudrate supported */ if ((baud == 500000) || (baud == 576000)) baud = 460800; quot = DIV_ROUND_UP(port->uartclk, 4 * baud); } else { serial_port_out(port, UART_MTK_HIGHS, 0x3); /* Set to highest baudrate supported */ if (baud >= 1152000) baud = 921600; quot = DIV_ROUND_UP(port->uartclk, 256 * baud); } /* * Ok, we're now changing the port state. Do it with * interrupts disabled. */ spin_lock_irqsave(&port->lock, flags); /* set DLAB we have cval saved in up->lcr from the call to the core */ serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); serial_dl_write(up, quot); /* reset DLAB */ serial_port_out(port, UART_LCR, up->lcr); if (baud > 460800) { unsigned int tmp; tmp = DIV_ROUND_CLOSEST(port->uartclk, quot * baud); serial_port_out(port, UART_MTK_SAMPLE_COUNT, tmp - 1); serial_port_out(port, UART_MTK_SAMPLE_POINT, (tmp - 2) >> 1); } else { serial_port_out(port, UART_MTK_SAMPLE_COUNT, 0x00); serial_port_out(port, UART_MTK_SAMPLE_POINT, 0xff); } spin_unlock_irqrestore(&port->lock, flags); /* Don't rewrite B0 */ if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, baud, baud); } static int mtk8250_runtime_suspend(struct device *dev) { struct mtk8250_data *data = dev_get_drvdata(dev); clk_disable_unprepare(data->uart_clk); clk_disable_unprepare(data->bus_clk); return 0; } static int mtk8250_runtime_resume(struct device *dev) { struct mtk8250_data *data = dev_get_drvdata(dev); int err; err = clk_prepare_enable(data->uart_clk); if (err) { dev_warn(dev, "Can't enable clock\n"); return err; } err = clk_prepare_enable(data->bus_clk); if (err) { dev_warn(dev, "Can't enable bus clock\n"); return err; } return 0; } static void mtk8250_do_pm(struct uart_port 
*port, unsigned int state, unsigned int old) { if (!state) pm_runtime_get_sync(port->dev); serial8250_do_pm(port, state, old); if (state) pm_runtime_put_sync_suspend(port->dev); } static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, struct mtk8250_data *data) { data->uart_clk = devm_clk_get(&pdev->dev, "baud"); if (IS_ERR(data->uart_clk)) { /* * For compatibility with older device trees try unnamed * clk when no baud clk can be found. */ data->uart_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(data->uart_clk)) { dev_warn(&pdev->dev, "Can't get uart clock\n"); return PTR_ERR(data->uart_clk); } return 0; } data->bus_clk = devm_clk_get(&pdev->dev, "bus"); if (IS_ERR(data->bus_clk)) return PTR_ERR(data->bus_clk); return 0; } static int mtk8250_probe(struct platform_device *pdev) { struct uart_8250_port uart = {}; struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); struct mtk8250_data *data; int err; if (!regs || !irq) { dev_err(&pdev->dev, "no registers/irq defined\n"); return -EINVAL; } uart.port.membase = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); if (!uart.port.membase) return -ENOMEM; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; if (pdev->dev.of_node) { err = mtk8250_probe_of(pdev, &uart.port, data); if (err) return err; } else return -ENODEV; spin_lock_init(&uart.port.lock); uart.port.mapbase = regs->start; uart.port.irq = irq->start; uart.port.pm = mtk8250_do_pm; uart.port.type = PORT_16550; uart.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT; uart.port.dev = &pdev->dev; uart.port.iotype = UPIO_MEM32; uart.port.regshift = 2; uart.port.private_data = data; uart.port.set_termios = mtk8250_set_termios; uart.port.uartclk = clk_get_rate(data->uart_clk); /* Disable Rate Fix function */ writel(0x0, uart.port.membase + (MTK_UART_RATE_FIX << uart.port.regshift)); platform_set_drvdata(pdev, 
data); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { err = mtk8250_runtime_resume(&pdev->dev); if (err) return err; } data->line = serial8250_register_8250_port(&uart); if (data->line < 0) return data->line; return 0; } #ifdef CONFIG_PM_SLEEP static int mtk8250_suspend(struct device *dev) { struct mtk8250_data *data = dev_get_drvdata(dev); serial8250_suspend_port(data->line); return 0; } static int mtk8250_resume(struct device *dev) { struct mtk8250_data *data = dev_get_drvdata(dev); serial8250_resume_port(data->line); return 0; } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops mtk8250_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(mtk8250_suspend, mtk8250_resume) SET_RUNTIME_PM_OPS(mtk8250_runtime_suspend, mtk8250_runtime_resume, NULL) }; static const struct of_device_id mtk8250_of_match[] = { { .compatible = "mediatek,mt6577-uart" }, { /* Sentinel */ } }; static struct platform_driver mtk8250_platform_driver = { .driver = { .name = "mt6577-uart", .pm = &mtk8250_pm_ops, .of_match_table = mtk8250_of_match, .suppress_bind_attrs = true, }, .probe = mtk8250_probe, }; builtin_platform_driver(mtk8250_platform_driver); #ifdef CONFIG_SERIAL_8250_CONSOLE static int __init early_mtk8250_setup(struct earlycon_device *device, const char *options) { if (!device->port.membase) return -ENODEV; device->port.iotype = UPIO_MEM32; return early_serial8250_setup(device, NULL); } OF_EARLYCON_DECLARE(mtk8250, "mediatek,mt6577-uart", early_mtk8250_setup); #endif
gpl-2.0
TeamChopsticks/samsung_hercskynote_kernels
drivers/media/video/msm/csi/msm_ispif.c
303
17179
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * MSM ISP interface (ISPIF) v4l2 subdevice: routes CSID output streams
 * (PIX0/RDI0/RDI1) to the VFE, managing per-interface CID masks, resets
 * and the interrupt bookkeeping for SOF/overflow events.
 */

#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <mach/gpio.h>
#include <mach/camera.h>
#include "msm_ispif.h"
#include "msm.h"

#define V4L2_IDENT_ISPIF                     50001
#define CSID_VERSION_V2                      0x2000011

/* ISPIF registers */
#define ISPIF_RST_CMD_ADDR                        0X00
#define ISPIF_INTF_CMD_ADDR                       0X04
#define ISPIF_CTRL_ADDR                           0X08
#define ISPIF_INPUT_SEL_ADDR                      0X0C
#define ISPIF_PIX_INTF_CID_MASK_ADDR              0X10
#define ISPIF_RDI_INTF_CID_MASK_ADDR              0X14
#define ISPIF_PIX_1_INTF_CID_MASK_ADDR            0X38
#define ISPIF_RDI_1_INTF_CID_MASK_ADDR            0X3C
#define ISPIF_PIX_STATUS_ADDR                     0X24
#define ISPIF_RDI_STATUS_ADDR                     0X28
#define ISPIF_RDI_1_STATUS_ADDR                   0X64
#define ISPIF_IRQ_MASK_ADDR                     0X0100
#define ISPIF_IRQ_CLEAR_ADDR                    0X0104
#define ISPIF_IRQ_STATUS_ADDR                   0X0108
#define ISPIF_IRQ_MASK_1_ADDR                   0X010C
#define ISPIF_IRQ_CLEAR_1_ADDR                  0X0110
#define ISPIF_IRQ_STATUS_1_ADDR                 0X0114
#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR         0x0124

/*ISPIF RESET BITS*/
#define VFE_CLK_DOMAIN_RST           31
#define RDI_CLK_DOMAIN_RST           30
#define PIX_CLK_DOMAIN_RST           29
#define AHB_CLK_DOMAIN_RST           28
#define RDI_1_CLK_DOMAIN_RST         27
#define RDI_1_VFE_RST_STB            13
#define RDI_1_CSID_RST_STB           12
#define RDI_VFE_RST_STB               7
#define RDI_CSID_RST_STB              6
#define PIX_VFE_RST_STB               4
#define PIX_CSID_RST_STB              3
#define SW_REG_RST_STB                2
#define MISC_LOGIC_RST_STB            1
#define STROBED_RST_EN                0

#define PIX_INTF_0_OVERFLOW_IRQ      12
#define RAW_INTF_0_OVERFLOW_IRQ      25
/* NOTE(review): same bit as RAW_INTF_0 above — presumably a copy/paste
 * slip for interface 1; confirm against the ISPIF HW manual. */
#define RAW_INTF_1_OVERFLOW_IRQ      25
#define RESET_DONE_IRQ               27

#define ISPIF_IRQ_STATUS_MASK        0xA493000
#define ISPIF_IRQ_1_STATUS_MASK      0xA493000
#define ISPIF_IRQ_STATUS_RDI_SOF_MASK 0x492000
#define ISPIF_IRQ_GLOBAL_CLEAR_CMD   0x1
#define MAX_CID 15

/* Singleton device instance; the subdev callbacks also read it globally. */
static struct ispif_device *ispif;

/* Pending-IRQ count and queue shared between the ISR and the tasklet. */
atomic_t ispif_irq_cnt;
spinlock_t ispif_tasklet_lock;
struct list_head ispif_tasklet_q;

/* Shadow of ISPIF_INTF_CMD_ADDR; 2 bits per VC, 8 bits per interface. */
static uint32_t global_intf_cmd_mask = 0xFFFFFFFF;

/*
 * Strobe a reset for every interface set in intfmask and wait for the
 * RESET_DONE interrupt.
 *
 * NOTE(review): `data` is overwritten on each loop iteration, so when
 * multiple interface bits are set only the LAST matching interface's
 * reset bits are actually written to the register.
 */
static int msm_ispif_intf_reset(uint8_t intfmask)
{
	int rc = 0;
	uint32_t data = 0x1;
	uint8_t intfnum = 0, mask = intfmask;
	while (mask != 0) {
		if (!(intfmask & (0x1 << intfnum))) {
			mask >>= 1;
			intfnum++;
			continue;
		}
		switch (intfnum) {
		case PIX0:
			data = (0x1 << STROBED_RST_EN) +
				(0x1 << PIX_VFE_RST_STB) +
				(0x1 << PIX_CSID_RST_STB);
			break;
		case RDI0:
			data = (0x1 << STROBED_RST_EN) +
				(0x1 << RDI_VFE_RST_STB) +
				(0x1 << RDI_CSID_RST_STB);
			break;
		case RDI1:
			data = (0x1 << STROBED_RST_EN) +
				(0x1 << RDI_1_VFE_RST_STB) +
				(0x1 << RDI_1_CSID_RST_STB);
			break;
		default:
			rc = -EINVAL;
			break;
		}
		mask >>= 1;
		intfnum++;
	}	/*end while */
	if (rc >= 0) {
		msm_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
		/* Completed by the ISR when RESET_DONE_IRQ fires. */
		rc = wait_for_completion_interruptible(&ispif->reset_complete);
	}
	return rc;
}

/* Full module reset: strobe all PIX/RDI/SW/logic reset bits at once. */
static int msm_ispif_reset(void)
{
	uint32_t data = (0x1 << STROBED_RST_EN) +
		(0x1 << SW_REG_RST_STB) +
		(0x1 << MISC_LOGIC_RST_STB) +
		(0x1 << PIX_VFE_RST_STB) +
		(0x1 << PIX_CSID_RST_STB) +
		(0x1 << RDI_VFE_RST_STB) +
		(0x1 << RDI_CSID_RST_STB) +
		(0x1 << RDI_1_VFE_RST_STB) +
		(0x1 << RDI_1_CSID_RST_STB);
	msm_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
	return wait_for_completion_interruptible(&ispif->reset_complete);
}

/* v4l2 core op: report a fixed chip identity. */
static int msm_ispif_subdev_g_chip_ident(struct v4l2_subdev *sd,
	struct v4l2_dbg_chip_ident *chip)
{
	BUG_ON(!chip);
	chip->ident = V4L2_IDENT_ISPIF;
	chip->revision = 0;
	return 0;
}

/*
 * Route the given CSID core to an interface via ISPIF_INPUT_SEL_ADDR
 * (4 bits per interface).
 *
 * NOTE(review): the CSID index is also passed to clk_set_rate() as the
 * rate argument — looks intentional for this clock API variant, but
 * verify against the platform clock driver.
 */
static void msm_ispif_sel_csid_core(uint8_t intftype, uint8_t csid)
{
	int rc = 0;
	uint32_t data;
	if (ispif->ispif_clk[intftype] == NULL) {
		pr_err("%s: ispif NULL clk\n", __func__);
		return;
	}
	rc = clk_set_rate(ispif->ispif_clk[intftype], csid);
	if (rc < 0)
		pr_err("%s: clk_set_rate failed %d\n", __func__, rc);
	data = msm_io_r(ispif->base + ISPIF_INPUT_SEL_ADDR);
	data |= csid<<(intftype*4);
	msm_io_w(data, ispif->base + ISPIF_INPUT_SEL_ADDR);
}

/* OR the requested CIDs into the interface's CID mask register. */
static void msm_ispif_enable_intf_cids(uint8_t intftype, uint16_t cid_mask)
{
	uint32_t data;
	mutex_lock(&ispif->mutex);
	switch (intftype) {
	case PIX0:
		data = msm_io_r(ispif->base +
			ISPIF_PIX_INTF_CID_MASK_ADDR);
		data |= cid_mask;
		msm_io_w(data, ispif->base +
			ISPIF_PIX_INTF_CID_MASK_ADDR);
		break;
	case RDI0:
		data = msm_io_r(ispif->base +
			ISPIF_RDI_INTF_CID_MASK_ADDR);
		data |= cid_mask;
		msm_io_w(data, ispif->base +
			ISPIF_RDI_INTF_CID_MASK_ADDR);
		break;
	case RDI1:
		data = msm_io_r(ispif->base +
			ISPIF_RDI_1_INTF_CID_MASK_ADDR);
		data |= cid_mask;
		msm_io_w(data, ispif->base +
			ISPIF_RDI_1_INTF_CID_MASK_ADDR);
		break;
	}
	mutex_unlock(&ispif->mutex);
}

/*
 * Configure CSID routing and CID masks for each entry in params_list.
 * Fails with -EBUSY unless both PIX and RDI interfaces report idle
 * (status nibble == 0xf). IRQs are masked during reprogramming and
 * re-armed (with a clear) afterwards.
 */
static int msm_ispif_config(struct msm_ispif_params_list *params_list)
{
	uint32_t params_len;
	struct msm_ispif_params *ispif_params;
	uint32_t data, data1;
	int rc = 0, i = 0;
	params_len = params_list->len;
	ispif_params = params_list->params;
	CDBG("Enable interface\n");
	data = msm_io_r(ispif->base + ISPIF_PIX_STATUS_ADDR);
	data1 = msm_io_r(ispif->base + ISPIF_RDI_STATUS_ADDR);
	if (((data & 0xf) != 0xf) || ((data1 & 0xf) != 0xf))
		return -EBUSY;
	msm_io_w(0x00000000, ispif->base + ISPIF_IRQ_MASK_ADDR);
	for (i = 0; i < params_len; i++) {
		msm_ispif_sel_csid_core(ispif_params[i].intftype,
			ispif_params[i].csid);
		msm_ispif_enable_intf_cids(ispif_params[i].intftype,
			ispif_params[i].cid_mask);
	}
	msm_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base + ISPIF_IRQ_MASK_ADDR);
	msm_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base + ISPIF_IRQ_CLEAR_ADDR);
	msm_io_w(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
	return rc;
}

/* Read back the CID mask currently programmed for one interface. */
static uint32_t msm_ispif_get_cid_mask(uint8_t intftype)
{
	uint32_t mask = 0;
	switch (intftype) {
	case PIX0:
		mask = msm_io_r(ispif->base +
			ISPIF_PIX_INTF_CID_MASK_ADDR);
		break;
	case RDI0:
		mask = msm_io_r(ispif->base +
			ISPIF_RDI_INTF_CID_MASK_ADDR);
		break;
	case RDI1:
		mask = msm_io_r(ispif->base +
			ISPIF_RDI_1_INTF_CID_MASK_ADDR);
		break;
	default:
		break;
	}
	return mask;
}

/*
 * Fold a 2-bit command (start/stop/abort) for every active VC of every
 * interface in intfmask into global_intf_cmd_mask, then write the whole
 * shadow to ISPIF_INTF_CMD_ADDR. A VC is "active" when its nibble in
 * the CID mask is non-zero.
 */
static void msm_ispif_intf_cmd(uint8_t intfmask, uint8_t intf_cmd_mask)
{
	uint8_t vc = 0, val = 0;
	uint8_t mask = intfmask, intfnum = 0;
	uint32_t cid_mask = 0;
	while (mask != 0) {
		if (!(intfmask & (0x1 << intfnum))) {
			mask >>= 1;
			intfnum++;
			continue;
		}
		cid_mask = msm_ispif_get_cid_mask(intfnum);
		vc = 0;
		while (cid_mask != 0) {
			if ((cid_mask & 0xf) != 0x0) {
				/* Set both bits, then clear those not in val. */
				val = (intf_cmd_mask>>(vc*2)) & 0x3;
				global_intf_cmd_mask |=
					(0x3 << ((vc * 2) + (intfnum * 8)));
				global_intf_cmd_mask &= ~((0x3 & ~val)
					<< ((vc * 2) +
					(intfnum * 8)));
			}
			vc++;
			cid_mask >>= 4;
		}
		mask >>= 1;
		intfnum++;
	}
	msm_io_w(global_intf_cmd_mask, ispif->base + ISPIF_INTF_CMD_ADDR);
}

/* Abort transfer immediately (cmd 0xAA), then mark interfaces idle. */
static int msm_ispif_abort_intf_transfer(uint8_t intfmask)
{
	int rc = 0;
	uint8_t intf_cmd_mask = 0xAA;
	uint8_t intfnum = 0, mask = intfmask;
	mutex_lock(&ispif->mutex);
	msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
	while (mask != 0) {
		if (intfmask & (0x1 << intfnum))
			global_intf_cmd_mask |= (0xFF << (intfnum * 8));
		mask >>= 1;
		intfnum++;
	}
	mutex_unlock(&ispif->mutex);
	return rc;
}

/* Reset the interfaces, then start transfer on frame boundary (0x55). */
static int msm_ispif_start_intf_transfer(uint8_t intfmask)
{
	uint8_t intf_cmd_mask = 0x55;
	int rc = 0;
	mutex_lock(&ispif->mutex);
	rc = msm_ispif_intf_reset(intfmask);
	msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
	mutex_unlock(&ispif->mutex);
	return rc;
}

/*
 * Stop transfer on frame boundary (0x00) and poll each interface's
 * status register until it reports idle.
 *
 * NOTE(review): the polling loops are unbounded — if the hardware never
 * drains, this spins forever while holding ispif->mutex.
 */
static int msm_ispif_stop_intf_transfer(uint8_t intfmask)
{
	int rc = 0;
	uint8_t intf_cmd_mask = 0x00;
	uint8_t intfnum = 0, mask = intfmask;
	mutex_lock(&ispif->mutex);
	msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
	while (mask != 0) {
		if (intfmask & (0x1 << intfnum)) {
			switch (intfnum) {
			case PIX0:
				while ((msm_io_r(ispif->base +
					ISPIF_PIX_STATUS_ADDR)
					& 0xf) != 0xf) {
					CDBG("Wait for pix0 Idle\n");
				}
				break;
			case RDI0:
				while ((msm_io_r(ispif->base +
					ISPIF_RDI_STATUS_ADDR)
					& 0xf) != 0xf) {
					CDBG("Wait for rdi0 Idle\n");
				}
				break;
			case RDI1:
				while ((msm_io_r(ispif->base +
					ISPIF_RDI_1_STATUS_ADDR)
					& 0xf) != 0xf) {
					CDBG("Wait for rdi1 Idle\n");
				}
				break;
			default:
				break;
			}
			global_intf_cmd_mask |= (0xFF << (intfnum * 8));
		}
		mask >>= 1;
		intfnum++;
	}
	mutex_unlock(&ispif->mutex);
	return rc;
}

/*
 * v4l2 s_stream op. The `enable` word packs a command in the low bits
 * and the interface mask above ISPIF_S_STREAM_SHIFT.
 */
static int msm_ispif_subdev_video_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct ispif_device *ispif =
			(struct ispif_device *)v4l2_get_subdevdata(sd);
	int32_t cmd = enable & ((1<<ISPIF_S_STREAM_SHIFT)-1);
	enum msm_ispif_intftype intf = enable >> ISPIF_S_STREAM_SHIFT;
	int rc = -EINVAL;
	BUG_ON(!ispif);
	switch (cmd) {
	case ISPIF_ON_FRAME_BOUNDARY:
		rc = msm_ispif_start_intf_transfer(intf);
		break;
	case ISPIF_OFF_FRAME_BOUNDARY:
		rc = msm_ispif_stop_intf_transfer(intf);
		break;
	case ISPIF_OFF_IMMEDIATELY:
		rc = msm_ispif_abort_intf_transfer(intf);
		break;
	default:
		break;
	}
	return rc;
}

/* Bottom half: drain queued IRQ status entries and log SOF events. */
static void ispif_do_tasklet(unsigned long data)
{
	unsigned long flags;
	struct ispif_isr_queue_cmd *qcmd = NULL;
	CDBG("=== ispif_do_tasklet start ===\n");
	while (atomic_read(&ispif_irq_cnt)) {
		spin_lock_irqsave(&ispif_tasklet_lock, flags);
		qcmd = list_first_entry(&ispif_tasklet_q,
			struct ispif_isr_queue_cmd, list);
		atomic_sub(1, &ispif_irq_cnt);
		if (!qcmd) {
			spin_unlock_irqrestore(&ispif_tasklet_lock,
				flags);
			return;
		}
		list_del(&qcmd->list);
		spin_unlock_irqrestore(&ispif_tasklet_lock,
			flags);
		if (qcmd->ispifInterruptStatus0 &
			ISPIF_IRQ_STATUS_RDI_SOF_MASK) {
			CDBG("ispif rdi irq status\n");
		}
		if (qcmd->ispifInterruptStatus1 &
			ISPIF_IRQ_STATUS_RDI_SOF_MASK) {
			CDBG("ispif rdi1 irq status\n");
		}
		kfree(qcmd);
	}
	CDBG("=== ispif_do_tasklet end ===\n");
}

DECLARE_TASKLET(ispif_tasklet, ispif_do_tasklet, 0);

/* ISR helper: snapshot status into a queue entry and kick the tasklet. */
static void ispif_process_irq(struct ispif_irq_status *out)
{
	unsigned long flags;
	struct ispif_isr_queue_cmd *qcmd;
	CDBG("ispif_process_irq\n");
	qcmd = kzalloc(sizeof(struct ispif_isr_queue_cmd),
		GFP_ATOMIC);
	if (!qcmd) {
		pr_err("ispif_process_irq: qcmd malloc failed!\n");
		return;
	}
	qcmd->ispifInterruptStatus0 = out->ispifIrqStatus0;
	qcmd->ispifInterruptStatus1 = out->ispifIrqStatus1;
	spin_lock_irqsave(&ispif_tasklet_lock, flags);
	list_add_tail(&qcmd->list, &ispif_tasklet_q);
	atomic_add(1, &ispif_irq_cnt);
	spin_unlock_irqrestore(&ispif_tasklet_lock, flags);
	tasklet_schedule(&ispif_tasklet);
	return;
}

/*
 * Read and ack both IRQ status registers; signal reset completion,
 * log overflows, and defer SOF handling to the tasklet.
 */
static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out)
{
	out->ispifIrqStatus0 = msm_io_r(ispif->base +
		ISPIF_IRQ_STATUS_ADDR);
	out->ispifIrqStatus1 = msm_io_r(ispif->base +
		ISPIF_IRQ_STATUS_1_ADDR);
	msm_io_w(out->ispifIrqStatus0, ispif->base +
		ISPIF_IRQ_CLEAR_ADDR);
	msm_io_w(out->ispifIrqStatus1, ispif->base +
		ISPIF_IRQ_CLEAR_1_ADDR);
	CDBG("ispif->irq: Irq_status0 = 0x%x\n",
		out->ispifIrqStatus0);
	if (out->ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
		if (out->ispifIrqStatus0 & (0x1 << RESET_DONE_IRQ))
			complete(&ispif->reset_complete);
		if (out->ispifIrqStatus0 & (0x1 << PIX_INTF_0_OVERFLOW_IRQ))
			pr_err("%s: pix intf 0 overflow.\n", __func__);
		if (out->ispifIrqStatus0 & (0x1 << RAW_INTF_0_OVERFLOW_IRQ))
			pr_err("%s: rdi intf 0 overflow.\n", __func__);
		if ((out->ispifIrqStatus0 & ISPIF_IRQ_STATUS_RDI_SOF_MASK) ||
			(out->ispifIrqStatus1 &
				ISPIF_IRQ_STATUS_RDI_SOF_MASK)) {
			ispif_process_irq(out);
		}
	}
	msm_io_w(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
}

/* Top-half IRQ handler: all real work happens in read_irq_status. */
static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
{
	struct ispif_irq_status irq;
	msm_ispif_read_irq_status(&irq);
	return IRQ_HANDLED;
}

/* Clock table; only the first 2 entries are used on pre-V2 CSID. */
static struct msm_cam_clk_info ispif_clk_info[] = {
	{"csi_pix_clk", 0},
	{"csi_rdi_clk", 0},
	{"csi_pix1_clk", 0},
	{"csi_rdi1_clk", 0},
	{"csi_rdi2_clk", 0},
};

/*
 * VIDIOC_MSM_ISPIF_INIT: request the IRQ, enable clocks per CSID
 * version, and reset the block.
 *
 * NOTE(review): the request_irq() return code is overwritten before it
 * is checked; dev_id is 0, matching the free_irq(..., 0) in release.
 */
static int msm_ispif_init(const uint32_t *csid_version)
{
	int rc = 0;
	spin_lock_init(&ispif_tasklet_lock);
	INIT_LIST_HEAD(&ispif_tasklet_q);
	rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
		IRQF_TRIGGER_RISING, "ispif", 0);
	global_intf_cmd_mask = 0xFFFFFFFF;
	init_completion(&ispif->reset_complete);
	ispif->csid_version = *csid_version;
	if (ispif->csid_version == CSID_VERSION_V2) {
		rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_clk_info,
			ispif->ispif_clk, ARRAY_SIZE(ispif_clk_info), 1);
		if (rc < 0)
			return rc;
	} else {
		rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_clk_info,
			ispif->ispif_clk, 2, 1);
		if (rc < 0)
			return rc;
	}
	rc = msm_ispif_reset();
	return rc;
}

/* VIDIOC_MSM_ISPIF_RELEASE: undo msm_ispif_init. */
static void msm_ispif_release(struct v4l2_subdev *sd)
{
	struct ispif_device *ispif =
			(struct ispif_device *)v4l2_get_subdevdata(sd);
	CDBG("%s, free_irq\n", __func__);
	free_irq(ispif->irq->start, 0);
	tasklet_kill(&ispif_tasklet);
	if (ispif->csid_version == CSID_VERSION_V2)
		msm_cam_clk_enable(&ispif->pdev->dev, ispif_clk_info,
			ispif->ispif_clk, ARRAY_SIZE(ispif_clk_info), 0);
	else
		msm_cam_clk_enable(&ispif->pdev->dev, ispif_clk_info,
			ispif->ispif_clk, 2, 0);
}

/* Expand the interface's CID mask register into a list of CID numbers. */
void msm_ispif_vfe_get_cid(uint8_t intftype, char *cids, int *num)
{
	uint32_t data = 0;
	int i = 0, j = 0;
	switch (intftype) {
	case PIX0:
		data = msm_io_r(ispif->base +
			ISPIF_PIX_INTF_CID_MASK_ADDR);
		break;
	case RDI0:
		data = msm_io_r(ispif->base +
			ISPIF_RDI_INTF_CID_MASK_ADDR);
		break;
	case RDI1:
		data = msm_io_r(ispif->base +
			ISPIF_RDI_1_INTF_CID_MASK_ADDR);
		break;
	default:
		break;
	}
	for (i = 0; i <= MAX_CID; i++) {
		if ((data & 0x1) == 0x1) {
			cids[j++] = i;
			(*num)++;
		}
		data >>= 1;
	}
}

/*
 * Subdev ioctl dispatcher.
 *
 * NOTE(review): the VIDIOC_MSM_ISPIF_RELEASE case has no return/break,
 * so it falls through to default and reports -ENOIOCTLCMD even though
 * the release was performed.
 */
static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, void *arg)
{
	switch (cmd) {
	case VIDIOC_MSM_ISPIF_CFG:
		return msm_ispif_config((struct msm_ispif_params_list *)arg);
	case VIDIOC_MSM_ISPIF_INIT:
		return msm_ispif_init((uint32_t *)arg);
	case VIDIOC_MSM_ISPIF_RELEASE:
		msm_ispif_release(sd);
	default:
		return -ENOIOCTLCMD;
	}
}

static struct v4l2_subdev_core_ops msm_ispif_subdev_core_ops = {
	.g_chip_ident = &msm_ispif_subdev_g_chip_ident,
	.ioctl = &msm_ispif_subdev_ioctl,
};

static struct v4l2_subdev_video_ops msm_ispif_subdev_video_ops = {
	.s_stream = &msm_ispif_subdev_video_s_stream,
};

static const struct v4l2_subdev_ops msm_ispif_subdev_ops = {
	.core = &msm_ispif_subdev_core_ops,
	.video = &msm_ispif_subdev_video_ops,
};

/*
 * Platform probe: allocate the singleton, register the subdev, and map
 * the "ispif" MMIO/IRQ resources. Cleans up on every failure path.
 */
static int __devinit ispif_probe(struct platform_device *pdev)
{
	int rc = 0;
	CDBG("%s\n", __func__);
	ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
	if (!ispif) {
		pr_err("%s: no enough memory\n", __func__);
		return -ENOMEM;
	}

	v4l2_subdev_init(&ispif->subdev, &msm_ispif_subdev_ops);
	v4l2_set_subdevdata(&ispif->subdev, ispif);
	platform_set_drvdata(pdev, &ispif->subdev);
	snprintf(ispif->subdev.name,
			sizeof(ispif->subdev.name), "ispif");
	mutex_init(&ispif->mutex);

	ispif->mem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "ispif");
	if (!ispif->mem) {
		pr_err("%s: no mem resource?\n", __func__);
		rc = -ENODEV;
		goto ispif_no_resource;
	}
	ispif->irq = platform_get_resource_byname(pdev,
					IORESOURCE_IRQ, "ispif");
	if (!ispif->irq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto ispif_no_resource;
	}
	ispif->io = request_mem_region(ispif->mem->start,
		resource_size(ispif->mem), pdev->name);
	if (!ispif->io) {
		pr_err("%s: no valid mem region\n", __func__);
		rc = -EBUSY;
		goto ispif_no_resource;
	}
	ispif->base = ioremap(ispif->mem->start,
		resource_size(ispif->mem));
	if (!ispif->base) {
		rc = -ENOMEM;
		goto ispif_no_mem;
	}

	ispif->pdev = pdev;
	return 0;

ispif_no_mem:
	release_mem_region(ispif->mem->start,
		resource_size(ispif->mem));
ispif_no_resource:
	mutex_destroy(&ispif->mutex);
	kfree(ispif);
	return rc;
}

static struct platform_driver ispif_driver = {
	.probe = ispif_probe,
	.driver = {
		.name = MSM_ISPIF_DRV_NAME,
		.owner = THIS_MODULE,
	},
};

static int __init msm_ispif_init_module(void)
{
	return platform_driver_register(&ispif_driver);
}

static void __exit msm_ispif_exit_module(void)
{
	platform_driver_unregister(&ispif_driver);
}

module_init(msm_ispif_init_module);
module_exit(msm_ispif_exit_module);
MODULE_DESCRIPTION("MSM ISP Interface driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
zarboz/Monarudo_M7_port
drivers/platform/msm/sps/sps_rm.c
303
16596
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * SPS (Smart Peripheral Switch) resource manager: owns the list of
 * BAM-to-BAM/memory connection mappings, allocating and freeing pipes,
 * descriptor FIFOs and data FIFOs, and driving client pipe state
 * transitions (DISCONNECT -> ALLOCATE -> CONNECT -> ENABLE and back).
 */

#include <linux/types.h>	/* u32 */
#include <linux/kernel.h>	/* pr_info() */
#include <linux/mutex.h>	/* mutex */
#include <linux/list.h>		/* list_head */
#include <linux/slab.h>		/* kzalloc() */
#include <linux/memory.h>	/* memset */

#include "spsi.h"
#include "sps_core.h"

/* "Clear" value for a client option field */
#define SPSRM_MAX_DESC_FIFO_SIZE    0xffff
#define SPSRM_MAX_DATA_FIFO_SIZE    0xffff

/* Singleton resource-manager instance, set by sps_rm_init(). */
static struct sps_rm *sps_rm;

/**
 * Initialize the resource manager singleton: empty connection list and
 * its protecting mutex. `options` is currently unused.
 */
int sps_rm_init(struct sps_rm *rm, u32 options)
{
	sps_rm = rm;
	INIT_LIST_HEAD(&sps_rm->connections_q);
	mutex_init(&sps_rm->lock);
	return 0;
}

/* Fill a client config with the SPSRM_CLEAR sentinel byte pattern. */
void sps_rm_config_init(struct sps_connect *connect)
{
	memset(connect, SPSRM_CLEAR, sizeof(*connect));
}

/*
 * Drop one reference from a mapping; free it when the count reaches
 * zero and no clients remain attached.
 *
 * NOTE(review): the SPS_ERR text ("Failed to allocate connection
 * struct") does not match the condition it reports (dangling client
 * pointers at teardown) — looks like a copy/paste of another message.
 */
static void sps_rm_remove_ref(struct sps_connection *map)
{
	map->refs--;
	if (map->refs <= 0) {
		if (map->client_src != NULL || map->client_dest != NULL)
			SPS_ERR("sps:Failed to allocate connection struct");
		list_del(&map->list);
		kfree(map);
	}
}

/*
 * Compare a client's requested connection against an existing mapping.
 * Fields set to the SPSRM_CLEAR sentinel in the request are wildcards.
 * Returns true on a match.
 */
static int sps_rm_map_match(const struct sps_connect *cfg,
			    const struct sps_connection *map)
{
	if (cfg->source != map->src.dev ||
	    cfg->destination != map->dest.dev)
		return false;

	if (cfg->src_pipe_index != SPSRM_CLEAR &&
	    cfg->src_pipe_index != map->src.pipe_index)
		return false;

	if (cfg->dest_pipe_index != SPSRM_CLEAR &&
	    cfg->dest_pipe_index != map->dest.pipe_index)
		return false;

	if (cfg->config != map->config)
		return false;

	if (cfg->desc.size != SPSRM_CLEAR) {
		if (cfg->desc.size != map->desc.size)
			return false;

		if (cfg->desc.phys_base != SPSRM_CLEAR &&
		    cfg->desc.base != (void *)SPSRM_CLEAR &&
		    (cfg->desc.phys_base != map->desc.phys_base ||
		     cfg->desc.base != map->desc.base)) {
			return false;
		}
	}

	if (cfg->data.size != SPSRM_CLEAR) {
		if (cfg->data.size != map->data.size)
			return false;

		if (cfg->data.phys_base != SPSRM_CLEAR &&
		    cfg->data.base != (void *)SPSRM_CLEAR &&
		    (cfg->data.phys_base != map->data.phys_base ||
		     cfg->data.base != map->data.base))
			return false;
	}

	return true;
}

/*
 * Find an existing mapping that matches the pipe's config and still has
 * a free endpoint for the pipe's direction; NULL when none exists.
 */
static struct sps_connection *find_unconnected(struct sps_pipe *pipe)
{
	struct sps_connect *cfg = &pipe->connect;
	struct sps_connection *map;

	list_for_each_entry(map, &sps_rm->connections_q, list) {
		if (sps_rm_map_match(cfg, map))
			if ((cfg->mode == SPS_MODE_SRC
			     && map->client_src == NULL)
			    || (cfg->mode != SPS_MODE_SRC
				&& map->client_dest == NULL))
				return map;
	}

	return NULL;	/* Map not found */
}

/*
 * Attach a client pipe to one endpoint of a mapping and copy the
 * resolved pipe/FIFO parameters back into the client's config.
 * Moves the client to SPS_STATE_ALLOCATE. Returns 0 or SPS_ERROR.
 */
static int sps_rm_assign(struct sps_pipe *pipe,
			 struct sps_connection *map)
{
	struct sps_connect *cfg = &pipe->connect;

	/* The endpoint for this direction must be free. */
	if ((cfg->mode == SPS_MODE_SRC && map->client_src != NULL) ||
	    (cfg->mode != SPS_MODE_SRC && map->client_dest != NULL)) {
		SPS_ERR("sps:The end point is already connected.\n");
		return SPS_ERROR;
	}

	/* The endpoint must be backed by a BAM. */
	if ((cfg->mode == SPS_MODE_SRC && map->src.bam == NULL) ||
	    (cfg->mode != SPS_MODE_SRC && map->dest.bam == NULL)) {
		SPS_ERR("sps:The end point is empty.\n");
		return SPS_ERROR;
	}

	if (cfg->mode == SPS_MODE_SRC) {
		map->client_src = pipe;
		pipe->bam = map->src.bam;
		pipe->pipe_index = map->src.pipe_index;
		if (pipe->connect.event_thresh != SPSRM_CLEAR)
			map->src.event_threshold =
				pipe->connect.event_thresh;
		if (pipe->connect.lock_group != SPSRM_CLEAR)
			map->src.lock_group = pipe->connect.lock_group;
	} else {
		map->client_dest = pipe;
		pipe->bam = map->dest.bam;
		pipe->pipe_index = map->dest.pipe_index;
		if (pipe->connect.event_thresh != SPSRM_CLEAR)
			map->dest.event_threshold =
				pipe->connect.event_thresh;
		if (pipe->connect.lock_group != SPSRM_CLEAR)
			map->dest.lock_group = pipe->connect.lock_group;
	}
	pipe->map = map;

	SPS_DBG("sps:sps_rm_assign.bam 0x%x.pipe_index=%d\n",
			BAM_ID(pipe->bam), pipe->pipe_index);

	/* Report resolved values back to the client. */
	pipe->connect.src_pipe_index = map->src.pipe_index;
	pipe->connect.dest_pipe_index = map->dest.pipe_index;
	pipe->connect.desc = map->desc;
	pipe->connect.data = map->data;

	pipe->client_state = SPS_STATE_ALLOCATE;

	return 0;
}

/*
 * Release every resource this mapping allocated itself (BAM pipes,
 * BAM-DMA channels, desc/data FIFO I/O memory). No-op while either
 * client is still attached. Client-supplied resources are untouched.
 */
static void sps_rm_free_map_rsrc(struct sps_connection *map)
{
	struct sps_bam *bam;

	if (map->client_src != NULL || map->client_dest != NULL)
		return;

	if (map->alloc_src_pipe != SPS_BAM_PIPE_INVALID) {
		bam = map->src.bam;
		sps_bam_pipe_free(bam, map->src.pipe_index);

#ifdef CONFIG_SPS_SUPPORT_BAMDMA
		if ((bam->props.options & SPS_BAM_OPT_BAMDMA))
			sps_dma_pipe_free(bam, map->src.pipe_index);
#endif
		map->alloc_src_pipe = SPS_BAM_PIPE_INVALID;
		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
	}
	if (map->alloc_dest_pipe != SPS_BAM_PIPE_INVALID) {
		bam = map->dest.bam;
		sps_bam_pipe_free(bam, map->dest.pipe_index);

#ifdef CONFIG_SPS_SUPPORT_BAMDMA
		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
			sps_dma_pipe_free(bam, map->dest.pipe_index);
		}
#endif
		map->alloc_dest_pipe = SPS_BAM_PIPE_INVALID;
		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
	}
	if (map->alloc_desc_base != SPS_ADDR_INVALID) {
		sps_mem_free_io(map->alloc_desc_base, map->desc.size);

		map->alloc_desc_base = SPS_ADDR_INVALID;
		map->desc.phys_base = SPS_ADDR_INVALID;
	}
	if (map->alloc_data_base != SPS_ADDR_INVALID) {
		sps_mem_free_io(map->alloc_data_base, map->data.size);

		map->alloc_data_base = SPS_ADDR_INVALID;
		map->data.phys_base = SPS_ADDR_INVALID;
	}
}

/*
 * Initialize a fresh mapping from a client config: copy the fields the
 * client pinned, mark everything else invalid/unallocated.
 */
static void sps_rm_init_map(struct sps_connection *map,
			    const struct sps_connect *cfg)
{
	/* Clearing all fields also marks "nothing allocated yet". */
	memset(map, 0, sizeof(*map));
	map->desc.phys_base = SPS_ADDR_INVALID;
	map->data.phys_base = SPS_ADDR_INVALID;
	map->alloc_desc_base = SPS_ADDR_INVALID;
	map->alloc_data_base = SPS_ADDR_INVALID;
	map->alloc_src_pipe = SPS_BAM_PIPE_INVALID;
	map->alloc_dest_pipe = SPS_BAM_PIPE_INVALID;

	map->src.dev = cfg->source;
	map->dest.dev = cfg->destination;
	map->desc.size = cfg->desc.size;
	map->data.size = cfg->data.size;
	map->config = cfg->config;

	/* Only accept a client-supplied FIFO if both base fields are set. */
	if (map->desc.size != SPSRM_CLEAR &&
	    cfg->desc.phys_base != SPSRM_CLEAR &&
	    cfg->desc.base != (void *)SPSRM_CLEAR)
		map->desc = cfg->desc;

	if (map->data.size != SPSRM_CLEAR &&
	    cfg->data.phys_base != SPSRM_CLEAR &&
	    cfg->data.base != (void *)SPSRM_CLEAR)
		map->data = cfg->data;

	if (cfg->src_pipe_index != SPSRM_CLEAR)
		map->src.pipe_index = cfg->src_pipe_index;
	else
		map->src.pipe_index = SPS_BAM_PIPE_INVALID;


	if (cfg->dest_pipe_index != SPSRM_CLEAR)
		map->dest.pipe_index = cfg->dest_pipe_index;
	else
		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
}

/*
 * Create a brand-new connection mapping for a client pipe: resolve both
 * BAM handles, allocate pipes (and BAM-DMA channels when applicable),
 * size and allocate the descriptor/data FIFOs, then assign the client.
 * On any failure all partially-allocated resources are released.
 * Returns the mapping or NULL.
 */
static struct sps_connection *sps_rm_create(struct sps_pipe *pipe)
{
	struct sps_connection *map;
	struct sps_bam *bam;
	u32 desc_size;
	u32 data_size;
	enum sps_mode dir;
	int success = false;

	/* Allocate new connection */
	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		SPS_ERR("sps:Failed to allocate connection struct");
		return NULL;
	}

	/* Initialize connection struct */
	sps_rm_init_map(map, &pipe->connect);
	dir = pipe->connect.mode;

	/* Resolve the BAM(s) and validate the client's end point */
	success = false;

	/* Get BAMs */
	map->src.bam = sps_h2bam(map->src.dev);
	if (map->src.bam == NULL) {
		if (map->src.dev != SPS_DEV_HANDLE_MEM) {
			SPS_ERR("sps:Invalid BAM handle: 0x%x", map->src.dev);
			goto exit_err;
		}
		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
	}
	map->dest.bam = sps_h2bam(map->dest.dev);
	if (map->dest.bam == NULL) {
		if (map->dest.dev != SPS_DEV_HANDLE_MEM) {
			SPS_ERR("sps:Invalid BAM handle: 0x%x", map->dest.dev);
			goto exit_err;
		}
		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
	}

	/* The client's endpoint direction must be backed by a BAM. */
	if ((dir == SPS_MODE_SRC && map->src.bam == NULL) ||
	    (dir != SPS_MODE_SRC && map->dest.bam == NULL)) {
		SPS_ERR("sps:Invalid BAM endpt: dir %d src 0x%x dest 0x%x",
			dir, map->src.dev, map->dest.dev);
		goto exit_err;
	}

	/* Allocate the source pipe (and BAM-DMA channel if needed). */
	if (map->src.bam != NULL) {
		bam = map->src.bam;
		map->alloc_src_pipe = sps_bam_pipe_alloc(bam,
							map->src.pipe_index);
		if (map->alloc_src_pipe == SPS_BAM_PIPE_INVALID)
			goto exit_err;
		map->src.pipe_index = map->alloc_src_pipe;

#ifdef CONFIG_SPS_SUPPORT_BAMDMA
		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
			int rc;
			/* Allocate the BAM-DMA channel */
			rc = sps_dma_pipe_alloc(bam, map->src.pipe_index,
						 SPS_MODE_SRC);
			if (rc) {
				SPS_ERR("sps:Failed to alloc BAM-DMA pipe: %d",
					map->src.pipe_index);
				goto exit_err;
			}
		}
#endif
		map->src.bam_phys = bam->props.phys_addr;
		map->src.event_threshold = bam->props.event_threshold;
	}

	/* Allocate the destination pipe (and BAM-DMA channel if needed). */
	if (map->dest.bam != NULL) {
		bam = map->dest.bam;
		map->alloc_dest_pipe = sps_bam_pipe_alloc(bam,
							 map->dest.pipe_index);
		if (map->alloc_dest_pipe == SPS_BAM_PIPE_INVALID)
			goto exit_err;

		map->dest.pipe_index = map->alloc_dest_pipe;

#ifdef CONFIG_SPS_SUPPORT_BAMDMA
		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
			int rc;
			/* Allocate the BAM-DMA channel */
			rc = sps_dma_pipe_alloc(bam, map->dest.pipe_index,
					       SPS_MODE_DEST);
			if (rc) {
				SPS_ERR("sps:Failed to alloc BAM-DMA pipe: %d",
					map->dest.pipe_index);
				goto exit_err;
			}
		}
#endif
		map->dest.bam_phys = bam->props.phys_addr;
		map->dest.event_threshold =
		bam->props.event_threshold;
	}

	/* Default FIFO sizes: the larger requirement of the two BAMs. */
	desc_size = 0;
	data_size = 0;
	if (map->src.bam != NULL) {
		bam = map->src.bam;
		desc_size = bam->props.desc_size;
		data_size = bam->props.data_size;
	}
	if (map->dest.bam != NULL) {
		bam = map->dest.bam;
		if (bam->props.desc_size > desc_size)
			desc_size = bam->props.desc_size;
		if (bam->props.data_size > data_size)
			data_size = bam->props.data_size;
	}

	if (map->desc.size == SPSRM_CLEAR)
		map->desc.size = desc_size;
	if (map->src.bam != NULL && map->dest.bam != NULL) {
		/* BAM-to-BAM requires a data FIFO */
		if (map->data.size == SPSRM_CLEAR)
			map->data.size = data_size;
	} else {
		map->data.size = 0;
	}
	if (map->desc.size > SPSRM_MAX_DESC_FIFO_SIZE) {
		SPS_ERR("sps:Invalid desc FIFO size: 0x%x", map->desc.size);
		goto exit_err;
	}
	if (map->src.bam != NULL && map->dest.bam != NULL &&
	    map->data.size > SPSRM_MAX_DATA_FIFO_SIZE) {
		SPS_ERR("sps:Invalid data FIFO size: 0x%x", map->data.size);
		goto exit_err;
	}

	/* Allocate the descriptor FIFO unless the client supplied one. */
	if (map->desc.size && map->desc.phys_base == SPS_ADDR_INVALID) {
		map->alloc_desc_base = sps_mem_alloc_io(map->desc.size);
		if (map->alloc_desc_base == SPS_ADDR_INVALID) {
			SPS_ERR("sps:I/O memory allocation failure:0x%x",
				map->desc.size);
			goto exit_err;
		}
		map->desc.phys_base = map->alloc_desc_base;
		map->desc.base = spsi_get_mem_ptr(map->desc.phys_base);
		if (map->desc.base == NULL) {
			SPS_ERR("sps:Cannot get virt addr for I/O buffer:0x%x",
				map->desc.phys_base);
			goto exit_err;
		}
	}

	/* Allocate the data FIFO unless the client supplied one. */
	if (map->data.size && map->data.phys_base == SPS_ADDR_INVALID) {
		map->alloc_data_base = sps_mem_alloc_io(map->data.size);
		if (map->alloc_data_base == SPS_ADDR_INVALID) {
			SPS_ERR("sps:I/O memory allocation failure:0x%x",
				map->data.size);
			goto exit_err;
		}
		map->data.phys_base = map->alloc_data_base;
		map->data.base = spsi_get_mem_ptr(map->data.phys_base);
		if (map->data.base == NULL) {
			SPS_ERR("sps:Cannot get virt addr for I/O buffer:0x%x",
				map->data.phys_base);
			goto exit_err;
		}
	}

	/* Attach the requesting client to the new mapping. */
	if (sps_rm_assign(pipe, map)) {
		SPS_ERR("sps:failed to assign a connection to the client.\n");
		goto exit_err;
	}

	/* Initialization completed successfully */
	success = true;

exit_err:
	if (!success) {
		sps_rm_free_map_rsrc(map);
		kfree(map);
		return NULL;
	}

	return map;
}

/*
 * Detach a client from its mapping, release map resources when it was
 * the last client, and drop a reference. Moves the client back to
 * SPS_STATE_DISCONNECT.
 */
static int sps_rm_free(struct sps_pipe *pipe)
{
	struct sps_connection *map = (void *)pipe->map;
	struct sps_connect *cfg = &pipe->connect;

	mutex_lock(&sps_rm->lock);

	/* Release this client's hold on the connection map */
	if (cfg->mode == SPS_MODE_SRC)
		map->client_src = NULL;
	else
		map->client_dest = NULL;

	pipe->map = NULL;
	pipe->client_state = SPS_STATE_DISCONNECT;
	sps_rm_free_map_rsrc(map);

	sps_rm_remove_ref(map);

	mutex_unlock(&sps_rm->lock);

	return 0;
}

/*
 * Allocate (or share) a connection mapping for a client pipe: apply a
 * static board-file mapping if the client asked for a non-default
 * config, reuse a compatible half-connected mapping, or create a new
 * one. Returns 0 or SPS_ERROR.
 */
static int sps_rm_alloc(struct sps_pipe *pipe)
{
	struct sps_connection *map;
	int result = SPS_ERROR;

	/* Reject a config struct the client never initialized. */
	if (pipe->connect.sps_reserved != SPSRM_CLEAR) {
		/* Client did not call sps_rm_config_init(); salvage the
		 * four fields that are known to be set. */
		u32 source = pipe->connect.source;
		u32 destination = pipe->connect.destination;
		enum sps_mode mode = pipe->connect.mode;
		u32 config = pipe->connect.config;
		memset(&pipe->connect, SPSRM_CLEAR,
			      sizeof(pipe->connect));
		pipe->connect.source = source;
		pipe->connect.destination = destination;
		pipe->connect.mode = mode;
		pipe->connect.config = config;
	}
	if (pipe->connect.config == SPSRM_CLEAR)
		pipe->connect.config = SPS_CONFIG_DEFAULT;

	/* Non-default config: look up the static connection mapping. */
	if (pipe->connect.config != SPS_CONFIG_DEFAULT) {
		if (sps_map_find(&pipe->connect)) {
			SPS_ERR("sps:Failed to find connection mapping");
			return SPS_ERROR;
		}
	}

	mutex_lock(&sps_rm->lock);
	/* Check client state */
	if (IS_SPS_STATE_OK(pipe)) {
		SPS_ERR("sps:Client connection already allocated");
		goto exit_err;
	}

	/* Prefer sharing an existing half-connected mapping. */
	map = find_unconnected(pipe);
	if (map != NULL) {
		/* Attempt to assign the client to the connection */
		if (sps_rm_assign(pipe, map))
			/* Assignment failed, so must allocate new */
			map = NULL;
	}

	/* Allocate a new connection if necessary */
	if (map == NULL) {
		map = sps_rm_create(pipe);
		if (map == NULL) {
			SPS_ERR("sps:Failed to allocate connection");
			goto exit_err;
		}
		list_add_tail(&map->list, &sps_rm->connections_q);
	}
	map->refs++;
	result = 0;
exit_err:
	mutex_unlock(&sps_rm->lock);

	if (result)
		return SPS_ERROR;

	return 0;
}

/* Thin wrapper releasing a client's mapping. */
static int sps_rm_disconnect(struct sps_pipe *pipe)
{
	sps_rm_free(pipe);
	return 0;
}

/*
 * Drive a client pipe through one (or several chained) state
 * transitions toward `state`:
 *   DISCONNECT->ALLOCATE  allocate the mapping
 *   ALLOCATE->CONNECT     program the BAM pipe
 *   CONNECT->ENABLE       start data flow (may be automatic for
 *                         memory endpoints with an allocated desc FIFO)
 *   ENABLE->DISABLE       halt data flow
 *   CONNECT->DISCONNECT   tear the pipe down and free resources
 * Returns 0 or SPS_ERROR.
 */
int sps_rm_state_change(struct sps_pipe *pipe, u32 state)
{
	int auto_enable = false;
	int result;

	/* Allocate the pipe */
	if (pipe->client_state == SPS_STATE_DISCONNECT &&
	    state == SPS_STATE_ALLOCATE) {
		if (sps_rm_alloc(pipe)) {
			SPS_ERR("sps:Fail to allocate resource for"
				" BAM 0x%x pipe %d",
					(u32) pipe->bam, pipe->pipe_index);
			return SPS_ERROR;
		}
	}

	/* Configure the pipe */
	if (pipe->client_state == SPS_STATE_ALLOCATE &&
	    state == SPS_STATE_CONNECT) {
		/* Connect the BAM pipe */
		struct sps_bam_connect_param params;
		memset(&params, 0, sizeof(params));
		params.mode = pipe->connect.mode;
		if (pipe->connect.options != SPSRM_CLEAR) {
			params.options = pipe->connect.options;
			params.irq_gen_addr = pipe->connect.irq_gen_addr;
			params.irq_gen_data = pipe->connect.irq_gen_data;
		}
		result = sps_bam_pipe_connect(pipe, &params);
		if (result) {
			SPS_ERR("sps:Failed to connect BAM 0x%x pipe %d",
					(u32) pipe->bam, pipe->pipe_index);
			return SPS_ERROR;
		}
		pipe->client_state = SPS_STATE_CONNECT;

		/* A memory endpoint with an RM-allocated desc FIFO is
		 * enabled automatically. */
		if (pipe->connect.source == SPS_DEV_HANDLE_MEM ||
		    pipe->connect.destination == SPS_DEV_HANDLE_MEM) {
			if (pipe->map->desc.size != 0 &&
			    pipe->map->desc.phys_base != SPS_ADDR_INVALID)
				auto_enable = true;
		}
	}

	/* Enable the pipe data flow */
	if (pipe->client_state == SPS_STATE_CONNECT &&
	    !(state == SPS_STATE_DISABLE
	      || state == SPS_STATE_DISCONNECT) &&
	    (state == SPS_STATE_ENABLE || auto_enable
	     || (pipe->connect.options & SPS_O_AUTO_ENABLE))) {
		result = sps_bam_pipe_enable(pipe->bam, pipe->pipe_index);
		if (result) {
			SPS_ERR("sps:Failed to set BAM 0x%x pipe %d flow on",
				pipe->bam->props.phys_addr,
				pipe->pipe_index);
			return SPS_ERROR;
		}

#ifdef CONFIG_SPS_SUPPORT_BAMDMA
		/* Activate the BAM-DMA channel as well. */
		if ((pipe->bam->props.options & SPS_BAM_OPT_BAMDMA)) {
			result = sps_dma_pipe_enable(pipe->bam,
						     pipe->pipe_index);
			if (result) {
				SPS_ERR("sps:Failed to activate BAM-DMA"
					" pipe: %d", pipe->pipe_index);
				return SPS_ERROR;
			}
		}
#endif
		pipe->client_state = SPS_STATE_ENABLE;
	}

	/* Disable the pipe data flow */
	if (pipe->client_state == SPS_STATE_ENABLE &&
	    (state == SPS_STATE_DISABLE	|| state == SPS_STATE_DISCONNECT)) {
		result = sps_bam_pipe_disable(pipe->bam, pipe->pipe_index);
		if (result) {
			SPS_ERR("sps:Failed to set BAM 0x%x pipe %d flow off",
				pipe->bam->props.phys_addr,
				pipe->pipe_index);
			return SPS_ERROR;
		}
		pipe->client_state = SPS_STATE_CONNECT;
	}

	/* Disconnect the BAM pipe */
	if (pipe->client_state == SPS_STATE_CONNECT &&
	    state == SPS_STATE_DISCONNECT) {
		struct sps_connection *map;
		u32 pipe_index;

		if (pipe->connect.mode == SPS_MODE_SRC)
			pipe_index = pipe->map->src.pipe_index;
		else
			pipe_index = pipe->map->dest.pipe_index;


		result = sps_bam_pipe_disconnect(pipe->bam, pipe_index);
		if (result) {
			SPS_ERR("sps:Failed to disconnect BAM 0x%x pipe %d",
				pipe->bam->props.phys_addr,
				pipe->pipe_index);
			return SPS_ERROR;
		}

		/* Detach this client from the mapping before freeing. */
		map = (void *)pipe->map;
		if (pipe->connect.mode == SPS_MODE_SRC)
			map->client_src = NULL;
		else if (pipe->connect.mode == SPS_MODE_DEST)
			map->client_dest = NULL;

		sps_rm_disconnect(pipe);

		/* Clear the client connection state */
		pipe->map = NULL;
		pipe->bam = NULL;
		pipe->client_state = SPS_STATE_DISCONNECT;
	}

	return 0;
}
gpl-2.0
buglabs/android-froyo-kernel
drivers/staging/wlan-ng/p80211netdev.c
559
30311
/* src/p80211/p80211knetdev.c * * Linux Kernel net device interface * * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. * -------------------------------------------------------------------- * * linux-wlan * * The contents of this file are subject to the Mozilla Public * License Version 1.1 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU Public License version 2 (the "GPL"), in which * case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use * your version of this file under the MPL, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete * the provisions above, a recipient may use your version of this * file under either the MPL or the GPL. * * -------------------------------------------------------------------- * * Inquiries regarding the linux-wlan Open Source project can be * made directly to: * * AbsoluteValue Systems Inc. * info@linux-wlan.com * http://www.linux-wlan.com * * -------------------------------------------------------------------- * * Portions of the development of this software were funded by * Intersil Corporation as part of PRISM(R) chipset product development. * * -------------------------------------------------------------------- * * The functions required for a Linux network device are defined here. 
* * -------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/kmod.h> #include <linux/if_arp.h> #include <linux/wireless.h> #include <linux/sockios.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/byteorder/generic.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #ifdef SIOCETHTOOL #include <linux/ethtool.h> #endif #include <net/iw_handler.h> #include <net/net_namespace.h> #include "p80211types.h" #include "p80211hdr.h" #include "p80211conv.h" #include "p80211mgmt.h" #include "p80211msg.h" #include "p80211netdev.h" #include "p80211ioctl.h" #include "p80211req.h" #include "p80211metastruct.h" #include "p80211metadef.h" /* Support functions */ static void p80211netdev_rx_bh(unsigned long arg); /* netdevice method functions */ static int p80211knetdev_init(netdevice_t *netdev); static struct net_device_stats *p80211knetdev_get_stats(netdevice_t *netdev); static int p80211knetdev_open(netdevice_t *netdev); static int p80211knetdev_stop(netdevice_t *netdev); static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, netdevice_t *netdev); static void p80211knetdev_set_multicast_list(netdevice_t *dev); static int p80211knetdev_do_ioctl(netdevice_t *dev, struct ifreq *ifr, int cmd); static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr); static void p80211knetdev_tx_timeout(netdevice_t *netdev); static int p80211_rx_typedrop(wlandevice_t *wlandev, u16 fc); int wlan_watchdog = 5000; module_param(wlan_watchdog, int, 0644); MODULE_PARM_DESC(wlan_watchdog, "transmit timeout in milliseconds"); int wlan_wext_write = 1; module_param(wlan_wext_write, int, 0644); MODULE_PARM_DESC(wlan_wext_write, "enable write wireless extensions"); 
/*----------------------------------------------------------------
 * p80211knetdev_init
 *
 * Init method for a Linux netdevice.  Called in response to
 * register_netdev.
 *
 * Arguments:
 *	none
 *
 * Returns:
 *	nothing
 ----------------------------------------------------------------*/
static int p80211knetdev_init(netdevice_t *netdev)
{
	/* Probing was already performed by the MSD and create_kdev,
	 * so there is nothing left to do here — just report success
	 * back to register_netdev. */
	return 0;
}

/*----------------------------------------------------------------
 * p80211knetdev_get_stats
 *
 * Statistics retrieval for linux netdevices.  Here we're reporting
 * the Linux i/f level statistics.  Hence, for the primary numbers,
 * we don't want to report the numbers from the MIB.  Eventually,
 * it might be useful to collect some of the error counters though.
 *
 * Arguments:
 *	netdev		Linux netdevice
 *
 * Returns:
 *	the address of the statistics structure
 ----------------------------------------------------------------*/
static struct net_device_stats *p80211knetdev_get_stats(netdevice_t *netdev)
{
	wlandevice_t *wlandev = netdev->ml_priv;

	/* TODO: review the MIB stats for items that correspond to
	 * linux stats */
	return &wlandev->linux_stats;
}

/*----------------------------------------------------------------
 * p80211knetdev_open
 *
 * Linux netdevice open method.  Following a successful call here,
 * the device is supposed to be ready for tx and rx.  In our
 * situation that may not be entirely true due to the state of the
 * MAC below.
* * Arguments: * netdev Linux network device structure * * Returns: * zero on success, non-zero otherwise ----------------------------------------------------------------*/ static int p80211knetdev_open(netdevice_t *netdev) { int result = 0; /* success */ wlandevice_t *wlandev = netdev->ml_priv; /* Check to make sure the MSD is running */ if (wlandev->msdstate != WLAN_MSD_RUNNING) return -ENODEV; /* Tell the MSD to open */ if (wlandev->open != NULL) { result = wlandev->open(wlandev); if (result == 0) { netif_start_queue(wlandev->netdev); wlandev->state = WLAN_DEVICE_OPEN; } } else { result = -EAGAIN; } return result; } /*---------------------------------------------------------------- * p80211knetdev_stop * * Linux netdevice stop (close) method. Following this call, * no frames should go up or down through this interface. * * Arguments: * netdev Linux network device structure * * Returns: * zero on success, non-zero otherwise ----------------------------------------------------------------*/ static int p80211knetdev_stop(netdevice_t *netdev) { int result = 0; wlandevice_t *wlandev = netdev->ml_priv; if (wlandev->close != NULL) result = wlandev->close(wlandev); netif_stop_queue(wlandev->netdev); wlandev->state = WLAN_DEVICE_CLOSED; return result; } /*---------------------------------------------------------------- * p80211netdev_rx * * Frame receive function called by the mac specific driver. * * Arguments: * wlandev WLAN network device structure * skb skbuff containing a full 802.11 frame. * Returns: * nothing * Side effects: * ----------------------------------------------------------------*/ void p80211netdev_rx(wlandevice_t *wlandev, struct sk_buff *skb) { /* Enqueue for post-irq processing */ skb_queue_tail(&wlandev->nsd_rxq, skb); tasklet_schedule(&wlandev->rx_bh); return; } /*---------------------------------------------------------------- * p80211netdev_rx_bh * * Deferred processing of all received frames. 
 *
 * Arguments:
 *	wlandev		WLAN network device structure
 *	skb		skbuff containing a full 802.11 frame.
 * Returns:
 *	nothing
 * Side effects:
 *
 ----------------------------------------------------------------*/
/* Tasklet body: drain nsd_rxq, delivering each frame either raw
 * (non-ether netdev type) or converted to an ethernet frame. */
static void p80211netdev_rx_bh(unsigned long arg)
{
	wlandevice_t *wlandev = (wlandevice_t *) arg;
	struct sk_buff *skb = NULL;
	netdevice_t *dev = wlandev->netdev;
	p80211_hdr_a3_t *hdr;
	u16 fc;

	/* Let's empty our queue */
	while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
		if (wlandev->state == WLAN_DEVICE_OPEN) {

			if (dev->type != ARPHRD_ETHER) {
				/* RAW frame; we shouldn't convert it */
				/* XXX Append the Prism Header here instead. */

				/* set up various data fields */
				skb->dev = dev;
				skb_reset_mac_header(skb);
				skb->ip_summed = CHECKSUM_NONE;
				skb->pkt_type = PACKET_OTHERHOST;
				skb->protocol = htons(ETH_P_80211_RAW);
				dev->last_rx = jiffies;

				wlandev->linux_stats.rx_packets++;
				wlandev->linux_stats.rx_bytes += skb->len;
				netif_rx_ni(skb);
				continue;
			} else {
				hdr = (p80211_hdr_a3_t *) skb->data;
				fc = le16_to_cpu(hdr->fc);
				if (p80211_rx_typedrop(wlandev, fc)) {
					dev_kfree_skb(skb);
					continue;
				}

				/* perform mcast filtering */
				if (wlandev->netdev->flags & IFF_ALLMULTI) {
					/* allow my local address through */
					if (memcmp
					    (hdr->a1,
					     wlandev->netdev->dev_addr,
					     ETH_ALEN) != 0) {
						/* but reject anything else that
						 * isn't multicast */
						if (!(hdr->a1[0] & 0x01)) {
							dev_kfree_skb(skb);
							continue;
						}
					}
				}

				if (skb_p80211_to_ether
				    (wlandev, wlandev->ethconv, skb) == 0) {
					skb->dev->last_rx = jiffies;
					wlandev->linux_stats.rx_packets++;
					wlandev->linux_stats.rx_bytes +=
					    skb->len;
					netif_rx_ni(skb);
					continue;
				}
				pr_debug("p80211_to_ether failed.\n");
			}
		}
		/* Device closed, typedropped, or conversion failed:
		 * the frame is ours to free. */
		dev_kfree_skb(skb);
	}
}

/*----------------------------------------------------------------
 * p80211knetdev_hard_start_xmit
 *
 * Linux netdevice method for transmitting a frame.
 *
 * Arguments:
 *	skb	Linux sk_buff containing the frame.
 *	netdev	Linux netdevice.
 *
 * Side effects:
 *	If the lower layers report that buffers are full.
netdev->tbusy * will be set to prevent higher layers from sending more traffic. * * Note: If this function returns non-zero, higher layers retain * ownership of the skb. * * Returns: * zero on success, non-zero on failure. ----------------------------------------------------------------*/ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, netdevice_t *netdev) { int result = 0; int txresult = -1; wlandevice_t *wlandev = netdev->ml_priv; p80211_hdr_t p80211_hdr; p80211_metawep_t p80211_wep; if (skb == NULL) return NETDEV_TX_OK; if (wlandev->state != WLAN_DEVICE_OPEN) { result = 1; goto failed; } memset(&p80211_hdr, 0, sizeof(p80211_hdr_t)); memset(&p80211_wep, 0, sizeof(p80211_metawep_t)); if (netif_queue_stopped(netdev)) { pr_debug("called when queue stopped.\n"); result = 1; goto failed; } netif_stop_queue(netdev); /* Check to see that a valid mode is set */ switch (wlandev->macmode) { case WLAN_MACMODE_IBSS_STA: case WLAN_MACMODE_ESS_STA: case WLAN_MACMODE_ESS_AP: break; default: /* Mode isn't set yet, just drop the frame * and return success . 
* TODO: we need a saner way to handle this */ if (skb->protocol != ETH_P_80211_RAW) { netif_start_queue(wlandev->netdev); printk(KERN_NOTICE "Tx attempt prior to association, frame dropped.\n"); wlandev->linux_stats.tx_dropped++; result = 0; goto failed; } break; } /* Check for raw transmits */ if (skb->protocol == ETH_P_80211_RAW) { if (!capable(CAP_NET_ADMIN)) { result = 1; goto failed; } /* move the header over */ memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr_t)); skb_pull(skb, sizeof(p80211_hdr_t)); } else { if (skb_ether_to_p80211 (wlandev, wlandev->ethconv, skb, &p80211_hdr, &p80211_wep) != 0) { /* convert failed */ pr_debug("ether_to_80211(%d) failed.\n", wlandev->ethconv); result = 1; goto failed; } } if (wlandev->txframe == NULL) { result = 1; goto failed; } netdev->trans_start = jiffies; wlandev->linux_stats.tx_packets++; /* count only the packet payload */ wlandev->linux_stats.tx_bytes += skb->len; txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep); if (txresult == 0) { /* success and more buf */ /* avail, re: hw_txdata */ netif_wake_queue(wlandev->netdev); result = NETDEV_TX_OK; } else if (txresult == 1) { /* success, no more avail */ pr_debug("txframe success, no more bufs\n"); /* netdev->tbusy = 1; don't set here, irqhdlr */ /* may have already cleared it */ result = NETDEV_TX_OK; } else if (txresult == 2) { /* alloc failure, drop frame */ pr_debug("txframe returned alloc_fail\n"); result = NETDEV_TX_BUSY; } else { /* buffer full or queue busy, drop frame. */ pr_debug("txframe returned full or busy\n"); result = NETDEV_TX_BUSY; } failed: /* Free up the WEP buffer if it's not the same as the skb */ if ((p80211_wep.data) && (p80211_wep.data != skb->data)) kzfree(p80211_wep.data); /* we always free the skb here, never in a lower level. 
*/ if (!result) dev_kfree_skb(skb); return result; } /*---------------------------------------------------------------- * p80211knetdev_set_multicast_list * * Called from higher lavers whenever there's a need to set/clear * promiscuous mode or rewrite the multicast list. * * Arguments: * none * * Returns: * nothing ----------------------------------------------------------------*/ static void p80211knetdev_set_multicast_list(netdevice_t *dev) { wlandevice_t *wlandev = dev->ml_priv; /* TODO: real multicast support as well */ if (wlandev->set_multicast_list) wlandev->set_multicast_list(wlandev, dev); } #ifdef SIOCETHTOOL static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr) { u32 ethcmd; struct ethtool_drvinfo info; struct ethtool_value edata; memset(&info, 0, sizeof(info)); memset(&edata, 0, sizeof(edata)); if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) return -EFAULT; switch (ethcmd) { case ETHTOOL_GDRVINFO: info.cmd = ethcmd; snprintf(info.driver, sizeof(info.driver), "p80211_%s", wlandev->nsdname); snprintf(info.version, sizeof(info.version), "%s", WLAN_RELEASE); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; #ifdef ETHTOOL_GLINK case ETHTOOL_GLINK: edata.cmd = ethcmd; if (wlandev->linkstatus && (wlandev->macmode != WLAN_MACMODE_NONE)) { edata.data = 1; } else { edata.data = 0; } if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; } #endif return -EOPNOTSUPP; } #endif /*---------------------------------------------------------------- * p80211knetdev_do_ioctl * * Handle an ioctl call on one of our devices. Everything Linux * ioctl specific is done here. Then we pass the contents of the * ifr->data to the request message handler. * * Arguments: * dev Linux kernel netdevice * ifr Our private ioctl request structure, typed for the * generic struct ifreq so we can use ptr to func * w/o cast. * * Returns: * zero on success, a negative errno on failure. 
Possible values:
 *	-ENETDOWN	Device isn't up.
 *	-EBUSY		cmd already in progress
 *	-ETIME		p80211 cmd timed out (MSD may have its own timers)
 *	-EFAULT		memory fault copying msg from user buffer
 *	-ENOMEM		unable to allocate kernel msg buffer
 *	-ENOSYS		bad magic, is the cmd really for us?
 *	-EintR		sleeping on cmd, awakened by signal, cmd cancelled.
 *
 * Call Context:
 *	Process thread (ioctl caller).  TODO: SMP support may require
 *	locks.
 ----------------------------------------------------------------*/
static int p80211knetdev_do_ioctl(netdevice_t *dev, struct ifreq *ifr, int cmd)
{
	int result = 0;
	p80211ioctl_req_t *req = (p80211ioctl_req_t *) ifr;
	wlandevice_t *wlandev = dev->ml_priv;
	u8 *msgbuf;

	pr_debug("rx'd ioctl, cmd=%d, len=%d\n", cmd, req->len);

#ifdef SIOCETHTOOL
	/* Ethtool requests are routed to the dedicated handler. */
	if (cmd == SIOCETHTOOL) {
		result =
		    p80211netdev_ethtool(wlandev, (void __user *)ifr->ifr_data);
		goto bail;
	}
#endif

	/* Test the magic, assume ifr is good if it's there */
	if (req->magic != P80211_IOCTL_MAGIC) {
		result = -ENOSYS;
		goto bail;
	}

	if (cmd == P80211_IFTEST) {
		result = 0;
		goto bail;
	} else if (cmd != P80211_IFREQ) {
		result = -ENOSYS;
		goto bail;
	}

	/* Allocate a buf of size req->len */
	/* NOTE(review): req->len comes from userspace and is passed to
	 * kmalloc/copy_from_user without an upper-bound check here —
	 * confirm it is validated elsewhere before trusting it. */
	if ((msgbuf = kmalloc(req->len, GFP_KERNEL))) {
		if (copy_from_user(msgbuf, (void __user *)req->data, req->len))
			result = -EFAULT;
		else
			result = p80211req_dorequest(wlandev, msgbuf);

		/* On success, copy the (possibly updated) message back. */
		if (result == 0) {
			if (copy_to_user
			    ((void __user *)req->data, msgbuf, req->len)) {
				result = -EFAULT;
			}
		}
		kfree(msgbuf);
	} else {
		result = -ENOMEM;
	}

bail:
	/* If allocate,copyfrom or copyto fails, return errno */
	return result;
}

/*----------------------------------------------------------------
 * p80211knetdev_set_mac_address
 *
 * Handles the ioctl for changing the MACAddress of a netdevice
 *
 * references: linux/netdevice.h and drivers/net/net_init.c
 *
 * NOTE: [MSM] We only prevent address changes when the netdev is
 * up.  We don't control anything based on dot11 state.
If the
 * address is changed on a STA that's currently associated, you
 * will probably lose the ability to send and receive data frames.
 * Just be aware.  Therefore, this should usually only be done
 * prior to scan/join/auth/assoc.
 *
 * Arguments:
 *	dev	netdevice struct
 *	addr	the new MACAddress (a struct)
 *
 * Returns:
 *	zero on success, a negative errno on failure.  Possible values:
 *	-EBUSY	device is bussy (cmd not possible)
 *	-and errors returned by: p80211req_dorequest(..)
 *
 * by: Collin R. Mulliner <collin@mulliner.org>
 ----------------------------------------------------------------*/
static int p80211knetdev_set_mac_address(netdevice_t *dev, void *addr)
{
	struct sockaddr *new_addr = addr;
	p80211msg_dot11req_mibset_t dot11req;
	p80211item_unk392_t *mibattr;
	p80211item_pstr6_t *macaddr;
	p80211item_uint32_t *resultcode;
	int result = 0;

	/* If we're running, we don't allow MAC address changes */
	if (netif_running(dev))
		return -EBUSY;

	/* Set up some convenience pointers. */
	mibattr = &dot11req.mibattribute;
	macaddr = (p80211item_pstr6_t *) & mibattr->data;
	resultcode = &dot11req.resultcode;

	/* Set up a dot11req_mibset */
	memset(&dot11req, 0, sizeof(p80211msg_dot11req_mibset_t));
	dot11req.msgcode = DIDmsg_dot11req_mibset;
	dot11req.msglen = sizeof(p80211msg_dot11req_mibset_t);
	memcpy(dot11req.devname,
	       ((wlandevice_t *) dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1);

	/* Set up the mibattribute argument */
	mibattr->did = DIDmsg_dot11req_mibset_mibattribute;
	mibattr->status = P80211ENUM_msgitem_status_data_ok;
	mibattr->len = sizeof(mibattr->data);

	macaddr->did = DIDmib_dot11mac_dot11OperationTable_dot11MACAddress;
	macaddr->status = P80211ENUM_msgitem_status_data_ok;
	macaddr->len = sizeof(macaddr->data);
	macaddr->data.len = ETH_ALEN;
	memcpy(&macaddr->data.data, new_addr->sa_data, ETH_ALEN);

	/* Set up the resultcode argument */
	resultcode->did = DIDmsg_dot11req_mibset_resultcode;
	resultcode->status = P80211ENUM_msgitem_status_no_value;
	resultcode->len = sizeof(resultcode->data);
	resultcode->data = 0;

	/* now fire the request */
	result = p80211req_dorequest(dev->ml_priv, (u8 *) & dot11req);

	/* If the request wasn't successful, report an error and don't
	 * change the netdev address
	 */
	if (result != 0 || resultcode->data != P80211ENUM_resultcode_success) {
		printk(KERN_ERR
		       "Low-level driver failed dot11req_mibset(dot11MACAddress).\n");
		result = -EADDRNOTAVAIL;
	} else {
		/* everything's ok, change the addr in netdev */
		memcpy(dev->dev_addr, new_addr->sa_data, dev->addr_len);
	}

	return result;
}

/* Validate and apply a new MTU; bounds come from the 802.11 payload
 * limit minus the ether+llc+snap and WEP overhead (see comment). */
static int wlan_change_mtu(netdevice_t *dev, int new_mtu)
{
	/* 2312 is max 802.11 payload, 20 is overhead, (ether + llc +snap)
	   and another 8 for wep. */
	if ((new_mtu < 68) || (new_mtu > (2312 - 20 - 8)))
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}

/* net_device method table shared by all p80211 interfaces. */
static const struct net_device_ops p80211_netdev_ops = {
	.ndo_init = p80211knetdev_init,
	.ndo_open = p80211knetdev_open,
	.ndo_stop = p80211knetdev_stop,
	.ndo_get_stats = p80211knetdev_get_stats,
	.ndo_start_xmit = p80211knetdev_hard_start_xmit,
	.ndo_set_multicast_list = p80211knetdev_set_multicast_list,
	.ndo_do_ioctl = p80211knetdev_do_ioctl,
	.ndo_set_mac_address = p80211knetdev_set_mac_address,
	.ndo_tx_timeout = p80211knetdev_tx_timeout,
	.ndo_change_mtu = wlan_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
};

/*----------------------------------------------------------------
 * wlan_setup
 *
 * Roughly matches the functionality of ether_setup.  Here
 * we set up any members of the wlandevice structure that are common
 * to all devices.  Additionally, we allocate a linux 'struct device'
 * and perform the same setup as ether_setup.
 *
 * Note: It's important that the caller have setup the wlandev->name
 *	ptr prior to calling this function.
 *
 * Arguments:
 *	wlandev		ptr to the wlandev structure for the
 *			interface.
 * Returns:
 *	zero on success, non-zero otherwise.
 * Call Context:
 *	Should be process thread.  We'll assume it might be
 *	interrupt though.
When we add support for statically
 *	compiled drivers, this function will be called in the
 *	context of the kernel startup code.
 ----------------------------------------------------------------*/
int wlan_setup(wlandevice_t *wlandev)
{
	netdevice_t *dev;

	/* Common wlandevice state shared by every interface. */
	wlandev->state = WLAN_DEVICE_CLOSED;
	wlandev->ethconv = WLAN_ETHCONV_8021h;
	wlandev->macmode = WLAN_MACMODE_NONE;

	/* Receive queue and its deferred-processing tasklet. */
	skb_queue_head_init(&wlandev->nsd_rxq);
	tasklet_init(&wlandev->rx_bh,
		     p80211netdev_rx_bh, (unsigned long)wlandev);

	/* Allocate and wire up the Linux net_device. */
	dev = alloc_netdev(0, "wlan%d", ether_setup);
	if (dev == NULL) {
		printk(KERN_ERR "Failed to alloc netdev.\n");
		return 1;
	}

	wlandev->netdev = dev;
	dev->ml_priv = wlandev;
	dev->netdev_ops = &p80211_netdev_ops;
	dev->wireless_handlers = &p80211wext_handler_def;

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}

/*----------------------------------------------------------------
 * wlan_unsetup
 *
 * This function is paired with the wlan_setup routine.  It should
 * be called after unregister_wlandev.  Basically, all it does is
 * free the 'struct device' that's associated with the wlandev.
 * We do it here because the 'struct device' isn't allocated
 * explicitly in the driver code, it's done in wlan_setup.  To
 * do the free in the driver might seem like 'magic'.
 *
 * Arguments:
 *	wlandev		ptr to the wlandev structure for the
 *			interface.
 * Returns:
 *	zero on success, non-zero otherwise.
 * Call Context:
 *	Should be process thread.  We'll assume it might be
 *	interrupt though.  When we add support for statically
 *	compiled drivers, this function will be called in the
 *	context of the kernel startup code.
----------------------------------------------------------------*/
int wlan_unsetup(wlandevice_t *wlandev)
{
	int result = 0;

	tasklet_kill(&wlandev->rx_bh);

	if (wlandev->netdev == NULL) {
		printk(KERN_ERR "called without wlandev->netdev set.\n");
		result = 1;
	} else {
		free_netdev(wlandev->netdev);
		wlandev->netdev = NULL;
	}

	/* BUGFIX: this previously returned 0 unconditionally, silently
	 * discarding the error recorded in 'result' above. */
	return result;
}

/*----------------------------------------------------------------
 * register_wlandev
 *
 * Roughly matches the functionality of register_netdev.  This function
 * is called after the driver has successfully probed and set up the
 * resources for the device.  It's now ready to become a named device
 * in the Linux system.
 *
 * First we allocate a name for the device (if not already set), then
 * we call the Linux function register_netdevice.
 *
 * Arguments:
 *	wlandev		ptr to the wlandev structure for the
 *			interface.
 * Returns:
 *	zero on success, non-zero otherwise.
 * Call Context:
 *	Can be either interrupt or not.
 ----------------------------------------------------------------*/
int register_wlandev(wlandevice_t *wlandev)
{
	/* register_netdev() already returns 0 on success or a negative
	 * errno on failure, so pass its result straight through. */
	return register_netdev(wlandev->netdev);
}

/*----------------------------------------------------------------
 * unregister_wlandev
 *
 * Roughly matches the functionality of unregister_netdev.  This
 * function is called to remove a named device from the system.
 *
 * First we tell linux that the device should no longer exist.
 * Then we remove it from the list of known wlan devices.
 *
 * Arguments:
 *	wlandev		ptr to the wlandev structure for the
 *			interface.
 * Returns:
 *	zero on success, non-zero otherwise.
 * Call Context:
 *	Can be either interrupt or not.
----------------------------------------------------------------*/
int unregister_wlandev(wlandevice_t *wlandev)
{
	struct sk_buff *skb;

	unregister_netdev(wlandev->netdev);

	/* Drain any frames still parked on the rx queue. */
	for (;;) {
		skb = skb_dequeue(&wlandev->nsd_rxq);
		if (skb == NULL)
			break;
		dev_kfree_skb(skb);
	}

	return 0;
}

/*----------------------------------------------------------------
 * p80211netdev_hwremoved
 *
 * Hardware removed notification.  This function should be called
 * immediately after an MSD has detected that the underlying hardware
 * has been yanked out from under us.  The primary things we need
 * to do are:
 *	- Mark the wlandev
 *	- Prevent any further traffic from the knetdev i/f
 *	- Prevent any further requests from mgmt i/f
 *	- If there are any waitq'd mgmt requests or mgmt-frame exchanges,
 *	  shut them down.
 *	- Call the MSD hwremoved function.
 *
 * The remainder of the cleanup will be handled by unregister().
 * Our primary goal here is to prevent as much tickling of the MSD
 * as possible since the MSD is already in a 'wounded' state.
 *
 * TODO: As new features are added, this function should be
 *	updated.
 *
 * Arguments:
 *	wlandev		WLAN network device structure
 * Returns:
 *	nothing
 * Side effects:
 *
 * Call context:
 *	Usually interrupt.
 ----------------------------------------------------------------*/
void p80211netdev_hwremoved(wlandevice_t *wlandev)
{
	wlandev->hwremoved = 1;

	if (wlandev->state == WLAN_DEVICE_OPEN)
		netif_stop_queue(wlandev->netdev);

	netif_device_detach(wlandev->netdev);
}

/*----------------------------------------------------------------
 * p80211_rx_typedrop
 *
 * Classifies the frame, increments the appropriate counter, and
 * returns 0|1|2 indicating whether the driver should handle, ignore, or
 * drop the frame
 *
 * Arguments:
 *	wlandev		wlan device structure
 *	fc		frame control field
 *
 * Returns:
 *	zero if the frame should be handled by the driver,
 *	one if the frame should be ignored
 *	anything else means we drop it.
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
 ----------------------------------------------------------------*/
static int p80211_rx_typedrop(wlandevice_t *wlandev, u16 fc)
{
	u16 ftype;
	u16 fstype;
	int drop = 0;
	/* Classify frame, increment counter */
	ftype = WLAN_GET_FC_FTYPE(fc);
	fstype = WLAN_GET_FC_FSTYPE(fc);
#if 0
	pr_debug("rx_typedrop : ftype=%d fstype=%d.\n", ftype, fstype);
#endif
	switch (ftype) {
	case WLAN_FTYPE_MGMT:
		/* In promisc/allmulti mode, mgmt frames are ignored (1)
		 * rather than dropped (2). */
		if ((wlandev->netdev->flags & IFF_PROMISC) ||
		    (wlandev->netdev->flags & IFF_ALLMULTI)) {
			drop = 1;
			break;
		}
		pr_debug("rx'd mgmt:\n");
		wlandev->rx.mgmt++;
		switch (fstype) {
		case WLAN_FSTYPE_ASSOCREQ:
			/* printk("assocreq"); */
			wlandev->rx.assocreq++;
			break;
		case WLAN_FSTYPE_ASSOCRESP:
			/* printk("assocresp"); */
			wlandev->rx.assocresp++;
			break;
		case WLAN_FSTYPE_REASSOCREQ:
			/* printk("reassocreq"); */
			wlandev->rx.reassocreq++;
			break;
		case WLAN_FSTYPE_REASSOCRESP:
			/* printk("reassocresp"); */
			wlandev->rx.reassocresp++;
			break;
		case WLAN_FSTYPE_PROBEREQ:
			/* printk("probereq"); */
			wlandev->rx.probereq++;
			break;
		case WLAN_FSTYPE_PROBERESP:
			/* printk("proberesp"); */
			wlandev->rx.proberesp++;
			break;
		case WLAN_FSTYPE_BEACON:
			/* printk("beacon"); */
			wlandev->rx.beacon++;
			break;
		case WLAN_FSTYPE_ATIM:
			/* printk("atim"); */
			wlandev->rx.atim++;
			break;
		case WLAN_FSTYPE_DISASSOC:
			/* printk("disassoc"); */
			wlandev->rx.disassoc++;
			break;
		case WLAN_FSTYPE_AUTHEN:
			/* printk("authen"); */
			wlandev->rx.authen++;
			break;
		case WLAN_FSTYPE_DEAUTHEN:
			/* printk("deauthen"); */
			wlandev->rx.deauthen++;
			break;
		default:
			/* printk("unknown"); */
			wlandev->rx.mgmt_unknown++;
			break;
		}
		/* printk("\n"); */
		drop = 2;
		break;

	case WLAN_FTYPE_CTL:
		if ((wlandev->netdev->flags & IFF_PROMISC) ||
		    (wlandev->netdev->flags & IFF_ALLMULTI)) {
			drop = 1;
			break;
		}
		pr_debug("rx'd ctl:\n");
		wlandev->rx.ctl++;
		switch (fstype) {
		case WLAN_FSTYPE_PSPOLL:
			/* printk("pspoll"); */
			wlandev->rx.pspoll++;
			break;
		case WLAN_FSTYPE_RTS:
			/* printk("rts"); */
			wlandev->rx.rts++;
			break;
		case WLAN_FSTYPE_CTS:
			/* printk("cts"); */
			wlandev->rx.cts++;
			break;
		case WLAN_FSTYPE_ACK:
			/* printk("ack"); */
			wlandev->rx.ack++;
			break;
		case WLAN_FSTYPE_CFEND:
			/* printk("cfend"); */
			wlandev->rx.cfend++;
			break;
		case WLAN_FSTYPE_CFENDCFACK:
			/* printk("cfendcfack"); */
			wlandev->rx.cfendcfack++;
			break;
		default:
			/* printk("unknown"); */
			wlandev->rx.ctl_unknown++;
			break;
		}
		/* printk("\n"); */
		drop = 2;
		break;

	case WLAN_FTYPE_DATA:
		/* Data frames are only counted here; drop stays 0 so the
		 * caller handles them. */
		wlandev->rx.data++;
		switch (fstype) {
		case WLAN_FSTYPE_DATAONLY:
			wlandev->rx.dataonly++;
			break;
		case WLAN_FSTYPE_DATA_CFACK:
			wlandev->rx.data_cfack++;
			break;
		case WLAN_FSTYPE_DATA_CFPOLL:
			wlandev->rx.data_cfpoll++;
			break;
		case WLAN_FSTYPE_DATA_CFACK_CFPOLL:
			wlandev->rx.data__cfack_cfpoll++;
			break;
		case WLAN_FSTYPE_NULL:
			pr_debug("rx'd data:null\n");
			wlandev->rx.null++;
			break;
		case WLAN_FSTYPE_CFACK:
			pr_debug("rx'd data:cfack\n");
			wlandev->rx.cfack++;
			break;
		case WLAN_FSTYPE_CFPOLL:
			pr_debug("rx'd data:cfpoll\n");
			wlandev->rx.cfpoll++;
			break;
		case WLAN_FSTYPE_CFACK_CFPOLL:
			pr_debug("rx'd data:cfack_cfpoll\n");
			wlandev->rx.cfack_cfpoll++;
			break;
		default:
			/* printk("unknown"); */
			wlandev->rx.data_unknown++;
			break;
		}
		break;
	}
	return drop;
}

/* Watchdog handler: delegate to the MSD's tx_timeout if it has one,
 * otherwise just log and restart the queue. */
static void p80211knetdev_tx_timeout(netdevice_t *netdev)
{
	wlandevice_t *wlandev = netdev->ml_priv;

	if (wlandev->tx_timeout) {
		wlandev->tx_timeout(wlandev);
	} else {
		printk(KERN_WARNING "Implement tx_timeout for %s\n",
		       wlandev->nsdname);
		netif_wake_queue(wlandev->netdev);
	}
}
gpl-2.0
souljaboy11792/linux
drivers/net/can/softing/softing_main.c
559
22047
/*
 * Copyright (C) 2008-2010
 *
 * - Kurt Van Dijck, EIA Electronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the version 2 of the GNU General Public License
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/io.h>

#include "softing.h"

/* echo skbs per channel: roughly half the DPRAM tx fifo depth */
#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)

/*
 * test whether a specific CAN netdev
 * is online (ie. up 'n running, not sleeping, not busoff)
 */
static inline int canif_is_active(struct net_device *netdev)
{
	struct can_priv *can = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;
	return (can->state <= CAN_STATE_ERROR_PASSIVE);
}

/* reset DPRAM: assert the reset bit (generation >= 2 cards only) */
static inline void softing_set_reset_dpram(struct softing *card)
{
	if (card->pdat->generation >= 2) {
		spin_lock_bh(&card->spin);
		iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
				&card->dpram[DPRAM_V2_RESET]);
		spin_unlock_bh(&card->spin);
	}
}

/* release the DPRAM reset bit (generation >= 2 cards only) */
static inline void softing_clr_reset_dpram(struct softing *card)
{
	if (card->pdat->generation >= 2) {
		spin_lock_bh(&card->spin);
		iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
				&card->dpram[DPRAM_V2_RESET]);
		spin_unlock_bh(&card->spin);
	}
}

/*
 * trigger the tx queue-ing:
 * serialize one CAN frame into the DPRAM tx fifo of the card
 */
static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
		struct net_device *dev)
{
	struct softing_priv *priv = netdev_priv(dev);
	struct softing *card = priv->card;
	int ret;
	uint8_t *ptr;
	uint8_t fifo_wr, fifo_rd;
	struct can_frame *cf = (struct can_frame *)skb->data;
	uint8_t buf[DPRAM_TX_SIZE];

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	spin_lock(&card->spin);

	ret = NETDEV_TX_BUSY;
	if (!card->fw.up ||
			(card->tx.pending >= TXMAX) ||
			(priv->tx.pending >= TX_ECHO_SKB_MAX))
		goto xmit_done;
	fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
	fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
	if (fifo_wr == fifo_rd)
		/* fifo full */
		goto xmit_done;
	/* assemble the frame in the card's wire format */
	memset(buf, 0, sizeof(buf));
	ptr = buf;
	*ptr = CMD_TX;
	if (cf->can_id & CAN_RTR_FLAG)
		*ptr |= CMD_RTR;
	if (cf->can_id & CAN_EFF_FLAG)
		*ptr |= CMD_XTD;
	if (priv->index)
		*ptr |= CMD_BUS2;
	++ptr;
	*ptr++ = cf->can_dlc;
	*ptr++ = (cf->can_id >> 0);
	*ptr++ = (cf->can_id >> 8);
	if (cf->can_id & CAN_EFF_FLAG) {
		*ptr++ = (cf->can_id >> 16);
		*ptr++ = (cf->can_id >> 24);
	} else {
		/* increment 1, not 2 as you might think */
		ptr += 1;
	}
	if (!(cf->can_id & CAN_RTR_FLAG))
		memcpy(ptr, &cf->data[0], cf->can_dlc);
	memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
			buf, DPRAM_TX_SIZE);
	/* advance the fifo write index, wrapping at DPRAM_TX_CNT */
	if (++fifo_wr >= DPRAM_TX_CNT)
		fifo_wr = 0;
	iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
	card->tx.last_bus = priv->index;
	++card->tx.pending;
	++priv->tx.pending;
	can_put_echo_skb(skb, dev, priv->tx.echo_put);
	++priv->tx.echo_put;
	if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
		priv->tx.echo_put = 0;
	/* can_put_echo_skb() saves the skb, safe to return TX_OK */
	ret = NETDEV_TX_OK;
xmit_done:
	spin_unlock(&card->spin);
	if (card->tx.pending >= TXMAX) {
		/* card-wide fifo exhausted: stop every queue on this card */
		int j;
		for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
			if (card->net[j])
				netif_stop_queue(card->net[j]);
		}
	}
	if (ret != NETDEV_TX_OK)
		netif_stop_queue(dev);

	return ret;
}

/*
 * shortcut for skb delivery
 */
int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
		ktime_t ktime)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	skb = alloc_can_skb(netdev, &cf);
	if (!skb)
		return -ENOMEM;
	memcpy(cf, msg, sizeof(*msg));
	skb->tstamp = ktime;
	return netif_rx(skb);
}

/*
 * softing_handle_1
 * pop 1 entry from the DPRAM queue, and process
 */
static int softing_handle_1(struct
softing *card)
{
	struct net_device *netdev;
	struct softing_priv *priv;
	ktime_t ktime;
	struct can_frame msg;
	int cnt = 0, lost_msg;
	uint8_t fifo_rd, fifo_wr, cmd;
	uint8_t *ptr;
	uint32_t tmp_u32;
	uint8_t buf[DPRAM_RX_SIZE];

	memset(&msg, 0, sizeof(msg));
	/* test for lost msgs */
	lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
	if (lost_msg) {
		int j;
		/* reset condition */
		iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
		/* prepare msg */
		msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
		msg.can_dlc = CAN_ERR_DLC;
		msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		/*
		 * service to all busses, we don't know which it was applicable
		 * but only service busses that are online
		 */
		for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
			netdev = card->net[j];
			if (!netdev)
				continue;
			if (!canif_is_active(netdev))
				/* a dead bus has no overflows */
				continue;
			++netdev->stats.rx_over_errors;
			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
		}
		/* prepare for other use */
		memset(&msg, 0, sizeof(msg));
		++cnt;
	}

	fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
	fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);

	/* advance the read index first, wrapping at DPRAM_RX_CNT */
	if (++fifo_rd >= DPRAM_RX_CNT)
		fifo_rd = 0;
	if (fifo_wr == fifo_rd)
		/* rx fifo empty */
		return cnt;

	memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
			DPRAM_RX_SIZE);
	mb();
	/* trigger dual port RAM */
	iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);

	ptr = buf;
	cmd = *ptr++;
	if (cmd == 0xff)
		/* not quite useful, probably the card has got out */
		return 0;
	netdev = card->net[0];
	if (cmd & CMD_BUS2)
		netdev = card->net[1];
	priv = netdev_priv(netdev);

	if (cmd & CMD_ERR) {
		/* bus state change report from the card */
		uint8_t can_state, state;

		state = *ptr++;

		msg.can_id = CAN_ERR_FLAG;
		msg.can_dlc = CAN_ERR_DLC;

		if (state & SF_MASK_BUSOFF) {
			can_state = CAN_STATE_BUS_OFF;
			msg.can_id |= CAN_ERR_BUSOFF;
			state = STATE_BUSOFF;
		} else if (state & SF_MASK_EPASSIVE) {
			can_state = CAN_STATE_ERROR_PASSIVE;
			msg.can_id |= CAN_ERR_CRTL;
			msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
			state = STATE_EPASSIVE;
		} else {
			can_state = CAN_STATE_ERROR_ACTIVE;
			msg.can_id |= CAN_ERR_CRTL;
			state = STATE_EACTIVE;
		}
		/* update DPRAM */
		iowrite8(state, &card->dpram[priv->index ?
				DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
		/* timestamp */
		tmp_u32 = le32_to_cpup((void *)ptr);
		ptr += 4;
		ktime = softing_raw2ktime(card, tmp_u32);

		++netdev->stats.rx_errors;
		/* update internal status */
		if (can_state != priv->can.state) {
			priv->can.state = can_state;
			if (can_state == CAN_STATE_ERROR_PASSIVE)
				++priv->can.can_stats.error_passive;
			else if (can_state == CAN_STATE_BUS_OFF) {
				/* this calls can_close_cleanup() */
				can_bus_off(netdev);
				netif_stop_queue(netdev);
			}
			/* trigger socketcan */
			softing_netdev_rx(netdev, &msg, ktime);
		}
	} else {
		/* regular frame (rx, or tx acknowledge) */
		if (cmd & CMD_RTR)
			msg.can_id |= CAN_RTR_FLAG;
		msg.can_dlc = get_can_dlc(*ptr++);
		if (cmd & CMD_XTD) {
			msg.can_id |= CAN_EFF_FLAG;
			msg.can_id |= le32_to_cpup((void *)ptr);
			ptr += 4;
		} else {
			msg.can_id |= le16_to_cpup((void *)ptr);
			ptr += 2;
		}
		/* timestamp */
		tmp_u32 = le32_to_cpup((void *)ptr);
		ptr += 4;
		ktime = softing_raw2ktime(card, tmp_u32);
		if (!(msg.can_id & CAN_RTR_FLAG))
			memcpy(&msg.data[0], ptr, 8);
		ptr += 8;
		/* update socket */
		if (cmd & CMD_ACK) {
			/* acknowledge, was tx msg */
			struct sk_buff *skb;
			skb = priv->can.echo_skb[priv->tx.echo_get];
			if (skb)
				skb->tstamp = ktime;
			can_get_echo_skb(netdev, priv->tx.echo_get);
			++priv->tx.echo_get;
			if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
				priv->tx.echo_get = 0;
			if (priv->tx.pending)
				--priv->tx.pending;
			if (card->tx.pending)
				--card->tx.pending;
			++netdev->stats.tx_packets;
			if (!(msg.can_id & CAN_RTR_FLAG))
				netdev->stats.tx_bytes += msg.can_dlc;
		} else {
			int ret;

			ret = softing_netdev_rx(netdev, &msg, ktime);
			if (ret == NET_RX_SUCCESS) {
				++netdev->stats.rx_packets;
				if (!(msg.can_id & CAN_RTR_FLAG))
					netdev->stats.rx_bytes += msg.can_dlc;
			} else {
				++netdev->stats.rx_dropped;
			}
		}
	}
	++cnt;
	return cnt;
}

/*
 * real interrupt handler
 */
static irqreturn_t softing_irq_thread(int irq, void *dev_id)
{
	struct softing *card = (struct softing *)dev_id;
	struct net_device *netdev;
	struct softing_priv *priv;
	int j, offset,
work_done;

	work_done = 0;
	spin_lock_bh(&card->spin);
	/* drain the DPRAM rx queue, one entry at a time */
	while (softing_handle_1(card) > 0) {
		++card->irq.svc_count;
		++work_done;
	}
	spin_unlock_bh(&card->spin);
	/* resume tx queue's */
	offset = card->tx.last_bus;
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		if (card->tx.pending >= TXMAX)
			break;
		/* round-robin, starting after the bus that transmitted last */
		netdev = card->net[(j + offset + 1) % card->pdat->nbus];
		if (!netdev)
			continue;
		priv = netdev_priv(netdev);
		if (!canif_is_active(netdev))
			/* it makes no sense to wake dead busses */
			continue;
		if (priv->tx.pending >= TX_ECHO_SKB_MAX)
			continue;
		++work_done;
		netif_wake_queue(netdev);
	}
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * interrupt routines:
 * schedule the 'real interrupt handler'
 */
static irqreturn_t softing_irq_v2(int irq, void *dev_id)
{
	struct softing *card = (struct softing *)dev_id;
	uint8_t ir;

	/* read & acknowledge the card-to-host interrupt flag */
	ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
	iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
	return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE;
}

static irqreturn_t softing_irq_v1(int irq, void *dev_id)
{
	struct softing *card = (struct softing *)dev_id;
	uint8_t ir;

	/* read & acknowledge the card-to-host interrupt flag */
	ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
	iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
	return ir ?
IRQ_WAKE_THREAD : IRQ_NONE; } /* * netdev/candev inter-operability */ static int softing_netdev_open(struct net_device *ndev) { int ret; /* check or determine and set bittime */ ret = open_candev(ndev); if (!ret) ret = softing_startstop(ndev, 1); return ret; } static int softing_netdev_stop(struct net_device *ndev) { int ret; netif_stop_queue(ndev); /* softing cycle does close_candev() */ ret = softing_startstop(ndev, 0); return ret; } static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode) { int ret; switch (mode) { case CAN_MODE_START: /* softing_startstop does close_candev() */ ret = softing_startstop(ndev, 1); return ret; case CAN_MODE_STOP: case CAN_MODE_SLEEP: return -EOPNOTSUPP; } return 0; } /* * Softing device management helpers */ int softing_enable_irq(struct softing *card, int enable) { int ret; if (!card->irq.nr) { return 0; } else if (card->irq.requested && !enable) { free_irq(card->irq.nr, card); card->irq.requested = 0; } else if (!card->irq.requested && enable) { ret = request_threaded_irq(card->irq.nr, (card->pdat->generation >= 2) ? 
softing_irq_v2 : softing_irq_v1, softing_irq_thread, IRQF_SHARED, dev_name(&card->pdev->dev), card); if (ret) { dev_alert(&card->pdev->dev, "request_threaded_irq(%u) failed\n", card->irq.nr); return ret; } card->irq.requested = 1; } return 0; } static void softing_card_shutdown(struct softing *card) { int fw_up = 0; if (mutex_lock_interruptible(&card->fw.lock)) /* return -ERESTARTSYS */; fw_up = card->fw.up; card->fw.up = 0; if (card->irq.requested && card->irq.nr) { free_irq(card->irq.nr, card); card->irq.requested = 0; } if (fw_up) { if (card->pdat->enable_irq) card->pdat->enable_irq(card->pdev, 0); softing_set_reset_dpram(card); if (card->pdat->reset) card->pdat->reset(card->pdev, 1); } mutex_unlock(&card->fw.lock); } static __devinit int softing_card_boot(struct softing *card) { int ret, j; static const uint8_t stream[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, }; unsigned char back[sizeof(stream)]; if (mutex_lock_interruptible(&card->fw.lock)) return -ERESTARTSYS; if (card->fw.up) { mutex_unlock(&card->fw.lock); return 0; } /* reset board */ if (card->pdat->enable_irq) card->pdat->enable_irq(card->pdev, 1); /* boot card */ softing_set_reset_dpram(card); if (card->pdat->reset) card->pdat->reset(card->pdev, 1); for (j = 0; (j + sizeof(stream)) < card->dpram_size; j += sizeof(stream)) { memcpy_toio(&card->dpram[j], stream, sizeof(stream)); /* flush IO cache */ mb(); memcpy_fromio(back, &card->dpram[j], sizeof(stream)); if (!memcmp(back, stream, sizeof(stream))) continue; /* memory is not equal */ dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j); ret = -EIO; goto failed; } wmb(); /* load boot firmware */ ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram, card->dpram_size, card->pdat->boot.offs - card->pdat->boot.addr); if (ret < 0) goto failed; /* load loader firmware */ ret = softing_load_fw(card->pdat->load.fw, card, card->dpram, card->dpram_size, card->pdat->load.offs - card->pdat->load.addr); if (ret < 0) goto failed; if 
(card->pdat->reset)
		card->pdat->reset(card->pdev, 0);
	softing_clr_reset_dpram(card);
	ret = softing_bootloader_command(card, 0, "card boot");
	if (ret < 0)
		goto failed;
	ret = softing_load_app_fw(card->pdat->app.fw, card);
	if (ret < 0)
		goto failed;
	ret = softing_chip_poweron(card);
	if (ret < 0)
		goto failed;
	card->fw.up = 1;
	mutex_unlock(&card->fw.lock);
	return 0;
failed:
	/* undo everything: disable irq, re-assert reset */
	card->fw.up = 0;
	if (card->pdat->enable_irq)
		card->pdat->enable_irq(card->pdev, 0);
	softing_set_reset_dpram(card);
	if (card->pdat->reset)
		card->pdat->reset(card->pdev, 1);
	mutex_unlock(&card->fw.lock);
	return ret;
}

/*
 * netdev sysfs
 */

/* show the channel index (0 or 1) of this netdev on the card */
static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct softing_priv *priv = netdev2softing(ndev);

	return sprintf(buf, "%i\n", priv->index);
}

/* show the CAN controller chip id of this channel */
static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct softing_priv *priv = netdev2softing(ndev);

	return sprintf(buf, "%i\n", priv->chip);
}

/* show the output control byte of this channel */
static ssize_t show_output(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct softing_priv *priv = netdev2softing(ndev);

	return sprintf(buf, "0x%02x\n", priv->output);
}

/* set the output control byte; refused while the netdev is running */
static ssize_t store_output(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct net_device *ndev = to_net_dev(dev);
	struct softing_priv *priv = netdev2softing(ndev);
	struct softing *card = priv->card;
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	val &= 0xFF;

	ret = mutex_lock_interruptible(&card->fw.lock);
	if (ret)
		return -ERESTARTSYS;
	if (netif_running(ndev)) {
		mutex_unlock(&card->fw.lock);
		return -EBUSY;
	}
	priv->output = val;
	mutex_unlock(&card->fw.lock);
	return count;
}

static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);

static const struct attribute *const netdev_sysfs_attrs[] = {
	&dev_attr_channel.attr,
	&dev_attr_chip.attr,
	&dev_attr_output.attr,
	NULL,
};
static const struct attribute_group netdev_sysfs_group = {
	.name = NULL,
	.attrs = (struct attribute **)netdev_sysfs_attrs,
};

static const struct net_device_ops softing_netdev_ops = {
	.ndo_open = softing_netdev_open,
	.ndo_stop = softing_netdev_stop,
	.ndo_start_xmit	= softing_netdev_start_xmit,
};

/* generic limits; brp_max & sjw_max are overruled per card below */
static const struct can_bittiming_const softing_btr_const = {
	.name = "softing",
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4, /* overruled */
	.brp_min = 1,
	.brp_max = 32, /* overruled */
	.brp_inc = 1,
};

/* allocate & pre-configure one candev for a channel of the card */
static __devinit struct net_device *softing_netdev_create(struct softing *card,
		uint16_t chip_id)
{
	struct net_device *netdev;
	struct softing_priv *priv;

	netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
	if (!netdev) {
		dev_alert(&card->pdev->dev, "alloc_candev failed\n");
		return NULL;
	}
	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->card = card;
	memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
	priv->btr_const.brp_max = card->pdat->max_brp;
	priv->btr_const.sjw_max = card->pdat->max_sjw;
	priv->can.bittiming_const = &priv->btr_const;
	priv->can.clock.freq = 8000000;
	priv->chip = chip_id;
	priv->output = softing_default_output(netdev);
	SET_NETDEV_DEV(netdev, &card->pdev->dev);

	netdev->flags |= IFF_ECHO;
	netdev->netdev_ops = &softing_netdev_ops;
	priv->can.do_set_mode = softing_candev_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;

	return netdev;
}

/* register a created candev, attaching the per-netdev sysfs group */
static __devinit int softing_netdev_register(struct net_device *netdev)
{
	int ret;

	netdev->sysfs_groups[0] = &netdev_sysfs_group;
	ret = register_candev(netdev);
	if (ret) {
		dev_alert(&netdev->dev, "register failed\n");
		return ret;
	}
	return 0;
}

/* counterpart of softing_netdev_create() + softing_netdev_register() */
static void softing_netdev_cleanup(struct net_device *netdev)
{
	unregister_candev(netdev);
free_candev(netdev);
}

/*
 * sysfs for Platform device
 */
/* read-only attribute showing an unsigned member of struct softing */
#define DEV_ATTR_RO(name, member) \
static ssize_t show_##name(struct device *dev, \
		struct device_attribute *attr, char *buf) \
{ \
	struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
	return sprintf(buf, "%u\n", card->member); \
} \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

/* read-only attribute showing a string member of struct softing */
#define DEV_ATTR_RO_STR(name, member) \
static ssize_t show_##name(struct device *dev, \
		struct device_attribute *attr, char *buf) \
{ \
	struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
	return sprintf(buf, "%s\n", card->member); \
} \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

DEV_ATTR_RO(serial, id.serial);
DEV_ATTR_RO_STR(firmware, pdat->app.fw);
DEV_ATTR_RO(firmware_version, id.fw_version);
DEV_ATTR_RO_STR(hardware, pdat->name);
DEV_ATTR_RO(hardware_version, id.hw_version);
DEV_ATTR_RO(license, id.license);
DEV_ATTR_RO(frequency, id.freq);
DEV_ATTR_RO(txpending, tx.pending);

static struct attribute *softing_pdev_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_firmware.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_hardware.attr,
	&dev_attr_hardware_version.attr,
	&dev_attr_license.attr,
	&dev_attr_frequency.attr,
	&dev_attr_txpending.attr,
	NULL,
};

static const struct attribute_group softing_pdev_group = {
	.name = NULL,
	.attrs = softing_pdev_attrs,
};

/*
 * platform driver
 */
static __devexit int softing_pdev_remove(struct platform_device *pdev)
{
	struct softing *card = platform_get_drvdata(pdev);
	int j;

	/* first, disable card*/
	softing_card_shutdown(card);

	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		if (!card->net[j])
			continue;
		softing_netdev_cleanup(card->net[j]);
		card->net[j] = NULL;
	}
	sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);

	iounmap(card->dpram);
	kfree(card);
	return 0;
}

/* map DPRAM, boot the card, create & register one candev per channel */
static __devinit int softing_pdev_probe(struct platform_device *pdev)
{
	const struct softing_platform_data *pdat = pdev->dev.platform_data;
	struct softing *card;
	struct net_device *netdev;
	struct softing_priv *priv;
	struct resource *pres;
	int ret;
	int j;

	if (!pdat) {
		dev_warn(&pdev->dev, "no platform data\n");
		return -EINVAL;
	}
	if (pdat->nbus > ARRAY_SIZE(card->net)) {
		dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus);
		return -EINVAL;
	}

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;
	card->pdat = pdat;
	card->pdev = pdev;
	platform_set_drvdata(pdev, card);
	mutex_init(&card->fw.lock);
	spin_lock_init(&card->spin);

	ret = -EINVAL;
	pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!pres)
		goto platform_resource_failed;
	card->dpram_phys = pres->start;
	card->dpram_size = resource_size(pres);
	card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
	if (!card->dpram) {
		dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
		goto ioremap_failed;
	}

	pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (pres)
		card->irq.nr = pres->start;

	/* reset card */
	ret = softing_card_boot(card);
	if (ret < 0) {
		dev_alert(&pdev->dev, "failed to boot\n");
		goto boot_failed;
	}

	/* only now, the chip's are known */
	card->id.freq = card->pdat->freq;

	ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
	if (ret < 0) {
		dev_alert(&card->pdev->dev, "sysfs failed\n");
		goto sysfs_failed;
	}

	ret = -ENOMEM;
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		card->net[j] = netdev =
			softing_netdev_create(card, card->id.chip[j]);
		if (!netdev) {
			dev_alert(&pdev->dev, "failed to make can[%i]", j);
			goto netdev_failed;
		}
		priv = netdev_priv(card->net[j]);
		priv->index = j;
		ret = softing_netdev_register(netdev);
		if (ret) {
			free_candev(netdev);
			card->net[j] = NULL;
			dev_alert(&card->pdev->dev,
					"failed to register can[%i]\n", j);
			goto netdev_failed;
		}
	}
	dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
	return 0;

netdev_failed:
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		if (!card->net[j])
			continue;
		softing_netdev_cleanup(card->net[j]);
	}
	sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
sysfs_failed:
	softing_card_shutdown(card);
boot_failed:
	iounmap(card->dpram);
ioremap_failed:
platform_resource_failed:
	kfree(card);
	return ret;
}

static struct platform_driver softing_driver = {
	.driver = {
		.name = "softing",
		.owner = THIS_MODULE,
	},
	.probe = softing_pdev_probe,
	.remove = __devexit_p(softing_pdev_remove),
};

MODULE_ALIAS("platform:softing");

static int __init softing_start(void)
{
	return platform_driver_register(&softing_driver);
}

static void __exit softing_stop(void)
{
	platform_driver_unregister(&softing_driver);
}

module_init(softing_start);
module_exit(softing_stop);

MODULE_DESCRIPTION("Softing DPRAM CAN driver");
MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
MODULE_LICENSE("GPL v2");
gpl-2.0
byoungm/linux-kernel-test
drivers/atm/fore200e.c
1071
90777
/* A FORE Systems 200E-series driver for ATM on Linux. Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003. Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de). This driver simultaneously supports PCA-200E and SBA-200E adapters on i386, alpha (untested), powerpc, sparc and sparc64 architectures. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/atm_suni.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/firmware.h> #include <asm/io.h> #include <asm/string.h> #include <asm/page.h> #include <asm/irq.h> #include <asm/dma.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include <linux/atomic.h> #ifdef CONFIG_SBUS #include <linux/of.h> #include <linux/of_device.h> #include <asm/idprom.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/pgtable.h> #endif #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */ #define FORE200E_USE_TASKLET #endif #if 0 /* enable the debugging code of the buffer supply queues */ #define FORE200E_BSQ_DEBUG #endif #if 1 /* ensure correct handling of 
52-byte AAL0 SDUs expected by atmdump-like apps */ #define FORE200E_52BYTE_AAL0_SDU #endif #include "fore200e.h" #include "suni.h" #define FORE200E_VERSION "0.3e" #define FORE200E "fore200e: " #if 0 /* override .config */ #define CONFIG_ATM_FORE200E_DEBUG 1 #endif #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \ printk(FORE200E format, ##args); } while (0) #else #define DPRINTK(level, format, args...) do {} while (0) #endif #define FORE200E_ALIGN(addr, alignment) \ ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr)) #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type)) #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ]) #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo)) #if 1 #define ASSERT(expr) if (!(expr)) { \ printk(FORE200E "assertion failed! %s[%d]: %s\n", \ __func__, __LINE__, #expr); \ panic(FORE200E "%s", __func__); \ } #else #define ASSERT(expr) do {} while (0) #endif static const struct atmdev_ops fore200e_ops; static const struct fore200e_bus fore200e_bus[]; static LIST_HEAD(fore200e_boards); MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen"); MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION); MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E"); static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { { BUFFER_S1_NBR, BUFFER_L1_NBR }, { BUFFER_S2_NBR, BUFFER_L2_NBR } }; static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { { BUFFER_S1_SIZE, BUFFER_L1_SIZE }, { BUFFER_S2_SIZE, BUFFER_L2_SIZE } }; #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" }; #endif #if 0 /* currently unused */ static int 
fore200e_fore2atm_aal(enum fore200e_aal aal) { switch(aal) { case FORE200E_AAL0: return ATM_AAL0; case FORE200E_AAL34: return ATM_AAL34; case FORE200E_AAL5: return ATM_AAL5; } return -EINVAL; } #endif static enum fore200e_aal fore200e_atm2fore_aal(int aal) { switch(aal) { case ATM_AAL0: return FORE200E_AAL0; case ATM_AAL34: return FORE200E_AAL34; case ATM_AAL1: case ATM_AAL2: case ATM_AAL5: return FORE200E_AAL5; } return -EINVAL; } static char* fore200e_irq_itoa(int irq) { static char str[8]; sprintf(str, "%d", irq); return str; } /* allocate and align a chunk of memory intended to hold the data behing exchanged between the driver and the adapter (using streaming DVMA) */ static int fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction) { unsigned long offset = 0; if (alignment <= sizeof(int)) alignment = 0; chunk->alloc_size = size + alignment; chunk->align_size = size; chunk->direction = direction; chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA); if (chunk->alloc_addr == NULL) return -ENOMEM; if (alignment > 0) offset = FORE200E_ALIGN(chunk->alloc_addr, alignment); chunk->align_addr = chunk->alloc_addr + offset; chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction); return 0; } /* free a chunk of memory */ static void fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk) { fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction); kfree(chunk->alloc_addr); } static void fore200e_spin(int msecs) { unsigned long timeout = jiffies + msecs_to_jiffies(msecs); while (time_before(jiffies, timeout)); } static int fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs) { unsigned long timeout = jiffies + msecs_to_jiffies(msecs); int ok; mb(); do { if ((ok = (*addr == val)) || (*addr & STATUS_ERROR)) break; } while (time_before(jiffies, timeout)); #if 1 if (!ok) { printk(FORE200E "cmd 
polling failed, got status 0x%08x, expected 0x%08x\n", *addr, val); } #endif return ok; } static int fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs) { unsigned long timeout = jiffies + msecs_to_jiffies(msecs); int ok; do { if ((ok = (fore200e->bus->read(addr) == val))) break; } while (time_before(jiffies, timeout)); #if 1 if (!ok) { printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n", fore200e->bus->read(addr), val); } #endif return ok; } static void fore200e_free_rx_buf(struct fore200e* fore200e) { int scheme, magn, nbr; struct buffer* buffer; for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) { for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) { struct chunk* data = &buffer[ nbr ].data; if (data->alloc_addr != NULL) fore200e_chunk_free(fore200e, data); } } } } } static void fore200e_uninit_bs_queue(struct fore200e* fore200e) { int scheme, magn; for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status; struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block; if (status->alloc_addr) fore200e->bus->dma_chunk_free(fore200e, status); if (rbd_block->alloc_addr) fore200e->bus->dma_chunk_free(fore200e, rbd_block); } } } static int fore200e_reset(struct fore200e* fore200e, int diag) { int ok; fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET; fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat); fore200e->bus->reset(fore200e); if (diag) { ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000); if (ok == 0) { printk(FORE200E "device %s self-test failed\n", fore200e->name); return -ENODEV; } printk(FORE200E "device %s self-test passed\n", fore200e->name); fore200e->state 
= FORE200E_STATE_RESET; } return 0; } static void fore200e_shutdown(struct fore200e* fore200e) { printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n", fore200e->name, fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); if (fore200e->state > FORE200E_STATE_RESET) { /* first, reset the board to prevent further interrupts or data transfers */ fore200e_reset(fore200e, 0); } /* then, release all allocated resources */ switch(fore200e->state) { case FORE200E_STATE_COMPLETE: kfree(fore200e->stats); case FORE200E_STATE_IRQ: free_irq(fore200e->irq, fore200e->atm_dev); case FORE200E_STATE_ALLOC_BUF: fore200e_free_rx_buf(fore200e); case FORE200E_STATE_INIT_BSQ: fore200e_uninit_bs_queue(fore200e); case FORE200E_STATE_INIT_RXQ: fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status); fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); case FORE200E_STATE_INIT_TXQ: fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status); fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd); case FORE200E_STATE_INIT_CMDQ: fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status); case FORE200E_STATE_INITIALIZE: /* nothing to do for that state */ case FORE200E_STATE_START_FW: /* nothing to do for that state */ case FORE200E_STATE_RESET: /* nothing to do for that state */ case FORE200E_STATE_MAP: fore200e->bus->unmap(fore200e); case FORE200E_STATE_CONFIGURE: /* nothing to do for that state */ case FORE200E_STATE_REGISTER: /* XXX shouldn't we *start* by deregistering the device? 
*/ atm_dev_deregister(fore200e->atm_dev); case FORE200E_STATE_BLANK: /* nothing to do for that state */ break; } } #ifdef CONFIG_PCI static u32 fore200e_pca_read(volatile u32 __iomem *addr) { /* on big-endian hosts, the board is configured to convert the endianess of slave RAM accesses */ return le32_to_cpu(readl(addr)); } static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr) { /* on big-endian hosts, the board is configured to convert the endianess of slave RAM accesses */ writel(cpu_to_le32(val), addr); } static u32 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction) { u32 dma_addr = dma_map_single(&((struct pci_dev *) fore200e->bus_dev)->dev, virt_addr, size, direction); DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n", virt_addr, size, direction, dma_addr); return dma_addr; } static void fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction) { DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); dma_unmap_single(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction); } static void fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction) { DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); dma_sync_single_for_cpu(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction); } static void fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction) { DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction); dma_sync_single_for_device(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction); } /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism (to hold descriptors, status, queues, etc.) 
shared by the driver and the adapter */ static int fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int nbr, int alignment) { /* returned chunks are page-aligned */ chunk->alloc_size = size * nbr; chunk->alloc_addr = dma_alloc_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev, chunk->alloc_size, &chunk->dma_addr, GFP_KERNEL); if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) return -ENOMEM; chunk->align_addr = chunk->alloc_addr; return 0; } /* free a DMA consistent chunk of memory */ static void fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) { dma_free_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev, chunk->alloc_size, chunk->alloc_addr, chunk->dma_addr); } static int fore200e_pca_irq_check(struct fore200e* fore200e) { /* this is a 1 bit register */ int irq_posted = readl(fore200e->regs.pca.psr); #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2) if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) { DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number); } #endif return irq_posted; } static void fore200e_pca_irq_ack(struct fore200e* fore200e) { writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr); } static void fore200e_pca_reset(struct fore200e* fore200e) { writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr); fore200e_spin(10); writel(0, fore200e->regs.pca.hcr); } static int fore200e_pca_map(struct fore200e* fore200e) { DPRINTK(2, "device %s being mapped in memory\n", fore200e->name); fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH); if (fore200e->virt_base == NULL) { printk(FORE200E "can't map device %s\n", fore200e->name); return -EFAULT; } DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base); /* gain access to the PCA specific registers */ fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET; fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET; 
fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET; fore200e->state = FORE200E_STATE_MAP; return 0; } static void fore200e_pca_unmap(struct fore200e* fore200e) { DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name); if (fore200e->virt_base != NULL) iounmap(fore200e->virt_base); } static int fore200e_pca_configure(struct fore200e *fore200e) { struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev; u8 master_ctrl, latency; DPRINTK(2, "device %s being configured\n", fore200e->name); if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) { printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n"); return -EIO; } pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl); master_ctrl = master_ctrl #if defined(__BIG_ENDIAN) /* request the PCA board to convert the endianess of slave RAM accesses */ | PCA200E_CTRL_CONVERT_ENDIAN #endif #if 0 | PCA200E_CTRL_DIS_CACHE_RD | PCA200E_CTRL_DIS_WRT_INVAL | PCA200E_CTRL_ENA_CONT_REQ_MODE | PCA200E_CTRL_2_CACHE_WRT_INVAL #endif | PCA200E_CTRL_LARGE_PCI_BURSTS; pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl); /* raise latency from 32 (default) to 192, as this seems to prevent NIC lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition. 
this may impact the performances of other PCI devices on the same bus, though */ latency = 192; pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency); fore200e->state = FORE200E_STATE_CONFIGURE; return 0; } static int __init fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom) { struct host_cmdq* cmdq = &fore200e->host_cmdq; struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; struct prom_opcode opcode; int ok; u32 prom_dma; FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); opcode.opcode = OPCODE_GET_PROM; opcode.pad = 0; prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE); fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr); *entry->status = STATUS_PENDING; fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode); ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); *entry->status = STATUS_FREE; fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE); if (ok == 0) { printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name); return -EIO; } #if defined(__BIG_ENDIAN) #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) )) /* MAC address is stored as little-endian */ swap_here(&prom->mac_addr[0]); swap_here(&prom->mac_addr[4]); #endif return 0; } static int fore200e_pca_proc_read(struct fore200e* fore200e, char *page) { struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev; return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n", pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); } #endif /* CONFIG_PCI */ #ifdef CONFIG_SBUS static u32 fore200e_sba_read(volatile u32 __iomem *addr) { return sbus_readl(addr); } static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr) { sbus_writel(val, addr); } static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int size, int direction) { struct 
platform_device *op = fore200e->bus_dev;
    u32 dma_addr;

    dma_addr = dma_map_single(&op->dev, virt_addr, size, direction);

    DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
	    virt_addr, size, direction, dma_addr);

    return dma_addr;
}


/* tear down a streaming DVMA mapping set up by fore200e_sba_dma_map() */
static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
{
    struct platform_device *op = fore200e->bus_dev;

    DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
	    dma_addr, size, direction);

    dma_unmap_single(&op->dev, dma_addr, size, direction);
}


/* make a device-written DVMA buffer visible to the CPU before reading it */
static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
{
    struct platform_device *op = fore200e->bus_dev;

    DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    dma_sync_single_for_cpu(&op->dev, dma_addr, size, direction);
}


/* hand a CPU-written DVMA buffer back to the device */
static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
{
    struct platform_device *op = fore200e->bus_dev;

    DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    dma_sync_single_for_device(&op->dev, dma_addr, size, direction);
}


/* Allocate a DVMA consistent chunk of memory intended to act as a communication
 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
 * and the adapter.
 */
static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
					int size, int nbr, int alignment)
{
    struct platform_device *op = fore200e->bus_dev;

    chunk->alloc_size = chunk->align_size = size * nbr;

    /* returned chunks are page-aligned */
    chunk->alloc_addr = dma_alloc_coherent(&op->dev, chunk->alloc_size,
					   &chunk->dma_addr, GFP_ATOMIC);

    if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
	return -ENOMEM;

    /* coherent allocations are already suitably aligned, so no extra
       alignment fix-up is performed */
    chunk->align_addr = chunk->alloc_addr;

    return 0;
}


/* free a DVMA consistent chunk of memory */
static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk)
{
    struct platform_device *op = fore200e->bus_dev;

    dma_free_coherent(&op->dev, chunk->alloc_size,
		      chunk->alloc_addr, chunk->dma_addr);
}


/* enable board interrupts, preserving the HCR's sticky bits */
static void fore200e_sba_irq_enable(struct fore200e *fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
}


/* return non-zero if the board has a pending interrupt request */
static int fore200e_sba_irq_check(struct fore200e *fore200e)
{
    return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
}


/* acknowledge a pending interrupt, preserving the HCR's sticky bits */
static void fore200e_sba_irq_ack(struct fore200e *fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
}


/* hard-reset the board: assert RESET in the HCR, spin briefly, deassert */
static void fore200e_sba_reset(struct fore200e *fore200e)
{
    fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
    fore200e_spin(10);
    fore200e->bus->write(0, fore200e->regs.sba.hcr);
}


/* map the SBA register windows and on-board RAM via the OF resources */
static int __init fore200e_sba_map(struct fore200e *fore200e)
{
    struct platform_device *op = fore200e->bus_dev;
    unsigned int bursts;

    /* gain access to the SBA specific registers */
    fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
    fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
    fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
    fore200e->virt_base =
of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM"); if (!fore200e->virt_base) { printk(FORE200E "unable to map RAM of device %s\n", fore200e->name); return -EFAULT; } DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base); fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */ /* get the supported DVMA burst sizes */ bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00); if (sbus_can_dma_64bit()) sbus_set_sbus64(&op->dev, bursts); fore200e->state = FORE200E_STATE_MAP; return 0; } static void fore200e_sba_unmap(struct fore200e *fore200e) { struct platform_device *op = fore200e->bus_dev; of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH); of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH); of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH); of_iounmap(&op->resource[3], fore200e->virt_base, SBA200E_RAM_LENGTH); } static int __init fore200e_sba_configure(struct fore200e *fore200e) { fore200e->state = FORE200E_STATE_CONFIGURE; return 0; } static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom) { struct platform_device *op = fore200e->bus_dev; const u8 *prop; int len; prop = of_get_property(op->dev.of_node, "madaddrlo2", &len); if (!prop) return -ENODEV; memcpy(&prom->mac_addr[4], prop, 4); prop = of_get_property(op->dev.of_node, "madaddrhi4", &len); if (!prop) return -ENODEV; memcpy(&prom->mac_addr[2], prop, 4); prom->serial_number = of_getintprop_default(op->dev.of_node, "serialnumber", 0); prom->hw_revision = of_getintprop_default(op->dev.of_node, "promversion", 0); return 0; } static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page) { struct platform_device *op = fore200e->bus_dev; const struct linux_prom_registers *regs; regs = of_get_property(op->dev.of_node, "reg", NULL); return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", (regs ? 
regs->which_io : 0), op->dev.of_node->name); } #endif /* CONFIG_SBUS */ static void fore200e_tx_irq(struct fore200e* fore200e) { struct host_txq* txq = &fore200e->host_txq; struct host_txq_entry* entry; struct atm_vcc* vcc; struct fore200e_vc_map* vc_map; if (fore200e->host_txq.txing == 0) return; for (;;) { entry = &txq->host_entry[ txq->tail ]; if ((*entry->status & STATUS_COMPLETE) == 0) { break; } DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n", entry, txq->tail, entry->vc_map, entry->skb); /* free copy of misaligned data */ kfree(entry->data); /* remove DMA mapping */ fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length, DMA_TO_DEVICE); vc_map = entry->vc_map; /* vcc closed since the time the entry was submitted for tx? */ if ((vc_map->vcc == NULL) || (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) { DPRINTK(1, "no ready vcc found for PDU sent on device %d\n", fore200e->atm_dev->number); dev_kfree_skb_any(entry->skb); } else { ASSERT(vc_map->vcc); /* vcc closed then immediately re-opened? */ if (vc_map->incarn != entry->incarn) { /* when a vcc is closed, some PDUs may be still pending in the tx queue. if the same vcc is immediately re-opened, those pending PDUs must not be popped after the completion of their emission, as they refer to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc would be decremented by the size of the (unrelated) skb, possibly leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc. we thus bind the tx entry to the current incarnation of the vcc when the entry is submitted for tx. When the tx later completes, if the incarnation number of the tx entry does not match the one of the vcc, then this implies that the vcc has been closed then re-opened. we thus just drop the skb here. 
*/ DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n", fore200e->atm_dev->number); dev_kfree_skb_any(entry->skb); } else { vcc = vc_map->vcc; ASSERT(vcc); /* notify tx completion */ if (vcc->pop) { vcc->pop(vcc, entry->skb); } else { dev_kfree_skb_any(entry->skb); } #if 1 /* race fixed by the above incarnation mechanism, but... */ if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) { atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0); } #endif /* check error condition */ if (*entry->status & STATUS_ERROR) atomic_inc(&vcc->stats->tx_err); else atomic_inc(&vcc->stats->tx); } } *entry->status = STATUS_FREE; fore200e->host_txq.txing--; FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX); } } #ifdef FORE200E_BSQ_DEBUG int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn) { struct buffer* buffer; int count = 0; buffer = bsq->freebuf; while (buffer) { if (buffer->supplied) { printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n", where, scheme, magn, buffer->index); } if (buffer->magn != magn) { printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n", where, scheme, magn, buffer->index, buffer->magn); } if (buffer->scheme != scheme) { printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n", where, scheme, magn, buffer->index, buffer->scheme); } if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) { printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n", where, scheme, magn, buffer->index); } count++; buffer = buffer->next; } if (count != bsq->freebuf_count) { printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n", where, scheme, magn, count, bsq->freebuf_count); } return 0; } #endif static void fore200e_supply(struct fore200e* fore200e) { int scheme, magn, i; struct host_bsq* bsq; struct host_bsq_entry* entry; struct buffer* buffer; for (scheme = 0; scheme < 
BUFFER_SCHEME_NBR; scheme++) { for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { bsq = &fore200e->host_bsq[ scheme ][ magn ]; #ifdef FORE200E_BSQ_DEBUG bsq_audit(1, bsq, scheme, magn); #endif while (bsq->freebuf_count >= RBD_BLK_SIZE) { DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n", RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count); entry = &bsq->host_entry[ bsq->head ]; for (i = 0; i < RBD_BLK_SIZE; i++) { /* take the first buffer in the free buffer list */ buffer = bsq->freebuf; if (!buffer) { printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n", scheme, magn, bsq->freebuf_count); return; } bsq->freebuf = buffer->next; #ifdef FORE200E_BSQ_DEBUG if (buffer->supplied) printk(FORE200E "queue %d.%d, buffer %lu already supplied\n", scheme, magn, buffer->index); buffer->supplied = 1; #endif entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr; entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer); } FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS); /* decrease accordingly the number of free rx buffers */ bsq->freebuf_count -= RBD_BLK_SIZE; *entry->status = STATUS_PENDING; fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr); } } } } static int fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd) { struct sk_buff* skb; struct buffer* buffer; struct fore200e_vcc* fore200e_vcc; int i, pdu_len = 0; #ifdef FORE200E_52BYTE_AAL0_SDU u32 cell_header = 0; #endif ASSERT(vcc); fore200e_vcc = FORE200E_VCC(vcc); ASSERT(fore200e_vcc); #ifdef FORE200E_52BYTE_AAL0_SDU if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) { cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) | (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) | (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) | (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) | rpd->atm_header.clp; pdu_len = 4; } #endif /* compute total PDU length */ for (i = 0; i < rpd->nseg; i++) pdu_len += rpd->rsd[ i 
].length; skb = alloc_skb(pdu_len, GFP_ATOMIC); if (skb == NULL) { DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); atomic_inc(&vcc->stats->rx_drop); return -ENOMEM; } __net_timestamp(skb); #ifdef FORE200E_52BYTE_AAL0_SDU if (cell_header) { *((u32*)skb_put(skb, 4)) = cell_header; } #endif /* reassemble segments */ for (i = 0; i < rpd->nseg; i++) { /* rebuild rx buffer address from rsd handle */ buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle); /* Make device DMA transfer visible to CPU. */ fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE); memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length); /* Now let the device get at it again. */ fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE); } DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize); if (pdu_len < fore200e_vcc->rx_min_pdu) fore200e_vcc->rx_min_pdu = pdu_len; if (pdu_len > fore200e_vcc->rx_max_pdu) fore200e_vcc->rx_max_pdu = pdu_len; fore200e_vcc->rx_pdu++; /* push PDU */ if (atm_charge(vcc, skb->truesize) == 0) { DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n", vcc->itf, vcc->vpi, vcc->vci); dev_kfree_skb_any(skb); atomic_inc(&vcc->stats->rx_drop); return -ENOMEM; } ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); vcc->push(vcc, skb); atomic_inc(&vcc->stats->rx); ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); return 0; } static void fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd) { struct host_bsq* bsq; struct buffer* buffer; int i; for (i = 0; i < rpd->nseg; i++) { /* rebuild rx buffer address from rsd handle */ buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle); bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ]; #ifdef FORE200E_BSQ_DEBUG bsq_audit(2, bsq, buffer->scheme, buffer->magn); if (buffer->supplied == 0) printk(FORE200E "queue %d.%d, buffer %ld was not 
supplied\n", buffer->scheme, buffer->magn, buffer->index); buffer->supplied = 0; #endif /* re-insert the buffer into the free buffer list */ buffer->next = bsq->freebuf; bsq->freebuf = buffer; /* then increment the number of free rx buffers */ bsq->freebuf_count++; } } static void fore200e_rx_irq(struct fore200e* fore200e) { struct host_rxq* rxq = &fore200e->host_rxq; struct host_rxq_entry* entry; struct atm_vcc* vcc; struct fore200e_vc_map* vc_map; for (;;) { entry = &rxq->host_entry[ rxq->head ]; /* no more received PDUs */ if ((*entry->status & STATUS_COMPLETE) == 0) break; vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); if ((vc_map->vcc == NULL) || (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) { DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n", fore200e->atm_dev->number, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); } else { vcc = vc_map->vcc; ASSERT(vcc); if ((*entry->status & STATUS_ERROR) == 0) { fore200e_push_rpd(fore200e, vcc, entry->rpd); } else { DPRINTK(2, "damaged PDU on %d.%d.%d\n", fore200e->atm_dev->number, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); atomic_inc(&vcc->stats->rx_err); } } FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX); fore200e_collect_rpd(fore200e, entry->rpd); /* rewrite the rpd address to ack the received PDU */ fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr); *entry->status = STATUS_FREE; fore200e_supply(fore200e); } } #ifndef FORE200E_USE_TASKLET static void fore200e_irq(struct fore200e* fore200e) { unsigned long flags; spin_lock_irqsave(&fore200e->q_lock, flags); fore200e_rx_irq(fore200e); spin_unlock_irqrestore(&fore200e->q_lock, flags); spin_lock_irqsave(&fore200e->q_lock, flags); fore200e_tx_irq(fore200e); spin_unlock_irqrestore(&fore200e->q_lock, flags); } #endif static irqreturn_t fore200e_interrupt(int irq, void* dev) { struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev); if (fore200e->bus->irq_check(fore200e) 
== 0) { DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number); return IRQ_NONE; } DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number); #ifdef FORE200E_USE_TASKLET tasklet_schedule(&fore200e->tx_tasklet); tasklet_schedule(&fore200e->rx_tasklet); #else fore200e_irq(fore200e); #endif fore200e->bus->irq_ack(fore200e); return IRQ_HANDLED; } #ifdef FORE200E_USE_TASKLET static void fore200e_tx_tasklet(unsigned long data) { struct fore200e* fore200e = (struct fore200e*) data; unsigned long flags; DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number); spin_lock_irqsave(&fore200e->q_lock, flags); fore200e_tx_irq(fore200e); spin_unlock_irqrestore(&fore200e->q_lock, flags); } static void fore200e_rx_tasklet(unsigned long data) { struct fore200e* fore200e = (struct fore200e*) data; unsigned long flags; DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number); spin_lock_irqsave(&fore200e->q_lock, flags); fore200e_rx_irq((struct fore200e*) data); spin_unlock_irqrestore(&fore200e->q_lock, flags); } #endif static int fore200e_select_scheme(struct atm_vcc* vcc) { /* fairly balance the VCs over (identical) buffer schemes */ int scheme = vcc->vci % 2 ? 
BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO; DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n", vcc->itf, vcc->vpi, vcc->vci, scheme); return scheme; } static int fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu) { struct host_cmdq* cmdq = &fore200e->host_cmdq; struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; struct activate_opcode activ_opcode; struct deactivate_opcode deactiv_opcode; struct vpvc vpvc; int ok; enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal); FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); if (activate) { FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc); activ_opcode.opcode = OPCODE_ACTIVATE_VCIN; activ_opcode.aal = aal; activ_opcode.scheme = FORE200E_VCC(vcc)->scheme; activ_opcode.pad = 0; } else { deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN; deactiv_opcode.pad = 0; } vpvc.vci = vcc->vci; vpvc.vpi = vcc->vpi; *entry->status = STATUS_PENDING; if (activate) { #ifdef FORE200E_52BYTE_AAL0_SDU mtu = 48; #endif /* the MTU is not used by the cp, except in the case of AAL0 */ fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu); fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc); fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode); } else { fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc); fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode); } ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); *entry->status = STATUS_FREE; if (ok == 0) { printk(FORE200E "unable to %s VC %d.%d.%d\n", activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci); return -EIO; } DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci, activate ? 
"open" : "clos"); return 0; } #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */ static void fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate) { if (qos->txtp.max_pcr < ATM_OC3_PCR) { /* compute the data cells to idle cells ratio from the tx PCR */ rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR; rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells; } else { /* disable rate control */ rate->data_cells = rate->idle_cells = 0; } } static int fore200e_open(struct atm_vcc *vcc) { struct fore200e* fore200e = FORE200E_DEV(vcc->dev); struct fore200e_vcc* fore200e_vcc; struct fore200e_vc_map* vc_map; unsigned long flags; int vci = vcc->vci; short vpi = vcc->vpi; ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS)); ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS)); spin_lock_irqsave(&fore200e->q_lock, flags); vc_map = FORE200E_VC_MAP(fore200e, vpi, vci); if (vc_map->vcc) { spin_unlock_irqrestore(&fore200e->q_lock, flags); printk(FORE200E "VC %d.%d.%d already in use\n", fore200e->atm_dev->number, vpi, vci); return -EINVAL; } vc_map->vcc = vcc; spin_unlock_irqrestore(&fore200e->q_lock, flags); fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC); if (fore200e_vcc == NULL) { vc_map->vcc = NULL; return -ENOMEM; } DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), fore200e_traffic_class[ vcc->qos.txtp.traffic_class ], vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu, fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ], vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu); /* pseudo-CBR bandwidth requested? 
*/ if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { mutex_lock(&fore200e->rate_mtx); if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) { mutex_unlock(&fore200e->rate_mtx); kfree(fore200e_vcc); vc_map->vcc = NULL; return -EAGAIN; } /* reserve bandwidth */ fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr; mutex_unlock(&fore200e->rate_mtx); } vcc->itf = vcc->dev->number; set_bit(ATM_VF_PARTIAL,&vcc->flags); set_bit(ATM_VF_ADDR, &vcc->flags); vcc->dev_data = fore200e_vcc; if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) { vc_map->vcc = NULL; clear_bit(ATM_VF_ADDR, &vcc->flags); clear_bit(ATM_VF_PARTIAL,&vcc->flags); vcc->dev_data = NULL; fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; kfree(fore200e_vcc); return -EINVAL; } /* compute rate control parameters */ if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate); set_bit(ATM_VF_HASQOS, &vcc->flags); DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr, fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells); } fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1; fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0; fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0; /* new incarnation of the vcc */ vc_map->incarn = ++fore200e->incarn_count; /* VC unusable before this flag is set */ set_bit(ATM_VF_READY, &vcc->flags); return 0; } static void fore200e_close(struct atm_vcc* vcc) { struct fore200e* fore200e = FORE200E_DEV(vcc->dev); struct fore200e_vcc* fore200e_vcc; struct fore200e_vc_map* vc_map; unsigned long flags; ASSERT(vcc); ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS)); ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS)); DPRINTK(2, "closing %d.%d.%d:%d\n", 
vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal)); clear_bit(ATM_VF_READY, &vcc->flags); fore200e_activate_vcin(fore200e, 0, vcc, 0); spin_lock_irqsave(&fore200e->q_lock, flags); vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); /* the vc is no longer considered as "in use" by fore200e_open() */ vc_map->vcc = NULL; vcc->itf = vcc->vci = vcc->vpi = 0; fore200e_vcc = FORE200E_VCC(vcc); vcc->dev_data = NULL; spin_unlock_irqrestore(&fore200e->q_lock, flags); /* release reserved bandwidth, if any */ if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { mutex_lock(&fore200e->rate_mtx); fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; mutex_unlock(&fore200e->rate_mtx); clear_bit(ATM_VF_HASQOS, &vcc->flags); } clear_bit(ATM_VF_ADDR, &vcc->flags); clear_bit(ATM_VF_PARTIAL,&vcc->flags); ASSERT(fore200e_vcc); kfree(fore200e_vcc); } static int fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) { struct fore200e* fore200e = FORE200E_DEV(vcc->dev); struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); struct fore200e_vc_map* vc_map; struct host_txq* txq = &fore200e->host_txq; struct host_txq_entry* entry; struct tpd* tpd; struct tpd_haddr tpd_haddr; int retry = CONFIG_ATM_FORE200E_TX_RETRY; int tx_copy = 0; int tx_len = skb->len; u32* cell_header = NULL; unsigned char* skb_data; int skb_len; unsigned char* data; unsigned long flags; ASSERT(vcc); ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); ASSERT(fore200e); ASSERT(fore200e_vcc); if (!test_bit(ATM_VF_READY, &vcc->flags)) { DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi); dev_kfree_skb_any(skb); return -EINVAL; } #ifdef FORE200E_52BYTE_AAL0_SDU if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) { cell_header = (u32*) skb->data; skb_data = skb->data + 4; /* skip 4-byte cell header */ skb_len = tx_len = skb->len - 4; DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header); } else #endif { skb_data = 
skb->data; skb_len = skb->len; } if (((unsigned long)skb_data) & 0x3) { DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name); tx_copy = 1; tx_len = skb_len; } if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) { /* this simply NUKES the PCA board */ DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name); tx_copy = 1; tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD; } if (tx_copy) { data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA); if (data == NULL) { if (vcc->pop) { vcc->pop(vcc, skb); } else { dev_kfree_skb_any(skb); } return -ENOMEM; } memcpy(data, skb_data, skb_len); if (skb_len < tx_len) memset(data + skb_len, 0x00, tx_len - skb_len); } else { data = skb_data; } vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); ASSERT(vc_map->vcc == vcc); retry_here: spin_lock_irqsave(&fore200e->q_lock, flags); entry = &txq->host_entry[ txq->head ]; if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) { /* try to free completed tx queue entries */ fore200e_tx_irq(fore200e); if (*entry->status != STATUS_FREE) { spin_unlock_irqrestore(&fore200e->q_lock, flags); /* retry once again? */ if (--retry > 0) { udelay(50); goto retry_here; } atomic_inc(&vcc->stats->tx_err); fore200e->tx_sat++; DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", fore200e->name, fore200e->cp_queues->heartbeat); if (vcc->pop) { vcc->pop(vcc, skb); } else { dev_kfree_skb_any(skb); } if (tx_copy) kfree(data); return -ENOBUFS; } } entry->incarn = vc_map->incarn; entry->vc_map = vc_map; entry->skb = skb; entry->data = tx_copy ? data : NULL; tpd = entry->tpd; tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE); tpd->tsd[ 0 ].length = tx_len; FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); txq->txing++; /* The dma_map call above implies a dma_sync so the device can use it, * thus no explicit dma_sync call is necessary here. 
*/ DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), tpd->tsd[0].length, skb_len); if (skb_len < fore200e_vcc->tx_min_pdu) fore200e_vcc->tx_min_pdu = skb_len; if (skb_len > fore200e_vcc->tx_max_pdu) fore200e_vcc->tx_max_pdu = skb_len; fore200e_vcc->tx_pdu++; /* set tx rate control information */ tpd->rate.data_cells = fore200e_vcc->rate.data_cells; tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells; if (cell_header) { tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP); tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT; tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT; tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT; } else { /* set the ATM header, common to all cells conveying the PDU */ tpd->atm_header.clp = 0; tpd->atm_header.plt = 0; tpd->atm_header.vci = vcc->vci; tpd->atm_header.vpi = vcc->vpi; tpd->atm_header.gfc = 0; } tpd->spec.length = tx_len; tpd->spec.nseg = 1; tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal); tpd->spec.intr = 1; tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */ tpd_haddr.pad = 0; tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */ *entry->status = STATUS_PENDING; fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr); spin_unlock_irqrestore(&fore200e->q_lock, flags); return 0; } static int fore200e_getstats(struct fore200e* fore200e) { struct host_cmdq* cmdq = &fore200e->host_cmdq; struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; struct stats_opcode opcode; int ok; u32 stats_dma_addr; if (fore200e->stats == NULL) { fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA); if (fore200e->stats == NULL) return -ENOMEM; } stats_dma_addr = 
/* (continuation of fore200e_getstats: DMA-map the host stats buffer, post a
   GET_STATS command to the cp command queue and poll for completion) */
fore200e->bus->dma_map(fore200e, fore200e->stats, sizeof(struct stats), DMA_FROM_DEVICE);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_STATS;
    opcode.pad    = 0;

    /* hand the DMA address of the host stats buffer to the cp */
    fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);

    *entry->status = STATUS_PENDING;

    /* writing the opcode is what triggers command execution on the cp */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
	return -EIO;
    }

    return 0;
}

/* atmdev_ops getsockopt hook: no device-level socket options are supported,
   so this always fails with -EINVAL (the debug trace is the only effect) */
static int
fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
{
    /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */

    DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);

    return -EINVAL;
}

/* atmdev_ops setsockopt hook: no device-level socket options are supported,
   so this always fails with -EINVAL (the debug trace is the only effect) */
static int
fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
{
    /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */

    DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
	    vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);

    return -EINVAL;
}

#if 0 /* currently unused */
/* read the adapter's OC-3 (SUNI) register block into *regs via a GET_OC3
   command on the cp command queue; returns 0 on success, -EIO on timeout */
static int
fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct oc3_opcode       opcode;
    int                     ok;
    u32                     oc3_regs_dma_addr;

    oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_OC3;
    opcode.reg    = 0;
    opcode.value  = 0;
    opcode.mask   = 0;

    fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);

    *entry->status = STATUS_PENDING;

    /* NOTE(review): unlike the other command posts, this cast lacks the
       __iomem qualifier -- presumably an oversight tolerated because the
       code is compiled out; confirm before re-enabling */
    fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
	return -EIO;
    }

    return 0;
}
#endif

/* update OC-3 (SUNI) register 'reg' with 'value' under 'mask' via a SET_OC3
   command on the cp command queue; returns 0 on success, -EIO on timeout */
static int
fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct oc3_opcode       opcode;
    int                     ok;

    DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_SET_OC3;
    opcode.reg    = reg;
    opcode.value  = value;
    opcode.mask   = mask;

    /* no host buffer is involved in a set operation */
    fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);

    *entry->status = STATUS_PENDING;

    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    if (ok == 0) {
	printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
	return -EIO;
    }

    return 0;
}

/* select the PHY loopback mode (ATM_LM_*) by rewriting the SUNI master
   control register; requires CAP_NET_ADMIN */
static int
fore200e_setloop(struct fore200e* fore200e, int loop_mode)
{
    u32 mct_value, mct_mask;
    int error;

    if (!capable(CAP_NET_ADMIN))
	return -EPERM;

    switch (loop_mode) {

    case ATM_LM_NONE:
	/* clear both loopback enable bits */
	mct_value = 0;
	mct_mask  = SUNI_MCT_DLE | SUNI_MCT_LLE;
	break;

    case ATM_LM_LOC_PHY:
	mct_value = mct_mask = SUNI_MCT_DLE;
	break;

    case ATM_LM_RMT_PHY:
	mct_value = mct_mask = SUNI_MCT_LLE;
	break;

    default:
	return -EINVAL;
    }

    error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
    if (error == 0)
	/* remember the mode so ATM_GETLOOP can report it back */
	fore200e->loop_mode = loop_mode;

    return error;
}

/* refresh the device statistics and copy a SONET-format subset out to
   userland; arg may be NULL to refresh only (continues on the next line) */
static int
fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
{
    struct sonet_stats tmp;

    if
(fore200e_getstats(fore200e) < 0)
	return -EIO;

    /* device counters are big-endian; convert each field for the host */
    tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
    tmp.line_bip    = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
    tmp.path_bip    = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
    tmp.line_febe   = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
    tmp.path_febe   = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
    tmp.corr_hcs    = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
    tmp.uncorr_hcs  = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);

    /* cell counts are summed over the AAL0, AAL3/4 and AAL5 layers */
    tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted)  +
	be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
	be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
    tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received)  +
	be32_to_cpu(fore200e->stats->aal34.cells_received) +
	be32_to_cpu(fore200e->stats->aal5.cells_received);

    if (arg)
	return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;

    return 0;
}

/* atmdev_ops ioctl hook: SONET statistics and loopback get/set/query;
   everything else returns -ENOSYS */
static int
fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
{
    struct fore200e* fore200e = FORE200E_DEV(dev);

    DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);

    switch (cmd) {

    case SONET_GETSTAT:
	return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);

    case SONET_GETDIAG:
	/* no diagnostics are kept; report none */
	return put_user(0, (int __user *)arg) ? -EFAULT : 0;

    case ATM_SETLOOP:
	return fore200e_setloop(fore200e, (int)(unsigned long)arg);

    case ATM_GETLOOP:
	return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;

    case ATM_QUERYLOOP:
	/* advertise the two supported loopback modes */
	return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ?
	    -EFAULT : 0;
    }

    return -ENOSYS; /* not implemented */
}

/* atmdev_ops change_qos hook: only a CBR tx peak-cell-rate change is
   accepted, and only if the requested rate fits in the device's remaining
   cell-rate budget (protected by rate_mtx) */
static int
fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
{
    struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
    struct fore200e*     fore200e     = FORE200E_DEV(vcc->dev);

    if (!test_bit(ATM_VF_READY, &vcc->flags)) {
	/* NOTE(review): the last argument repeats vcc->vpi; the VCI slot of
	   the "%d.%d.%d" message presumably ought to be vcc->vci — confirm */
	DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi);
	return -EINVAL;
    }

    DPRINTK(2, "change_qos %d.%d.%d, "
	    "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
	    "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
	    "available_cell_rate = %u",
	    vcc->itf, vcc->vpi, vcc->vci,
	    fore200e_traffic_class[ qos->txtp.traffic_class ],
	    qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
	    fore200e_traffic_class[ qos->rxtp.traffic_class ],
	    qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
	    flags, fore200e->available_cell_rate);

    if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {

	mutex_lock(&fore200e->rate_mtx);
	/* would the new rate exceed the budget once the old one is refunded? */
	if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
	    mutex_unlock(&fore200e->rate_mtx);
	    return -EAGAIN;
	}

	/* credit the VC's previous CBR rate back, then debit the new one */
	fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
	fore200e->available_cell_rate -= qos->txtp.max_pcr;

	mutex_unlock(&fore200e->rate_mtx);

	memcpy(&vcc->qos, qos, sizeof(struct atm_qos));

	/* update rate control parameters */
	fore200e_rate_ctrl(qos, &fore200e_vcc->rate);

	set_bit(ATM_VF_HASQOS, &vcc->flags);

	return 0;
    }

    return -EINVAL;
}

/* reserve the board's (shared) IRQ line and, when tasklets are enabled,
   set up the tx/rx deferred-work handlers (continues on the next line) */
static int fore200e_irq_request(struct fore200e *fore200e)
{
    if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {

	printk(FORE200E "unable to reserve IRQ %s for device %s\n",
	       fore200e_irq_itoa(fore200e->irq), fore200e->name);
	return -EBUSY;
    }

    printk(FORE200E "IRQ %s reserved for device %s\n",
	   fore200e_irq_itoa(fore200e->irq), fore200e->name);

#ifdef FORE200E_USE_TASKLET
    tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
#endif

    fore200e->state = FORE200E_STATE_IRQ;
    return 0;
}

/* read the board PROM and extract the End System Identifier (ESI / MAC
   address) into both the driver and the atm_dev; returns 0, -ENOMEM or
   -EBUSY if the PROM cannot be read */
static int fore200e_get_esi(struct fore200e *fore200e)
{
    /* GFP_DMA: the PROM image is DMA'd into this buffer by prom_read */
    struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
    int ok, i;

    if (!prom)
	return -ENOMEM;

    ok = fore200e->bus->prom_read(fore200e, prom);
    if (ok < 0) {
	kfree(prom);
	return -EBUSY;
    }

    printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
	   fore200e->name,
	   (prom->hw_revision & 0xFF) + '@',    /* probably meaningless with SBA boards */
	   prom->serial_number & 0xFFFF,
	   &prom->mac_addr[2]);

    /* the ESI starts at offset 2 of the PROM's mac_addr field */
    for (i = 0; i < ESI_LEN; i++) {
	fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
    }

    kfree(prom);

    return 0;
}

/* allocate the receive buffers for every (scheme, magnitude) buffer pool
   and thread them onto the per-pool free list; on failure all partial
   allocations of the current pool are released */
static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
{
    int scheme, magn, nbr, size, i;

    struct host_bsq* bsq;
    struct buffer*   buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    nbr  = fore200e_rx_buf_nbr[ scheme ][ magn ];
	    size = fore200e_rx_buf_size[ scheme ][ magn ];

	    DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);

	    /* allocate the array of receive buffers */
	    buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL);

	    if (buffer == NULL)
		return -ENOMEM;

	    bsq->freebuf = NULL;

	    for (i = 0; i < nbr; i++) {

		buffer[ i ].scheme = scheme;
		buffer[ i ].magn   = magn;
#ifdef FORE200E_BSQ_DEBUG
		buffer[ i ].index    = i;
		buffer[ i ].supplied = 0;
#endif

		/* allocate the receive buffer body */
		if (fore200e_chunk_alloc(fore200e,
					 &buffer[ i ].data, size,
					 fore200e->bus->buffer_alignment,
					 DMA_FROM_DEVICE) < 0) {

		    /* unwind the bodies already allocated for this pool */
		    while (i > 0)
			fore200e_chunk_free(fore200e, &buffer[ --i ].data);
		    kfree(buffer);

		    return -ENOMEM;
		}

		/* insert the buffer into the free buffer list */
		buffer[ i ].next = bsq->freebuf;
		bsq->freebuf = &buffer[ i ];
	    }

	    /* all the buffers are free, initially */
	    bsq->freebuf_count = nbr;

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(3, bsq, scheme, magn);
#endif
	}
    }

    fore200e->state = FORE200E_STATE_ALLOC_BUF;
    return 0;
}

/* set up every buffer supply queue: allocate the aligned status-word and
   rbd_block arrays, then mirror each host entry into the corresponding cp
   resident entry */
static int fore200e_init_bs_queue(struct fore200e *fore200e)
{
    int scheme, magn, i;

    struct host_bsq*              bsq;
    struct cp_bsq_entry __iomem * cp_entry;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

	    /* allocate and align the array of status words */
	    if (fore200e->bus->dma_chunk_alloc(fore200e,
					       &bsq->status,
					       sizeof(enum status),
					       QUEUE_SIZE_BS,
					       fore200e->bus->status_alignment) < 0) {
		return -ENOMEM;
	    }

	    /* allocate and align the array of receive buffer descriptors */
	    if (fore200e->bus->dma_chunk_alloc(fore200e,
					       &bsq->rbd_block,
					       sizeof(struct rbd_block),
					       QUEUE_SIZE_BS,
					       fore200e->bus->descr_alignment) < 0) {

		fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
		return -ENOMEM;
	    }

	    /* get the base address of the cp resident buffer supply queue entries */
	    cp_entry = fore200e->virt_base +
		fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);

	    /* fill the host resident and cp resident buffer supply queue entries */
	    for (i = 0; i < QUEUE_SIZE_BS; i++) {

		bsq->host_entry[ i ].status =
		    FORE200E_INDEX(bsq->status.align_addr, enum status, i);
		bsq->host_entry[ i ].rbd_block =
		    FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
		bsq->host_entry[ i ].rbd_block_dma =
		    FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
		bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];

		*bsq->host_entry[ i ].status = STATUS_FREE;

		fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
				     &cp_entry[ i ].status_haddr);
	    }
	}
    }

    fore200e->state = FORE200E_STATE_INIT_BSQ;
    return 0;
}

/* set up the receive queue: aligned status and rpd descriptor arrays are
   allocated and each host entry is mirrored into the cp resident entry */
static int fore200e_init_rx_queue(struct fore200e *fore200e)
{
    struct host_rxq*              rxq = &fore200e->host_rxq;
    struct cp_rxq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "receive queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &rxq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_RX,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of receive PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &rxq->rpd,
				       sizeof(struct rpd),
				       QUEUE_SIZE_RX,
				       fore200e->bus->descr_alignment) < 0) {

	fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident rx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);

    /* fill the host resident and cp resident rx entries */
    for (i=0; i < QUEUE_SIZE_RX; i++) {

	rxq->host_entry[ i ].status =
	    FORE200E_INDEX(rxq->status.align_addr, enum status, i);
	rxq->host_entry[ i ].rpd =
	    FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
	rxq->host_entry[ i ].rpd_dma =
	    FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
	rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*rxq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
			     &cp_entry[ i ].rpd_haddr);
    }

    /* set the head entry of the queue */
    rxq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_RXQ;
    return 0;
}

/* set up the transmit queue, mirroring the rx queue setup above
   (continues on the next line) */
static int fore200e_init_tx_queue(struct fore200e *fore200e)
{
    struct host_txq*              txq = &fore200e->host_txq;
    struct cp_txq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "transmit queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &txq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_TX,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* allocate and align the array of transmit PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &txq->tpd,
				       sizeof(struct tpd),
				       QUEUE_SIZE_TX,
				       fore200e->bus->descr_alignment) < 0) {
	/* (continuation of fore200e_init_tx_queue: tpd allocation failed,
	   release the status array allocated just before) */
	fore200e->bus->dma_chunk_free(fore200e, &txq->status);
	return -ENOMEM;
    }

    /* get the base address of the cp resident tx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);

    /* fill the host resident and cp resident tx entries */
    for (i=0; i < QUEUE_SIZE_TX; i++) {

	txq->host_entry[ i ].status =
	    FORE200E_INDEX(txq->status.align_addr, enum status, i);
	txq->host_entry[ i ].tpd =
	    FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
	txq->host_entry[ i ].tpd_dma =
	    FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
	txq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*txq->host_entry[ i ].status = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);

	/* although there is a one-to-one mapping of tx queue entries and
	   tpds, we do not write here the DMA (physical) base address of
	   each tpd into the related cp resident entry, because the cp
	   relies on this write operation to detect that a new pdu has been
	   submitted for tx */
    }

    /* set the head and tail entries of the queue */
    txq->head = 0;
    txq->tail = 0;

    fore200e->state = FORE200E_STATE_INIT_TXQ;
    return 0;
}

/* set up the command queue: an aligned status-word array is allocated and
   each host entry is mirrored into the cp resident command entry */
static int fore200e_init_cmd_queue(struct fore200e *fore200e)
{
    struct host_cmdq*              cmdq = &fore200e->host_cmdq;
    struct cp_cmdq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "command queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
				       &cmdq->status,
				       sizeof(enum status),
				       QUEUE_SIZE_CMD,
				       fore200e->bus->status_alignment) < 0) {
	return -ENOMEM;
    }

    /* get the base address of the cp resident cmd queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);

    /* fill the host resident and cp resident cmd entries */
    for (i=0; i < QUEUE_SIZE_CMD; i++) {

	cmdq->host_entry[ i ].status =
	    FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];

	*cmdq->host_entry[ i ].status
	    = STATUS_FREE;

	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
			     &cp_entry[ i ].status_haddr);
    }

    /* set the head entry of the queue */
    cmdq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_CMDQ;
    return 0;
}

/* write the geometry of one buffer supply queue (scheme, magnitude) into
   the cp's initialization block */
static void fore200e_param_bs_queue(struct fore200e *fore200e,
				    enum buffer_scheme scheme, enum buffer_magn magn,
				    int queue_length, int pool_size, int supply_blksize)
{
    struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];

    fore200e->bus->write(queue_length,                           &bs_spec->queue_length);
    fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
    fore200e->bus->write(pool_size,                              &bs_spec->pool_size);
    fore200e->bus->write(supply_blksize,                         &bs_spec->supply_blksize);
}

/* program the cp initialization block (queue sizes, descriptor extensions,
   buffer pool geometry), then issue OPCODE_INITIALIZE and wait for the cp
   to acknowledge (continues on the next line) */
static int fore200e_initialize(struct fore200e *fore200e)
{
    struct cp_queues __iomem * cpq;
    int ok, scheme, magn;

    DPRINTK(2, "device %s being initialized\n", fore200e->name);

    mutex_init(&fore200e->rate_mtx);
    spin_lock_init(&fore200e->q_lock);

    cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;

    /* enable cp to host interrupts */
    fore200e->bus->write(1, &cpq->imask);

    if (fore200e->bus->irq_enable)
	fore200e->bus->irq_enable(fore200e);

    fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);

    fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
    fore200e->bus->write(QUEUE_SIZE_RX,  &cpq->init.rx_queue_len);
    fore200e->bus->write(QUEUE_SIZE_TX,  &cpq->init.tx_queue_len);

    fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
    fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
	    fore200e_param_bs_queue(fore200e, scheme, magn,
				    QUEUE_SIZE_BS,
				    fore200e_rx_buf_nbr[ scheme ][ magn ],
				    RBD_BLK_SIZE);

    /* issue the initialize command */
    fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
    fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);

    ok = fore200e_io_poll(fore200e,
			  &cpq->init.status, STATUS_COMPLETE, 3000);

    if (ok == 0) {
	printk(FORE200E "device %s initialization failed\n", fore200e->name);
	return -ENODEV;
    }

    printk(FORE200E "device %s initialized\n", fore200e->name);

    fore200e->state = FORE200E_STATE_INITIALIZE;
    return 0;
}

/* push one character to the i960 monitor's soft UART */
static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
{
    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;

#if 0
    printk("%c", c);
#endif
    /* setting the AVAIL flag tells the monitor a character is present */
    fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
}

/* poll the monitor's soft UART for up to 50 ms; returns the character read
   (0-255) or -1 on timeout */
static int fore200e_monitor_getc(struct fore200e *fore200e)
{
    struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
    unsigned long timeout = jiffies + msecs_to_jiffies(50);
    int c;

    while (time_before(jiffies, timeout)) {

	c = (int) fore200e->bus->read(&monitor->soft_uart.recv);

	if (c & FORE200E_CP_MONITOR_UART_AVAIL) {

	    /* acknowledge the character so the monitor can send the next */
	    fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
#if 0
	    printk("%c", c & 0xFF);
#endif
	    return c & 0xFF;
	}
    }

    return -1;
}

/* send a NUL-terminated string to the monitor, draining its output before
   each character and once more at the end */
static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
{
    while (*str) {

	/* the i960 monitor doesn't accept any new character if it has something to say */
	while (fore200e_monitor_getc(fore200e) >= 0);

	fore200e_monitor_putc(fore200e, *str++);
    }

    while (fore200e_monitor_getc(fore200e) >= 0);
}

/* firmware image suffix depends on host endianness */
#ifdef __LITTLE_ENDIAN
#define FW_EXT ".bin"
#else
#define FW_EXT "_ecd.bin2"
#endif

/* fetch the firmware image via request_firmware, copy it word by word into
   the board at the header's load offset, then tell the monitor to jump to
   the start offset and wait for the cp to come up (continues on the next
   line) */
static int fore200e_load_and_start_fw(struct fore200e *fore200e)
{
    const struct firmware *firmware;
    struct device *device;
    struct fw_header *fw_header;
    const __le32 *fw_data;
    u32 fw_size;
    u32 __iomem *load_addr;
    char buf[48];
    int err = -ENODEV;

    /* pick the struct device matching the bus this board sits on */
    if (strcmp(fore200e->bus->model_name, "PCA-200E") == 0)
	device = &((struct pci_dev *) fore200e->bus_dev)->dev;
#ifdef CONFIG_SBUS
    else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0)
	device = &((struct platform_device *) fore200e->bus_dev)->dev;
#endif
    else
	return err;

    sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);

    if ((err =
request_firmware(&firmware, buf, device)) < 0) {
	printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
	return err;
    }

    fw_data   = (__le32 *) firmware->data;
    fw_size   = firmware->size / sizeof(u32);
    fw_header = (struct fw_header *) firmware->data;
    load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);

    DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
	    fore200e->name, load_addr, fw_size);

    if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
	printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
	goto release;
    }

    /* copy the image into board memory one 32-bit word at a time */
    for (; fw_size--; fw_data++, load_addr++)
	fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);

    DPRINTK(2, "device %s firmware being started\n", fore200e->name);

#if defined(__sparc_v9__)
    /* reported to be required by SBA cards on some sparc64 hosts */
    fore200e_spin(100);
#endif

    /* ask the i960 monitor to jump to the firmware entry point */
    sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));

    fore200e_monitor_puts(fore200e, buf);

    if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
	printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
	goto release;
    }

    printk(FORE200E "device %s firmware started\n", fore200e->name);

    fore200e->state = FORE200E_STATE_START_FW;
    err = 0;

release:
    release_firmware(firmware);
    return err;
}

/* register the board with the ATM layer and record the connection-id
   geometry and the initial cell-rate budget */
static int fore200e_register(struct fore200e *fore200e, struct device *parent)
{
    struct atm_dev* atm_dev;

    DPRINTK(2, "device %s being registered\n", fore200e->name);

    atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
			       -1, NULL);
    if (atm_dev == NULL) {
	printk(FORE200E "unable to register device %s\n", fore200e->name);
	return -ENODEV;
    }

    /* link the atm_dev and the driver state both ways */
    atm_dev->dev_data = fore200e;
    fore200e->atm_dev = atm_dev;

    atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
    atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;

    fore200e->available_cell_rate = ATM_OC3_PCR;

    fore200e->state = FORE200E_STATE_REGISTER;
    return 0;
}

/* full bring-up sequence for one board, run in strict order; each step
   advances fore200e->state so a later shutdown can unwind exactly what
   was set up (continues on the next line) */
static int fore200e_init(struct fore200e *fore200e,
			 struct device *parent)
{
    if (fore200e_register(fore200e, parent) < 0)
	return -ENODEV;

    if (fore200e->bus->configure(fore200e) < 0)
	return -ENODEV;

    if (fore200e->bus->map(fore200e) < 0)
	return -ENODEV;

    if (fore200e_reset(fore200e, 1) < 0)
	return -ENODEV;

    if (fore200e_load_and_start_fw(fore200e) < 0)
	return -ENODEV;

    if (fore200e_initialize(fore200e) < 0)
	return -ENODEV;

    if (fore200e_init_cmd_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_tx_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_rx_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_init_bs_queue(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_alloc_rx_buf(fore200e) < 0)
	return -ENOMEM;

    if (fore200e_get_esi(fore200e) < 0)
	return -EIO;

    if (fore200e_irq_request(fore200e) < 0)
	return -EBUSY;

    fore200e_supply(fore200e);

    /* all done, board initialization is now complete */
    fore200e->state = FORE200E_STATE_COMPLETE;
    return 0;
}

#ifdef CONFIG_SBUS
static const struct of_device_id fore200e_sba_match[];

/* platform_driver probe for SBus SBA-200E boards: allocate the per-board
   state, record IRQ and physical base from the OF device, then run the
   common fore200e_init sequence */
static int fore200e_sba_probe(struct platform_device *op)
{
    const struct of_device_id *match;
    const struct fore200e_bus *bus;
    struct fore200e *fore200e;
    static int index = 0;    /* monotonically increasing board number */
    int err;

    match = of_match_device(fore200e_sba_match, &op->dev);
    if (!match)
	return -EINVAL;
    bus = match->data;

    fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
    if (!fore200e)
	return -ENOMEM;

    fore200e->bus       = bus;
    fore200e->bus_dev   = op;
    fore200e->irq       = op->archdata.irqs[0];
    fore200e->phys_base = op->resource[0].start;

    sprintf(fore200e->name, "%s-%d", bus->model_name, index);

    err = fore200e_init(fore200e, &op->dev);
    if (err < 0) {
	/* fore200e_shutdown unwinds whatever ->state says was set up */
	fore200e_shutdown(fore200e);
	kfree(fore200e);
	return err;
    }

    index++;
    dev_set_drvdata(&op->dev, fore200e);

    return 0;
}

/* platform_driver remove: tear down the board and release its state */
static int fore200e_sba_remove(struct platform_device *op)
{
    struct fore200e *fore200e = dev_get_drvdata(&op->dev);

    fore200e_shutdown(fore200e);
    kfree(fore200e);

    return 0;
}

/* OF match table binding the SBA-200E PROM name to its bus-ops entry */
static const struct of_device_id fore200e_sba_match[] = {
    {
	.name = SBA200E_PROM_NAME,
	.data = (void *)
&fore200e_bus[1],
    },
    {},
};
MODULE_DEVICE_TABLE(of, fore200e_sba_match);

static struct platform_driver fore200e_sba_driver = {
    .driver = {
	.name = "fore_200e",
	.of_match_table = fore200e_sba_match,
    },
    .probe  = fore200e_sba_probe,
    .remove = fore200e_sba_remove,
};
#endif

#ifdef CONFIG_PCI
/* pci_driver probe for PCA-200E boards: enable the device, restrict DMA to
   32 bits, allocate per-board state and run the common init sequence */
static int fore200e_pca_detect(struct pci_dev *pci_dev,
			       const struct pci_device_id *pci_ent)
{
    const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
    struct fore200e* fore200e;
    int err = 0;
    static int index = 0;    /* monotonically increasing board number */

    if (pci_enable_device(pci_dev)) {
	err = -EINVAL;
	goto out;
    }

    if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
	err = -EINVAL;
	goto out;
    }

    fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
    if (fore200e == NULL) {
	err = -ENOMEM;
	goto out_disable;
    }

    fore200e->bus       = bus;
    fore200e->bus_dev   = pci_dev;
    fore200e->irq       = pci_dev->irq;
    fore200e->phys_base = pci_resource_start(pci_dev, 0);

    /* NOTE(review): this first sprintf (index - 1) appears to be dead --
       the name is overwritten with "index" a few lines below before any
       visible use; presumably leftover from an earlier ordering */
    sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);

    pci_set_master(pci_dev);

    printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
	   fore200e->bus->model_name,
	   fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));

    sprintf(fore200e->name, "%s-%d", bus->model_name, index);

    err = fore200e_init(fore200e, &pci_dev->dev);
    if (err < 0) {
	/* fore200e_shutdown unwinds whatever ->state says was set up */
	fore200e_shutdown(fore200e);
	goto out_free;
    }

    ++index;
    pci_set_drvdata(pci_dev, fore200e);

out:
    return err;

out_free:
    kfree(fore200e);
out_disable:
    pci_disable_device(pci_dev);
    goto out;
}

/* pci_driver remove: tear down the board and release the PCI device */
static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
{
    struct fore200e *fore200e;

    fore200e = pci_get_drvdata(pci_dev);

    fore200e_shutdown(fore200e);
    kfree(fore200e);
    pci_disable_device(pci_dev);
}

/* PCI id table: the driver_data slot points at the PCA-200E bus-ops entry */
static struct pci_device_id fore200e_pca_tbl[] = {
    { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
      0, 0, (unsigned long) &fore200e_bus[0] },
    { 0, }
};

MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);

static struct pci_driver fore200e_pca_driver = {
    .name     = "fore_200e",
    .probe    = fore200e_pca_detect,
    .remove   =
	fore200e_pca_remove_one,
    .id_table = fore200e_pca_tbl,
};
#endif

/* module entry point: register the SBus and/or PCI drivers; if PCI
   registration fails after SBus succeeded, the SBus driver is unwound */
static int __init fore200e_module_init(void)
{
    int err = 0;

    printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");

#ifdef CONFIG_SBUS
    err = platform_driver_register(&fore200e_sba_driver);
    if (err)
	return err;
#endif

#ifdef CONFIG_PCI
    err = pci_register_driver(&fore200e_pca_driver);
#endif

#ifdef CONFIG_SBUS
    if (err)
	platform_driver_unregister(&fore200e_sba_driver);
#endif

    return err;
}

/* module exit point: unregister whichever bus drivers were registered */
static void __exit fore200e_module_cleanup(void)
{
#ifdef CONFIG_PCI
    pci_unregister_driver(&fore200e_pca_driver);
#endif
#ifdef CONFIG_SBUS
    platform_driver_unregister(&fore200e_sba_driver);
#endif
}

/* /proc read hook: emits one logical section per call, selected by
   counting *pos down with the repeated "if (!left--)" pattern; each branch
   returns the number of bytes written into 'page' (continues on the
   following lines) */
static int
fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
{
    struct fore200e*     fore200e = FORE200E_DEV(dev);
    struct fore200e_vcc* fore200e_vcc;
    struct atm_vcc*      vcc;
    int i, len, left = *pos;
    unsigned long flags;

    if (!left--) {

	if (fore200e_getstats(fore200e) < 0)
	    return -EIO;

	len = sprintf(page,"\n"
		      " device:\n"
		      " internal name:\t\t%s\n", fore200e->name);

	/* print bus-specific information */
	if (fore200e->bus->proc_read)
	    len += fore200e->bus->proc_read(fore200e, page + len);

	len += sprintf(page + len,
		       " interrupt line:\t\t%s\n"
		       " physical base address:\t0x%p\n"
		       " virtual base address:\t0x%p\n"
		       " factory address (ESI):\t%pM\n"
		       " board serial number:\t\t%d\n\n",
		       fore200e_irq_itoa(fore200e->irq),
		       (void*)fore200e->phys_base,
		       fore200e->virt_base,
		       fore200e->esi,
		       fore200e->esi[4] * 256 + fore200e->esi[5]);

	return len;
    }

    if (!left--)
	return sprintf(page,
		       " free small bufs, scheme 1:\t%d\n"
		       " free large bufs, scheme 1:\t%d\n"
		       " free small bufs, scheme 2:\t%d\n"
		       " free large bufs, scheme 2:\t%d\n",
		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
		       fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE
].freebuf_count);

    if (!left--) {
	/* the upper 16 bits of the heartbeat word carry 0xDEAD when the
	   cell processor has reported a fatal error */
	u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
	len = sprintf(page,"\n\n" " cell processor:\n" " heartbeat state:\t\t");

	if (hb >> 16 != 0xDEAD)
	    len += sprintf(page + len, "0x%08x\n", hb);
	else
	    len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);

	return len;
    }

    if (!left--) {
	static const char* media_name[] = {
	    "unshielded twisted pair",
	    "multimode optical fiber ST",
	    "multimode optical fiber SC",
	    "single-mode optical fiber ST",
	    "single-mode optical fiber SC",
	    "unknown"
	};

	static const char* oc3_mode[] = {
	    "normal operation",
	    "diagnostic loopback",
	    "line loopback",
	    "unknown"
	};

	u32 fw_release     = fore200e->bus->read(&fore200e->cp_queues->fw_release);
	u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
	u32 oc3_revision   = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
	u32 media_index    = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
	u32 oc3_index;

	/* clamp out-of-range media codes onto the "unknown" entry */
	if (media_index > 4)
	    media_index = 5;

	switch (fore200e->loop_mode) {
	case ATM_LM_NONE:    oc3_index = 0; break;
	case ATM_LM_LOC_PHY: oc3_index = 1; break;
	case ATM_LM_RMT_PHY: oc3_index = 2; break;
	default:             oc3_index = 3;
	}

	/* the shift pairs (<< 16 >> 24 etc.) isolate the version bytes
	   packed into the release words */
	return sprintf(page,
		       " firmware release:\t\t%d.%d.%d\n"
		       " monitor release:\t\t%d.%d\n"
		       " media type:\t\t\t%s\n"
		       " OC-3 revision:\t\t0x%x\n"
		       " OC-3 mode:\t\t\t%s",
		       fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
		       mon960_release >> 16, mon960_release << 16 >> 16,
		       media_name[ media_index ],
		       oc3_revision,
		       oc3_mode[ oc3_index ]);
    }

    if (!left--) {
	struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;

	return sprintf(page,
		       "\n\n"
		       " monitor:\n"
		       " version number:\t\t%d\n"
		       " boot status word:\t\t0x%08x\n",
		       fore200e->bus->read(&cp_monitor->mon_version),
		       fore200e->bus->read(&cp_monitor->bstat));
    }

    if (!left--)
	return sprintf(page,
		       "\n"
		       " device statistics:\n"
		       " 4b5b:\n"
		       " crc_header_errors:\t\t%10u\n"
		       " framing_errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->phy.crc_header_errors),
		       be32_to_cpu(fore200e->stats->phy.framing_errors));

    if (!left--)
	return sprintf(page, "\n"
		       " OC-3:\n"
		       " section_bip8_errors:\t%10u\n"
		       " path_bip8_errors:\t\t%10u\n"
		       " line_bip24_errors:\t\t%10u\n"
		       " line_febe_errors:\t\t%10u\n"
		       " path_febe_errors:\t\t%10u\n"
		       " corr_hcs_errors:\t\t%10u\n"
		       " ucorr_hcs_errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
		       be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
		       be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
		       be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
		       be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
		       be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
		       be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));

    if (!left--)
	return sprintf(page,"\n"
		       " ATM:\t\t\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " vpi out of range:\t\t%10u\n"
		       " vpi no conn:\t\t%10u\n"
		       " vci out of range:\t\t%10u\n"
		       " vci no conn:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->atm.cells_transmitted),
		       be32_to_cpu(fore200e->stats->atm.cells_received),
		       be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
		       be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
		       be32_to_cpu(fore200e->stats->atm.vci_bad_range),
		       be32_to_cpu(fore200e->stats->atm.vci_no_conn));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL0:\t\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal0.cells_received),
		       be32_to_cpu(fore200e->stats->aal0.cells_dropped));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL3/4:\n"
		       " SAR sublayer:\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " CRC errors:\t\t%10u\n"
		       " protocol errors:\t\t%10u\n\n"
		       " CS sublayer:\t\t PDUs\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " protocol errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal34.cells_received),
		       be32_to_cpu(fore200e->stats->aal34.cells_dropped),
		       be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
		       be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_received),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
		       be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));

    if (!left--)
	return sprintf(page,"\n"
		       " AAL5:\n"
		       " SAR sublayer:\t\t cells\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " congestions:\t\t%10u\n\n"
		       " CS sublayer:\t\t PDUs\n"
		       " TX:\t\t\t%10u\n"
		       " RX:\t\t\t%10u\n"
		       " dropped:\t\t\t%10u\n"
		       " CRC errors:\t\t%10u\n"
		       " protocol errors:\t\t%10u\n",
		       be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
		       be32_to_cpu(fore200e->stats->aal5.cells_received),
		       be32_to_cpu(fore200e->stats->aal5.cells_dropped),
		       be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_received),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
		       be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));

    if (!left--)
	return sprintf(page,"\n"
		       " AUX:\t\t allocation failures\n"
		       " small b1:\t\t\t%10u\n"
		       " large b1:\t\t\t%10u\n"
		       " small b2:\t\t\t%10u\n"
		       " large b2:\t\t\t%10u\n"
		       " RX PDUs:\t\t\t%10u\n"
		       " TX PDUs:\t\t\t%10lu\n",
		       be32_to_cpu(fore200e->stats->aux.small_b1_failed),
		       be32_to_cpu(fore200e->stats->aux.large_b1_failed),
		       be32_to_cpu(fore200e->stats->aux.small_b2_failed),
		       be32_to_cpu(fore200e->stats->aux.large_b2_failed),
		       be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
		       fore200e->tx_sat);

    if (!left--)
	return sprintf(page,"\n"
		       " receive carrier:\t\t\t%s\n",
		       fore200e->stats->aux.receive_carrier ?
		       "ON" : "OFF!");

    if (!left--) {
	return sprintf(page,"\n"
		       " VCCs:\n address VPI VCI AAL "
		       "TX PDUs TX min/max size RX PDUs RX min/max size\n");
    }

    /* the remaining sections are the ready VCs, one per read position */
    for (i = 0; i < NBR_CONNECT; i++) {

	vcc = fore200e->vc_map[i].vcc;

	if (vcc == NULL)
	    continue;

	/* q_lock guards the vc map against concurrent open/close */
	spin_lock_irqsave(&fore200e->q_lock, flags);

	if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {

	    fore200e_vcc = FORE200E_VCC(vcc);
	    ASSERT(fore200e_vcc);

	    len = sprintf(page,
			  " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
			  (u32)(unsigned long)vcc,
			  vcc->vpi, vcc->vci,
			  fore200e_atm2fore_aal(vcc->qos.aal),
			  fore200e_vcc->tx_pdu,
			  fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
			  fore200e_vcc->tx_max_pdu,
			  fore200e_vcc->rx_pdu,
			  fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
			  fore200e_vcc->rx_max_pdu);

	    spin_unlock_irqrestore(&fore200e->q_lock, flags);
	    return len;
	}

	spin_unlock_irqrestore(&fore200e->q_lock, flags);
    }

    return 0;
}

module_init(fore200e_module_init);
module_exit(fore200e_module_cleanup);

/* ATM-layer operations exported for this driver */
static const struct atmdev_ops fore200e_ops = {
    .open       = fore200e_open,
    .close      = fore200e_close,
    .ioctl      = fore200e_ioctl,
    .getsockopt = fore200e_getsockopt,
    .setsockopt = fore200e_setsockopt,
    .send       = fore200e_send,
    .change_qos = fore200e_change_qos,
    .proc_read  = fore200e_proc_read,
    .owner      = THIS_MODULE
};

/* per-bus operation tables: [0] = PCI PCA-200E, [1] = SBus SBA-200E
   (the pci/of match tables above index into this array by position) */
static const struct fore200e_bus fore200e_bus[] = {
#ifdef CONFIG_PCI
    { "PCA-200E", "pca200e", 32, 4, 32,
      fore200e_pca_read,
      fore200e_pca_write,
      fore200e_pca_dma_map,
      fore200e_pca_dma_unmap,
      fore200e_pca_dma_sync_for_cpu,
      fore200e_pca_dma_sync_for_device,
      fore200e_pca_dma_chunk_alloc,
      fore200e_pca_dma_chunk_free,
      fore200e_pca_configure,
      fore200e_pca_map,
      fore200e_pca_reset,
      fore200e_pca_prom_read,
      fore200e_pca_unmap,
      NULL,                          /* no irq_enable hook on PCI */
      fore200e_pca_irq_check,
      fore200e_pca_irq_ack,
      fore200e_pca_proc_read,
    },
#endif
#ifdef CONFIG_SBUS
    { "SBA-200E", "sba200e", 32, 64, 32,
      fore200e_sba_read,
      fore200e_sba_write,
      fore200e_sba_dma_map,
      fore200e_sba_dma_unmap,
      fore200e_sba_dma_sync_for_cpu,
      fore200e_sba_dma_sync_for_device,
      fore200e_sba_dma_chunk_alloc,
      fore200e_sba_dma_chunk_free,
      fore200e_sba_configure,
      fore200e_sba_map,
      fore200e_sba_reset,
      fore200e_sba_prom_read,
      fore200e_sba_unmap,
      fore200e_sba_irq_enable,
      fore200e_sba_irq_check,
      fore200e_sba_irq_ack,
      fore200e_sba_proc_read,
    },
#endif
    {}
};

MODULE_LICENSE("GPL");

/* firmware image names advertised to the firmware loader; the suffix
   choice mirrors the FW_EXT endianness selection above */
#ifdef CONFIG_PCI
#ifdef __LITTLE_ENDIAN__
MODULE_FIRMWARE("pca200e.bin");
#else
MODULE_FIRMWARE("pca200e_ecd.bin2");
#endif
#endif /* CONFIG_PCI */

#ifdef CONFIG_SBUS
MODULE_FIRMWARE("sba200e_ecd.bin2");
#endif
gpl-2.0
mingit/mstcp_v0.89.4
arch/metag/tbx/tbistring.c
4399
2792
/*
 * tbistring.c
 *
 * Copyright (C) 2001, 2002, 2003, 2005, 2007, 2012 Imagination Technologies.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 *
 * String table functions provided as part of the thread binary interface for
 * Meta processors
 */

#include <linux/export.h>
#include <linux/string.h>
#include <asm/tbx.h>

/*
 * There are not any functions to modify the string table currently, if these
 * are required at some later point I suggest having a separate module and
 * ensuring that creating new entries does not interfere with reading old
 * entries in any way.
 */

/**
 * __TBIFindStr() - find a TBISTR entry matching @str in the string table
 * @start:     table position to resume the search from, or NULL to start at
 *             the beginning of the global string table segment
 * @str:       string to look for
 * @match_len: number of characters of @str to compare; a negative value
 *             requests a prefix match of -@match_len characters, a positive
 *             value requests an exact match (historic callers may include
 *             the NUL terminator in the count — it is stripped below)
 *
 * Walks the table entry by entry, skipping zero-initialised gaps (entries
 * with a zero Tag, advanced over in 8-byte steps) and METAG_TBI_STRG group
 * entries, until either an entry whose string matches is found or the
 * METAG_TBI_STRE end marker is reached.
 *
 * Return: pointer to the matching TBISTR entry, or NULL if no match (or no
 * string table) exists.
 */
const TBISTR *__TBIFindStr(const TBISTR *start,
			   const char *str, int match_len)
{
	const TBISTR *search = start;
	bool exact = true;
	const TBISEG *seg;

	if (match_len < 0) {
		/* Make match_len always positive for the inner loop */
		match_len = -match_len;
		exact = false;
	} else {
		/*
		 * Also support historic behaviour, which expected match_len to
		 * include null terminator
		 */
		if (match_len && str[match_len-1] == '\0')
			match_len--;
	}

	if (!search) {
		/* Find global string table segment */
		seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
						  TBID_SEGSCOPE_GLOBAL,
						  TBID_SEGTYPE_STRING));
		if (!seg || seg->Bytes < sizeof(TBISTR))
			/* No string table! */
			return NULL;

		/* Start of string table */
		search = seg->pGAddr;
	}

	for (;;) {
		while (!search->Tag)
			/* Allow simple gaps which are just zero initialised */
			search = (const TBISTR *)((const char *)search + 8);

		if (search->Tag == METAG_TBI_STRE) {
			/* Reached the end of the table */
			search = NULL;
			break;
		}

		if ((search->Len >= match_len) &&
		    (!exact || (search->Len == match_len + 1)) &&
		    (search->Tag != METAG_TBI_STRG)) {
			/* Worth searching */
			if (!strncmp(str, (const char *)search->String,
				     match_len))
				break;
		}

		/* Next entry */
		search = (const TBISTR *)((const char *)search + search->Bytes);
	}

	return search;
}

/**
 * __TBITransStr() - look up the translation data attached to a string
 * @str: string to translate
 * @len: length handling as for __TBIFindStr() (negative = prefix match)
 *
 * Repeatedly searches the table with __TBIFindStr(), skipping entries
 * whose TransLen is METAG_TBI_STRX (no translation attached).  The
 * translation data starts after the string, rounded up to the next
 * 8-byte boundary.
 *
 * Return: base address of the translation data, or NULL if none found.
 */
const void *__TBITransStr(const char *str, int len)
{
	const TBISTR *search = NULL;
	const void *res = NULL;

	for (;;) {
		/* Search onwards */
		search = __TBIFindStr(search, str, len);

		/* No translation returns NULL */
		if (!search)
			break;

		/* Skip matching entries with no translation data */
		if (search->TransLen != METAG_TBI_STRX) {
			/* Calculate base of translation string */
			res = (const char *)search->String +
				((search->Len + 7) & ~7);
			break;
		}

		/* Next entry */
		search = (const TBISTR *)((const char *)search + search->Bytes);
	}

	/* Return base address of translation data or NULL */
	return res;
}
EXPORT_SYMBOL(__TBITransStr);
gpl-2.0
InfinitiveOS-Devices/android_kernel_motorola_msm8226
drivers/staging/tidspbridge/core/wdt.c
4911
3055
/* * wdt.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * IO dispatcher for a shared memory channel driver. * * Copyright (C) 2010 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> #include <dspbridge/dbdefs.h> #include <dspbridge/dspdeh.h> #include <dspbridge/dev.h> #include <dspbridge/_chnl_sm.h> #include <dspbridge/wdt.h> #include <dspbridge/host_os.h> #define OMAP34XX_WDT3_BASE (L4_PER_34XX_BASE + 0x30000) static struct dsp_wdt_setting dsp_wdt; void dsp_wdt_dpc(unsigned long data) { struct deh_mgr *deh_mgr; dev_get_deh_mgr(dev_get_first(), &deh_mgr); if (deh_mgr) bridge_deh_notify(deh_mgr, DSP_WDTOVERFLOW, 0); } irqreturn_t dsp_wdt_isr(int irq, void *data) { u32 value; /* ack wdt3 interrupt */ value = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); __raw_writel(value, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); tasklet_schedule(&dsp_wdt.wdt3_tasklet); return IRQ_HANDLED; } int dsp_wdt_init(void) { int ret = 0; dsp_wdt.sm_wdt = NULL; dsp_wdt.reg_base = ioremap(OMAP34XX_WDT3_BASE, SZ_4K); if (!dsp_wdt.reg_base) return -ENOMEM; tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0); dsp_wdt.fclk = clk_get(NULL, "wdt3_fck"); if (dsp_wdt.fclk) { dsp_wdt.iclk = clk_get(NULL, "wdt3_ick"); if (!dsp_wdt.iclk) { clk_put(dsp_wdt.fclk); dsp_wdt.fclk = NULL; ret = -EFAULT; } } else ret = -EFAULT; if (!ret) ret = request_irq(INT_34XX_WDT3_IRQ, dsp_wdt_isr, 0, "dsp_wdt", &dsp_wdt); /* Disable at this moment, it will be enabled when DSP starts */ if (!ret) disable_irq(INT_34XX_WDT3_IRQ); return ret; } void dsp_wdt_sm_set(void *data) { dsp_wdt.sm_wdt = data; 
dsp_wdt.sm_wdt->wdt_overflow = 5; /* in seconds */ } void dsp_wdt_exit(void) { free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt); tasklet_kill(&dsp_wdt.wdt3_tasklet); if (dsp_wdt.fclk) clk_put(dsp_wdt.fclk); if (dsp_wdt.iclk) clk_put(dsp_wdt.iclk); dsp_wdt.fclk = NULL; dsp_wdt.iclk = NULL; dsp_wdt.sm_wdt = NULL; if (dsp_wdt.reg_base) iounmap(dsp_wdt.reg_base); dsp_wdt.reg_base = NULL; } void dsp_wdt_enable(bool enable) { u32 tmp; static bool wdt_enable; if (wdt_enable == enable || !dsp_wdt.fclk || !dsp_wdt.iclk) return; wdt_enable = enable; if (enable) { clk_enable(dsp_wdt.fclk); clk_enable(dsp_wdt.iclk); dsp_wdt.sm_wdt->wdt_setclocks = 1; tmp = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); __raw_writel(tmp, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); enable_irq(INT_34XX_WDT3_IRQ); } else { disable_irq(INT_34XX_WDT3_IRQ); dsp_wdt.sm_wdt->wdt_setclocks = 0; clk_disable(dsp_wdt.iclk); clk_disable(dsp_wdt.fclk); } }
gpl-2.0
tbalden/One_X-2.6.39.4
fs/hpfs/anode.c
7983
15656
/*
 *  linux/fs/hpfs/anode.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  handling HPFS anode tree that contains file allocation info
 */

#include "hpfs_fn.h"

/* Find a sector in allocation tree */

/*
 * Walk the B+ allocation tree rooted at @btree, looking for file sector
 * @sec.  Internal nodes are descended iteratively (go_down); external
 * (leaf) runs map a file-sector range onto a contiguous disk range.
 *
 * @bh holds the buffer containing @btree on entry and is ALWAYS released
 * before returning (ownership transfers to this function).  When @inode
 * is given, the matched run is cached in its hpfs_inode_info.
 *
 * Returns the disk sector number, or -1 on error (cycle detected, sector
 * not present, or a bad mapping caught by the sb_chk sanity checks).
 */
secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
			struct bplus_header *btree, unsigned sec,
			struct buffer_head *bh)
{
	anode_secno a = -1;
	struct anode *anode;
	int i;
	int c1, c2 = 0;

	go_down:
	/* cycle detection: guards against a corrupted tree looping forever */
	if (hpfs_sb(s)->sb_chk)
		if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup"))
			return -1;
	if (btree->internal) {
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
				a = le32_to_cpu(btree->u.internal[i].down);
				brelse(bh);
				if (!(anode = hpfs_map_anode(s, a, &bh)))
					return -1;
				btree = &anode->btree;
				goto go_down;
			}
		hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
		brelse(bh);
		return -1;
	}
	/* leaf: find the run [file_secno, file_secno+length) containing sec */
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
		    le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
			a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
			if (hpfs_sb(s)->sb_chk)
				if (hpfs_chk_sectors(s, a, 1, "data")) {
					brelse(bh);
					return -1;
				}
			if (inode) {
				/* cache the whole run for subsequent lookups */
				struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
				hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
				hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
				hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
			}
			brelse(bh);
			return a;
		}
	hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
	brelse(bh);
	return -1;
}

/* Add a sector to tree */

/*
 * Append one sector at file offset @fsecno to the allocation tree rooted
 * at @node (an fnode when @fnod is nonzero, otherwise an anode).
 *
 * Fast paths: extend the last external run in place when the adjacent
 * disk sector can be allocated; otherwise allocate a fresh sector and a
 * new external entry.  When the leaf is full, new anodes are allocated
 * and the split is propagated upward (possibly growing a new root),
 * which is what the second half of this function deals with.
 *
 * Returns the newly allocated disk sector, or -1 on error.
 */
secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
{
	struct bplus_header *btree;
	struct anode *anode = NULL, *ranode = NULL;
	struct fnode *fnode;
	anode_secno a, na = -1, ra, up = -1;
	secno se;
	struct buffer_head *bh, *bh1, *bh2;
	int n;
	unsigned fs;
	int c1, c2 = 0;

	if (fnod) {
		if (!(fnode = hpfs_map_fnode(s, node, &bh)))
			return -1;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, node, &bh)))
			return -1;
		btree = &anode->btree;
	}
	a = node;
	go_down:
	/* an fnode may legitimately be empty (n == -1); an anode may not */
	if ((n = btree->n_used_nodes - 1) < -!!fnod) {
		hpfs_error(s, "anode %08x has no entries", a);
		brelse(bh);
		return -1;
	}
	if (btree->internal) {
		/* descend along the rightmost child, marking it "open" (-1) */
		a = le32_to_cpu(btree->u.internal[n].down);
		btree->u.internal[n].file_secno = cpu_to_le32(-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1"))
				return -1;
		if (!(anode = hpfs_map_anode(s, a, &bh)))
			return -1;
		btree = &anode->btree;
		goto go_down;
	}
	if (n >= 0) {
		/* appends must be exactly at the current end of file */
		if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
			hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
				le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
				fnod?'f':'a', node);
			brelse(bh);
			return -1;
		}
		/* best case: grab the disk sector right after the last run */
		if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
			btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1);
			mark_buffer_dirty(bh);
			brelse(bh);
			return se;
		}
	} else {
		if (fsecno) {
			hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
			brelse(bh);
			return -1;
		}
		/* first sector of the file: pick an allocation hint near node */
		se = !fnod ? node : (node + 16384) & ~16383;
	}
	/* forward-allocation hint is clamped to [ALLOC_FWD_MIN, ALLOC_FWD_MAX] */
	if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
		brelse(bh);
		return -1;
	}
	fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
	if (!btree->n_free_nodes) {
		/* leaf is full: split into a new anode (na); if the root is an
		   fnode it is converted to internal in place, otherwise a
		   reserve anode (ra) is allocated for the root copy later */
		up = a != node ? le32_to_cpu(anode->up) : -1;
		if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
			brelse(bh);
			hpfs_free_sectors(s, se, 1);
			return -1;
		}
		if (a == node && fnod) {
			anode->up = cpu_to_le32(node);
			anode->btree.fnode_parent = 1;
			anode->btree.n_used_nodes = btree->n_used_nodes;
			anode->btree.first_free = btree->first_free;
			anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
			memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
			btree->internal = 1;
			btree->n_free_nodes = 11;
			btree->n_used_nodes = 1;
			btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
			btree->u.internal[0].file_secno = cpu_to_le32(-1);
			btree->u.internal[0].down = cpu_to_le32(na);
			mark_buffer_dirty(bh);
		} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
			brelse(bh);
			brelse(bh1);
			hpfs_free_sectors(s, se, 1);
			hpfs_free_sectors(s, na, 1);
			return -1;
		}
		brelse(bh);
		bh = bh1;
		btree = &anode->btree;
	}
	/* insert the new external entry (12 bytes each) */
	btree->n_free_nodes--; n = btree->n_used_nodes++;
	btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12);
	btree->u.external[n].disk_secno = cpu_to_le32(se);
	btree->u.external[n].file_secno = cpu_to_le32(fs);
	btree->u.external[n].length = cpu_to_le32(1);
	mark_buffer_dirty(bh);
	brelse(bh);
	if ((a == node && fnod) || na == -1)
		return se;
	/* propagate the split upward until an ancestor has room */
	c2 = 0;
	while (up != (anode_secno)-1) {
		struct anode *new_anode;
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2"))
				return -1;
		if (up != node || !fnod) {
			if (!(anode = hpfs_map_anode(s, up, &bh)))
				return -1;
			btree = &anode->btree;
		} else {
			if (!(fnode = hpfs_map_fnode(s, up, &bh)))
				return -1;
			btree = &fnode->btree;
		}
		if (btree->n_free_nodes) {
			/* ancestor has room: link the new child (8-byte entry),
			   drop the unused reserve anode and fix parent pointers */
			btree->n_free_nodes--; n = btree->n_used_nodes++;
			btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8);
			btree->u.internal[n].file_secno = cpu_to_le32(-1);
			btree->u.internal[n].down = cpu_to_le32(na);
			btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
			mark_buffer_dirty(bh);
			brelse(bh);
			brelse(bh2);
			hpfs_free_sectors(s, ra, 1);
			if ((anode = hpfs_map_anode(s, na, &bh))) {
				anode->up = cpu_to_le32(up);
				anode->btree.fnode_parent = up == node && fnod;
				mark_buffer_dirty(bh);
				brelse(bh);
			}
			return se;
		}
		/* ancestor is full too: wrap the previous level in a fresh
		   internal anode and keep climbing */
		up = up != node ? le32_to_cpu(anode->up) : -1;
		btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		a = na;
		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
			anode = new_anode;
			/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
			anode->btree.internal = 1;
			anode->btree.n_used_nodes = 1;
			anode->btree.n_free_nodes = 59;
			anode->btree.first_free = cpu_to_le16(16);
			anode->btree.u.internal[0].down = cpu_to_le32(a);
			anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
			mark_buffer_dirty(bh);
			brelse(bh);
			if ((anode = hpfs_map_anode(s, a, &bh))) {
				anode->up = cpu_to_le32(na);
				mark_buffer_dirty(bh);
				brelse(bh);
			}
		} else na = a;
	}
	/* grew past the old root: install ranode (copy of the old root) and
	   the new chain under a fresh 2-entry internal root at @node */
	if ((anode = hpfs_map_anode(s, na, &bh))) {
		anode->up = cpu_to_le32(node);
		if (fnod)
			anode->btree.fnode_parent = 1;
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	if (!fnod) {
		if (!(anode = hpfs_map_anode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &anode->btree;
	} else {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &fnode->btree;
	}
	ranode->up = cpu_to_le32(node);
	memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
	if (fnod)
		ranode->btree.fnode_parent = 1;
	ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes;
	if (ranode->btree.internal)
		for (n = 0; n < ranode->btree.n_used_nodes; n++) {
			/* children moved under ranode: repoint their up links */
			struct anode *unode;
			if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
				unode->up = cpu_to_le32(ra);
				unode->btree.fnode_parent = 0;
				mark_buffer_dirty(bh1);
				brelse(bh1);
			}
		}
	btree->internal = 1;
	btree->n_free_nodes = fnod ? 10 : 58;
	btree->n_used_nodes = 2;
	btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
	btree->u.internal[0].file_secno = cpu_to_le32(fs);
	btree->u.internal[0].down = cpu_to_le32(ra);
	btree->u.internal[1].file_secno = cpu_to_le32(-1);
	btree->u.internal[1].down = cpu_to_le32(na);
	mark_buffer_dirty(bh);
	brelse(bh);
	mark_buffer_dirty(bh2);
	brelse(bh2);
	return se;
}

/*
 * Remove allocation tree. Recursion would look much nicer but
 * I want to avoid it because it can cause stack overflow.
 */

/*
 * Free every sector referenced by the tree rooted at @btree, and every
 * anode of the tree itself, using an explicit iterative depth-first walk
 * (go_down/go_up) with cycle detection on both axes.
 */
void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
{
	struct bplus_header *btree1 = btree;
	struct anode *anode = NULL;
	anode_secno ano = 0, oano;
	struct buffer_head *bh;
	int level = 0;
	int pos = 0;
	int i;
	int c1, c2 = 0;
	int d1, d2;

	go_down:
	d2 = 0;
	while (btree1->internal) {
		ano = le32_to_cpu(btree1->u.internal[pos].down);
		if (level) brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
				return;
		if (!(anode = hpfs_map_anode(s, ano, &bh)))
			return;
		btree1 = &anode->btree;
		level++;
		pos = 0;
	}
	/* leaf: free all data runs */
	for (i = 0; i < btree1->n_used_nodes; i++)
		hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
	go_up:
	if (!level)
		return;
	brelse(bh);
	if (hpfs_sb(s)->sb_chk)
		if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2"))
			return;
	hpfs_free_sectors(s, ano, 1);
	oano = ano;
	/* NOTE(review): anode->up is read after brelse(bh) released the
	 * buffer holding it — relies on the buffer staying valid; verify
	 * against later upstream fixes */
	ano = le32_to_cpu(anode->up);
	if (--level) {
		if (!(anode = hpfs_map_anode(s, ano, &bh)))
			return;
		btree1 = &anode->btree;
	} else btree1 = btree;
	/* resume in the parent just after the child we finished */
	for (i = 0; i < btree1->n_used_nodes; i++) {
		if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
			if ((pos = i + 1) < btree1->n_used_nodes)
				goto go_down;
			else
				goto go_up;
		}
	}
	hpfs_error(s,
		   "reference to anode %08x not found in anode %08x "
		   "(probably bad up pointer)",
		   oano, level ? ano : -1);
	if (level)
		brelse(bh);
}

/* Just a wrapper around hpfs_bplus_lookup .. used for reading eas */

/* Map anode @a and delegate to hpfs_bplus_lookup (which releases the
 * buffer); returns the disk sector for file sector @sec, or -1. */
static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (!(anode = hpfs_map_anode(s, a, &bh)))
		return -1;
	return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
}

/*
 * Read @len bytes of extended-attribute data starting at byte offset
 * @pos into @buf.  The EA is stored either behind an anode tree (@ano
 * nonzero, @a is the anode) or as a contiguous run starting at sector
 * @a.  Copies sector-by-sector (512-byte sectors). Returns 0 or -1.
 */
int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
		 unsigned len, char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;

	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_chk_sectors(s, sec, 1, "ea #1"))
				return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		/* copy at most to the end of the current 512-byte sector */
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(buf, data + (pos & 0x1ff), l);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

/*
 * Write counterpart of hpfs_ea_read: copies @len bytes from @buf into
 * the EA storage at byte offset @pos and marks each touched sector
 * dirty.  Returns 0 or -1.
 */
int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
		  unsigned len, const char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;

	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_chk_sectors(s, sec, 1, "ea #2"))
				return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(data + (pos & 0x1ff), buf, l);
		mark_buffer_dirty(bh);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

/*
 * Free the storage behind an extended attribute: the whole anode tree
 * when @ano is set (then @len is unused), otherwise @len bytes of
 * contiguous sectors starting at @a.
 */
void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (ano) {
		if (!(anode = hpfs_map_anode(s, a, &bh)))
			return;
		hpfs_remove_btree(s, &anode->btree);
		brelse(bh);
		hpfs_free_sectors(s, a, 1);
	} else hpfs_free_sectors(s, a, (len + 511) >> 9);
}

/* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */

/*
 * Truncate the allocation tree rooted at @f (fnode when @fno, else
 * anode) so that it covers exactly @secs file sectors.  @secs == 0
 * removes the whole tree (and resets/frees the root).  Internal levels
 * are pruned on the way down; the final leaf run is shortened in place.
 */
void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
{
	struct fnode *fnode;
	struct anode *anode;
	struct buffer_head *bh;
	struct bplus_header *btree;
	anode_secno node = f;
	int i, j, nodes;
	int c1, c2 = 0;

	if (fno) {
		if (!(fnode = hpfs_map_fnode(s, f, &bh)))
			return;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, f, &bh)))
			return;
		btree = &anode->btree;
	}
	if (!secs) {
		/* truncate to zero: drop the whole tree */
		hpfs_remove_btree(s, btree);
		if (fno) {
			btree->n_free_nodes = 8;
			btree->n_used_nodes = 0;
			btree->first_free = cpu_to_le16(8);
			btree->internal = 0;
			mark_buffer_dirty(bh);
		} else hpfs_free_sectors(s, f, 1);
		brelse(bh);
		return;
	}
	while (btree->internal) {
		nodes = btree->n_used_nodes + btree->n_free_nodes;
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs)
				goto f;
		brelse(bh);
		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
		return;
	f:
		/* free all subtrees entirely past the cut point */
		for (j = i + 1; j < btree->n_used_nodes; j++)
			hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
		btree->n_used_nodes = i + 1;
		btree->n_free_nodes = nodes - btree->n_used_nodes;
		btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
		mark_buffer_dirty(bh);
		if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
			/* cut falls exactly on a subtree boundary: done */
			brelse(bh);
			return;
		}
		node = le32_to_cpu(btree->u.internal[i].down);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
				return;
		if (!(anode = hpfs_map_anode(s, node, &bh)))
			return;
		btree = &anode->btree;
	}
	nodes = btree->n_used_nodes + btree->n_free_nodes;
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs)
			goto ff;
	brelse(bh);
	return;
	ff:
	if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
		if (i) i--;
	}
	else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
		/* shorten the run that straddles the cut point */
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
			le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
			- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
		btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
	}
	/* free all runs entirely past the cut point */
	for (j = i + 1; j < btree->n_used_nodes; j++)
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
	btree->n_used_nodes = i + 1;
	btree->n_free_nodes = nodes - btree->n_used_nodes;
	btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
	mark_buffer_dirty(bh);
	brelse(bh);
}

/* Remove file or directory and it's eas - note that directory must
   be empty when this is called. */

/*
 * Delete fnode @fno: its allocation tree (file) or dtree (directory),
 * all indirect extended attributes, the external EA area, and finally
 * the fnode sector itself.
 */
void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	struct extended_attribute *ea;
	struct extended_attribute *ea_end;

	if (!(fnode = hpfs_map_fnode(s, fno, &bh)))
		return;
	if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree);
	else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
	ea_end = fnode_end_ea(fnode);
	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
		if (ea->indirect)
			hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l));
	brelse(bh);
	hpfs_free_sectors(s, fno, 1);
}
gpl-2.0
Quarx2k/android_kernel_asus_padfone_s
fs/hpfs/buffer.c
7983
3825
/*
 *  linux/fs/hpfs/buffer.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include "hpfs_fn.h"

/* Map a sector into a buffer and return pointers to it and to the buffer. */

/*
 * Read sector @secno and return a pointer to its data; the backing
 * buffer_head is returned through @bhp and must be released by the
 * caller with brelse().  @ahead is a read-ahead hint, currently unused.
 * Returns NULL on read error.
 */
void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
		 int ahead)
{
	struct buffer_head *bh;

	hpfs_lock_assert(s);

	cond_resched();

	*bhp = bh = sb_bread(s, secno);
	if (bh != NULL)
		return bh->b_data;
	else {
		printk("HPFS: hpfs_map_sector: read error\n");
		return NULL;
	}
}

/* Like hpfs_map_sector but don't read anything */

/*
 * Get a buffer for sector @secno without reading it from disk — for
 * callers that are about to overwrite the whole sector.  The buffer is
 * marked uptodate so it can be written back without a prior read.
 * Returns NULL if the buffer cannot be obtained.
 */
void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;
	/*return hpfs_map_sector(s, secno, bhp, 0);*/

	hpfs_lock_assert(s);

	cond_resched();

	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		printk("HPFS: hpfs_get_sector: getblk failed\n");
		return NULL;
	}
}

/* Map 4 sectors into a 4buffer and return pointers to it and to the buffer. */

/*
 * Read the 4-sector (2048-byte) block starting at @secno into a private
 * kmalloc'd copy held by @qbh.  @secno must be 4-sector aligned.
 * Release with hpfs_brelse4(); write back with hpfs_mark_4buffers_dirty().
 * @ahead is a read-ahead hint, currently unused.  Returns the data
 * pointer or NULL on error.
 */
void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
		   int ahead)
{
	struct buffer_head *bh;
	char *data;

	hpfs_lock_assert(s);

	cond_resched();

	if (secno & 3) {
		printk("HPFS: hpfs_map_4sectors: unaligned read\n");
		return NULL;
	}

	qbh->data = data = kmalloc(2048, GFP_NOFS);
	if (!data) {
		printk("HPFS: hpfs_map_4sectors: out of memory\n");
		goto bail;
	}

	qbh->bh[0] = bh = sb_bread(s, secno);
	if (!bh)
		goto bail0;
	memcpy(data, bh->b_data, 512);

	qbh->bh[1] = bh = sb_bread(s, secno + 1);
	if (!bh)
		goto bail1;
	memcpy(data + 512, bh->b_data, 512);

	qbh->bh[2] = bh = sb_bread(s, secno + 2);
	if (!bh)
		goto bail2;
	memcpy(data + 2 * 512, bh->b_data, 512);

	qbh->bh[3] = bh = sb_bread(s, secno + 3);
	if (!bh)
		goto bail3;
	memcpy(data + 3 * 512, bh->b_data, 512);

	return data;

 bail3:
	brelse(qbh->bh[2]);
 bail2:
	brelse(qbh->bh[1]);
 bail1:
	brelse(qbh->bh[0]);
 bail0:
	kfree(data);
	printk("HPFS: hpfs_map_4sectors: read error\n");
 bail:
	return NULL;
}

/* Don't read sectors */

/*
 * Like hpfs_map_4sectors() but obtains the four buffers without reading
 * them (hpfs_get_sector semantics) — for callers overwriting the whole
 * 2048-byte block.  @secno must be 4-sector aligned.
 */
void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
                          struct quad_buffer_head *qbh)
{
	cond_resched();

	hpfs_lock_assert(s);

	if (secno & 3) {
		printk("HPFS: hpfs_get_4sectors: unaligned read\n");
		return NULL;
	}

	/*return hpfs_map_4sectors(s, secno, qbh, 0);*/
	if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
		printk("HPFS: hpfs_get_4sectors: out of memory\n");
		return NULL;
	}
	if (!(hpfs_get_sector(s, secno, &qbh->bh[0]))) goto bail0;
	if (!(hpfs_get_sector(s, secno + 1, &qbh->bh[1]))) goto bail1;
	if (!(hpfs_get_sector(s, secno + 2, &qbh->bh[2]))) goto bail2;
	if (!(hpfs_get_sector(s, secno + 3, &qbh->bh[3]))) goto bail3;
	memcpy(qbh->data, qbh->bh[0]->b_data, 512);
	memcpy(qbh->data + 512, qbh->bh[1]->b_data, 512);
	memcpy(qbh->data + 2*512, qbh->bh[2]->b_data, 512);
	memcpy(qbh->data + 3*512, qbh->bh[3]->b_data, 512);
	return qbh->data;

	bail3:	brelse(qbh->bh[2]);
	bail2:	brelse(qbh->bh[1]);
	bail1:	brelse(qbh->bh[0]);
	bail0:
	/* NOTE(review): qbh->data is not freed on this error path — the
	   2048-byte allocation appears to leak; confirm against callers */
	return NULL;
}

/* Release the four buffers and the private data copy of a quad buffer. */
void hpfs_brelse4(struct quad_buffer_head *qbh)
{
	brelse(qbh->bh[3]);
	brelse(qbh->bh[2]);
	brelse(qbh->bh[1]);
	brelse(qbh->bh[0]);
	kfree(qbh->data);
}	

/* Copy the private data copy back into the four buffers and mark them
 * dirty so the block layer writes them out. */
void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
{
	PRINTK(("hpfs_mark_4buffers_dirty\n"));
	memcpy(qbh->bh[0]->b_data, qbh->data, 512);
	memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512);
	memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
	memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
	mark_buffer_dirty(qbh->bh[0]);
	mark_buffer_dirty(qbh->bh[1]);
	mark_buffer_dirty(qbh->bh[2]);
	mark_buffer_dirty(qbh->bh[3]);
}
gpl-2.0
GuneetAtwal/android_kernel_iocean_mtk6589t
drivers/scsi/qlogicfas.c
10031
5756
/* * Qlogic FAS408 ISA card driver * * Copyright 1994, Tom Zerucha. * tz@execpc.com * * Redistributable under terms of the GNU General Public License * * For the avoidance of doubt the "preferred form" of this code is one which * is in an open non patent encumbered format. Where cryptographic key signing * forms part of the process of creating an executable the information * including keys needed to generate an equivalently functional executable * are deemed to be part of the source code. * * Check qlogicfas408.c for more credits and info. */ #include <linux/module.h> #include <linux/blkdev.h> /* to get disk capacity */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/unistd.h> #include <linux/spinlock.h> #include <linux/stat.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/dma.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "qlogicfas408.h" /* Set the following to 2 to use normal interrupt (active high/totempole- * tristate), otherwise use 0 (REQUIRED FOR PCMCIA) for active low, open * drain */ #define INT_TYPE 2 static char qlogicfas_name[] = "qlogicfas"; /* * Look for qlogic card and init if found */ static struct Scsi_Host *__qlogicfas_detect(struct scsi_host_template *host, int qbase, int qlirq) { int qltyp; /* type of chip */ int qinitid; struct Scsi_Host *hreg; /* registered host structure */ struct qlogicfas408_priv *priv; /* Qlogic Cards only exist at 0x230 or 0x330 (the chip itself * decodes the address - I check 230 first since MIDI cards are * typically at 0x330 * * Theoretically, two Qlogic cards can coexist in the same system. * This should work by simply using this as a loadable module for * the second card, but I haven't tested this. 
*/ if (!qbase || qlirq == -1) goto err; if (!request_region(qbase, 0x10, qlogicfas_name)) { printk(KERN_INFO "%s: address %#x is busy\n", qlogicfas_name, qbase); goto err; } if (!qlogicfas408_detect(qbase, INT_TYPE)) { printk(KERN_WARNING "%s: probe failed for %#x\n", qlogicfas_name, qbase); goto err_release_mem; } printk(KERN_INFO "%s: Using preset base address of %03x," " IRQ %d\n", qlogicfas_name, qbase, qlirq); qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE); qinitid = host->this_id; if (qinitid < 0) qinitid = 7; /* if no ID, use 7 */ qlogicfas408_setup(qbase, qinitid, INT_TYPE); hreg = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv)); if (!hreg) goto err_release_mem; priv = get_priv_by_host(hreg); hreg->io_port = qbase; hreg->n_io_port = 16; hreg->dma_channel = -1; if (qlirq != -1) hreg->irq = qlirq; priv->qbase = qbase; priv->qlirq = qlirq; priv->qinitid = qinitid; priv->shost = hreg; priv->int_type = INT_TYPE; sprintf(priv->qinfo, "Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d", qltyp, qbase, qlirq, QL_TURBO_PDMA); host->name = qlogicfas_name; if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogicfas_name, hreg)) goto free_scsi_host; if (scsi_add_host(hreg, NULL)) goto free_interrupt; scsi_scan_host(hreg); return hreg; free_interrupt: free_irq(qlirq, hreg); free_scsi_host: scsi_host_put(hreg); err_release_mem: release_region(qbase, 0x10); err: return NULL; } #define MAX_QLOGICFAS 8 static struct qlogicfas408_priv *cards; static int iobase[MAX_QLOGICFAS]; static int irq[MAX_QLOGICFAS] = { [0 ... 
MAX_QLOGICFAS-1] = -1 }; module_param_array(iobase, int, NULL, 0); module_param_array(irq, int, NULL, 0); MODULE_PARM_DESC(iobase, "I/O address"); MODULE_PARM_DESC(irq, "IRQ"); static int __devinit qlogicfas_detect(struct scsi_host_template *sht) { struct Scsi_Host *shost; struct qlogicfas408_priv *priv; int num; for (num = 0; num < MAX_QLOGICFAS; num++) { shost = __qlogicfas_detect(sht, iobase[num], irq[num]); if (shost == NULL) { /* no more devices */ break; } priv = get_priv_by_host(shost); priv->next = cards; cards = priv; } return num; } static int qlogicfas_release(struct Scsi_Host *shost) { struct qlogicfas408_priv *priv = get_priv_by_host(shost); scsi_remove_host(shost); if (shost->irq) { qlogicfas408_disable_ints(priv); free_irq(shost->irq, shost); } if (shost->dma_channel != 0xff) free_dma(shost->dma_channel); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); scsi_host_put(shost); return 0; } /* * The driver template is also needed for PCMCIA */ static struct scsi_host_template qlogicfas_driver_template = { .module = THIS_MODULE, .name = qlogicfas_name, .proc_name = qlogicfas_name, .info = qlogicfas408_info, .queuecommand = qlogicfas408_queuecommand, .eh_abort_handler = qlogicfas408_abort, .eh_bus_reset_handler = qlogicfas408_bus_reset, .bios_param = qlogicfas408_biosparam, .can_queue = 1, .this_id = -1, .sg_tablesize = SG_ALL, .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, }; static __init int qlogicfas_init(void) { if (!qlogicfas_detect(&qlogicfas_driver_template)) { /* no cards found */ printk(KERN_INFO "%s: no cards were found, please specify " "I/O address and IRQ using iobase= and irq= " "options", qlogicfas_name); return -ENODEV; } return 0; } static __exit void qlogicfas_exit(void) { struct qlogicfas408_priv *priv; for (priv = cards; priv != NULL; priv = priv->next) qlogicfas_release(priv->shost); } MODULE_AUTHOR("Tom Zerucha, Michael Griffith"); MODULE_DESCRIPTION("Driver for the Qlogic FAS408 
based ISA card"); MODULE_LICENSE("GPL"); module_init(qlogicfas_init); module_exit(qlogicfas_exit);
gpl-2.0
forumi0721/bananapi_linux-sunxi
arch/cris/arch-v10/drivers/axisflashmap.c
11567
11688
/* * Physical mapping layer for MTD using the Axis partitiontable format * * Copyright (c) 2001, 2002 Axis Communications AB * * This file is under the GPL. * * First partition is always sector 0 regardless of if we find a partitiontable * or not. In the start of the next sector, there can be a partitiontable that * tells us what other partitions to define. If there isn't, we use a default * partition split defined below. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mtd/concat.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> #include <linux/mtd/mtdram.h> #include <linux/mtd/partitions.h> #include <asm/axisflashmap.h> #include <asm/mmu.h> #include <arch/sv_addr_ag.h> #ifdef CONFIG_CRIS_LOW_MAP #define FLASH_UNCACHED_ADDR KSEG_8 #define FLASH_CACHED_ADDR KSEG_5 #else #define FLASH_UNCACHED_ADDR KSEG_E #define FLASH_CACHED_ADDR KSEG_F #endif #if CONFIG_ETRAX_FLASH_BUSWIDTH==1 #define flash_data __u8 #elif CONFIG_ETRAX_FLASH_BUSWIDTH==2 #define flash_data __u16 #elif CONFIG_ETRAX_FLASH_BUSWIDTH==4 #define flash_data __u32 #endif /* From head.S */ extern unsigned long romfs_start, romfs_length, romfs_in_flash; /* The master mtd for the entire flash. */ struct mtd_info* axisflash_mtd = NULL; /* Map driver functions. */ static map_word flash_read(struct map_info *map, unsigned long ofs) { map_word tmp; tmp.x[0] = *(flash_data *)(map->map_priv_1 + ofs); return tmp; } static void flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { memcpy(to, (void *)(map->map_priv_1 + from), len); } static void flash_write(struct map_info *map, map_word d, unsigned long adr) { *(flash_data *)(map->map_priv_1 + adr) = (flash_data)d.x[0]; } /* * The map for chip select e0. * * We run into tricky coherence situations if we mix cached with uncached * accesses to we only use the uncached version here. 
* * The size field is the total size where the flash chips may be mapped on the * chip select. MTD probes should find all devices there and it does not matter * if there are unmapped gaps or aliases (mirrors of flash devices). The MTD * probes will ignore them. * * The start address in map_priv_1 is in virtual memory so we cannot use * MEM_CSE0_START but must rely on that FLASH_UNCACHED_ADDR is the start * address of cse0. */ static struct map_info map_cse0 = { .name = "cse0", .size = MEM_CSE0_SIZE, .bankwidth = CONFIG_ETRAX_FLASH_BUSWIDTH, .read = flash_read, .copy_from = flash_copy_from, .write = flash_write, .map_priv_1 = FLASH_UNCACHED_ADDR }; /* * The map for chip select e1. * * If there was a gap between cse0 and cse1, map_priv_1 would get the wrong * address, but there isn't. */ static struct map_info map_cse1 = { .name = "cse1", .size = MEM_CSE1_SIZE, .bankwidth = CONFIG_ETRAX_FLASH_BUSWIDTH, .read = flash_read, .copy_from = flash_copy_from, .write = flash_write, .map_priv_1 = FLASH_UNCACHED_ADDR + MEM_CSE0_SIZE }; /* If no partition-table was found, we use this default-set. */ #define MAX_PARTITIONS 7 #define NUM_DEFAULT_PARTITIONS 3 /* * Default flash size is 2MB. CONFIG_ETRAX_PTABLE_SECTOR is most likely the * size of one flash block and "filesystem"-partition needs 5 blocks to be able * to use JFFS. */ static struct mtd_partition axis_default_partitions[NUM_DEFAULT_PARTITIONS] = { { .name = "boot firmware", .size = CONFIG_ETRAX_PTABLE_SECTOR, .offset = 0 }, { .name = "kernel", .size = 0x200000 - (6 * CONFIG_ETRAX_PTABLE_SECTOR), .offset = CONFIG_ETRAX_PTABLE_SECTOR }, { .name = "filesystem", .size = 5 * CONFIG_ETRAX_PTABLE_SECTOR, .offset = 0x200000 - (5 * CONFIG_ETRAX_PTABLE_SECTOR) } }; /* Initialize the ones normally used. 
*/ static struct mtd_partition axis_partitions[MAX_PARTITIONS] = { { .name = "part0", .size = CONFIG_ETRAX_PTABLE_SECTOR, .offset = 0 }, { .name = "part1", .size = 0, .offset = 0 }, { .name = "part2", .size = 0, .offset = 0 }, { .name = "part3", .size = 0, .offset = 0 }, { .name = "part4", .size = 0, .offset = 0 }, { .name = "part5", .size = 0, .offset = 0 }, { .name = "part6", .size = 0, .offset = 0 }, }; #ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE /* Main flash device */ static struct mtd_partition main_partition = { .name = "main", .size = 0, .offset = 0 }; #endif /* * Probe a chip select for AMD-compatible (JEDEC) or CFI-compatible flash * chips in that order (because the amd_flash-driver is faster). */ static struct mtd_info *probe_cs(struct map_info *map_cs) { struct mtd_info *mtd_cs = NULL; printk(KERN_INFO "%s: Probing a 0x%08lx bytes large window at 0x%08lx.\n", map_cs->name, map_cs->size, map_cs->map_priv_1); #ifdef CONFIG_MTD_CFI mtd_cs = do_map_probe("cfi_probe", map_cs); #endif #ifdef CONFIG_MTD_JEDECPROBE if (!mtd_cs) mtd_cs = do_map_probe("jedec_probe", map_cs); #endif return mtd_cs; } /* * Probe each chip select individually for flash chips. If there are chips on * both cse0 and cse1, the mtd_info structs will be concatenated to one struct * so that MTD partitions can cross chip boundries. * * The only known restriction to how you can mount your chips is that each * chip select must hold similar flash chips. But you need external hardware * to do that anyway and you can put totally different chips on cse0 and cse1 * so it isn't really much of a restriction. */ static struct mtd_info *flash_probe(void) { struct mtd_info *mtd_cse0; struct mtd_info *mtd_cse1; struct mtd_info *mtd_cse; mtd_cse0 = probe_cs(&map_cse0); mtd_cse1 = probe_cs(&map_cse1); if (!mtd_cse0 && !mtd_cse1) { /* No chip found. 
*/ return NULL; } if (mtd_cse0 && mtd_cse1) { struct mtd_info *mtds[] = { mtd_cse0, mtd_cse1 }; /* Since the concatenation layer adds a small overhead we * could try to figure out if the chips in cse0 and cse1 are * identical and reprobe the whole cse0+cse1 window. But since * flash chips are slow, the overhead is relatively small. * So we use the MTD concatenation layer instead of further * complicating the probing procedure. */ mtd_cse = mtd_concat_create(mtds, ARRAY_SIZE(mtds), "cse0+cse1"); if (!mtd_cse) { printk(KERN_ERR "%s and %s: Concatenation failed!\n", map_cse0.name, map_cse1.name); /* The best we can do now is to only use what we found * at cse0. */ mtd_cse = mtd_cse0; map_destroy(mtd_cse1); } } else { mtd_cse = mtd_cse0? mtd_cse0 : mtd_cse1; } return mtd_cse; } /* * Probe the flash chip(s) and, if it succeeds, read the partition-table * and register the partitions with MTD. */ static int __init init_axis_flash(void) { struct mtd_info *mymtd; int err = 0; int pidx = 0; struct partitiontable_head *ptable_head = NULL; struct partitiontable_entry *ptable; int use_default_ptable = 1; /* Until proven otherwise. */ const char pmsg[] = " /dev/flash%d at 0x%08x, size 0x%08x\n"; if (!(mymtd = flash_probe())) { /* There's no reason to use this module if no flash chip can * be identified. Make sure that's understood. */ printk(KERN_INFO "axisflashmap: Found no flash chip.\n"); } else { printk(KERN_INFO "%s: 0x%08x bytes of flash memory.\n", mymtd->name, mymtd->size); axisflash_mtd = mymtd; } if (mymtd) { mymtd->owner = THIS_MODULE; ptable_head = (struct partitiontable_head *)(FLASH_CACHED_ADDR + CONFIG_ETRAX_PTABLE_SECTOR + PARTITION_TABLE_OFFSET); } pidx++; /* First partition is always set to the default. 
*/ if (ptable_head && (ptable_head->magic == PARTITION_TABLE_MAGIC) && (ptable_head->size < (MAX_PARTITIONS * sizeof(struct partitiontable_entry) + PARTITIONTABLE_END_MARKER_SIZE)) && (*(unsigned long*)((void*)ptable_head + sizeof(*ptable_head) + ptable_head->size - PARTITIONTABLE_END_MARKER_SIZE) == PARTITIONTABLE_END_MARKER)) { /* Looks like a start, sane length and end of a * partition table, lets check csum etc. */ int ptable_ok = 0; struct partitiontable_entry *max_addr = (struct partitiontable_entry *) ((unsigned long)ptable_head + sizeof(*ptable_head) + ptable_head->size); unsigned long offset = CONFIG_ETRAX_PTABLE_SECTOR; unsigned char *p; unsigned long csum = 0; ptable = (struct partitiontable_entry *) ((unsigned long)ptable_head + sizeof(*ptable_head)); /* Lets be PARANOID, and check the checksum. */ p = (unsigned char*) ptable; while (p <= (unsigned char*)max_addr) { csum += *p++; csum += *p++; csum += *p++; csum += *p++; } ptable_ok = (csum == ptable_head->checksum); /* Read the entries and use/show the info. */ printk(KERN_INFO " Found a%s partition table at 0x%p-0x%p.\n", (ptable_ok ? " valid" : "n invalid"), ptable_head, max_addr); /* We have found a working bootblock. Now read the * partition table. Scan the table. It ends when * there is 0xffffffff, that is, empty flash. */ while (ptable_ok && ptable->offset != 0xffffffff && ptable < max_addr && pidx < MAX_PARTITIONS) { axis_partitions[pidx].offset = offset + ptable->offset; axis_partitions[pidx].size = ptable->size; printk(pmsg, pidx, axis_partitions[pidx].offset, axis_partitions[pidx].size); pidx++; ptable++; } use_default_ptable = !ptable_ok; } if (romfs_in_flash) { /* Add an overlapping device for the root partition (romfs). 
*/ axis_partitions[pidx].name = "romfs"; axis_partitions[pidx].size = romfs_length; axis_partitions[pidx].offset = romfs_start - FLASH_CACHED_ADDR; axis_partitions[pidx].mask_flags |= MTD_WRITEABLE; printk(KERN_INFO " Adding readonly flash partition for romfs image:\n"); printk(pmsg, pidx, axis_partitions[pidx].offset, axis_partitions[pidx].size); pidx++; } #ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE if (mymtd) { main_partition.size = mymtd->size; err = mtd_device_register(mymtd, &main_partition, 1); if (err) panic("axisflashmap: Could not initialize " "partition for whole main mtd device!\n"); } #endif if (mymtd) { if (use_default_ptable) { printk(KERN_INFO " Using default partition table.\n"); err = mtd_device_register(mymtd, axis_default_partitions, NUM_DEFAULT_PARTITIONS); } else { err = mtd_device_register(mymtd, axis_partitions, pidx); } if (err) panic("axisflashmap could not add MTD partitions!\n"); } if (!romfs_in_flash) { /* Create an RAM device for the root partition (romfs). */ #if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0) /* No use trying to boot this kernel from RAM. Panic! */ printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM " "device due to kernel (mis)configuration!\n"); panic("This kernel cannot boot from RAM!\n"); #else struct mtd_info *mtd_ram; mtd_ram = kmalloc(sizeof(struct mtd_info), GFP_KERNEL); if (!mtd_ram) panic("axisflashmap couldn't allocate memory for " "mtd_info!\n"); printk(KERN_INFO " Adding RAM partition for romfs image:\n"); printk(pmsg, pidx, (unsigned)romfs_start, (unsigned)romfs_length); err = mtdram_init_device(mtd_ram, (void *)romfs_start, romfs_length, "romfs"); if (err) panic("axisflashmap could not initialize MTD RAM " "device!\n"); #endif } return err; } /* This adds the above to the kernels init-call chain. */ module_init(init_axis_flash); EXPORT_SYMBOL(axisflash_mtd);
gpl-2.0
dtsd/zte_blade_s6_lollipop_kernel
arch/cris/arch-v10/drivers/axisflashmap.c
11567
11688
/* * Physical mapping layer for MTD using the Axis partitiontable format * * Copyright (c) 2001, 2002 Axis Communications AB * * This file is under the GPL. * * First partition is always sector 0 regardless of if we find a partitiontable * or not. In the start of the next sector, there can be a partitiontable that * tells us what other partitions to define. If there isn't, we use a default * partition split defined below. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mtd/concat.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> #include <linux/mtd/mtdram.h> #include <linux/mtd/partitions.h> #include <asm/axisflashmap.h> #include <asm/mmu.h> #include <arch/sv_addr_ag.h> #ifdef CONFIG_CRIS_LOW_MAP #define FLASH_UNCACHED_ADDR KSEG_8 #define FLASH_CACHED_ADDR KSEG_5 #else #define FLASH_UNCACHED_ADDR KSEG_E #define FLASH_CACHED_ADDR KSEG_F #endif #if CONFIG_ETRAX_FLASH_BUSWIDTH==1 #define flash_data __u8 #elif CONFIG_ETRAX_FLASH_BUSWIDTH==2 #define flash_data __u16 #elif CONFIG_ETRAX_FLASH_BUSWIDTH==4 #define flash_data __u32 #endif /* From head.S */ extern unsigned long romfs_start, romfs_length, romfs_in_flash; /* The master mtd for the entire flash. */ struct mtd_info* axisflash_mtd = NULL; /* Map driver functions. */ static map_word flash_read(struct map_info *map, unsigned long ofs) { map_word tmp; tmp.x[0] = *(flash_data *)(map->map_priv_1 + ofs); return tmp; } static void flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { memcpy(to, (void *)(map->map_priv_1 + from), len); } static void flash_write(struct map_info *map, map_word d, unsigned long adr) { *(flash_data *)(map->map_priv_1 + adr) = (flash_data)d.x[0]; } /* * The map for chip select e0. * * We run into tricky coherence situations if we mix cached with uncached * accesses to we only use the uncached version here. 
* * The size field is the total size where the flash chips may be mapped on the * chip select. MTD probes should find all devices there and it does not matter * if there are unmapped gaps or aliases (mirrors of flash devices). The MTD * probes will ignore them. * * The start address in map_priv_1 is in virtual memory so we cannot use * MEM_CSE0_START but must rely on that FLASH_UNCACHED_ADDR is the start * address of cse0. */ static struct map_info map_cse0 = { .name = "cse0", .size = MEM_CSE0_SIZE, .bankwidth = CONFIG_ETRAX_FLASH_BUSWIDTH, .read = flash_read, .copy_from = flash_copy_from, .write = flash_write, .map_priv_1 = FLASH_UNCACHED_ADDR }; /* * The map for chip select e1. * * If there was a gap between cse0 and cse1, map_priv_1 would get the wrong * address, but there isn't. */ static struct map_info map_cse1 = { .name = "cse1", .size = MEM_CSE1_SIZE, .bankwidth = CONFIG_ETRAX_FLASH_BUSWIDTH, .read = flash_read, .copy_from = flash_copy_from, .write = flash_write, .map_priv_1 = FLASH_UNCACHED_ADDR + MEM_CSE0_SIZE }; /* If no partition-table was found, we use this default-set. */ #define MAX_PARTITIONS 7 #define NUM_DEFAULT_PARTITIONS 3 /* * Default flash size is 2MB. CONFIG_ETRAX_PTABLE_SECTOR is most likely the * size of one flash block and "filesystem"-partition needs 5 blocks to be able * to use JFFS. */ static struct mtd_partition axis_default_partitions[NUM_DEFAULT_PARTITIONS] = { { .name = "boot firmware", .size = CONFIG_ETRAX_PTABLE_SECTOR, .offset = 0 }, { .name = "kernel", .size = 0x200000 - (6 * CONFIG_ETRAX_PTABLE_SECTOR), .offset = CONFIG_ETRAX_PTABLE_SECTOR }, { .name = "filesystem", .size = 5 * CONFIG_ETRAX_PTABLE_SECTOR, .offset = 0x200000 - (5 * CONFIG_ETRAX_PTABLE_SECTOR) } }; /* Initialize the ones normally used. 
*/ static struct mtd_partition axis_partitions[MAX_PARTITIONS] = { { .name = "part0", .size = CONFIG_ETRAX_PTABLE_SECTOR, .offset = 0 }, { .name = "part1", .size = 0, .offset = 0 }, { .name = "part2", .size = 0, .offset = 0 }, { .name = "part3", .size = 0, .offset = 0 }, { .name = "part4", .size = 0, .offset = 0 }, { .name = "part5", .size = 0, .offset = 0 }, { .name = "part6", .size = 0, .offset = 0 }, }; #ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE /* Main flash device */ static struct mtd_partition main_partition = { .name = "main", .size = 0, .offset = 0 }; #endif /* * Probe a chip select for AMD-compatible (JEDEC) or CFI-compatible flash * chips in that order (because the amd_flash-driver is faster). */ static struct mtd_info *probe_cs(struct map_info *map_cs) { struct mtd_info *mtd_cs = NULL; printk(KERN_INFO "%s: Probing a 0x%08lx bytes large window at 0x%08lx.\n", map_cs->name, map_cs->size, map_cs->map_priv_1); #ifdef CONFIG_MTD_CFI mtd_cs = do_map_probe("cfi_probe", map_cs); #endif #ifdef CONFIG_MTD_JEDECPROBE if (!mtd_cs) mtd_cs = do_map_probe("jedec_probe", map_cs); #endif return mtd_cs; } /* * Probe each chip select individually for flash chips. If there are chips on * both cse0 and cse1, the mtd_info structs will be concatenated to one struct * so that MTD partitions can cross chip boundries. * * The only known restriction to how you can mount your chips is that each * chip select must hold similar flash chips. But you need external hardware * to do that anyway and you can put totally different chips on cse0 and cse1 * so it isn't really much of a restriction. */ static struct mtd_info *flash_probe(void) { struct mtd_info *mtd_cse0; struct mtd_info *mtd_cse1; struct mtd_info *mtd_cse; mtd_cse0 = probe_cs(&map_cse0); mtd_cse1 = probe_cs(&map_cse1); if (!mtd_cse0 && !mtd_cse1) { /* No chip found. 
*/ return NULL; } if (mtd_cse0 && mtd_cse1) { struct mtd_info *mtds[] = { mtd_cse0, mtd_cse1 }; /* Since the concatenation layer adds a small overhead we * could try to figure out if the chips in cse0 and cse1 are * identical and reprobe the whole cse0+cse1 window. But since * flash chips are slow, the overhead is relatively small. * So we use the MTD concatenation layer instead of further * complicating the probing procedure. */ mtd_cse = mtd_concat_create(mtds, ARRAY_SIZE(mtds), "cse0+cse1"); if (!mtd_cse) { printk(KERN_ERR "%s and %s: Concatenation failed!\n", map_cse0.name, map_cse1.name); /* The best we can do now is to only use what we found * at cse0. */ mtd_cse = mtd_cse0; map_destroy(mtd_cse1); } } else { mtd_cse = mtd_cse0? mtd_cse0 : mtd_cse1; } return mtd_cse; } /* * Probe the flash chip(s) and, if it succeeds, read the partition-table * and register the partitions with MTD. */ static int __init init_axis_flash(void) { struct mtd_info *mymtd; int err = 0; int pidx = 0; struct partitiontable_head *ptable_head = NULL; struct partitiontable_entry *ptable; int use_default_ptable = 1; /* Until proven otherwise. */ const char pmsg[] = " /dev/flash%d at 0x%08x, size 0x%08x\n"; if (!(mymtd = flash_probe())) { /* There's no reason to use this module if no flash chip can * be identified. Make sure that's understood. */ printk(KERN_INFO "axisflashmap: Found no flash chip.\n"); } else { printk(KERN_INFO "%s: 0x%08x bytes of flash memory.\n", mymtd->name, mymtd->size); axisflash_mtd = mymtd; } if (mymtd) { mymtd->owner = THIS_MODULE; ptable_head = (struct partitiontable_head *)(FLASH_CACHED_ADDR + CONFIG_ETRAX_PTABLE_SECTOR + PARTITION_TABLE_OFFSET); } pidx++; /* First partition is always set to the default. 
*/ if (ptable_head && (ptable_head->magic == PARTITION_TABLE_MAGIC) && (ptable_head->size < (MAX_PARTITIONS * sizeof(struct partitiontable_entry) + PARTITIONTABLE_END_MARKER_SIZE)) && (*(unsigned long*)((void*)ptable_head + sizeof(*ptable_head) + ptable_head->size - PARTITIONTABLE_END_MARKER_SIZE) == PARTITIONTABLE_END_MARKER)) { /* Looks like a start, sane length and end of a * partition table, lets check csum etc. */ int ptable_ok = 0; struct partitiontable_entry *max_addr = (struct partitiontable_entry *) ((unsigned long)ptable_head + sizeof(*ptable_head) + ptable_head->size); unsigned long offset = CONFIG_ETRAX_PTABLE_SECTOR; unsigned char *p; unsigned long csum = 0; ptable = (struct partitiontable_entry *) ((unsigned long)ptable_head + sizeof(*ptable_head)); /* Lets be PARANOID, and check the checksum. */ p = (unsigned char*) ptable; while (p <= (unsigned char*)max_addr) { csum += *p++; csum += *p++; csum += *p++; csum += *p++; } ptable_ok = (csum == ptable_head->checksum); /* Read the entries and use/show the info. */ printk(KERN_INFO " Found a%s partition table at 0x%p-0x%p.\n", (ptable_ok ? " valid" : "n invalid"), ptable_head, max_addr); /* We have found a working bootblock. Now read the * partition table. Scan the table. It ends when * there is 0xffffffff, that is, empty flash. */ while (ptable_ok && ptable->offset != 0xffffffff && ptable < max_addr && pidx < MAX_PARTITIONS) { axis_partitions[pidx].offset = offset + ptable->offset; axis_partitions[pidx].size = ptable->size; printk(pmsg, pidx, axis_partitions[pidx].offset, axis_partitions[pidx].size); pidx++; ptable++; } use_default_ptable = !ptable_ok; } if (romfs_in_flash) { /* Add an overlapping device for the root partition (romfs). 
*/ axis_partitions[pidx].name = "romfs"; axis_partitions[pidx].size = romfs_length; axis_partitions[pidx].offset = romfs_start - FLASH_CACHED_ADDR; axis_partitions[pidx].mask_flags |= MTD_WRITEABLE; printk(KERN_INFO " Adding readonly flash partition for romfs image:\n"); printk(pmsg, pidx, axis_partitions[pidx].offset, axis_partitions[pidx].size); pidx++; } #ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE if (mymtd) { main_partition.size = mymtd->size; err = mtd_device_register(mymtd, &main_partition, 1); if (err) panic("axisflashmap: Could not initialize " "partition for whole main mtd device!\n"); } #endif if (mymtd) { if (use_default_ptable) { printk(KERN_INFO " Using default partition table.\n"); err = mtd_device_register(mymtd, axis_default_partitions, NUM_DEFAULT_PARTITIONS); } else { err = mtd_device_register(mymtd, axis_partitions, pidx); } if (err) panic("axisflashmap could not add MTD partitions!\n"); } if (!romfs_in_flash) { /* Create an RAM device for the root partition (romfs). */ #if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0) /* No use trying to boot this kernel from RAM. Panic! */ printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM " "device due to kernel (mis)configuration!\n"); panic("This kernel cannot boot from RAM!\n"); #else struct mtd_info *mtd_ram; mtd_ram = kmalloc(sizeof(struct mtd_info), GFP_KERNEL); if (!mtd_ram) panic("axisflashmap couldn't allocate memory for " "mtd_info!\n"); printk(KERN_INFO " Adding RAM partition for romfs image:\n"); printk(pmsg, pidx, (unsigned)romfs_start, (unsigned)romfs_length); err = mtdram_init_device(mtd_ram, (void *)romfs_start, romfs_length, "romfs"); if (err) panic("axisflashmap could not initialize MTD RAM " "device!\n"); #endif } return err; } /* This adds the above to the kernels init-call chain. */ module_init(init_axis_flash); EXPORT_SYMBOL(axisflash_mtd);
gpl-2.0
bjzhang/xen
tools/misc/xen-hvmctx.c
48
17871
/* * xen-hvmctx.c * * Print out the contents of a HVM save record in a human-readable way. * * Tim Deegan <Tim.Deegan@citrix.com> * Copyright (c) 2008 Citrix Systems, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <stdint.h> #include <unistd.h> #include <string.h> #include <errno.h> #include <limits.h> #include <sys/types.h> #include <sys/stat.h> #include <arpa/inet.h> #define BITS_PER_LONG __WORDSIZE #define BITS_TO_LONGS(bits) \ (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] #include <xenctrl.h> #include <xen/xen.h> #include <xen/domctl.h> #include <xen/hvm/save.h> static uint8_t *buf = NULL; static uint32_t len; static uint32_t off; #define READ(_x) do { \ if ( len - off < sizeof (_x) ) \ { \ fprintf(stderr, "Error: need another %u bytes, only %u available", \ (unsigned int)sizeof(_x), len - off); \ exit(1); \ } \ memcpy(&(_x), buf + off, sizeof (_x)); \ off += sizeof (_x); \ } while (0) static void dump_header(void) { HVM_SAVE_TYPE(HEADER) h; READ(h); printf(" Header: magic %#lx, version %lu\n", (unsigned long) h.magic, (unsigned long) h.version); printf(" Xen changeset %llx\n", (unsigned long long) h.changeset); printf(" CPUID[0][%%eax] 0x%.8lx\n", (unsigned long) h.cpuid); printf(" gtsc_khz %lu\n", (unsigned long) h.gtsc_khz); } struct fpu_mm { uint64_t lo; uint16_t hi; uint16_t pad[3]; } __attribute__((packed)); struct fpu_xmm { uint64_t lo; uint64_t hi; }; struct fpu_regs { uint16_t fcw; uint16_t fsw; uint8_t ftw; uint8_t res0; uint16_t fop; uint64_t fpuip; uint64_t fpudp; uint32_t mxcsr; uint32_t mxcsr_mask; struct fpu_mm mm[8]; struct fpu_xmm xmm[16]; uint64_t res1[12]; } __attribute__((packed)); static void dump_fpu(void *p) { struct fpu_regs *r = p; int i; printf(" FPU: fcw 0x%4.4x fsw 0x%4.4x\n" " ftw 0x%2.2x (0x%2.2x) fop 0x%4.4x\n" " fpuip 0x%16.16"PRIx64" fpudp 0x%16.16"PRIx64"\n" " mxcsr 0x%8.8lx mask 0x%8.8lx\n", (unsigned)r->fcw, (unsigned)r->fsw, (unsigned)r->ftw, (unsigned)r->res0, (unsigned)r->fop, r->fpuip, r->fpudp, (unsigned long)r->mxcsr, (unsigned long)r->mxcsr_mask); for ( i = 0 ; i < 8 
; i++ ) printf(" mm%i 0x%4.4x%16.16"PRIx64" (0x%4.4x%4.4x%4.4x)\n", i, r->mm[i].hi, r->mm[i].lo, r->mm[i].pad[2], r->mm[i].pad[1], r->mm[i].pad[0]); for ( i = 0 ; i < 16 ; i++ ) printf(" xmm%2.2i 0x%16.16"PRIx64"%16.16"PRIx64"\n", i, r->xmm[i].hi, r->xmm[i].lo); for ( i = 0 ; i < 6 ; i++ ) printf(" (0x%16.16"PRIx64"%16.16"PRIx64")\n", r->res1[2*i+1], r->res1[2*i]); } static void dump_cpu(void) { HVM_SAVE_TYPE(CPU) c; READ(c); printf(" CPU: rax 0x%16.16llx rbx 0x%16.16llx\n" " rcx 0x%16.16llx rdx 0x%16.16llx\n" " rbp 0x%16.16llx rsi 0x%16.16llx\n" " rdi 0x%16.16llx rsp 0x%16.16llx\n" " r8 0x%16.16llx r9 0x%16.16llx\n" " r10 0x%16.16llx r11 0x%16.16llx\n" " r12 0x%16.16llx r13 0x%16.16llx\n" " r14 0x%16.16llx r15 0x%16.16llx\n" " rip 0x%16.16llx rflags 0x%16.16llx\n" " cr0 0x%16.16llx cr2 0x%16.16llx\n" " cr3 0x%16.16llx cr4 0x%16.16llx\n" " dr0 0x%16.16llx dr1 0x%16.16llx\n" " dr2 0x%16.16llx dr3 0x%16.16llx\n" " dr6 0x%16.16llx dr7 0x%16.16llx\n" " cs 0x%8.8x (0x%16.16llx + 0x%8.8x / 0x%5.5x)\n" " ds 0x%8.8x (0x%16.16llx + 0x%8.8x / 0x%5.5x)\n" " es 0x%8.8x (0x%16.16llx + 0x%8.8x / 0x%5.5x)\n" " fs 0x%8.8x (0x%16.16llx + 0x%8.8x / 0x%5.5x)\n" " gs 0x%8.8x (0x%16.16llx + 0x%8.8x / 0x%5.5x)\n" " ss 0x%8.8x (0x%16.16llx + 0x%8.8x / 0x%5.5x)\n" " tr 0x%8.8x (0x%16.16llx + 0x%8.8x / 0x%5.5x)\n" " ldtr 0x%8.8x (0x%16.16llx + 0x%8.8x / 0x%5.5x)\n" " idtr (0x%16.16llx + 0x%8.8x)\n" " gdtr (0x%16.16llx + 0x%8.8x)\n" " sysenter cs 0x%8.8llx eip 0x%16.16llx esp 0x%16.16llx\n" " shadow gs 0x%16.16llx\n" " MSR flags 0x%16.16llx lstar 0x%16.16llx\n" " star 0x%16.16llx cstar 0x%16.16llx\n" " sfmask 0x%16.16llx efer 0x%16.16llx\n" " tsc 0x%16.16llx\n" " event 0x%8.8lx error 0x%8.8lx\n", (unsigned long long) c.rax, (unsigned long long) c.rbx, (unsigned long long) c.rcx, (unsigned long long) c.rdx, (unsigned long long) c.rbp, (unsigned long long) c.rsi, (unsigned long long) c.rdi, (unsigned long long) c.rsp, (unsigned long long) c.r8, (unsigned long long) c.r9, (unsigned long long) 
c.r10, (unsigned long long) c.r11, (unsigned long long) c.r12, (unsigned long long) c.r13, (unsigned long long) c.r14, (unsigned long long) c.r15, (unsigned long long) c.rip, (unsigned long long) c.rflags, (unsigned long long) c.cr0, (unsigned long long) c.cr2, (unsigned long long) c.cr3, (unsigned long long) c.cr4, (unsigned long long) c.dr0, (unsigned long long) c.dr1, (unsigned long long) c.dr2, (unsigned long long) c.dr3, (unsigned long long) c.dr6, (unsigned long long) c.dr7, c.cs_sel, (unsigned long long) c.cs_base, c.cs_limit, c.cs_arbytes, c.ds_sel, (unsigned long long) c.ds_base, c.ds_limit, c.ds_arbytes, c.es_sel, (unsigned long long) c.es_base, c.es_limit, c.es_arbytes, c.fs_sel, (unsigned long long) c.fs_base, c.fs_limit, c.fs_arbytes, c.gs_sel, (unsigned long long) c.gs_base, c.gs_limit, c.gs_arbytes, c.ss_sel, (unsigned long long) c.ss_base, c.ss_limit, c.ss_arbytes, c.tr_sel, (unsigned long long) c.tr_base, c.tr_limit, c.tr_arbytes, c.ldtr_sel, (unsigned long long) c.ldtr_base, c.ldtr_limit, c.ldtr_arbytes, (unsigned long long) c.idtr_base, c.idtr_limit, (unsigned long long) c.gdtr_base, c.gdtr_limit, (unsigned long long) c.sysenter_cs, (unsigned long long) c.sysenter_eip, (unsigned long long) c.sysenter_esp, (unsigned long long) c.shadow_gs, (unsigned long long) c.msr_flags, (unsigned long long) c.msr_lstar, (unsigned long long) c.msr_star, (unsigned long long) c.msr_cstar, (unsigned long long) c.msr_syscall_mask, (unsigned long long) c.msr_efer, (unsigned long long) c.tsc, (unsigned long) c.pending_event, (unsigned long) c.error_code); dump_fpu(&c.fpu_regs); } static void dump_pic(void) { HVM_SAVE_TYPE(PIC) p; READ(p); printf(" PIC: IRQ base %#x, irr %#x, imr %#x, isr %#x\n", p.irq_base, p.irr, p.imr, p.isr); printf(" init_state %u, priority_add %u, readsel_isr %u, poll %u\n", p.init_state, p.priority_add, p.readsel_isr, p.poll); printf(" auto_eoi %u, rotate_on_auto_eoi %u\n", p.auto_eoi, p.rotate_on_auto_eoi); printf(" special_fully_nested_mode 
%u, special_mask_mode %u\n", p.special_fully_nested_mode, p.special_mask_mode); printf(" is_master %u, elcr %#x, int_output %#x\n", p.is_master, p.elcr, p.int_output); } static void dump_ioapic(void) { int i; HVM_SAVE_TYPE(IOAPIC) p; READ(p); printf(" IOAPIC: base_address %#llx, ioregsel %#x id %#x\n", (unsigned long long) p.base_address, p.ioregsel, p.id); for ( i = 0; i < VIOAPIC_NUM_PINS; i++ ) { printf(" pin %.2i: 0x%.16llx\n", i, (unsigned long long) p.redirtbl[i].bits); } } static void dump_lapic(void) { HVM_SAVE_TYPE(LAPIC) p; READ(p); printf(" LAPIC: base_msr %#llx, disabled %#x, timer_divisor %#x\n", (unsigned long long) p.apic_base_msr, p.disabled, p.timer_divisor); } static void dump_lapic_regs(void) { unsigned int i; HVM_SAVE_TYPE(LAPIC_REGS) r; READ(r); printf(" LAPIC registers:\n"); for ( i = 0 ; i < 0x400 ; i += 32 ) { printf(" 0x%4.4x: 0x%16.16llx 0x%4.4x: 0x%16.16llx\n", i, *(unsigned long long *)&r.data[i], i + 16, *(unsigned long long *)&r.data[i + 16]); } } static void dump_pci_irq(void) { HVM_SAVE_TYPE(PCI_IRQ) i; READ(i); printf(" PCI IRQs: 0x%16.16llx%16.16llx\n", (unsigned long long) i.pad[0], (unsigned long long) i.pad[1]); } static void dump_isa_irq(void) { HVM_SAVE_TYPE(ISA_IRQ) i; READ(i); printf(" ISA IRQs: 0x%4.4llx\n", (unsigned long long) i.pad[0]); } static void dump_pci_link(void) { HVM_SAVE_TYPE(PCI_LINK) l; READ(l); printf(" PCI LINK: %u %u %u %u\n", l.route[0], l.route[1], l.route[2], l.route[3]); } static void dump_pit(void) { int i; HVM_SAVE_TYPE(PIT) p; READ(p); printf(" PIT: speaker %s\n", p.speaker_data_on ? 
"on" : "off"); for ( i = 0 ; i < 2 ; i++ ) { printf(" ch %1i: count %#x, latched_count %#x, count_latched %u\n", i, p.channels[i].count, p.channels[i].latched_count, p.channels[i].count_latched); printf(" status %#x, status_latched %#x\n", p.channels[i].status, p.channels[i].status_latched); printf(" rd_state %#x, wr_state %#x, wr_latch %#x, rw_mode %#x\n", p.channels[i].read_state, p.channels[i].write_state, p.channels[i].write_latch, p.channels[i].rw_mode); printf(" mode %#x, bcd %#x, gate %#x\n", p.channels[i].mode, p.channels[i].bcd, p.channels[i].gate); } } static void dump_rtc(void) { HVM_SAVE_TYPE(RTC) r; READ(r); printf(" RTC: regs 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", r.cmos_data[0], r.cmos_data[1], r.cmos_data[2], r.cmos_data[3], r.cmos_data[4], r.cmos_data[5], r.cmos_data[6], r.cmos_data[7]); printf(" 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x, index 0x%2.2x\n", r.cmos_data[8], r.cmos_data[9], r.cmos_data[10], r.cmos_data[11], r.cmos_data[12], r.cmos_data[13], r.cmos_index); } static void dump_hpet(void) { int i; HVM_SAVE_TYPE(HPET) h; READ(h); printf(" HPET: capability %#llx config %#llx\n", (unsigned long long) h.capability, (unsigned long long) h.config); printf(" isr %#llx counter %#llx\n", (unsigned long long) h.isr, (unsigned long long) h.mc64); for ( i = 0; i < HPET_TIMER_NUM; i++ ) { printf(" timer%i config %#llx cmp %#llx\n", i, (unsigned long long) h.timers[i].config, (unsigned long long) h.timers[i].cmp); printf(" timer%i period %#llx fsb %#llx\n", i, (unsigned long long) h.period[i], (unsigned long long) h.timers[i].fsb); } } static void dump_pmtimer(void) { HVM_SAVE_TYPE(PMTIMER) p; READ(p); printf(" ACPI PM: TMR_VAL 0x%x, PM1a_STS 0x%x, PM1a_EN 0x%x\n", p.tmr_val, (unsigned) p.pm1a_sts, (unsigned) p.pm1a_en); } static void dump_mtrr(void) { HVM_SAVE_TYPE(MTRR) p; int i; READ(p); printf(" MTRR: PAT 0x%llx, cap 0x%llx, default 0x%llx\n", (unsigned long long) p.msr_pat_cr, (unsigned long long) p.msr_mtrr_cap, 
(unsigned long long) p.msr_mtrr_def_type); for ( i = 0 ; i < MTRR_VCNT ; i++ ) printf(" var %i 0x%16.16llx 0x%16.16llx\n", i, (unsigned long long) p.msr_mtrr_var[2 * i], (unsigned long long) p.msr_mtrr_var[2 * i + 1]); for ( i = 0 ; i < NUM_FIXED_MSR ; i++ ) printf(" fixed %.2i 0x%16.16llx\n", i, (unsigned long long) p.msr_mtrr_fixed[i]); } static void dump_viridian_domain(void) { HVM_SAVE_TYPE(VIRIDIAN_DOMAIN) p; READ(p); printf(" VIRIDIAN_DOMAIN: hypercall gpa 0x%llx, guest_os_id 0x%llx\n", (unsigned long long) p.hypercall_gpa, (unsigned long long) p.guest_os_id); } static void dump_viridian_vcpu(void) { HVM_SAVE_TYPE(VIRIDIAN_VCPU) p; READ(p); printf(" VIRIDIAN_VCPU: apic_assist 0x%llx\n", (unsigned long long) p.apic_assist); } static void dump_vmce_vcpu(void) { HVM_SAVE_TYPE(VMCE_VCPU) p; READ(p); printf(" VMCE_VCPU: caps %" PRIx64 "\n", p.caps); printf(" VMCE_VCPU: bank0 mci_ctl2 %" PRIx64 "\n", p.mci_ctl2_bank0); printf(" VMCE_VCPU: bank1 mci_ctl2 %" PRIx64 "\n", p.mci_ctl2_bank1); } static void dump_tsc_adjust(void) { HVM_SAVE_TYPE(TSC_ADJUST) p; READ(p); printf(" TSC_ADJUST: tsc_adjust %" PRIx64 "\n", p.tsc_adjust); } int main(int argc, char **argv) { int entry, domid; xc_interface *xch; struct hvm_save_descriptor desc; if ( argc != 2 || !argv[1] || (domid = atoi(argv[1])) < 0 ) { fprintf(stderr, "usage: %s <domid>\n", argv[0]); exit(1); } xch = xc_interface_open(0,0,0); if ( !xch ) { fprintf(stderr, "Error: can't open libxc handle\n"); exit(1); } len = xc_domain_hvm_getcontext(xch, domid, 0, 0); if ( len == (uint32_t) -1 ) { fprintf(stderr, "Error: can't get record length for dom %i\n", domid); exit(1); } buf = malloc(len); if ( buf == NULL ) { fprintf(stderr, "Error: can't allocate %u bytes\n", len); exit(1); } len = xc_domain_hvm_getcontext(xch, domid, buf, len); if ( len == (uint32_t) -1 ) { fprintf(stderr, "Error: can't get HVM record for dom %i\n", domid); exit(1); } off = 0; /* Say hello */ printf("HVM save record for domain %i\n", domid); entry = 0; 
do { READ(desc); printf("Entry %i: type %u instance %u, length %u\n", entry++, (unsigned) desc.typecode, (unsigned) desc.instance, (unsigned) desc.length); switch (desc.typecode) { case HVM_SAVE_CODE(HEADER): dump_header(); break; case HVM_SAVE_CODE(CPU): dump_cpu(); break; case HVM_SAVE_CODE(PIC): dump_pic(); break; case HVM_SAVE_CODE(IOAPIC): dump_ioapic(); break; case HVM_SAVE_CODE(LAPIC): dump_lapic(); break; case HVM_SAVE_CODE(LAPIC_REGS): dump_lapic_regs(); break; case HVM_SAVE_CODE(PCI_IRQ): dump_pci_irq(); break; case HVM_SAVE_CODE(ISA_IRQ): dump_isa_irq(); break; case HVM_SAVE_CODE(PCI_LINK): dump_pci_link(); break; case HVM_SAVE_CODE(PIT): dump_pit(); break; case HVM_SAVE_CODE(RTC): dump_rtc(); break; case HVM_SAVE_CODE(HPET): dump_hpet(); break; case HVM_SAVE_CODE(PMTIMER): dump_pmtimer(); break; case HVM_SAVE_CODE(MTRR): dump_mtrr(); break; case HVM_SAVE_CODE(VIRIDIAN_DOMAIN): dump_viridian_domain(); break; case HVM_SAVE_CODE(VIRIDIAN_VCPU): dump_viridian_vcpu(); break; case HVM_SAVE_CODE(VMCE_VCPU): dump_vmce_vcpu(); break; case HVM_SAVE_CODE(TSC_ADJUST): dump_tsc_adjust(); break; case HVM_SAVE_CODE(END): break; default: printf(" ** Don't understand type %u: skipping\n", (unsigned) desc.typecode); off += (desc.length); } } while ( desc.typecode != HVM_SAVE_CODE(END) && off < len ); return 0; }
gpl-2.0
Shimejing/u-boot-1.1.6-denx_jz2440v2
board/esd/ar405/ar405.c
48
10582
/*
 * (C) Copyright 2001-2004
 * Stefan Roese, esd gmbh germany, stefan.roese@esd-electronics.com
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include "ar405.h"
#include <asm/processor.h>
#include <command.h>

DECLARE_GLOBAL_DATA_PTR;

/*cmd_boot.c*/
extern int do_reset (cmd_tbl_t *cmdtp, int flag, int argc, char *argv[]);
extern void lxt971_no_sleep(void);

/* ------------------------------------------------------------------------- */

#if 0
#define FPGA_DEBUG
#endif

/* fpga configuration data - generated by bin2cc */
const unsigned char fpgadata[] = {
#include "fpgadata.c"
};

const unsigned char fpgadata_xl30[] = {
#include "fpgadata_xl30.c"
};

/*
 * include common fpga code (for esd boards)
 */
#include "../common/fpga.c"

/*
 * Early board init: boot the onboard FPGA (the 40er image first, then
 * the xl30er fallback), then program the 405GP interrupt controller and
 * unmask the interrupts in the FPGA. If neither FPGA image boots, the
 * embedded image info strings are printed and the board is reset after
 * a 20 second countdown.
 */
int board_early_init_f (void)
{
	int ret;
	int pos;
	int n;

#ifdef FPGA_DEBUG
	/* set up serial port with default baudrate */
	(void) get_clocks ();
	gd->baudrate = CONFIG_BAUDRATE;
	serial_init ();
	console_init_f ();
#endif

	/*
	 * Boot onboard FPGA; gd->board_type records which image succeeded.
	 */
	gd->board_type = 40;
	ret = fpga_boot ((unsigned char *) fpgadata, sizeof (fpgadata));
	if (ret != 0) {
		/* 40er image failed - try the xl30er image */
		gd->board_type = 30;
		ret = fpga_boot ((unsigned char *) fpgadata_xl30,
				 sizeof (fpgadata_xl30));
	}

	if (ret != 0) {
		/* booting FPGA failed */
#ifndef FPGA_DEBUG
		/* set up serial port with default baudrate */
		(void) get_clocks ();
		gd->baudrate = CONFIG_BAUDRATE;
		serial_init ();
		console_init_f ();
#endif
		printf ("\nFPGA: Booting failed ");
		switch (ret) {
		case ERROR_FPGA_PRG_INIT_LOW:
			printf ("(Timeout: INIT not low after asserting PROGRAM*)\n ");
			break;
		case ERROR_FPGA_PRG_INIT_HIGH:
			printf ("(Timeout: INIT not high after deasserting PROGRAM*)\n ");
			break;
		case ERROR_FPGA_PRG_DONE:
			printf ("(Timeout: DONE not high after programming FPGA)\n ");
			break;
		}

		/* display infos on fpgaimage */
		pos = 15;
		for (n = 0; n < 4; n++) {
			printf ("FPGA: %s\n", &(fpgadata[pos + 1]));
			pos += fpgadata[pos] + 3;
		}
		putc ('\n');

		/* delayed reboot */
		for (n = 20; n > 0; n--) {
			printf ("Rebooting in %2d seconds \r", n);
			for (pos = 0; pos < 1000; pos++)
				udelay (1000);
		}
		putc ('\n');
		do_reset (NULL, 0, 0, NULL);
	}

	/*
	 * IRQ 0-15  405GP internally generated; active high; level sensitive
	 * IRQ 16    405GP internally generated; active low; level sensitive
	 * IRQ 17-24 RESERVED
	 * IRQ 25 (EXT IRQ 0) CAN0; active low; level sensitive
	 * IRQ 26 (EXT IRQ 1) CAN1; active low; level sensitive
	 * IRQ 27 (EXT IRQ 2) PCI SLOT 0; active low; level sensitive
	 * IRQ 28 (EXT IRQ 3) PCI SLOT 1; active low; level sensitive
	 * IRQ 29 (EXT IRQ 4) PCI SLOT 2; active low; level sensitive
	 * IRQ 30 (EXT IRQ 5) PCI SLOT 3; active low; level sensitive
	 * IRQ 31 (EXT IRQ 6) COMPACT FLASH; active high; level sensitive
	 */
	mtdcr (uicsr, 0xFFFFFFFF);	/* clear all ints */
	mtdcr (uicer, 0x00000000);	/* disable all ints */
	mtdcr (uiccr, 0x00000000);	/* set all to be non-critical */
	mtdcr (uicpr, 0xFFFFFF81);	/* set int polarities */
	mtdcr (uictr, 0x10000000);	/* set int trigger levels */
	mtdcr (uicvcr, 0x00000001);	/* set vect base=0,INT0 highest priority */
	mtdcr (uicsr, 0xFFFFFFFF);	/* clear all ints */

	*(ushort *) 0xf03000ec = 0x0fff;	/* enable all interrupts in fpga */

	return 0;
}

/*
------------------------------------------------------------------------- */ /* * Check Board Identity: */ int checkboard (void) { int index; int len; char str[64]; int i = getenv_r ("serial#", str, sizeof (str)); const unsigned char *fpga; puts ("Board: "); if (i == -1) { puts ("### No HW ID - assuming AR405"); } else { puts(str); } puts ("\nFPGA: "); /* display infos on fpgaimage */ if (gd->board_type == 30) { fpga = fpgadata_xl30; } else { fpga = fpgadata; } index = 15; for (i = 0; i < 4; i++) { len = fpga[index]; printf ("%s ", &(fpga[index + 1])); index += len + 3; } putc ('\n'); /* * Disable sleep mode in LXT971 */ lxt971_no_sleep(); return 0; } /* ------------------------------------------------------------------------- */ long int initdram (int board_type) { unsigned long val; mtdcr(memcfga, mem_mb0cf); val = mfdcr(memcfgd); return (4*1024*1024 << ((val & 0x000e0000) >> 17)); } /* ------------------------------------------------------------------------- */ int testdram (void) { /* TODO: XXX XXX XXX */ printf ("test: 16 MB - ok\n"); return (0); } #if 1 /* test-only: some internal test routines... 
*/ /* * Some test routines */ int do_digtest(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[]) { volatile uchar *digen = (volatile uchar *)0xf03000b4; volatile ushort *digout = (volatile ushort *)0xf03000b0; volatile ushort *digin = (volatile ushort *)0xf03000a0; int i; int k; int start; int end; if (argc != 3) { puts("Usage: digtest n_start n_end (digtest 0 7)\n"); return 0; } start = simple_strtol (argv[1], NULL, 10); end = simple_strtol (argv[2], NULL, 10); /* * Enable digital outputs */ *digen = 0x08; printf("\nStarting digital In-/Out Test from I/O %d to %d (Cntrl-C to abort)...\n", start, end); /* * Set outputs one by one */ for (;;) { for (i=start; i<=end; i++) { *digout = 0x0001 << i; for (k=0; k<200; k++) udelay(1000); if (*digin != (0x0001 << i)) { printf("ERROR: OUT=0x%04X, IN=0x%04X\n", 0x0001 << i, *digin); return 0; } /* Abort if ctrl-c was pressed */ if (ctrlc()) { puts("\nAbort\n"); return 0; } } } return 0; } U_BOOT_CMD( digtest, 3, 1, do_digtest, "digtest - Test digital in-/output\n", NULL ); #define ERROR_DELTA 256 struct io { volatile short val; short dummy; }; int do_anatest(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[]) { volatile short val; int i; int volt; struct io *out; struct io *in; out = (struct io *)0xf0300090; in = (struct io *)0xf0300000; i = simple_strtol (argv[1], NULL, 10); volt = 0; printf("Setting Channel %d to %dV...\n", i, volt); out[i].val = (volt * 0x7fff) / 10; udelay(10000); val = in[i*2].val; printf("-> InChannel %d: 0x%04x=%dV\n", i*2, val, (val * 4000) / 0x7fff); if ((val < ((volt * 0x7fff) / 40) - ERROR_DELTA) || (val > ((volt * 0x7fff) / 40) + ERROR_DELTA)) { printf("ERROR! (min=0x%04x max=0x%04x)\n", ((volt * 0x7fff) / 40) - ERROR_DELTA, ((volt * 0x7fff) / 40) + ERROR_DELTA); return -1; } val = in[i*2+1].val; printf("-> InChannel %d: 0x%04x=%dV\n", i*2+1, val, (val * 4000) / 0x7fff); if ((val < ((volt * 0x7fff) / 40) - ERROR_DELTA) || (val > ((volt * 0x7fff) / 40) + ERROR_DELTA)) { printf("ERROR! 
(min=0x%04x max=0x%04x)\n", ((volt * 0x7fff) / 40) - ERROR_DELTA, ((volt * 0x7fff) / 40) + ERROR_DELTA); return -1; } volt = 5; printf("Setting Channel %d to %dV...\n", i, volt); out[i].val = (volt * 0x7fff) / 10; udelay(10000); val = in[i*2].val; printf("-> InChannel %d: 0x%04x=%dV\n", i*2, val, (val * 4000) / 0x7fff); if ((val < ((volt * 0x7fff) / 40) - ERROR_DELTA) || (val > ((volt * 0x7fff) / 40) + ERROR_DELTA)) { printf("ERROR! (min=0x%04x max=0x%04x)\n", ((volt * 0x7fff) / 40) - ERROR_DELTA, ((volt * 0x7fff) / 40) + ERROR_DELTA); return -1; } val = in[i*2+1].val; printf("-> InChannel %d: 0x%04x=%dV\n", i*2+1, val, (val * 4000) / 0x7fff); if ((val < ((volt * 0x7fff) / 40) - ERROR_DELTA) || (val > ((volt * 0x7fff) / 40) + ERROR_DELTA)) { printf("ERROR! (min=0x%04x max=0x%04x)\n", ((volt * 0x7fff) / 40) - ERROR_DELTA, ((volt * 0x7fff) / 40) + ERROR_DELTA); return -1; } volt = 10; printf("Setting Channel %d to %dV...\n", i, volt); out[i].val = (volt * 0x7fff) / 10; udelay(10000); val = in[i*2].val; printf("-> InChannel %d: 0x%04x=%dV\n", i*2, val, (val * 4000) / 0x7fff); if ((val < ((volt * 0x7fff) / 40) - ERROR_DELTA) || (val > ((volt * 0x7fff) / 40) + ERROR_DELTA)) { printf("ERROR! (min=0x%04x max=0x%04x)\n", ((volt * 0x7fff) / 40) - ERROR_DELTA, ((volt * 0x7fff) / 40) + ERROR_DELTA); return -1; } val = in[i*2+1].val; printf("-> InChannel %d: 0x%04x=%dV\n", i*2+1, val, (val * 4000) / 0x7fff); if ((val < ((volt * 0x7fff) / 40) - ERROR_DELTA) || (val > ((volt * 0x7fff) / 40) + ERROR_DELTA)) { printf("ERROR! 
(min=0x%04x max=0x%04x)\n", ((volt * 0x7fff) / 40) - ERROR_DELTA, ((volt * 0x7fff) / 40) + ERROR_DELTA); return -1; } printf("Channel %d OK!\n", i); return 0; } U_BOOT_CMD( anatest, 2, 1, do_anatest, "anatest - Test analog in-/output\n", NULL ); int counter = 0; void cyclicInt(void *ptr) { *(ushort *)0xf03000e8 = 0x0800; /* ack int */ counter++; } int do_inctest(cmd_tbl_t *cmdtp, int flag, int argc, char *argv[]) { volatile uchar *digout = (volatile uchar *)0xf03000b4; volatile ulong *incin; int i; incin = (volatile ulong *)0xf0300040; /* * Clear inc counter */ incin[0] = 0; incin[1] = 0; incin[2] = 0; incin[3] = 0; incin = (volatile ulong *)0xf0300050; /* * Inc a little */ for (i=0; i<10000; i++) { switch (i & 0x03) { case 0: *digout = 0x02; break; case 1: *digout = 0x03; break; case 2: *digout = 0x01; break; case 3: *digout = 0x00; break; } udelay(10); } printf("Inc 0 = %ld\n", incin[0]); printf("Inc 1 = %ld\n", incin[1]); printf("Inc 2 = %ld\n", incin[2]); printf("Inc 3 = %ld\n", incin[3]); *(ushort *)0xf03000e0 = 0x0c80-1; /* set counter */ *(ushort *)0xf03000ec |= 0x0800; /* enable int */ irq_install_handler (30, (interrupt_handler_t *) cyclicInt, NULL); printf("counter=%d\n", counter); return 0; } U_BOOT_CMD( inctest, 3, 1, do_inctest, "inctest - Test incremental encoder inputs\n", NULL ); #endif
gpl-2.0
xobs/linux-2.6.28.mx233-falconwing
drivers/gpu/drm/drm_vm.c
48
18650
/** * \file drm_vm.c * Memory mapping for DRM * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> */ /* * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "drmP.h" #if defined(__ia64__) #include <linux/efi.h> #endif static void drm_vm_open(struct vm_area_struct *vma); static void drm_vm_close(struct vm_area_struct *vma); static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) { pgprot_t tmp = vm_get_page_prot(vma->vm_flags); #if defined(__i386__) || defined(__x86_64__) if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { pgprot_val(tmp) |= _PAGE_PCD; pgprot_val(tmp) &= ~_PAGE_PWT; } #elif defined(__powerpc__) pgprot_val(tmp) |= _PAGE_NO_CACHE; if (map_type == _DRM_REGISTERS) pgprot_val(tmp) |= _PAGE_GUARDED; #elif defined(__ia64__) if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) tmp = pgprot_writecombine(tmp); else tmp = pgprot_noncached(tmp); #elif defined(__sparc__) tmp = pgprot_noncached(tmp); #endif return tmp; } static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) { pgprot_t tmp = vm_get_page_prot(vma->vm_flags); #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) tmp |= _PAGE_NO_CACHE; #endif return tmp; } /** * \c fault method for AGP virtual memory. * * \param vma virtual memory area. * \param address access address. * \return pointer to the page structure. * * Find the right map and if it's AGP memory find the real physical page to * map, get the page, increment the use count and return it. 
*/ #if __OS_HAS_AGP static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_file *priv = vma->vm_file->private_data; struct drm_device *dev = priv->minor->dev; struct drm_map *map = NULL; struct drm_map_list *r_list; struct drm_hash_item *hash; /* * Find the right map */ if (!drm_core_has_AGP(dev)) goto vm_fault_error; if (!dev->agp || !dev->agp->cant_use_aperture) goto vm_fault_error; if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) goto vm_fault_error; r_list = drm_hash_entry(hash, struct drm_map_list, hash); map = r_list->map; if (map && map->type == _DRM_AGP) { /* * Using vm_pgoff as a selector forces us to use this unusual * addressing scheme. */ unsigned long offset = (unsigned long)vmf->virtual_address - vma->vm_start; unsigned long baddr = map->offset + offset; struct drm_agp_mem *agpmem; struct page *page; #ifdef __alpha__ /* * Adjust to a bus-relative address */ baddr -= dev->hose->mem_space->start; #endif /* * It's AGP memory - find the real physical page to map */ list_for_each_entry(agpmem, &dev->agp->memory, head) { if (agpmem->bound <= baddr && agpmem->bound + agpmem->pages * PAGE_SIZE > baddr) break; } if (!agpmem) goto vm_fault_error; /* * Get the page, inc the use count, and return it */ offset = (baddr - agpmem->bound) >> PAGE_SHIFT; page = virt_to_page(__va(agpmem->memory->memory[offset])); get_page(page); vmf->page = page; DRM_DEBUG ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n", baddr, __va(agpmem->memory->memory[offset]), offset, page_count(page)); return 0; } vm_fault_error: return VM_FAULT_SIGBUS; /* Disallow mremap */ } #else /* __OS_HAS_AGP */ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } #endif /* __OS_HAS_AGP */ /** * \c nopage method for shared virtual memory. * * \param vma virtual memory area. * \param address access address. * \return pointer to the page structure. 
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	/* SHM maps are vmalloc-backed: translate the fault offset into the
	 * kernel-side address and look up the backing page. */
	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	/* Count vmas still referencing this map and unlink/free our own
	 * tracking entry in the same pass. */
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mappings information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			/* Tear down the backing resource per mapping type. */
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				/* nothing to release here */
				break;
			case _DRM_CONSISTENT:
				/* rebuild the dma handle so the PCI helper
				 * can free the consistent allocation */
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
*/ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_map *map = (struct drm_map *) vma->vm_private_data; struct drm_file *priv = vma->vm_file->private_data; struct drm_device *dev = priv->minor->dev; struct drm_sg_mem *entry = dev->sg; unsigned long offset; unsigned long map_offset; unsigned long page_offset; struct page *page; if (!entry) return VM_FAULT_SIGBUS; /* Error */ if (!entry->pagelist) return VM_FAULT_SIGBUS; /* Nothing allocated */ offset = (unsigned long)vmf->virtual_address - vma->vm_start; map_offset = map->offset - (unsigned long)dev->sg->virtual; page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); page = entry->pagelist[page_offset]; get_page(page); vmf->page = page; return 0; } static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return drm_do_vm_fault(vma, vmf); } static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return drm_do_vm_shm_fault(vma, vmf); } static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return drm_do_vm_dma_fault(vma, vmf); } static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return drm_do_vm_sg_fault(vma, vmf); } /** AGP virtual memory operations */ static struct vm_operations_struct drm_vm_ops = { .fault = drm_vm_fault, .open = drm_vm_open, .close = drm_vm_close, }; /** Shared virtual memory operations */ static struct vm_operations_struct drm_vm_shm_ops = { .fault = drm_vm_shm_fault, .open = drm_vm_open, .close = drm_vm_shm_close, }; /** DMA virtual memory operations */ static struct vm_operations_struct drm_vm_dma_ops = { .fault = drm_vm_dma_fault, .open = drm_vm_open, .close = drm_vm_close, }; /** Scatter-gather virtual memory operations */ static struct vm_operations_struct drm_vm_sg_ops = { .fault = drm_vm_sg_fault, .open = drm_vm_open, .close = drm_vm_close, }; /** * \c open method for shared virtual memory. * * \param vma virtual memory area. 
* * Create a new drm_vma_entry structure as the \p vma private data entry and * add it to drm_device::vmalist. */ static void drm_vm_open_locked(struct vm_area_struct *vma) { struct drm_file *priv = vma->vm_file->private_data; struct drm_device *dev = priv->minor->dev; struct drm_vma_entry *vma_entry; DRM_DEBUG("0x%08lx,0x%08lx\n", vma->vm_start, vma->vm_end - vma->vm_start); atomic_inc(&dev->vma_count); vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); if (vma_entry) { vma_entry->vma = vma; vma_entry->pid = current->pid; list_add(&vma_entry->head, &dev->vmalist); } } static void drm_vm_open(struct vm_area_struct *vma) { struct drm_file *priv = vma->vm_file->private_data; struct drm_device *dev = priv->minor->dev; mutex_lock(&dev->struct_mutex); drm_vm_open_locked(vma); mutex_unlock(&dev->struct_mutex); } /** * \c close method for all virtual memory types. * * \param vma virtual memory area. * * Search the \p vma private data entry in drm_device::vmalist, unlink it, and * free it. */ static void drm_vm_close(struct vm_area_struct *vma) { struct drm_file *priv = vma->vm_file->private_data; struct drm_device *dev = priv->minor->dev; struct drm_vma_entry *pt, *temp; DRM_DEBUG("0x%08lx,0x%08lx\n", vma->vm_start, vma->vm_end - vma->vm_start); atomic_dec(&dev->vma_count); mutex_lock(&dev->struct_mutex); list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { if (pt->vma == vma) { list_del(&pt->head); drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); break; } } mutex_unlock(&dev->struct_mutex); } /** * mmap DMA memory. * * \param file_priv DRM file private. * \param vma virtual memory area. * \return zero on success or a negative number on failure. * * Sets the virtual memory area operations structure to vm_dma_ops, the file * pointer, and calls vm_open(). 
*/ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) { struct drm_file *priv = filp->private_data; struct drm_device *dev; struct drm_device_dma *dma; unsigned long length = vma->vm_end - vma->vm_start; dev = priv->minor->dev; dma = dev->dma; DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", vma->vm_start, vma->vm_end, vma->vm_pgoff); /* Length must match exact page count */ if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { return -EINVAL; } if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) { vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); #if defined(__i386__) || defined(__x86_64__) pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; #else /* Ye gads this is ugly. With more thought we could move this up higher and use `protection_map' instead. */ vma->vm_page_prot = __pgprot(pte_val (pte_wrprotect (__pte(pgprot_val(vma->vm_page_prot))))); #endif } vma->vm_ops = &drm_vm_dma_ops; vma->vm_flags |= VM_RESERVED; /* Don't swap */ vma->vm_flags |= VM_DONTEXPAND; vma->vm_file = filp; /* Needed for drm_vm_open() */ drm_vm_open_locked(vma); return 0; } unsigned long drm_core_get_map_ofs(struct drm_map * map) { return map->offset; } EXPORT_SYMBOL(drm_core_get_map_ofs); unsigned long drm_core_get_reg_ofs(struct drm_device *dev) { #ifdef __alpha__ return dev->hose->dense_mem_base - dev->hose->mem_space->start; #else return 0; #endif } EXPORT_SYMBOL(drm_core_get_reg_ofs); /** * mmap DMA memory. * * \param file_priv DRM file private. * \param vma virtual memory area. * \return zero on success or a negative number on failure. * * If the virtual memory area has no offset associated with it then it's a DMA * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist, * checks that the restricted flag is not set, sets the virtual memory operations * according to the mapping type and remaps the pages. Finally sets the file * pointer and calls vm_open(). 
 */
/* Core mmap dispatcher; caller must hold dev->struct_mutex (see drm_mmap). */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_map *map = NULL;
	unsigned long offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	/* vm_pgoff selects the map via the device's map hash */
	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		/* NOTE(review): vm_page_prot is updated only after
		 * remap_pfn_range() has already written the PTEs above, so
		 * this assignment looks ineffective for the remapped pages -
		 * confirm intended ordering against upstream drm_vm.c. */
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

/* Public mmap entry point: serializes on dev->struct_mutex and defers
 * to drm_mmap_locked(). */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
gpl-2.0
omerjerk/CodyKernel-hammerhead
sound/soc/soc-core.c
48
100453
/* * soc-core.c -- ALSA SoC Audio Layer * * Copyright 2005 Wolfson Microelectronics PLC. * Copyright 2005 Openedhand Ltd. * Copyright (C) 2010 Slimlogic Ltd. * Copyright (C) 2010 Texas Instruments Inc. * * Author: Liam Girdwood <lrg@slimlogic.co.uk> * with code, comments and ideas from :- * Richard Purdie <richard@openedhand.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * TODO: * o Add hw rules to enforce rates, etc. * o More testing with other codecs/machines. * o Add more codecs and platforms to ensure good API coverage. * o Support TDM on PCM and I2S */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/platform_device.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/of.h> #include <sound/ac97_codec.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dpcm.h> #include <sound/initval.h> #define CREATE_TRACE_POINTS #include <trace/events/asoc.h> #define NAME_SIZE 32 static DECLARE_WAIT_QUEUE_HEAD(soc_pm_waitq); #ifdef CONFIG_DEBUG_FS struct dentry *snd_soc_debugfs_root; EXPORT_SYMBOL_GPL(snd_soc_debugfs_root); #endif static DEFINE_MUTEX(client_mutex); static LIST_HEAD(dai_list); static LIST_HEAD(platform_list); static LIST_HEAD(codec_list); int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num); int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd); int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute); int soc_dpcm_be_ac97_cpu_dai_suspend(struct snd_soc_pcm_runtime *fe); int soc_dpcm_be_ac97_cpu_dai_resume(struct snd_soc_pcm_runtime *fe); int soc_dpcm_be_cpu_dai_resume(struct 
snd_soc_pcm_runtime *fe); int soc_dpcm_be_cpu_dai_suspend(struct snd_soc_pcm_runtime *fe); int soc_dpcm_be_platform_suspend(struct snd_soc_pcm_runtime *fe); int soc_dpcm_be_platform_resume(struct snd_soc_pcm_runtime *fe); /* * This is a timeout to do a DAPM powerdown after a stream is closed(). * It can be used to eliminate pops between different playback streams, e.g. * between two audio tracks. */ static int pmdown_time; module_param(pmdown_time, int, 0); MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)"); /* returns the minimum number of bytes needed to represent * a particular given value */ static int min_bytes_needed(unsigned long val) { int c = 0; int i; for (i = (sizeof val * 8) - 1; i >= 0; --i, ++c) if (val & (1UL << i)) break; c = (sizeof val * 8) - c; if (!c || (c % 8)) c = (c + 8) / 8; else c /= 8; return c; } /* fill buf which is 'len' bytes with a formatted * string of the form 'reg: value\n' */ static int format_register_str(struct snd_soc_codec *codec, unsigned int reg, char *buf, size_t len) { int wordsize = min_bytes_needed(codec->driver->reg_cache_size) * 2; int regsize = codec->driver->reg_word_size * 2; int ret; char tmpbuf[len + 1]; char regbuf[regsize + 1]; /* since tmpbuf is allocated on the stack, warn the callers if they * try to abuse this function */ WARN_ON(len > 63); /* +2 for ': ' and + 1 for '\n' */ if (wordsize + regsize + 2 + 1 != len) return -EINVAL; ret = snd_soc_read(codec, reg); if (ret < 0) { memset(regbuf, 'X', regsize); regbuf[regsize] = '\0'; } else { snprintf(regbuf, regsize + 1, "%.*x", regsize, ret); } /* prepare the buffer */ snprintf(tmpbuf, len + 1, "%.*x: %s\n", wordsize, reg, regbuf); /* copy it back to the caller without the '\0' */ memcpy(buf, tmpbuf, len); return 0; } /* codec register dump */ static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf, size_t count, loff_t pos) { int i, step = 1; int wordsize, regsize; int len; size_t total = 0; loff_t p = 0; wordsize = 
	    min_bytes_needed(codec->driver->reg_cache_size) * 2;
	regsize = codec->driver->reg_word_size * 2;
	/* one line per register: hex reg, ': ', hex value, newline */
	len = wordsize + regsize + 2 + 1;

	if (!codec->driver->reg_cache_size)
		return 0;

	if (codec->driver->reg_cache_step)
		step = codec->driver->reg_cache_step;

	for (i = 0; i < codec->driver->reg_cache_size; i += step) {
		if (!snd_soc_codec_readable_register(codec, i))
			continue;
		if (codec->driver->display_register) {
			count += codec->driver->display_register(codec,
					buf + count, PAGE_SIZE - count, i);
		} else {
			/* only support larger than PAGE_SIZE bytes debugfs
			 * entries for the default case */
			if (p >= pos) {
				if (total + len >= count - 1)
					break;
				format_register_str(codec, i, buf + total,
						    len);
				total += len;
			}
			p += len;
		}
	}

	total = min(total, count - 1);

	return total;
}

/* sysfs 'codec_reg' read: dump the full codec register map */
static ssize_t codec_reg_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);

	return soc_codec_reg_show(rtd->codec, buf, PAGE_SIZE, 0);
}

static DEVICE_ATTR(codec_reg, 0444, codec_reg_show, NULL);

/* sysfs 'pmdown_time' read: show the DAPM powerdown delay in msecs */
static ssize_t pmdown_time_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);

	return sprintf(buf, "%ld\n", rtd->pmdown_time);
}

/* sysfs 'pmdown_time' write: set the DAPM powerdown delay in msecs */
static ssize_t pmdown_time_set(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
	int ret;

	ret = strict_strtol(buf, 10, &rtd->pmdown_time);
	if (ret)
		return ret;

	return count;
}

static DEVICE_ATTR(pmdown_time, 0644, pmdown_time_show, pmdown_time_set);

#ifdef CONFIG_DEBUG_FS
static int codec_reg_open_file(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

/* debugfs 'codec_reg' read: dump registers into a user buffer,
 * honouring the caller's file position */
static ssize_t codec_reg_read_file(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	ssize_t ret;
	struct snd_soc_codec *codec = file->private_data;
	char *buf;

	if (*ppos < 0 || !count)
		return -EINVAL;
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return
			-ENOMEM;
	ret = soc_codec_reg_show(codec, buf, count, *ppos);
	if (ret >= 0) {
		if (copy_to_user(user_buf, buf, ret)) {
			kfree(buf);
			return -EFAULT;
		}
		*ppos += ret;
	}
	kfree(buf);
	return ret;
}

/* debugfs 'codec_reg' write: parse "reg value" (both hex) from userspace
 * and write the value to the codec register */
static ssize_t codec_reg_write_file(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct snd_soc_codec *codec = file->private_data;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (strict_strtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER);

	snd_soc_write(codec, reg, value);
	return buf_size;
}

static const struct file_operations codec_reg_fops = {
	.open = codec_reg_open_file,
	.read = codec_reg_read_file,
	.write = codec_reg_write_file,
	.llseek = default_llseek,
};

/* create the per-codec debugfs directory (cache flags, codec_reg,
 * DAPM entries) under the card's debugfs directory */
static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
{
	struct dentry *debugfs_card_root = codec->card->debugfs_card_root;

	codec->debugfs_codec_root = debugfs_create_dir(codec->name,
						       debugfs_card_root);
	if (!codec->debugfs_codec_root) {
		printk(KERN_WARNING
		       "ASoC: Failed to create codec debugfs directory\n");
		return;
	}

	debugfs_create_bool("cache_sync", 0444, codec->debugfs_codec_root,
			    &codec->cache_sync);
	debugfs_create_bool("cache_only", 0444, codec->debugfs_codec_root,
			    &codec->cache_only);

	codec->debugfs_reg = debugfs_create_file("codec_reg", 0644,
						 codec->debugfs_codec_root,
						 codec, &codec_reg_fops);
	if (!codec->debugfs_reg)
		printk(KERN_WARNING
		       "ASoC: Failed to create codec register debugfs file\n");

	snd_soc_dapm_debugfs_init(&codec->dapm, codec->debugfs_codec_root);
}

static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
{
	debugfs_remove_recursive(codec->debugfs_codec_root);
}

/* create the per-platform debugfs directory under the card's directory */
static void soc_init_platform_debugfs(struct snd_soc_platform
					      *platform)
{
	struct dentry *debugfs_card_root = platform->card->debugfs_card_root;

	platform->debugfs_platform_root = debugfs_create_dir(platform->name,
						debugfs_card_root);
	if (!platform->debugfs_platform_root) {
		printk(KERN_WARNING
		       "ASoC: Failed to create platform debugfs directory\n");
		return;
	}

	snd_soc_dapm_debugfs_init(&platform->dapm,
		platform->debugfs_platform_root);
}

static void soc_cleanup_platform_debugfs(struct snd_soc_platform *platform)
{
	debugfs_remove_recursive(platform->debugfs_platform_root);
}

/* debugfs 'codecs': list all registered CODEC names, one per line,
 * truncated at PAGE_SIZE */
static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	ssize_t len, ret = 0;
	struct snd_soc_codec *codec;

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(codec, &codec_list, list) {
		len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
			       codec->name);
		if (len >= 0)
			ret += len;
		if (ret > PAGE_SIZE) {
			ret = PAGE_SIZE;
			break;
		}
	}

	if (ret >= 0)
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static const struct file_operations codec_list_fops = {
	.read = codec_list_read_file,
	.llseek = default_llseek,/* read accesses f_pos */
};

/* debugfs 'dais': list all registered DAI names, one per line */
static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	ssize_t len, ret = 0;
	struct snd_soc_dai *dai;

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(dai, &dai_list, list) {
		len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", dai->name);
		if (len >= 0)
			ret += len;
		if (ret > PAGE_SIZE) {
			ret = PAGE_SIZE;
			break;
		}
	}

	/* NOTE(review): unlike codec_list_read_file above, ret is passed
	 * unchecked here; harmless since ret never goes negative in the
	 * loop, but the two readers are inconsistent. */
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static const struct file_operations dai_list_fops = {
	.read = dai_list_read_file,
	.llseek = default_llseek,/* read accesses f_pos */
};

/* debugfs 'platforms': list all registered platform names, one per line */
static ssize_t platform_list_read_file(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	ssize_t len, ret = 0;
	struct
	snd_soc_platform *platform;

	if (!buf)
		return -ENOMEM;

	list_for_each_entry(platform, &platform_list, list) {
		len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
			       platform->name);
		if (len >= 0)
			ret += len;
		if (ret > PAGE_SIZE) {
			ret = PAGE_SIZE;
			break;
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static const struct file_operations platform_list_fops = {
	.read = platform_list_read_file,
	.llseek = default_llseek,/* read accesses f_pos */
};

/* create the per-card debugfs directory and its dapm_pop_time entry */
static void soc_init_card_debugfs(struct snd_soc_card *card)
{
	card->debugfs_card_root = debugfs_create_dir(card->name,
						     snd_soc_debugfs_root);
	if (!card->debugfs_card_root) {
		dev_warn(card->dev,
			 "ASoC: Failed to create card debugfs directory\n");
		return;
	}

	card->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
						    card->debugfs_card_root,
						    &card->pop_time);
	if (!card->debugfs_pop_time)
		dev_warn(card->dev,
			 "Failed to create pop time debugfs file\n");
}

static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
	debugfs_remove_recursive(card->debugfs_card_root);
}

#else

/* no-op stubs when debugfs is not configured in */
static inline void soc_init_codec_debugfs(struct snd_soc_codec *codec)
{
}

static inline void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
{
}

static inline void soc_init_platform_debugfs(struct snd_soc_platform *platform)
{
}

static inline void soc_cleanup_platform_debugfs(struct snd_soc_platform *platform)
{
}

static inline void soc_init_card_debugfs(struct snd_soc_card *card)
{
}

static inline void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
}
#endif

/* look up the substream of a no_pcm (back end) link by DAI link name */
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
		const char *dai_link, int stream)
{
	int i;

	for (i = 0; i < card->num_links; i++) {
		if (card->rtd[i].dai_link->no_pcm &&
			!strcmp(card->rtd[i].dai_link->name, dai_link))
			return card->rtd[i].pcm->streams[stream].substream;
	}
	dev_dbg(card->dev, "failed to find dai link %s\n", dai_link);
	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_get_dai_substream);

struct
snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
		const char *dai_link)
{
	int i;

	/* look up a PCM runtime by its DAI link name */
	for (i = 0; i < card->num_links; i++) {
		if (!strcmp(card->rtd[i].dai_link->name, dai_link))
			return &card->rtd[i];
	}
	dev_dbg(card->dev, "failed to find rtd %s\n", dai_link);
	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);

#ifdef CONFIG_SND_SOC_AC97_BUS
/* unregister ac97 codec */
static int soc_ac97_dev_unregister(struct snd_soc_codec *codec)
{
	if (codec->ac97->dev.bus)
		device_unregister(&codec->ac97->dev);
	return 0;
}

/* stop no dev release warning */
static void soc_ac97_device_release(struct device *dev){}

/* register ac97 codec to bus */
static int soc_ac97_dev_register(struct snd_soc_codec *codec)
{
	int err;

	codec->ac97->dev.bus = &ac97_bus_type;
	codec->ac97->dev.parent = codec->card->dev;
	codec->ac97->dev.release = soc_ac97_device_release;

	dev_set_name(&codec->ac97->dev, "%d-%d:%s",
		     codec->card->snd_card->number, 0, codec->name);
	err = device_register(&codec->ac97->dev);
	if (err < 0) {
		snd_printk(KERN_ERR "Can't register ac97 bus\n");
		/* clear the bus so unregister won't be attempted later */
		codec->ac97->dev.bus = NULL;
		return err;
	}
	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
/* powers down audio subsystem for suspend */
int snd_soc_suspend(struct device *dev)
{
	struct snd_soc_card *card = dev_get_drvdata(dev);
	struct snd_soc_codec *codec;
	int i;

	/* If the initialization of this soc device failed, there is no codec
	 * associated with it. Just bail out in this case.
	 */
	if (list_empty(&card->codec_dev_list))
		return 0;

	/* Due to the resume being scheduled into a workqueue we could
	 * suspend before that's finished - wait for it to complete.
	 */
	snd_power_lock(card->snd_card);
	snd_power_wait(card->snd_card, SNDRV_CTL_POWER_D0);
	snd_power_unlock(card->snd_card);

	/* we're going to block userspace touching us until resume completes */
	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot);

	/* mute any active DACs */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *dai = card->rtd[i].codec_dai;
		struct snd_soc_dai_driver *drv = dai->driver;

		if (card->rtd[i].dai_link->ignore_suspend ||
				card->rtd[i].dai_link->no_pcm)
			continue;

		/* dynamic (DPCM) links mute their back ends instead */
		if (card->rtd[i].dai_link->dynamic)
			soc_dpcm_be_digital_mute(&card->rtd[i], 1);
		else {
			if (drv->ops->digital_mute && dai->playback_active)
				drv->ops->digital_mute(dai, 1);
		}
	}

	/* suspend all pcms */
	for (i = 0; i < card->num_rtd; i++) {
		if (card->rtd[i].dai_link->ignore_suspend ||
				card->rtd[i].dai_link->no_pcm)
			continue;

		snd_pcm_suspend_all(card->rtd[i].pcm);
	}

	if (card->suspend_pre)
		card->suspend_pre(card);

	/* suspend non-AC97 CPU DAIs and the platform drivers */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
		struct snd_soc_platform *platform = card->rtd[i].platform;

		if (card->rtd[i].dai_link->ignore_suspend ||
				card->rtd[i].dai_link->no_pcm)
			continue;

		if (card->rtd[i].dai_link->dynamic) {
			soc_dpcm_be_cpu_dai_suspend(&card->rtd[i]);
			soc_dpcm_be_platform_suspend(&card->rtd[i]);
		} else {
			if (cpu_dai->driver->suspend &&
					!cpu_dai->driver->ac97_control)
				cpu_dai->driver->suspend(cpu_dai);
			/* platforms may be shared between links, so only
			 * suspend each one once */
			if (platform->driver->suspend &&
					!platform->suspended) {
				platform->driver->suspend(cpu_dai);
				platform->suspended = 1;
			}
		}
	}

	/* close any waiting streams and save state */
	for (i = 0; i < card->num_rtd; i++) {
		flush_delayed_work_sync(&card->rtd[i].delayed_work);
		card->rtd[i].codec->dapm.suspend_bias_level =
			card->rtd[i].codec->dapm.bias_level;
	}

	/* signal DAPM suspend events on both stream directions */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai_driver *driver =
			card->rtd[i].codec_dai->driver;

		if (card->rtd[i].dai_link->ignore_suspend ||
				card->rtd[i].dai_link->no_pcm)
			continue;

		if (driver->playback.stream_name != NULL)
			snd_soc_dapm_stream_event(&card->rtd[i],
				driver->playback.stream_name,
				SND_SOC_DAPM_STREAM_SUSPEND);

		if (driver->capture.stream_name != NULL)
			snd_soc_dapm_stream_event(&card->rtd[i],
				driver->capture.stream_name,
				SND_SOC_DAPM_STREAM_SUSPEND);
	}

	/* suspend all CODECs */
	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
		/* If there are paths active then the CODEC will be held with
		 * bias _ON and should not be suspended. */
		if (!codec->suspended && codec->driver->suspend) {
			switch (codec->dapm.bias_level) {
			case SND_SOC_BIAS_STANDBY:
			case SND_SOC_BIAS_OFF:
				codec->driver->suspend(codec);
				codec->suspended = 1;
				/* force a register resync on resume */
				codec->cache_sync = 1;
				break;
			default:
				dev_dbg(codec->dev,
					"CODEC is on over suspend\n");
				break;
			}
		}
	}

	/* AC97 CPU DAIs are suspended last, after the CODECs */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;

		if (card->rtd[i].dai_link->ignore_suspend ||
				card->rtd[i].dai_link->no_pcm)
			continue;

		if (card->rtd[i].dai_link->dynamic)
			soc_dpcm_be_ac97_cpu_dai_suspend(&card->rtd[i]);
		else if (cpu_dai->driver->suspend &&
				cpu_dai->driver->ac97_control)
			cpu_dai->driver->suspend(cpu_dai);
	}

	if (card->suspend_post)
		card->suspend_post(card);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_suspend);

/* deferred resume work, so resume can complete before we finished
 * setting our codec back up, which can be very slow on I2C
 */
static void soc_resume_deferred(struct work_struct *work)
{
	struct snd_soc_card *card =
			container_of(work, struct snd_soc_card,
				     deferred_resume_work);
	struct snd_soc_codec *codec;
	int i;

	/* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
	 * so userspace apps are blocked from touching us
	 */

	dev_dbg(card->dev, "starting resume work\n");

	/* Bring us up into D2 so that DAPM starts enabling things */
	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D2);

	if (card->resume_pre)
		card->resume_pre(card);

	/* resume AC97 DAIs */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;

		if (card->rtd[i].dai_link->ignore_suspend
				|| card->rtd[i].dai_link->no_pcm)
			continue;

		if (card->rtd[i].dai_link->dynamic)
			soc_dpcm_be_ac97_cpu_dai_resume(&card->rtd[i]);
		else if (cpu_dai->driver->resume &&
				cpu_dai->driver->ac97_control)
			cpu_dai->driver->resume(cpu_dai);
	}

	/* resume any CODECs that were suspended */
	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
		/* If the CODEC was idle over suspend then it will have been
		 * left with bias OFF or STANDBY and suspended so we must now
		 * resume.  Otherwise the suspend was suppressed.
		 */
		if (codec->driver->resume && codec->suspended) {
			switch (codec->dapm.bias_level) {
			case SND_SOC_BIAS_STANDBY:
			case SND_SOC_BIAS_OFF:
				codec->driver->resume(codec);
				codec->suspended = 0;
				break;
			default:
				dev_dbg(codec->dev,
					"CODEC was on over suspend\n");
				break;
			}
		}
	}

	/* signal DAPM resume events on both stream directions */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai_driver *driver =
			card->rtd[i].codec_dai->driver;

		if (card->rtd[i].dai_link->ignore_suspend ||
				card->rtd[i].dai_link->no_pcm)
			continue;

		if (driver->playback.stream_name != NULL)
			snd_soc_dapm_stream_event(&card->rtd[i],
				driver->playback.stream_name,
				SND_SOC_DAPM_STREAM_RESUME);

		if (driver->capture.stream_name != NULL)
			snd_soc_dapm_stream_event(&card->rtd[i],
				driver->capture.stream_name,
				SND_SOC_DAPM_STREAM_RESUME);
	}

	/* unmute any active DACs */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *dai = card->rtd[i].codec_dai;
		struct snd_soc_dai_driver *drv = dai->driver;

		if (card->rtd[i].dai_link->ignore_suspend ||
				card->rtd[i].dai_link->no_pcm)
			continue;

		if (card->rtd[i].dai_link->dynamic)
			soc_dpcm_be_digital_mute(&card->rtd[i], 0);
		else {
			if (drv->ops->digital_mute && dai->playback_active)
				drv->ops->digital_mute(dai, 0);
		}
	}

	/* resume non-AC97 CPU DAIs and platform drivers */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
		struct snd_soc_platform *platform = card->rtd[i].platform;

		if (card->rtd[i].dai_link->ignore_suspend ||
				card->rtd[i].dai_link->no_pcm)
			continue;

		if (card->rtd[i].dai_link->dynamic) {
			soc_dpcm_be_cpu_dai_resume(&card->rtd[i]);
			soc_dpcm_be_platform_resume(&card->rtd[i]);
		} else
		{
			if (cpu_dai->driver->resume &&
					!cpu_dai->driver->ac97_control)
				cpu_dai->driver->resume(cpu_dai);
			/* platforms may be shared; only resume once */
			if (platform->driver->resume &&
					platform->suspended) {
				platform->driver->resume(cpu_dai);
				platform->suspended = 0;
			}
		}
	}

	if (card->resume_post)
		card->resume_post(card);

	dev_dbg(card->dev, "resume work completed\n");

	/* userspace can access us now we are back as we were before */
	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
}

/* powers up audio subsystem after a suspend */
int snd_soc_resume(struct device *dev)
{
	struct snd_soc_card *card = dev_get_drvdata(dev);
	int i, ac97_control = 0;

	/* If the initialization of this soc device failed, there is no codec
	 * associated with it. Just bail out in this case.
	 */
	if (list_empty(&card->codec_dev_list))
		return 0;

	/* AC97 devices might have other drivers hanging off them so
	 * need to resume immediately. Other drivers don't have that
	 * problem and may take a substantial amount of time to resume
	 * due to I/O costs and anti-pop so handle them out of line.
	 */
	for (i = 0; i < card->num_rtd; i++) {
		struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
		ac97_control |= cpu_dai->driver->ac97_control;
	}
	if (ac97_control) {
		dev_dbg(dev, "Resuming AC97 immediately\n");
		soc_resume_deferred(&card->deferred_resume_work);
	} else {
		dev_dbg(dev, "Scheduling resume work\n");
		if (!schedule_work(&card->deferred_resume_work))
			dev_err(dev, "resume work item may be lost\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_resume);
#else
#define snd_soc_suspend NULL
#define snd_soc_resume NULL
#endif

/* fallback ops for DAI drivers that don't supply their own */
static const struct snd_soc_dai_ops null_dai_ops = {
};

/* bind one DAI link: resolve its CPU DAI, CODEC, CODEC DAI and platform
 * against the global registration lists; returns -EPROBE_DEFER while any
 * component is still missing */
static int soc_bind_dai_link(struct snd_soc_card *card, int num)
{
	struct snd_soc_dai_link *dai_link = &card->dai_link[num];
	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
	struct snd_soc_codec *codec;
	struct snd_soc_platform *platform;
	struct snd_soc_dai *codec_dai, *cpu_dai;
	const char *platform_name;

	dev_dbg(card->dev, "binding %s at idx %d\n", dai_link->name, num);

	/* Find CPU DAI from registered DAIs*/
	list_for_each_entry(cpu_dai, &dai_list, list) {
		/* match by OF node when given, otherwise by name */
		if (dai_link->cpu_dai_of_node) {
			if (cpu_dai->dev->of_node !=
			    dai_link->cpu_dai_of_node)
				continue;
		} else {
			if (strcmp(cpu_dai->name, dai_link->cpu_dai_name))
				continue;
		}

		rtd->cpu_dai = cpu_dai;
	}

	if (!rtd->cpu_dai) {
		dev_dbg(card->dev, "CPU DAI %s not registered\n",
			dai_link->cpu_dai_name);
		return -EPROBE_DEFER;
	}

	/* no, then find CODEC from registered CODECs*/
	list_for_each_entry(codec, &codec_list, list) {
		if (dai_link->codec_of_node) {
			if (codec->dev->of_node != dai_link->codec_of_node)
				continue;
		} else {
			if (strcmp(codec->name, dai_link->codec_name))
				continue;
		}

		rtd->codec = codec;

		/*
		 * CODEC found, so find CODEC DAI from registered DAIs from
		 * this CODEC
		 */
		list_for_each_entry(codec_dai, &dai_list, list) {
			if (codec->dev == codec_dai->dev &&
				!strcmp(codec_dai->name,
					dai_link->codec_dai_name)) {
				rtd->codec_dai = codec_dai;
			}
		}

		if (!rtd->codec_dai) {
			dev_dbg(card->dev, "CODEC DAI %s not registered\n",
				dai_link->codec_dai_name);
			return
				-EPROBE_DEFER;
		}
	}

	if (!rtd->codec) {
		dev_dbg(card->dev, "CODEC %s not registered\n",
			dai_link->codec_name);
		return -EPROBE_DEFER;
	}

	/* if there's no platform we match on the empty platform */
	platform_name = dai_link->platform_name;
	if (!platform_name && !dai_link->platform_of_node)
		platform_name = "snd-soc-dummy";

	/* no, then find one from the set of registered platforms */
	list_for_each_entry(platform, &platform_list, list) {
		if (dai_link->platform_of_node) {
			if (platform->dev->of_node !=
			    dai_link->platform_of_node)
				continue;
		} else {
			if (strcmp(platform->name, platform_name))
				continue;
		}

		rtd->platform = platform;
	}
	if (!rtd->platform) {
		dev_dbg(card->dev, "platform %s not registered\n",
			dai_link->platform_name);
		return -EPROBE_DEFER;
	}

	card->num_rtd++;

	return 0;
}

/* undo soc_probe_codec(): run the driver remove callback, free DAPM
 * widgets, drop debugfs entries and the module reference */
static void soc_remove_codec(struct snd_soc_codec *codec)
{
	int err;

	if (codec->driver->remove) {
		err = codec->driver->remove(codec);
		if (err < 0)
			dev_err(codec->dev,
				"asoc: failed to remove %s: %d\n",
				codec->name, err);
	}

	/* Make sure all DAPM widgets are freed */
	snd_soc_dapm_free(&codec->dapm);

	soc_cleanup_codec_debugfs(codec);
	codec->probed = 0;
	list_del(&codec->card_list);
	module_put(codec->dev->driver->owner);
}

/* remove one DAI link's components whose remove_order matches 'order' */
static void soc_remove_dai_link(struct snd_soc_card *card, int num, int order)
{
	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_platform *platform = rtd->platform;
	struct snd_soc_dai *codec_dai = rtd->codec_dai,
			   *cpu_dai = rtd->cpu_dai;
	int err;

	/* unregister the rtd device */
	if (rtd->dev_registered) {
		device_remove_file(rtd->dev, &dev_attr_pmdown_time);
		device_remove_file(rtd->dev, &dev_attr_codec_reg);
		device_unregister(rtd->dev);
		rtd->dev_registered = 0;
	}

	/* remove the CODEC DAI */
	if (codec_dai && codec_dai->probed &&
			codec_dai->driver->remove_order == order) {
		if (codec_dai->driver->remove) {
			err = codec_dai->driver->remove(codec_dai);
			if (err < 0)
				printk(KERN_ERR
				       "asoc: failed to remove %s\n",
				       codec_dai->name);
		}
		codec_dai->probed = 0;
		list_del(&codec_dai->card_list);
	}

	/* remove the platform */
	if (platform && platform->probed &&
			platform->driver->remove_order == order) {
		if (platform->driver->remove) {
			err = platform->driver->remove(platform);
			if (err < 0)
				printk(KERN_ERR
				       "asoc: failed to remove %s\n",
				       platform->name);
		}

		/* Make sure all DAPM widgets are freed */
		snd_soc_dapm_free(&platform->dapm);

		soc_cleanup_platform_debugfs(platform);
		platform->probed = 0;
		list_del(&platform->card_list);
		module_put(platform->dev->driver->owner);
	}

	/* remove the CODEC */
	if (codec && codec->probed &&
			codec->driver->remove_order == order)
		soc_remove_codec(codec);

	/* remove the cpu_dai */
	if (cpu_dai && cpu_dai->probed &&
			cpu_dai->driver->remove_order == order) {
		if (cpu_dai->driver->remove) {
			err = cpu_dai->driver->remove(cpu_dai);
			if (err < 0)
				printk(KERN_ERR
				       "asoc: failed to remove %s\n",
				       cpu_dai->name);
		}
		cpu_dai->probed = 0;
		list_del(&cpu_dai->card_list);
		module_put(cpu_dai->dev->driver->owner);
	}
}

/* remove all DAI links, honouring each component's remove_order */
static void soc_remove_dai_links(struct snd_soc_card *card)
{
	int dai, order;

	for (order = SND_SOC_COMP_ORDER_FIRST;
			order <= SND_SOC_COMP_ORDER_LAST; order++) {
		for (dai = 0; dai < card->num_rtd; dai++)
			soc_remove_dai_link(card, dai, order);
	}
	card->num_rtd = 0;
}

/* apply any card-configured control name prefix to the given codec */
static void soc_set_name_prefix(struct snd_soc_card *card,
				struct snd_soc_codec *codec)
{
	int i;

	if (card->codec_conf == NULL)
		return;

	for (i = 0; i < card->num_configs; i++) {
		struct snd_soc_codec_conf *map = &card->codec_conf[i];
		if (map->dev_name && !strcmp(codec->name, map->dev_name)) {
			codec->name_prefix = map->name_prefix;
			break;
		}
	}
}

/* probe a codec on behalf of the card: take a module reference, create
 * debugfs entries, DAPM widgets/routes and controls, and run the codec
 * driver's probe callback */
static int soc_probe_codec(struct snd_soc_card *card,
			   struct snd_soc_codec *codec)
{
	int ret = 0;
	const struct snd_soc_codec_driver *driver = codec->driver;

	codec->card = card;
	codec->dapm.card = card;
	soc_set_name_prefix(card, codec);

	if (!try_module_get(codec->dev->driver->owner))
		return -ENODEV;

	soc_init_codec_debugfs(codec);

	if (driver->dapm_widgets)
		snd_soc_dapm_new_controls(&codec->dapm, driver->dapm_widgets,
					  driver->num_dapm_widgets);

	codec->dapm.idle_bias_off = driver->idle_bias_off;

	if (driver->probe) {
		ret = driver->probe(codec);
		if (ret < 0) {
			dev_err(codec->dev,
				"asoc: failed to probe CODEC %s: %d\n",
				codec->name, ret);
			goto err_probe;
		}
	}

	/* controls/routes are added after probe so the driver can
	 * register its own first */
	if (driver->controls)
		snd_soc_add_codec_controls(codec, driver->controls,
				     driver->num_controls);
	if (driver->dapm_routes)
		snd_soc_dapm_add_routes(&codec->dapm, driver->dapm_routes,
					driver->num_dapm_routes);

	/* mark codec as probed and add to card codec list */
	codec->probed = 1;
	list_add(&codec->card_list, &card->codec_dev_list);
	list_add(&codec->dapm.list, &card->dapm_list);

	return 0;

err_probe:
	/* undo the debugfs directory and the module reference */
	soc_cleanup_codec_debugfs(codec);
	module_put(codec->dev->driver->owner);

	return ret;
}

/* probe a platform driver: mirror of soc_probe_codec for platforms */
static int soc_probe_platform(struct snd_soc_card *card,
			      struct snd_soc_platform *platform)
{
	int ret = 0;
	const struct snd_soc_platform_driver *driver = platform->driver;

	platform->card = card;
	platform->dapm.card = card;

	if (!try_module_get(platform->dev->driver->owner))
		return -ENODEV;

	soc_init_platform_debugfs(platform);

	if (driver->dapm_widgets)
		snd_soc_dapm_new_controls(&platform->dapm,
			driver->dapm_widgets, driver->num_dapm_widgets);

	if (driver->probe) {
		ret = driver->probe(platform);
		if (ret < 0) {
			dev_err(platform->dev,
				"asoc: failed to probe platform %s: %d\n",
				platform->name, ret);
			goto err_probe;
		}
	}

	if (driver->controls)
		snd_soc_add_platform_controls(platform, driver->controls,
				     driver->num_controls);
	if (driver->dapm_routes)
		snd_soc_dapm_add_routes(&platform->dapm, driver->dapm_routes,
					driver->num_dapm_routes);

	/* mark platform as probed and add to card platform list */
	platform->probed = 1;
	list_add(&platform->card_list, &card->platform_dev_list);
	list_add(&platform->dapm.list, &card->dapm_list);

	return 0;

err_probe:
	module_put(platform->dev->driver->owner);

	return ret;
}

/* release handler for the dynamically allocated rtd device */
static void rtd_release(struct device *dev)
{
	kfree(dev);
}

static int soc_post_component_init(struct
		snd_soc_card *card, struct snd_soc_codec *codec,
				   int num, int dailess)
{
	struct snd_soc_dai_link *dai_link = NULL;
	struct snd_soc_aux_dev *aux_dev = NULL;
	struct snd_soc_pcm_runtime *rtd;
	const char *temp, *name;
	int ret = 0;

	/* serves both normal DAI links and dai-less aux devices */
	if (!dailess) {
		dai_link = &card->dai_link[num];
		rtd = &card->rtd[num];
		name = dai_link->name;
	} else {
		aux_dev = &card->aux_dev[num];
		rtd = &card->rtd_aux[num];
		name = aux_dev->name;
	}
	rtd->card = card;

	/* Make sure all DAPM widgets are instantiated */
	snd_soc_dapm_new_widgets(&codec->dapm);

	/* machine controls, routes and widgets are not prefixed */
	temp = codec->name_prefix;
	codec->name_prefix = NULL;

	/* do machine specific initialization */
	if (!dailess && dai_link->init)
		ret = dai_link->init(rtd);
	else if (dailess && aux_dev->init)
		ret = aux_dev->init(&codec->dapm);
	if (ret < 0) {
		dev_err(card->dev, "asoc: failed to init %s: %d\n",
			name, ret);
		return ret;
	}
	codec->name_prefix = temp;

	/* register the rtd device */
	rtd->codec = codec;

	rtd->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!rtd->dev)
		return -ENOMEM;
	device_initialize(rtd->dev);
	rtd->dev->parent = card->dev;
	rtd->dev->release = rtd_release;
	rtd->dev->init_name = name;
	dev_set_drvdata(rtd->dev, rtd);
	mutex_init(&rtd->pcm_mutex);
	INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
	INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
	INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
	INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
	ret = device_add(rtd->dev);
	if (ret < 0) {
		dev_err(card->dev,
			"asoc: failed to register runtime device: %d\n", ret);
		return ret;
	}
	rtd->dev_registered = 1;

	/* add DAPM sysfs entries for this codec */
	ret = snd_soc_dapm_sys_add(rtd->dev);
	if (ret < 0)
		dev_err(codec->dev,
			"asoc: failed to add codec dapm sysfs entries: %d\n",
			ret);

	/* add codec sysfs entries */
	ret = device_create_file(rtd->dev, &dev_attr_codec_reg);
	if (ret < 0)
		dev_err(codec->dev,
			"asoc: failed to add codec sysfs files: %d\n", ret);

#ifdef CONFIG_DEBUG_FS
	/* add DSP sysfs entries */
	if (!dai_link->dynamic)
		goto out;

	ret = soc_dpcm_debugfs_add(rtd);
	if (ret < 0)
		dev_err(rtd->dev,
			"asoc: failed to add dpcm sysfs entries: %d\n", ret);

out:
#endif
	return 0;
}

/* probe one DAI link's components whose probe_order matches 'order';
 * on the final order pass also create the rtd device and the PCM */
static int soc_probe_dai_link(struct snd_soc_card *card, int num, int order)
{
	struct snd_soc_dai_link *dai_link = &card->dai_link[num];
	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_platform *platform = rtd->platform;
	struct snd_soc_dai *codec_dai = rtd->codec_dai,
			   *cpu_dai = rtd->cpu_dai;
	int ret;

	dev_dbg(card->dev, "probe %s dai link %d late %d\n",
			card->name, num, order);

	/* config components */
	codec_dai->codec = codec;
	cpu_dai->platform = platform;
	codec_dai->card = card;
	cpu_dai->card = card;

	/* set default power off timeout */
	rtd->pmdown_time = pmdown_time;

	/* probe the cpu_dai */
	if (!cpu_dai->probed &&
			cpu_dai->driver->probe_order == order) {
		if (!try_module_get(cpu_dai->dev->driver->owner))
			return -ENODEV;

		if (cpu_dai->driver->probe) {
			ret = cpu_dai->driver->probe(cpu_dai);
			if (ret < 0) {
				printk(KERN_ERR
				       "asoc: failed to probe CPU DAI %s\n",
				       cpu_dai->name);
				module_put(cpu_dai->dev->driver->owner);
				return ret;
			}
		}
		cpu_dai->probed = 1;
		/* mark cpu_dai as probed and add to card dai list */
		list_add(&cpu_dai->card_list, &card->dai_dev_list);
	}

	/* probe the CODEC */
	if (!codec->probed &&
			codec->driver->probe_order == order) {
		ret = soc_probe_codec(card, codec);
		if (ret < 0)
			return ret;
	}

	/* probe the platform */
	if (!platform->probed &&
			platform->driver->probe_order == order) {
		ret = soc_probe_platform(card, platform);
		if (ret < 0)
			return ret;
	}

	/* probe the CODEC DAI */
	if (!codec_dai->probed && codec_dai->driver->probe_order == order) {
		if (codec_dai->driver->probe) {
			ret = codec_dai->driver->probe(codec_dai);
			if (ret < 0) {
				printk(KERN_ERR
				       "asoc: failed to probe CODEC DAI %s\n",
				       codec_dai->name);
				return ret;
			}
		}

		/* mark codec_dai as probed and add to card dai
		   list */
		codec_dai->probed = 1;
		list_add(&codec_dai->card_list, &card->dai_dev_list);
	}

	/* complete DAI probe during last probe */
	if (order != SND_SOC_COMP_ORDER_LAST)
		return 0;

	ret = soc_post_component_init(card, codec, num, 0);
	if (ret)
		return ret;

	ret = device_create_file(rtd->dev, &dev_attr_pmdown_time);
	if (ret < 0)
		printk(KERN_WARNING
		       "asoc: failed to add pmdown_time sysfs\n");

	if (cpu_dai->driver->compress_dai) {
		/* create the compress device */
		ret = soc_new_compress(rtd, num);
		if (ret < 0) {
			printk(KERN_ERR "asoc: can't create compress %s\n",
			       dai_link->stream_name);
			return ret;
		}
	} else {
		/* create the pcm */
		ret = soc_new_pcm(rtd, num);
		if (ret < 0) {
			printk(KERN_ERR "asoc: can't create pcm %s :%d\n",
			       dai_link->stream_name, ret);
			return ret;
		}
	}

	/* add platform data for AC97 devices */
	if (rtd->codec_dai->driver->ac97_control)
		snd_ac97_dev_add_pdata(codec->ac97,
				       rtd->cpu_dai->ac97_pdata);

	return 0;
}

#ifdef CONFIG_SND_SOC_AC97_BUS
/* place the AC97 codec device on the AC97 bus unless the adaptor
 * already did so */
static int soc_register_ac97_dai_link(struct snd_soc_pcm_runtime *rtd)
{
	int ret;

	/* Only instantiate AC97 if not already done by the adaptor
	 * for the generic AC97 subsystem.
	 */
	if (rtd->codec_dai->driver->ac97_control &&
	    !rtd->codec->ac97_registered) {
		/*
		 * It is possible that the AC97 device is already registered to
		 * the device subsystem. This happens when the device is created
		 * via snd_ac97_mixer(). Currently only SoC codec that does so
		 * is the generic AC97 glue but others migh emerge.
		 *
		 * In those cases we don't try to register the device again.
		 */
		if (!rtd->codec->ac97_created)
			return 0;

		ret = soc_ac97_dev_register(rtd->codec);
		if (ret < 0) {
			printk(KERN_ERR
			       "asoc: AC97 device register failed\n");
			return ret;
		}

		rtd->codec->ac97_registered = 1;
	}
	return 0;
}

static void soc_unregister_ac97_dai_link(struct snd_soc_codec *codec)
{
	if (codec->ac97_registered) {
		soc_ac97_dev_unregister(codec);
		codec->ac97_registered = 0;
	}
}
#endif

/* verify an aux dev's codec is registered; -EPROBE_DEFER otherwise */
static int soc_check_aux_dev(struct snd_soc_card *card, int num)
{
	struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num];
	struct snd_soc_codec *codec;

	/* find CODEC from registered CODECs*/
	list_for_each_entry(codec, &codec_list, list) {
		if (!strcmp(codec->name, aux_dev->codec_name))
			return 0;
	}

	return -EPROBE_DEFER;
}

/* probe an aux dev's codec and run post-component init for it */
static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
{
	struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num];
	struct snd_soc_codec *codec;
	int ret = -ENODEV;

	/* find CODEC from registered CODECs*/
	list_for_each_entry(codec, &codec_list, list) {
		if (!strcmp(codec->name, aux_dev->codec_name)) {
			if (codec->probed) {
				dev_err(codec->dev,
					"asoc: codec already probed");
				ret = -EBUSY;
				goto out;
			}
			goto found;
		}
	}
	/* codec not found */
	dev_err(card->dev, "asoc: codec %s not found", aux_dev->codec_name);
	return -EPROBE_DEFER;

found:
	ret = soc_probe_codec(card, codec);
	if (ret < 0)
		return ret;

	/* dailess = 1: no DAI link backing this runtime */
	ret = soc_post_component_init(card, codec, num, 1);

out:
	return ret;
}

static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
{
	struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num];
	struct snd_soc_codec *codec = rtd->codec;

	/* unregister the rtd device */
	if (rtd->dev_registered) {
		device_remove_file(rtd->dev, &dev_attr_codec_reg);
		device_del(rtd->dev);
		rtd->dev_registered = 0;
	}

	/* remove the CODEC */
	if (codec && codec->probed)
		soc_remove_codec(codec);
}

/* initialise a codec's register cache once, optionally overriding the
 * cache compression type */
static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
				    enum snd_soc_compress_type compress_type)
{
	int ret;

	if (codec->cache_init)
		return 0;

	/* override the compress_type if necessary */
	if (compress_type &&
codec->compress_type != compress_type) codec->compress_type = compress_type; ret = snd_soc_cache_init(codec); if (ret < 0) { dev_err(codec->dev, "Failed to set cache compression type: %d\n", ret); return ret; } codec->cache_init = 1; return 0; } static void soc_init_dai_aif_channel_map(struct snd_soc_card *card, struct snd_soc_dai *dai, int stream) { struct snd_soc_dapm_widget *w = NULL; const char *aif_name; if (stream == SNDRV_PCM_STREAM_PLAYBACK) aif_name = dai->driver->playback.aif_name; else aif_name = dai->driver->capture.aif_name; if (dai->codec) { struct snd_soc_codec *codec; list_for_each_entry(codec, &card->codec_dev_list, card_list) { w = snd_soc_get_codec_widget(card, codec, aif_name); if (w) break; } } else if (dai->platform) { struct snd_soc_platform *platform; list_for_each_entry(platform, &card->platform_dev_list, card_list) { w = snd_soc_get_platform_widget(card, platform, aif_name); if (w) break; } } if (w) { if (stream == SNDRV_PCM_STREAM_PLAYBACK) dai->playback_aif = w; else dai->capture_aif = w; } else dev_err(dai->dev, "unable to find %s DAI AIF %s\n", stream ? 
"capture" : "playback", aif_name); dai->channel_map_instanciated = 1; } static int soc_is_dai_pcm(struct snd_soc_card *card, struct snd_soc_dai *dai) { int i; for (i = 0; i < card->num_rtd; i++) { if (card->rtd[i].cpu_dai == dai && !card->rtd[i].dai_link->no_pcm) return 1; } return 0; } static void soc_init_card_aif_channel_map(struct snd_soc_card *card) { struct snd_soc_dai *dai; list_for_each_entry(dai, &card->dai_dev_list, card_list) { /* only process DAIs that use the new API until * the old "stream name" API is fully deprecated */ if (!dai->driver->playback.aif_name && !dai->driver->capture.aif_name) continue; /* channels are only mapped from PCM DAIs */ if (!soc_is_dai_pcm(card, dai)) continue; /* skip if already instanciated */ if (dai->channel_map_instanciated) continue; /* create unique channels masks for each DAI in the sound card */ dai->playback_channel_map = ((1 << dai->driver->playback.channels_max) - 1) << card->num_playback_channels; card->num_playback_channels += dai->driver->playback.channels_max; dai->capture_channel_map = ((1 << dai->driver->capture.channels_max) - 1) << card->num_capture_channels; card->num_capture_channels += dai->driver->capture.channels_max; if (dai->driver->playback.channels_max) soc_init_dai_aif_channel_map(card, dai, SNDRV_PCM_STREAM_PLAYBACK); if (dai->driver->capture.channels_max) soc_init_dai_aif_channel_map(card, dai, SNDRV_PCM_STREAM_CAPTURE); } } static int snd_soc_instantiate_card(struct snd_soc_card *card) { struct snd_soc_codec *codec; struct snd_soc_codec_conf *codec_conf; enum snd_soc_compress_type compress_type; struct snd_soc_dai_link *dai_link; int ret, i, order; mutex_lock(&card->mutex); /* bind DAIs */ for (i = 0; i < card->num_links; i++) { ret = soc_bind_dai_link(card, i); if (ret != 0) goto base_error; } /* check aux_devs too */ for (i = 0; i < card->num_aux_devs; i++) { ret = soc_check_aux_dev(card, i); if (ret != 0) goto base_error; } /* initialize the register cache for each available codec */ 
list_for_each_entry(codec, &codec_list, list) { if (codec->cache_init) continue; /* by default we don't override the compress_type */ compress_type = 0; /* check to see if we need to override the compress_type */ for (i = 0; i < card->num_configs; ++i) { codec_conf = &card->codec_conf[i]; if (!strcmp(codec->name, codec_conf->dev_name)) { compress_type = codec_conf->compress_type; if (compress_type && compress_type != codec->compress_type) break; } } ret = snd_soc_init_codec_cache(codec, compress_type); if (ret < 0) goto base_error; } /* card bind complete so register a sound card */ ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, card->owner, 0, &card->snd_card); if (ret < 0) { printk(KERN_ERR "asoc: can't create sound card for card %s\n", card->name); goto base_error; } card->snd_card->dev = card->dev; card->dapm.bias_level = SND_SOC_BIAS_OFF; card->dapm.dev = card->dev; card->dapm.card = card; list_add(&card->dapm.list, &card->dapm_list); #ifdef CONFIG_DEBUG_FS snd_soc_dapm_debugfs_init(&card->dapm, card->debugfs_card_root); #endif #ifdef CONFIG_PM_SLEEP /* deferred resume work */ INIT_WORK(&card->deferred_resume_work, soc_resume_deferred); #endif if (card->dapm_widgets) snd_soc_dapm_new_controls(&card->dapm, card->dapm_widgets, card->num_dapm_widgets); /* initialise the sound card only once */ if (card->probe) { ret = card->probe(card); if (ret < 0) goto card_probe_error; } /* early DAI link probe */ for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST; order++) { for (i = 0; i < card->num_links; i++) { ret = soc_probe_dai_link(card, i, order); if (ret < 0) { pr_err("asoc: failed to instantiate card %s: %d\n", card->name, ret); goto probe_dai_err; } } } for (i = 0; i < card->num_aux_devs; i++) { ret = soc_probe_aux_dev(card, i); if (ret < 0) { pr_err("asoc: failed to add auxiliary devices %s: %d\n", card->name, ret); goto probe_aux_dev_err; } } if (card->controls) snd_soc_add_card_controls(card, card->controls, 
card->num_controls); if (card->dapm_routes) snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes, card->num_dapm_routes); snd_soc_dapm_new_widgets(&card->dapm); for (i = 0; i < card->num_links; i++) { dai_link = &card->dai_link[i]; if (dai_link->dai_fmt) { ret = snd_soc_dai_set_fmt(card->rtd[i].codec_dai, dai_link->dai_fmt); if (ret != 0) dev_warn(card->rtd[i].codec_dai->dev, "Failed to set DAI format: %d\n", ret); ret = snd_soc_dai_set_fmt(card->rtd[i].cpu_dai, dai_link->dai_fmt); if (ret != 0) dev_warn(card->rtd[i].cpu_dai->dev, "Failed to set DAI format: %d\n", ret); } } snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname), "%s", card->name); snprintf(card->snd_card->longname, sizeof(card->snd_card->longname), "%s", card->long_name ? card->long_name : card->name); snprintf(card->snd_card->driver, sizeof(card->snd_card->driver), "%s", card->driver_name ? card->driver_name : card->name); for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) { switch (card->snd_card->driver[i]) { case '_': case '-': case '\0': break; default: if (!isalnum(card->snd_card->driver[i])) card->snd_card->driver[i] = '_'; break; } } if (card->late_probe) { ret = card->late_probe(card); if (ret < 0) { dev_err(card->dev, "%s late_probe() failed: %d\n", card->name, ret); goto probe_aux_dev_err; } } snd_soc_dapm_new_widgets(&card->dapm); soc_init_card_aif_channel_map(card); if (card->fully_routed) list_for_each_entry(codec, &card->codec_dev_list, card_list) snd_soc_dapm_auto_nc_codec_pins(codec); ret = snd_card_register(card->snd_card); if (ret < 0) { printk(KERN_ERR "asoc: failed to register soundcard for %s\n", card->name); goto probe_aux_dev_err; } #ifdef CONFIG_SND_SOC_AC97_BUS /* register any AC97 codecs */ for (i = 0; i < card->num_rtd; i++) { ret = soc_register_ac97_dai_link(&card->rtd[i]); if (ret < 0) { printk(KERN_ERR "asoc: failed to register AC97 %s\n", card->name); while (--i >= 0) soc_unregister_ac97_dai_link(card->rtd[i].codec); goto probe_aux_dev_err; } } 
#endif card->instantiated = 1; snd_soc_dapm_sync(&card->dapm); mutex_unlock(&card->mutex); return 0; probe_aux_dev_err: for (i = 0; i < card->num_aux_devs; i++) soc_remove_aux_dev(card, i); probe_dai_err: soc_remove_dai_links(card); card_probe_error: if (card->remove) card->remove(card); snd_card_free(card->snd_card); base_error: mutex_unlock(&card->mutex); return ret; } /* probes a new socdev */ static int soc_probe(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); int ret = 0; /* * no card, so machine driver should be registering card * we should not be here in that case so ret error */ if (!card) return -EINVAL; /* Bodge while we unpick instantiation */ card->dev = &pdev->dev; ret = snd_soc_register_card(card); if (ret != 0) { dev_err(&pdev->dev, "Failed to register card\n"); return ret; } return 0; } static int soc_cleanup_card_resources(struct snd_soc_card *card) { int i; /* make sure any delayed work runs */ for (i = 0; i < card->num_rtd; i++) { struct snd_soc_pcm_runtime *rtd = &card->rtd[i]; flush_delayed_work_sync(&rtd->delayed_work); } /* remove auxiliary devices */ for (i = 0; i < card->num_aux_devs; i++) soc_remove_aux_dev(card, i); /* remove and free each DAI */ soc_remove_dai_links(card); soc_cleanup_card_debugfs(card); /* remove the card */ if (card->remove) card->remove(card); snd_soc_dapm_free(&card->dapm); kfree(card->rtd); snd_card_free(card->snd_card); return 0; } /* removes a socdev */ static int soc_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); snd_soc_unregister_card(card); return 0; } int snd_soc_poweroff(struct device *dev) { struct snd_soc_card *card = dev_get_drvdata(dev); int i; if (!card->instantiated) return 0; /* Flush out pmdown_time work - we actually do want to run it * now, we're shutting down so no imminent restart. 
*/ for (i = 0; i < card->num_rtd; i++) { struct snd_soc_pcm_runtime *rtd = &card->rtd[i]; flush_delayed_work_sync(&rtd->delayed_work); } snd_soc_dapm_shutdown(card); return 0; } EXPORT_SYMBOL_GPL(snd_soc_poweroff); const struct dev_pm_ops snd_soc_pm_ops = { .suspend = snd_soc_suspend, .resume = snd_soc_resume, .poweroff = snd_soc_poweroff, }; EXPORT_SYMBOL_GPL(snd_soc_pm_ops); /* ASoC platform driver */ static struct platform_driver soc_driver = { .driver = { .name = "soc-audio", .owner = THIS_MODULE, .pm = &snd_soc_pm_ops, }, .probe = soc_probe, .remove = soc_remove, }; /** * snd_soc_codec_volatile_register: Report if a register is volatile. * * @codec: CODEC to query. * @reg: Register to query. * * Boolean function indiciating if a CODEC register is volatile. */ int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, unsigned int reg) { if (codec->volatile_register) return codec->volatile_register(codec, reg); else return 0; } EXPORT_SYMBOL_GPL(snd_soc_codec_volatile_register); /** * snd_soc_codec_readable_register: Report if a register is readable. * * @codec: CODEC to query. * @reg: Register to query. * * Boolean function indicating if a CODEC register is readable. */ int snd_soc_codec_readable_register(struct snd_soc_codec *codec, unsigned int reg) { if (codec->readable_register) return codec->readable_register(codec, reg); else return 1; } EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register); /** * snd_soc_codec_writable_register: Report if a register is writable. * * @codec: CODEC to query. * @reg: Register to query. * * Boolean function indicating if a CODEC register is writable. 
 */
int snd_soc_codec_writable_register(struct snd_soc_codec *codec,
				    unsigned int reg)
{
	/* Codecs that don't supply a hook are assumed fully writable */
	if (codec->writable_register)
		return codec->writable_register(codec, reg);
	else
		return 1;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register);

/*
 * Read a platform register via the platform driver's read op.
 * Returns the register value, or -1 if the platform has no read op.
 */
int snd_soc_platform_read(struct snd_soc_platform *platform,
					unsigned int reg)
{
	unsigned int ret;

	if (!platform->driver->read) {
		dev_err(platform->dev, "platform has no read back\n");
		return -1;
	}

	ret = platform->driver->read(platform, reg);
	dev_dbg(platform->dev, "read %x => %x\n", reg, ret);
	trace_snd_soc_preg_read(platform, reg, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_platform_read);

/*
 * Write a platform register via the platform driver's write op.
 * Returns the driver's result, or -1 if the platform has no write op.
 */
int snd_soc_platform_write(struct snd_soc_platform *platform,
					 unsigned int reg, unsigned int val)
{
	if (!platform->driver->write) {
		dev_err(platform->dev, "platform has no write back\n");
		return -1;
	}

	dev_dbg(platform->dev, "write %x = %x\n", reg, val);
	trace_snd_soc_preg_write(platform, reg, val);
	return platform->driver->write(platform, reg, val);
}
EXPORT_SYMBOL_GPL(snd_soc_platform_write);

/**
 * snd_soc_new_ac97_codec - initialise AC97 device
 * @codec: audio codec
 * @ops: AC97 bus operations
 * @num: AC97 codec number
 *
 * Initialises AC97 codec resources for use by ad-hoc devices only.
 *
 * Returns 0 on success, -ENOMEM if the AC97 device or its bus
 * descriptor cannot be allocated.
 */
int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
	struct snd_ac97_bus_ops *ops, int num)
{
	mutex_lock(&codec->mutex);

	codec->ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
	if (codec->ac97 == NULL) {
		mutex_unlock(&codec->mutex);
		return -ENOMEM;
	}

	codec->ac97->bus = kzalloc(sizeof(struct snd_ac97_bus), GFP_KERNEL);
	if (codec->ac97->bus == NULL) {
		/* unwind the first allocation before bailing out */
		kfree(codec->ac97);
		codec->ac97 = NULL;
		mutex_unlock(&codec->mutex);
		return -ENOMEM;
	}

	codec->ac97->bus->ops = ops;
	codec->ac97->num = num;

	/*
	 * Mark the AC97 device to be created by us. This way we ensure that the
	 * device will be registered with the device subsystem later on.
	 */
	codec->ac97_created = 1;

	mutex_unlock(&codec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);

/**
 * snd_soc_free_ac97_codec - free AC97 codec device
 * @codec: audio codec
 *
 * Frees AC97 codec device resources.
 */
void snd_soc_free_ac97_codec(struct snd_soc_codec *codec)
{
	mutex_lock(&codec->mutex);
#ifdef CONFIG_SND_SOC_AC97_BUS
	soc_unregister_ac97_dai_link(codec);
#endif
	kfree(codec->ac97->bus);
	kfree(codec->ac97);
	codec->ac97 = NULL;
	/* allow a future snd_soc_new_ac97_codec() on this codec */
	codec->ac97_created = 0;
	mutex_unlock(&codec->mutex);
}
EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);

/*
 * Read a codec register, refusing the access while the card is offline.
 * Each read is also reported through the register-read tracepoint.
 */
unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg)
{
	unsigned int ret;

	if (unlikely(!snd_card_is_online_state(codec->card->snd_card))) {
		dev_err(codec->dev, "read 0x%02x while offline\n", reg);
		return -ENODEV;
	}

	ret = codec->read(codec, reg);
	dev_dbg(codec->dev, "read %x => %x\n", reg, ret);
	trace_snd_soc_reg_read(codec, reg, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_read);

/*
 * Write a codec register, refusing the access while the card is offline.
 * Each write is also reported through the register-write tracepoint.
 */
unsigned int snd_soc_write(struct snd_soc_codec *codec,
			   unsigned int reg, unsigned int val)
{
	if (unlikely(!snd_card_is_online_state(codec->card->snd_card))) {
		dev_err(codec->dev, "write 0x%02x while offline\n", reg);
		return -ENODEV;
	}

	dev_dbg(codec->dev, "write %x = %x\n", reg, val);
	trace_snd_soc_reg_write(codec, reg, val);
	return codec->write(codec, reg, val);
}
EXPORT_SYMBOL_GPL(snd_soc_write);

/* Raw bulk write helper - simply forwards to the codec's bulk op. */
unsigned int snd_soc_bulk_write_raw(struct snd_soc_codec *codec,
				    unsigned int reg, const void *data, size_t len)
{
	return codec->bulk_write_raw(codec, reg, data, len);
}
EXPORT_SYMBOL_GPL(snd_soc_bulk_write_raw);

/**
 * snd_soc_update_bits - update codec register bits
 * @codec: audio codec
 * @reg: codec register
 * @mask: register mask
 * @value: new value
 *
 * Writes new register value.
 *
 * Returns 1 for change, 0 for no change, or negative error code.
*/ int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg, unsigned int mask, unsigned int value) { bool change; unsigned int old, new; int ret; if (codec->using_regmap) { ret = regmap_update_bits_check(codec->control_data, reg, mask, value, &change); } else { ret = snd_soc_read(codec, reg); if (ret < 0) return ret; old = ret; new = (old & ~mask) | (value & mask); change = old != new; if (change) ret = snd_soc_write(codec, reg, new); } if (ret < 0) return ret; return change; } EXPORT_SYMBOL_GPL(snd_soc_update_bits); /** * snd_soc_update_bits_locked - update codec register bits * @codec: audio codec * @reg: codec register * @mask: register mask * @value: new value * * Writes new register value, and takes the codec mutex. * * Returns 1 for change else 0. */ int snd_soc_update_bits_locked(struct snd_soc_codec *codec, unsigned short reg, unsigned int mask, unsigned int value) { int change; mutex_lock(&codec->mutex); change = snd_soc_update_bits(codec, reg, mask, value); mutex_unlock(&codec->mutex); return change; } EXPORT_SYMBOL_GPL(snd_soc_update_bits_locked); /** * snd_soc_test_bits - test register for change * @codec: audio codec * @reg: codec register * @mask: register mask * @value: new value * * Tests a register with a new value and checks if the new value is * different from the old value. * * Returns 1 for change else 0. */ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg, unsigned int mask, unsigned int value) { int change; unsigned int old, new; old = snd_soc_read(codec, reg); new = (old & ~mask) | value; change = old != new; return change; } EXPORT_SYMBOL_GPL(snd_soc_test_bits); /** * snd_soc_set_runtime_hwparams - set the runtime hardware parameters * @substream: the pcm substream * @hw: the hardware parameters * * Sets the substream runtime hardware parameters. 
*/ int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream, const struct snd_pcm_hardware *hw) { struct snd_pcm_runtime *runtime = substream->runtime; if (!runtime) return 0; runtime->hw.info = hw->info; runtime->hw.formats = hw->formats; runtime->hw.period_bytes_min = hw->period_bytes_min; runtime->hw.period_bytes_max = hw->period_bytes_max; runtime->hw.periods_min = hw->periods_min; runtime->hw.periods_max = hw->periods_max; runtime->hw.buffer_bytes_max = hw->buffer_bytes_max; runtime->hw.fifo_size = hw->fifo_size; return 0; } EXPORT_SYMBOL_GPL(snd_soc_set_runtime_hwparams); /** * snd_soc_cnew - create new control * @_template: control template * @data: control private data * @long_name: control long name * @prefix: control name prefix * * Create a new mixer control from a template control. * * Returns 0 for success, else error. */ struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template, void *data, char *long_name, const char *prefix) { struct snd_kcontrol_new template; struct snd_kcontrol *kcontrol; char *name = NULL; int name_len; memcpy(&template, _template, sizeof(template)); template.index = 0; if (!long_name) long_name = template.name; if (prefix) { name_len = strlen(long_name) + strlen(prefix) + 2; name = kmalloc(name_len, GFP_KERNEL); if (!name) return NULL; snprintf(name, name_len, "%s %s", prefix, long_name); template.name = name; } else { template.name = long_name; } kcontrol = snd_ctl_new1(&template, data); kfree(name); return kcontrol; } EXPORT_SYMBOL_GPL(snd_soc_cnew); static int snd_soc_add_controls(struct snd_card *card, struct device *dev, const struct snd_kcontrol_new *controls, int num_controls, const char *prefix, void *data) { int err, i; for (i = 0; i < num_controls; i++) { const struct snd_kcontrol_new *control = &controls[i]; err = snd_ctl_add(card, snd_soc_cnew(control, data, control->name, prefix)); if (err < 0) { dev_err(dev, "Failed to add %s: %d\n", control->name, err); return err; } } return 0; } /** 
* snd_soc_add_codec_controls - add an array of controls to a codec. * Convenience function to add a list of controls. Many codecs were * duplicating this code. * * @codec: codec to add controls to * @controls: array of controls to add * @num_controls: number of elements in the array * * Return 0 for success, else error. */ int snd_soc_add_codec_controls(struct snd_soc_codec *codec, const struct snd_kcontrol_new *controls, int num_controls) { struct snd_card *card = codec->card->snd_card; return snd_soc_add_controls(card, codec->dev, controls, num_controls, codec->name_prefix, codec); } EXPORT_SYMBOL_GPL(snd_soc_add_codec_controls); /** * snd_soc_add_platform_controls - add an array of controls to a platform. * Convenience function to add a list of controls. * * @platform: platform to add controls to * @controls: array of controls to add * @num_controls: number of elements in the array * * Return 0 for success, else error. */ int snd_soc_add_platform_controls(struct snd_soc_platform *platform, const struct snd_kcontrol_new *controls, int num_controls) { struct snd_card *card = platform->card->snd_card; return snd_soc_add_controls(card, platform->dev, controls, num_controls, NULL, platform); } EXPORT_SYMBOL_GPL(snd_soc_add_platform_controls); /** * snd_soc_add_card_controls - add an array of controls to a SoC card. * Convenience function to add a list of controls. * * @soc_card: SoC card to add controls to * @controls: array of controls to add * @num_controls: number of elements in the array * * Return 0 for success, else error. */ int snd_soc_add_card_controls(struct snd_soc_card *soc_card, const struct snd_kcontrol_new *controls, int num_controls) { struct snd_card *card = soc_card->snd_card; return snd_soc_add_controls(card, soc_card->dev, controls, num_controls, NULL, soc_card); } EXPORT_SYMBOL_GPL(snd_soc_add_card_controls); /** * snd_soc_add_dai_controls - add an array of controls to a DAI. * Convienience function to add a list of controls. 
* * @dai: DAI to add controls to * @controls: array of controls to add * @num_controls: number of elements in the array * * Return 0 for success, else error. */ int snd_soc_add_dai_controls(struct snd_soc_dai *dai, const struct snd_kcontrol_new *controls, int num_controls) { struct snd_card *card = dai->card->snd_card; return snd_soc_add_controls(card, dai->dev, controls, num_controls, NULL, dai); } EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls); /** * snd_soc_info_enum_double - enumerated double mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a double enumerated * mixer control. * * Returns 0 for success. */ int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = e->shift_l == e->shift_r ? 1 : 2; uinfo->value.enumerated.items = e->max; if (uinfo->value.enumerated.item > e->max - 1) uinfo->value.enumerated.item = e->max - 1; strcpy(uinfo->value.enumerated.name, snd_soc_get_enum_text(e, uinfo->value.enumerated.item)); return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_enum_double); /** * snd_soc_get_enum_double - enumerated double mixer get callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to get the value of a double enumerated mixer. * * Returns 0 for success. 
*/ int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int val, bitmask; for (bitmask = 1; bitmask < e->max; bitmask <<= 1) ; val = snd_soc_read(codec, e->reg); ucontrol->value.enumerated.item[0] = (val >> e->shift_l) & (bitmask - 1); if (e->shift_l != e->shift_r) ucontrol->value.enumerated.item[1] = (val >> e->shift_r) & (bitmask - 1); return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_enum_double); /** * snd_soc_put_enum_double - enumerated double mixer put callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to set the value of a double enumerated mixer. * * Returns 0 for success. */ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int val; unsigned int mask, bitmask; for (bitmask = 1; bitmask < e->max; bitmask <<= 1) ; if (ucontrol->value.enumerated.item[0] > e->max - 1) return -EINVAL; val = ucontrol->value.enumerated.item[0] << e->shift_l; mask = (bitmask - 1) << e->shift_l; if (e->shift_l != e->shift_r) { if (ucontrol->value.enumerated.item[1] > e->max - 1) return -EINVAL; val |= ucontrol->value.enumerated.item[1] << e->shift_r; mask |= (bitmask - 1) << e->shift_r; } return snd_soc_update_bits_locked(codec, e->reg, mask, val); } EXPORT_SYMBOL_GPL(snd_soc_put_enum_double); /** * snd_soc_get_value_enum_double - semi enumerated double mixer get callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to get the value of a double semi enumerated mixer. * * Semi enumerated mixer: the enumerated items are referred as values. Can be * used for handling bitfield coded enumeration for example. * * Returns 0 for success. 
*/ int snd_soc_get_value_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int reg_val, val, mux; reg_val = snd_soc_read(codec, e->reg); val = (reg_val >> e->shift_l) & e->mask; for (mux = 0; mux < e->max; mux++) { if (val == e->values[mux]) break; } ucontrol->value.enumerated.item[0] = mux; if (e->shift_l != e->shift_r) { val = (reg_val >> e->shift_r) & e->mask; for (mux = 0; mux < e->max; mux++) { if (val == e->values[mux]) break; } ucontrol->value.enumerated.item[1] = mux; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_value_enum_double); /** * snd_soc_put_value_enum_double - semi enumerated double mixer put callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to set the value of a double semi enumerated mixer. * * Semi enumerated mixer: the enumerated items are referred as values. Can be * used for handling bitfield coded enumeration for example. * * Returns 0 for success. 
*/ int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int val; unsigned int mask; if (ucontrol->value.enumerated.item[0] > e->max - 1) return -EINVAL; val = e->values[ucontrol->value.enumerated.item[0]] << e->shift_l; mask = e->mask << e->shift_l; if (e->shift_l != e->shift_r) { if (ucontrol->value.enumerated.item[1] > e->max - 1) return -EINVAL; val |= e->values[ucontrol->value.enumerated.item[1]] << e->shift_r; mask |= e->mask << e->shift_r; } return snd_soc_update_bits_locked(codec, e->reg, mask, val); } EXPORT_SYMBOL_GPL(snd_soc_put_value_enum_double); /** * snd_soc_info_enum_ext - external enumerated single mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about an external enumerated * single mixer. * * Returns 0 for success. */ int snd_soc_info_enum_ext(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = e->max; if (uinfo->value.enumerated.item > e->max - 1) uinfo->value.enumerated.item = e->max - 1; strcpy(uinfo->value.enumerated.name, snd_soc_get_enum_text(e, uinfo->value.enumerated.item)); return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_enum_ext); /** * snd_soc_info_volsw_ext - external single mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a single external mixer control. * * Returns 0 for success. 
*/ int snd_soc_info_volsw_ext(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int max = kcontrol->private_value; if (max == 1 && !strstr(kcontrol->id.name, " Volume")) uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; else uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = max; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_volsw_ext); /** * snd_soc_info_multi_ext - external single mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a single external mixer control. * that accepts multiple input. * * Returns 0 for success. */ int snd_soc_info_multi_ext(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_multi_mixer_control *mc = (struct soc_multi_mixer_control *)kcontrol->private_value; int platform_max; if (!mc->platform_max) mc->platform_max = mc->max; platform_max = mc->platform_max; if (platform_max == 1 && !strnstr(kcontrol->id.name, " Volume", 30)) uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; else uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = mc->count; uinfo->value.integer.min = 0; uinfo->value.integer.max = platform_max; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_multi_ext); /** * snd_soc_info_volsw - single mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a single mixer control, or a double * mixer control that spans 2 registers. * * Returns 0 for success. 
*/ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int platform_max; if (!mc->platform_max) mc->platform_max = mc->max; platform_max = mc->platform_max; if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume")) uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; else uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = platform_max; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_volsw); /** * snd_soc_get_volsw - single mixer get callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to get the value of a single mixer control, or a double mixer * control that spans 2 registers. * * Returns 0 for success. */ int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int reg = mc->reg; unsigned int reg2 = mc->rreg; unsigned int shift = mc->shift; unsigned int rshift = mc->rshift; int max = mc->max; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; ucontrol->value.integer.value[0] = (snd_soc_read(codec, reg) >> shift) & mask; if (invert) ucontrol->value.integer.value[0] = max - ucontrol->value.integer.value[0]; if (snd_soc_volsw_is_stereo(mc)) { if (reg == reg2) ucontrol->value.integer.value[1] = (snd_soc_read(codec, reg) >> rshift) & mask; else ucontrol->value.integer.value[1] = (snd_soc_read(codec, reg2) >> shift) & mask; if (invert) ucontrol->value.integer.value[1] = max - ucontrol->value.integer.value[1]; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_volsw); /** * snd_soc_put_volsw - single mixer put callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to set the value of a 
single mixer control, or a double mixer
 * control that spans 2 registers.
 *
 * Returns 0 for success.
 */
int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int reg2 = mc->rreg;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;
	int max = mc->max;
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	int err;
	bool type_2r = 0;	/* set when the right channel needs its own write */
	unsigned int val2 = 0;
	unsigned int val, val_mask;

	val = (ucontrol->value.integer.value[0] & mask);
	if (invert)
		val = max - val;
	val_mask = mask << shift;
	val = val << shift;
	if (snd_soc_volsw_is_stereo(mc)) {
		val2 = (ucontrol->value.integer.value[1] & mask);
		if (invert)
			val2 = max - val2;
		if (reg == reg2) {
			/* both channels share one register: merge the fields */
			val_mask |= mask << rshift;
			val |= val2 << rshift;
		} else {
			/* separate register, same shift: second write below */
			val2 = val2 << shift;
			type_2r = 1;
		}
	}
	err = snd_soc_update_bits_locked(codec, reg, val_mask, val);
	if (err < 0)
		return err;

	if (type_2r)
		err = snd_soc_update_bits_locked(codec, reg2, val_mask,
			val2);

	return err;
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw);

/**
 * snd_soc_info_volsw_s8 - signed mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about a signed mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int platform_max;
	int min = mc->min;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;

	/* the platform limit defaults to the control's own maximum */
	if (!mc->platform_max)
		mc->platform_max = mc->max;
	platform_max = mc->platform_max;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	/* one value per distinct bit field */
	uinfo->count = shift == rshift ? 1 : 2;
	/* values are rebased by -min so userspace sees 0..platform_max-min */
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = platform_max - min;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw_s8);

/**
 * snd_soc_get_volsw_s8 - signed mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a signed mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;
	int min = mc->min;
	int val = snd_soc_read(codec, reg);

	/* sign-extend each 8-bit field, then rebase by -min */
	ucontrol->value.integer.value[0] =
		((signed char)((val >> shift) & 0xff))-min;
	if (shift != rshift)
		ucontrol->value.integer.value[1] =
			((signed char)((val >> rshift) & 0xff))-min;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw_s8);

/**
 * snd_soc_put_volsw_s8 - signed mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a signed mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;
	int min = mc->min;
	unsigned int val, val2, val_mask;

	/* re-apply the min offset and pack into the register fields */
	val = ((ucontrol->value.integer.value[0]+min) & 0xff) << shift;
	val_mask = 0xff << shift;
	if (shift != rshift) {
		val2 = (ucontrol->value.integer.value[1]+min) & 0xff;
		val |= val2 << rshift;
		val_mask |= 0xff << rshift;
	}
	return snd_soc_update_bits_locked(codec, reg, val_mask, val);
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_s8);

/**
 * snd_soc_limit_volume - Set new limit to an existing volume control.
 *
 * @codec: where to look for the control
 * @name: Name of the control
 * @max: new maximum limit
 *
 * Return 0 for success, else error.
 */
int snd_soc_limit_volume(struct snd_soc_codec *codec,
	const char *name, int max)
{
	struct snd_card *card = codec->card->snd_card;
	struct snd_kcontrol *kctl;
	struct soc_mixer_control *mc;
	int found = 0;
	int ret = -EINVAL;

	/* Sanity check for name and max */
	if (unlikely(!name || max <= 0))
		return -EINVAL;

	/* NOTE(review): the control list is walked without card->controls
	 * locking here — presumably callers run before the card is live;
	 * confirm against callers. */
	list_for_each_entry(kctl, &card->controls, list) {
		if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) {
			found = 1;
			break;
		}
	}
	if (found) {
		mc = (struct soc_mixer_control *)kctl->private_value;
		/* only ever lower the limit, never extend past hardware max */
		if (max <= mc->max) {
			mc->platform_max = max;
			ret = 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_limit_volume);

/**
 * snd_soc_info_volsw_2r_sx - double with tlv and variable data size
 * mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Returns 0 for success.
 */
int snd_soc_info_volsw_2r_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_info *uinfo)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int max = mc->max;
	int min = mc->min;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = max-min;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_info_volsw_2r_sx);

/**
 * snd_soc_get_volsw_2r_sx - double with tlv and variable data size
 * mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Returns 0 for success.
 */
int snd_soc_get_volsw_2r_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	/* here mc->shift encodes the field WIDTH in bits, not a position */
	unsigned int mask = (1<<mc->shift)-1;
	int min = mc->min;
	int val = snd_soc_read(codec, mc->reg) & mask;
	int valr = snd_soc_read(codec, mc->rreg) & mask;

	ucontrol->value.integer.value[0] = ((val & 0xff)-min) & mask;
	ucontrol->value.integer.value[1] = ((valr & 0xff)-min) & mask;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw_2r_sx);

/**
 * snd_soc_put_volsw_2r_sx - double with tlv and variable data size
 * mixer put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Returns 0 for success.
 */
int snd_soc_put_volsw_2r_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	/* mc->shift is the field width here as well */
	unsigned int mask = (1<<mc->shift)-1;
	int min = mc->min;
	int ret;
	unsigned int val, valr, oval, ovalr;

	val = ((ucontrol->value.integer.value[0]+min) & 0xff);
	val &= mask;
	valr = ((ucontrol->value.integer.value[1]+min) & 0xff);
	valr &= mask;

	oval = snd_soc_read(codec, mc->reg) & mask;
	ovalr = snd_soc_read(codec, mc->rreg) & mask;

	/* only touch the hardware when the value actually changes */
	ret = 0;
	if (oval != val) {
		ret = snd_soc_write(codec, mc->reg, val);
		if (ret < 0)
			return ret;
	}
	if (ovalr != valr) {
		ret = snd_soc_write(codec, mc->rreg, valr);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_2r_sx);

/**
 * snd_soc_dai_set_sysclk - configure DAI system or master clock.
 * @dai: DAI
 * @clk_id: DAI specific clock ID
 * @freq: new clock frequency in Hz
 * @dir: new clock direction - input/output.
 *
 * Configures the DAI master (MCLK) or system (SYSCLK) clocking.
 */
int snd_soc_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id,
	unsigned int freq, int dir)
{
	/* prefer the DAI's own op; fall back to the codec-wide hook
	 * (clock source 0) when the DAI driver has none */
	if (dai->driver && dai->driver->ops->set_sysclk)
		return dai->driver->ops->set_sysclk(dai, clk_id, freq, dir);
	else if (dai->codec && dai->codec->driver->set_sysclk)
		return dai->codec->driver->set_sysclk(dai->codec, clk_id, 0,
						      freq, dir);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_sysclk);

/**
 * snd_soc_codec_set_sysclk - configure CODEC system or master clock.
 * @codec: CODEC
 * @clk_id: DAI specific clock ID
 * @source: Source for the clock
 * @freq: new clock frequency in Hz
 * @dir: new clock direction - input/output.
 *
 * Configures the CODEC master (MCLK) or system (SYSCLK) clocking.
 */
int snd_soc_codec_set_sysclk(struct snd_soc_codec *codec, int clk_id,
			     int source, unsigned int freq, int dir)
{
	if (codec->driver->set_sysclk)
		return codec->driver->set_sysclk(codec, clk_id, source,
						 freq, dir);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_set_sysclk);

/**
 * snd_soc_dai_set_clkdiv - configure DAI clock dividers.
 * @dai: DAI
 * @div_id: DAI specific clock divider ID
 * @div: new clock divisor.
 *
 * Configures the clock dividers. This is used to derive the best DAI bit and
 * frame clocks from the system or master clock. It's best to set the DAI bit
 * and frame clocks as low as possible to save system power.
 */
int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai,
	int div_id, int div)
{
	if (dai->driver && dai->driver->ops->set_clkdiv)
		return dai->driver->ops->set_clkdiv(dai, div_id, div);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_clkdiv);

/**
 * snd_soc_dai_set_pll - configure DAI PLL.
 * @dai: DAI
 * @pll_id: DAI specific PLL ID
 * @source: DAI specific source for the PLL
 * @freq_in: PLL input clock frequency in Hz
 * @freq_out: requested PLL output clock frequency in Hz
 *
 * Configures and enables PLL to generate output clock based on input clock.
 */
int snd_soc_dai_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
	unsigned int freq_in, unsigned int freq_out)
{
	/* DAI-level op wins; otherwise try the codec-wide PLL hook */
	if (dai->driver && dai->driver->ops->set_pll)
		return dai->driver->ops->set_pll(dai, pll_id, source,
					 freq_in, freq_out);
	else if (dai->codec && dai->codec->driver->set_pll)
		return dai->codec->driver->set_pll(dai->codec, pll_id, source,
						   freq_in, freq_out);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_pll);

/**
 * snd_soc_codec_set_pll - configure codec PLL.
 * @codec: CODEC
 * @pll_id: DAI specific PLL ID
 * @source: DAI specific source for the PLL
 * @freq_in: PLL input clock frequency in Hz
 * @freq_out: requested PLL output clock frequency in Hz
 *
 * Configures and enables PLL to generate output clock based on input clock.
 */
int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
			  unsigned int freq_in, unsigned int freq_out)
{
	if (codec->driver->set_pll)
		return codec->driver->set_pll(codec, pll_id, source,
					      freq_in, freq_out);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_codec_set_pll);

/**
 * snd_soc_dai_set_fmt - configure DAI hardware audio format.
 * @dai: DAI
 * @fmt: SND_SOC_DAIFMT_ format value.
 *
 * Configures the DAI hardware format and clocking.
 */
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	if (dai->driver && dai->driver->ops->set_fmt)
		return dai->driver->ops->set_fmt(dai, fmt);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt);

/**
 * snd_soc_dai_set_tdm_slot - configure DAI TDM.
 * @dai: DAI
 * @tx_mask: bitmask representing active TX slots.
 * @rx_mask: bitmask representing active RX slots.
 * @slots: Number of slots in use.
 * @slot_width: Width in bits for each slot.
 *
 * Configures a DAI for TDM operation. Both mask and slots are codec and DAI
 * specific.
 */
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
	unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
	if (dai->driver && dai->driver->ops->set_tdm_slot)
		return dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask,
				slots, slot_width);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot);

/**
 * snd_soc_dai_set_channel_map - configure DAI audio channel map
 * @dai: DAI
 * @tx_num: how many TX channels
 * @tx_slot: pointer to an array which imply the TX slot number channel
 *           0~num-1 uses
 * @rx_num: how many RX channels
 * @rx_slot: pointer to an array which imply the RX slot number channel
 *           0~num-1 uses
 *
 * configure the relationship between channel number and TDM slot number.
 */
int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
	unsigned int tx_num, unsigned int *tx_slot,
	unsigned int rx_num, unsigned int *rx_slot)
{
	if (dai->driver && dai->driver->ops->set_channel_map)
		return dai->driver->ops->set_channel_map(dai, tx_num, tx_slot,
			rx_num, rx_slot);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_channel_map);

/**
 * snd_soc_dai_get_channel_map - get DAI audio channel map
 * @dai: DAI
 * @tx_num: how many TX channels
 * @tx_slot: pointer to an array which imply the TX slot number channel
 *           0~num-1 uses
 * @rx_num: how many RX channels
 * @rx_slot: pointer to an array which imply the RX slot number channel
 *           0~num-1 uses
 *
 * query the relationship between channel number and TDM slot number.
 */
int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
	unsigned int *tx_num, unsigned int *tx_slot,
	unsigned int *rx_num, unsigned int *rx_slot)
{
	if (dai->driver && dai->driver->ops->get_channel_map)
		return dai->driver->ops->get_channel_map(dai, tx_num, tx_slot,
			rx_num, rx_slot);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_get_channel_map);

/**
 * snd_soc_dai_set_tristate - tristate the DAI pins.
 * @dai: DAI
 * @tristate: tristate enable
 *
 * Tristates the DAI so that others can use it.
 */
int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate)
{
	if (dai->driver && dai->driver->ops->set_tristate)
		return dai->driver->ops->set_tristate(dai, tristate);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);

/**
 * snd_soc_dai_digital_mute - mute/unmute the DAI DAC.
 * @dai: DAI
 * @mute: mute enable
 *
 * Mutes the DAI DAC.
 */
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute)
{
	if (dai->driver && dai->driver->ops->digital_mute)
		return dai->driver->ops->digital_mute(dai, mute);
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute);

/**
 * snd_soc_register_card - Register a card with the ASoC core
 *
 * @card: Card to register
 *
 */
int snd_soc_register_card(struct snd_soc_card *card)
{
	int i;
	int ret = 0;

	if (!card->name || !card->dev)
		return -EINVAL;

	/* validate every DAI link before allocating anything */
	for (i = 0; i < card->num_links; i++) {
		struct snd_soc_dai_link *link = &card->dai_link[i];

		/*
		 * Codec must be specified by 1 of name or OF node,
		 * not both or neither.
		 */
		if (!!link->codec_name == !!link->codec_of_node) {
			dev_err(card->dev,
				"Neither/both codec name/of_node are set for %s\n",
				link->name);
			return -EINVAL;
		}
		/*
		 * Platform may be specified by either name or OF node, but
		 * can be left unspecified, and a dummy platform will be used.
		 */
		if (link->platform_name && link->platform_of_node) {
			dev_err(card->dev,
				"Both platform name/of_node are set for %s\n",
				link->name);
			return -EINVAL;
		}
		/*
		 * CPU DAI must be specified by 1 of name or OF node,
		 * not both or neither.
		 */
		if (!!link->cpu_dai_name == !!link->cpu_dai_of_node) {
			dev_err(card->dev,
				"Neither/both cpu_dai name/of_node are set for %s\n",
				link->name);
			return -EINVAL;
		}
	}

	dev_set_drvdata(card->dev, card);

	snd_soc_initialize_card_lists(card);

	soc_init_card_debugfs(card);

	/* one runtime slot per DAI link plus one per aux device; the aux
	 * slots are aliased at the tail of the same allocation */
	card->rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime) *
			    (card->num_links + card->num_aux_devs),
			    GFP_KERNEL);
	if (card->rtd == NULL)
		return -ENOMEM;
	card->num_rtd = 0;
	card->rtd_aux = &card->rtd[card->num_links];

	for (i = 0; i < card->num_links; i++)
		card->rtd[i].dai_link = &card->dai_link[i];

	INIT_LIST_HEAD(&card->list);
	INIT_LIST_HEAD(&card->dapm_dirty);
	card->instantiated = 0;
	mutex_init(&card->mutex);
	mutex_init(&card->dpcm_mutex);
	mutex_init(&card->dapm_power_mutex);

	ret = snd_soc_instantiate_card(card);
	if (ret != 0) {
		soc_cleanup_card_debugfs(card);
		/* NOTE(review): the NULL guard is redundant, kfree(NULL)
		 * is a no-op */
		if (card->rtd)
			kfree(card->rtd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_register_card);

/**
 * snd_soc_unregister_card - Unregister a card with the ASoC core
 *
 * @card: Card to unregister
 *
 */
int snd_soc_unregister_card(struct snd_soc_card *card)
{
	/* resources only exist once instantiation succeeded */
	if (card->instantiated)
		soc_cleanup_card_resources(card);
	dev_dbg(card->dev, "Unregistered card '%s'\n", card->name);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_card);

/*
 * Simplify DAI link configuration by removing ".-1" from device names
 * and sanitizing names.
 */
static char *fmt_single_name(struct device *dev, int *id)
{
	char *found, name[NAME_SIZE];
	int id1, id2;

	if (dev_name(dev) == NULL)
		return NULL;

	strlcpy(name, dev_name(dev), NAME_SIZE);

	/* are we a "%s.%d" name (platform and SPI components) */
	found = strstr(name, dev->driver->name);
	if (found) {
		/* get ID */
		if (sscanf(&found[strlen(dev->driver->name)], ".%d", id) == 1) {

			/* discard ID from name if ID == -1 */
			if (*id == -1)
				found[strlen(dev->driver->name)] = '\0';
		}
	} else {
		/* I2C component devices are named "bus-addr" */
		if (sscanf(name, "%x-%x", &id1, &id2) == 2) {
			char tmp[NAME_SIZE];

			/* create unique ID number from I2C addr and bus */
			*id = ((id1 & 0xffff) << 16) + id2;

			/* sanitize component name for DAI link creation */
			snprintf(tmp, NAME_SIZE, "%s.%s", dev->driver->name,
				 name);
			strlcpy(name, tmp, NAME_SIZE);
		} else
			*id = 0;
	}

	/* caller owns the returned string and must kfree() it */
	return kstrdup(name, GFP_KERNEL);
}

/*
 * Simplify DAI link naming for single devices with multiple DAIs by removing
 * any ".-1" and using the DAI name (instead of device name).
 */
static inline char *fmt_multiple_name(struct device *dev,
		struct snd_soc_dai_driver *dai_drv)
{
	if (dai_drv->name == NULL) {
		printk(KERN_ERR "asoc: error - multiple DAI %s registered with no name\n",
				dev_name(dev));
		return NULL;
	}

	/* caller owns the returned string and must kfree() it */
	return kstrdup(dai_drv->name, GFP_KERNEL);
}

/**
 * snd_soc_register_dai - Register a DAI with the ASoC core
 *
 * @dai: DAI to register
 */
int snd_soc_register_dai(struct device *dev,
		struct snd_soc_dai_driver *dai_drv)
{
	struct snd_soc_dai *dai;

	dev_dbg(dev, "dai register %s\n", dev_name(dev));

	dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL);
	if (dai == NULL)
		return -ENOMEM;

	/* create DAI component name */
	dai->name = fmt_single_name(dev, &dai->id);
	if (dai->name == NULL) {
		kfree(dai);
		return -ENOMEM;
	}

	dai->dev = dev;
	dai->driver = dai_drv;
	/* guarantee ops is never NULL so callers can skip the check */
	if (!dai->driver->ops)
		dai->driver->ops = &null_dai_ops;

	mutex_lock(&client_mutex);
	list_add(&dai->list, &dai_list);
	mutex_unlock(&client_mutex);

	pr_debug("Registered DAI '%s'\n", dai->name);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_register_dai);

/**
 * snd_soc_unregister_dai - Unregister a DAI from the ASoC core
 *
 * @dai: DAI to unregister
 */
void snd_soc_unregister_dai(struct device *dev)
{
	struct snd_soc_dai *dai;

	/* NOTE(review): the lookup walks dai_list without client_mutex;
	 * only the removal is locked — confirm callers serialize this */
	list_for_each_entry(dai, &dai_list, list) {
		if (dev == dai->dev)
			goto found;
	}
	return;

found:
	mutex_lock(&client_mutex);
	list_del(&dai->list);
	mutex_unlock(&client_mutex);

	pr_debug("Unregistered DAI '%s'\n", dai->name);
	kfree(dai->name);
	kfree(dai);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_dai);

/**
 * snd_soc_register_dais - Register multiple DAIs with the ASoC core
 *
 * @dai: Array of DAIs to register
 * @count: Number of DAIs
 */
int snd_soc_register_dais(struct device *dev,
		struct snd_soc_dai_driver *dai_drv, size_t count)
{
	struct snd_soc_dai *dai;
	int i, ret = 0;

	/* NOTE(review): %Zu is the deprecated GNU spelling of %zu */
	dev_dbg(dev, "dai register %s #%Zu\n", dev_name(dev), count);

	for (i = 0; i < count; i++) {

		dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL);
		if (dai == NULL) {
			ret = -ENOMEM;
			goto err;
		}

		/* create DAI component name */
		dai->name = fmt_multiple_name(dev, &dai_drv[i]);
		if (dai->name == NULL) {
			kfree(dai);
			ret = -EINVAL;
			goto err;
		}

		dai->dev = dev;
		dai->driver = &dai_drv[i];
		if (dai->driver->id)
			dai->id = dai->driver->id;
		else
			dai->id = i;
		if (!dai->driver->ops)
			dai->driver->ops = &null_dai_ops;

		mutex_lock(&client_mutex);
		list_add(&dai->list, &dai_list);
		mutex_unlock(&client_mutex);

		pr_debug("Registered DAI '%s'\n", dai->name);
	}

	return 0;

err:
	/* roll back the DAIs registered so far for this device */
	for (i--; i >= 0; i--)
		snd_soc_unregister_dai(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_register_dais);

/**
 * snd_soc_unregister_dais - Unregister multiple DAIs from the ASoC core
 *
 * @dai: Array of DAIs to unregister
 * @count: Number of DAIs
 */
void snd_soc_unregister_dais(struct device *dev, size_t count)
{
	int i;

	for (i = 0; i < count; i++)
		snd_soc_unregister_dai(dev);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_dais);

/**
 * snd_soc_register_platform - Register a platform with the ASoC core
 *
 * @platform: platform to register
 */
int snd_soc_register_platform(struct device *dev,
		struct snd_soc_platform_driver *platform_drv)
{
	struct snd_soc_platform *platform;

	dev_dbg(dev, "platform register %s\n", dev_name(dev));

	platform = kzalloc(sizeof(struct snd_soc_platform), GFP_KERNEL);
	if (platform == NULL)
		return -ENOMEM;

	/* create platform component name */
	platform->name = fmt_single_name(dev, &platform->id);
	if (platform->name == NULL) {
		kfree(platform);
		return -ENOMEM;
	}

	platform->dev = dev;
	platform->driver = platform_drv;
	platform->dapm.dev = dev;
	platform->dapm.platform = platform;
	platform->dapm.stream_event = platform_drv->stream_event;

	mutex_lock(&client_mutex);
	list_add(&platform->list, &platform_list);
	mutex_unlock(&client_mutex);

	pr_debug("Registered platform '%s'\n", platform->name);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_register_platform);

/**
 * snd_soc_unregister_platform - Unregister a platform from the ASoC core
 *
 * @platform: platform to unregister
 */
void snd_soc_unregister_platform(struct device *dev)
{
	struct snd_soc_platform
*platform; list_for_each_entry(platform, &platform_list, list) { if (dev == platform->dev) goto found; } return; found: mutex_lock(&client_mutex); list_del(&platform->list); mutex_unlock(&client_mutex); pr_debug("Unregistered platform '%s'\n", platform->name); kfree(platform->name); kfree(platform); } EXPORT_SYMBOL_GPL(snd_soc_unregister_platform); static u64 codec_format_map[] = { SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE, SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE, SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE, SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_U24_BE, SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_U32_BE, SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_U24_3BE, SNDRV_PCM_FMTBIT_U24_3LE | SNDRV_PCM_FMTBIT_U24_3BE, SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE, SNDRV_PCM_FMTBIT_U20_3LE | SNDRV_PCM_FMTBIT_U20_3BE, SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE, SNDRV_PCM_FMTBIT_U18_3LE | SNDRV_PCM_FMTBIT_U18_3BE, SNDRV_PCM_FMTBIT_FLOAT_LE | SNDRV_PCM_FMTBIT_FLOAT_BE, SNDRV_PCM_FMTBIT_FLOAT64_LE | SNDRV_PCM_FMTBIT_FLOAT64_BE, SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE, }; /* Fix up the DAI formats for endianness: codecs don't actually see * the endianness of the data but we're using the CPU format * definitions which do need to include endianness so we ensure that * codec DAIs always have both big and little endian variants set. 
 */
static void fixup_codec_formats(struct snd_soc_pcm_stream *stream)
{
	int i;

	/* if either endianness variant of a pair is set, set both */
	for (i = 0; i < ARRAY_SIZE(codec_format_map); i++)
		if (stream->formats & codec_format_map[i])
			stream->formats |= codec_format_map[i];
}

/**
 * snd_soc_card_change_online_state - Mark if soc card is online/offline
 *
 * @soc_card : soc_card to mark
 */
void snd_soc_card_change_online_state(struct snd_soc_card *soc_card, int online)
{
	snd_card_change_online_state(soc_card->snd_card, online);
}
EXPORT_SYMBOL(snd_soc_card_change_online_state);

/**
 * snd_soc_register_codec - Register a codec with the ASoC core
 *
 * @codec: codec to register
 */
int snd_soc_register_codec(struct device *dev,
			   const struct snd_soc_codec_driver *codec_drv,
			   struct snd_soc_dai_driver *dai_drv,
			   int num_dai)
{
	size_t reg_size;
	struct snd_soc_codec *codec;
	int ret, i;

	dev_dbg(dev, "codec register %s\n", dev_name(dev));

	codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
	if (codec == NULL)
		return -ENOMEM;

	/* create CODEC component name */
	codec->name = fmt_single_name(dev, &codec->id);
	if (codec->name == NULL) {
		kfree(codec);
		return -ENOMEM;
	}

	if (codec_drv->compress_type)
		codec->compress_type = codec_drv->compress_type;
	else
		codec->compress_type = SND_SOC_FLAT_COMPRESSION;

	/* cache the driver's register accessors on the codec itself */
	codec->write = codec_drv->write;
	codec->read = codec_drv->read;
	codec->volatile_register = codec_drv->volatile_register;
	codec->readable_register = codec_drv->readable_register;
	codec->writable_register = codec_drv->writable_register;
	codec->dapm.bias_level = SND_SOC_BIAS_OFF;
	codec->dapm.dev = dev;
	codec->dapm.codec = codec;
	codec->dapm.seq_notifier = codec_drv->seq_notifier;
	codec->dapm.stream_event = codec_drv->stream_event;
	codec->dev = dev;
	codec->driver = codec_drv;
	codec->num_dai = num_dai;
	mutex_init(&codec->mutex);

	/* allocate CODEC register cache */
	if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
		reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
		codec->reg_size = reg_size;
		/* it is necessary to make a copy of the default register cache
		 * because in the case of using a compression type that requires
		 * the default register cache to be marked as __devinitconst the
		 * kernel might have freed the array by the time we initialize
		 * the cache.
		 */
		if (codec_drv->reg_cache_default) {
			codec->reg_def_copy = kmemdup(codec_drv->reg_cache_default,
						      reg_size, GFP_KERNEL);
			if (!codec->reg_def_copy) {
				ret = -ENOMEM;
				goto fail;
			}
		}
	}

	/* fall back to the table-driven default accessors when the driver
	 * supplies an access map but no custom callbacks */
	if (codec_drv->reg_access_size && codec_drv->reg_access_default) {
		if (!codec->volatile_register)
			codec->volatile_register = snd_soc_default_volatile_register;
		if (!codec->readable_register)
			codec->readable_register = snd_soc_default_readable_register;
		if (!codec->writable_register)
			codec->writable_register = snd_soc_default_writable_register;
	}

	for (i = 0; i < num_dai; i++) {
		fixup_codec_formats(&dai_drv[i].playback);
		fixup_codec_formats(&dai_drv[i].capture);
	}

	/* register any DAIs */
	if (num_dai) {
		ret = snd_soc_register_dais(dev, dai_drv, num_dai);
		if (ret < 0)
			goto fail;
	}

	mutex_lock(&client_mutex);
	list_add(&codec->list, &codec_list);
	mutex_unlock(&client_mutex);

	pr_debug("Registered codec '%s'\n", codec->name);
	return 0;

fail:
	kfree(codec->reg_def_copy);
	codec->reg_def_copy = NULL;
	kfree(codec->name);
	kfree(codec);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_register_codec);

/**
 * snd_soc_unregister_codec - Unregister a codec from the ASoC core
 *
 * @codec: codec to unregister
 */
void snd_soc_unregister_codec(struct device *dev)
{
	struct snd_soc_codec *codec;
	int i;

	/* NOTE(review): lookup is unlocked, only removal takes client_mutex;
	 * same pattern as snd_soc_unregister_dai */
	list_for_each_entry(codec, &codec_list, list) {
		if (dev == codec->dev)
			goto found;
	}
	return;

found:
	if (codec->num_dai)
		for (i = 0; i < codec->num_dai; i++)
			snd_soc_unregister_dai(dev);

	mutex_lock(&client_mutex);
	list_del(&codec->list);
	mutex_unlock(&client_mutex);

	pr_debug("Unregistered codec '%s'\n", codec->name);

	snd_soc_cache_exit(codec);
	kfree(codec->reg_def_copy);
	kfree(codec->name);
	kfree(codec);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_codec);

/* Retrieve a card's name from device tree */
int
snd_soc_of_parse_card_name(struct snd_soc_card *card,
			   const char *propname)
{
	struct device_node *np = card->dev->of_node;
	int ret;

	ret = of_property_read_string_index(np, propname, 0, &card->name);
	/*
	 * EINVAL means the property does not exist. This is fine providing
	 * card->name was previously set, which is checked later in
	 * snd_soc_register_card.
	 */
	if (ret < 0 && ret != -EINVAL) {
		dev_err(card->dev,
			"Property '%s' could not be read: %d\n",
			propname, ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_card_name);

/* Parse a DT string list of alternating sink/source names into the card's
 * DAPM route table; the table is devm-allocated, so it is freed with the
 * card device and never explicitly on the error paths below. */
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
				   const char *propname)
{
	struct device_node *np = card->dev->of_node;
	int num_routes;
	struct snd_soc_dapm_route *routes;
	int i, ret;

	/* routes come in sink/source pairs, so the count must be even */
	num_routes = of_property_count_strings(np, propname);
	if (num_routes < 0 || num_routes & 1) {
		dev_err(card->dev,
			"Property '%s' does not exist or its length is not even\n",
			propname);
		return -EINVAL;
	}
	num_routes /= 2;
	if (!num_routes) {
		dev_err(card->dev, "Property '%s's length is zero\n",
			propname);
		return -EINVAL;
	}

	routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes),
			      GFP_KERNEL);
	if (!routes) {
		dev_err(card->dev,
			"Could not allocate DAPM route table\n");
		return -EINVAL;
	}

	for (i = 0; i < num_routes; i++) {
		ret = of_property_read_string_index(np, propname,
			2 * i, &routes[i].sink);
		if (ret) {
			dev_err(card->dev,
				"Property '%s' index %d could not be read: %d\n",
				propname, 2 * i, ret);
			return -EINVAL;
		}
		ret = of_property_read_string_index(np, propname,
			(2 * i) + 1, &routes[i].source);
		if (ret) {
			dev_err(card->dev,
				"Property '%s' index %d could not be read: %d\n",
				propname, (2 * i) + 1, ret);
			return -EINVAL;
		}
	}

	card->num_dapm_routes = num_routes;
	card->dapm_routes = routes;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_routing);

/* Module init: optional debugfs tree, utility init, then the platform
 * driver registration that makes soc-audio devices bindable. */
static int __init snd_soc_init(void)
{
#ifdef CONFIG_DEBUG_FS
	snd_soc_debugfs_root = debugfs_create_dir("asoc", NULL);
	if (IS_ERR(snd_soc_debugfs_root) || !snd_soc_debugfs_root) {
		printk(KERN_WARNING
		       "ASoC: Failed to create debugfs directory\n");
		snd_soc_debugfs_root = NULL;
	}

	/* debugfs files are best-effort: failure only warns */
	if (!debugfs_create_file("codecs", 0444, snd_soc_debugfs_root, NULL,
				 &codec_list_fops))
		pr_warn("ASoC: Failed to create CODEC list debugfs file\n");

	if (!debugfs_create_file("dais", 0444, snd_soc_debugfs_root, NULL,
				 &dai_list_fops))
		pr_warn("ASoC: Failed to create DAI list debugfs file\n");

	if (!debugfs_create_file("platforms", 0444, snd_soc_debugfs_root, NULL,
				 &platform_list_fops))
		pr_warn("ASoC: Failed to create platform list debugfs file\n");
#endif

	snd_soc_util_init();

	return platform_driver_register(&soc_driver);
}
module_init(snd_soc_init);

/* Module exit: mirror image of snd_soc_init */
static void __exit snd_soc_exit(void)
{
	snd_soc_util_exit();

#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(snd_soc_debugfs_root);
#endif
	platform_driver_unregister(&soc_driver);
}
module_exit(snd_soc_exit);

/* Module information */
MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk");
MODULE_DESCRIPTION("ALSA SoC Core");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:soc-audio");
gpl-2.0
MarvinCorro/linux-cmps107
drivers/usb/host/ohci-pxa27x.c
48
17653
/* * OHCI HCD (Host Controller Driver) for USB. * * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net> * (C) Copyright 2002 Hewlett-Packard Company * * Bus Glue for pxa27x * * Written by Christopher Hoover <ch@hpl.hp.com> * Based on fragments of previous driver by Russell King et al. * * Modified for LH7A404 from ohci-sa1111.c * by Durgesh Pattamatta <pattamattad@sharpsec.com> * * Modified for pxa27x from ohci-lh7a404.c * by Nick Bane <nick@cecomputing.co.uk> 26-8-2004 * * This file is licenced under the GPL. */ #include <linux/clk.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_platform.h> #include <linux/of_gpio.h> #include <linux/platform_data/usb-ohci-pxa27x.h> #include <linux/platform_data/usb-pxa3xx-ulpi.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/signal.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/usb/otg.h> #include <mach/hardware.h> #include "ohci.h" #define DRIVER_DESC "OHCI PXA27x/PXA3x driver" /* * UHC: USB Host Controller (OHCI-like) register definitions */ #define UHCREV (0x0000) /* UHC HCI Spec Revision */ #define UHCHCON (0x0004) /* UHC Host Control Register */ #define UHCCOMS (0x0008) /* UHC Command Status Register */ #define UHCINTS (0x000C) /* UHC Interrupt Status Register */ #define UHCINTE (0x0010) /* UHC Interrupt Enable */ #define UHCINTD (0x0014) /* UHC Interrupt Disable */ #define UHCHCCA (0x0018) /* UHC Host Controller Comm. 
Area */ #define UHCPCED (0x001C) /* UHC Period Current Endpt Descr */ #define UHCCHED (0x0020) /* UHC Control Head Endpt Descr */ #define UHCCCED (0x0024) /* UHC Control Current Endpt Descr */ #define UHCBHED (0x0028) /* UHC Bulk Head Endpt Descr */ #define UHCBCED (0x002C) /* UHC Bulk Current Endpt Descr */ #define UHCDHEAD (0x0030) /* UHC Done Head */ #define UHCFMI (0x0034) /* UHC Frame Interval */ #define UHCFMR (0x0038) /* UHC Frame Remaining */ #define UHCFMN (0x003C) /* UHC Frame Number */ #define UHCPERS (0x0040) /* UHC Periodic Start */ #define UHCLS (0x0044) /* UHC Low Speed Threshold */ #define UHCRHDA (0x0048) /* UHC Root Hub Descriptor A */ #define UHCRHDA_NOCP (1 << 12) /* No over current protection */ #define UHCRHDA_OCPM (1 << 11) /* Over Current Protection Mode */ #define UHCRHDA_POTPGT(x) \ (((x) & 0xff) << 24) /* Power On To Power Good Time */ #define UHCRHDB (0x004C) /* UHC Root Hub Descriptor B */ #define UHCRHS (0x0050) /* UHC Root Hub Status */ #define UHCRHPS1 (0x0054) /* UHC Root Hub Port 1 Status */ #define UHCRHPS2 (0x0058) /* UHC Root Hub Port 2 Status */ #define UHCRHPS3 (0x005C) /* UHC Root Hub Port 3 Status */ #define UHCSTAT (0x0060) /* UHC Status Register */ #define UHCSTAT_UPS3 (1 << 16) /* USB Power Sense Port3 */ #define UHCSTAT_SBMAI (1 << 15) /* System Bus Master Abort Interrupt*/ #define UHCSTAT_SBTAI (1 << 14) /* System Bus Target Abort Interrupt*/ #define UHCSTAT_UPRI (1 << 13) /* USB Port Resume Interrupt */ #define UHCSTAT_UPS2 (1 << 12) /* USB Power Sense Port 2 */ #define UHCSTAT_UPS1 (1 << 11) /* USB Power Sense Port 1 */ #define UHCSTAT_HTA (1 << 10) /* HCI Target Abort */ #define UHCSTAT_HBA (1 << 8) /* HCI Buffer Active */ #define UHCSTAT_RWUE (1 << 7) /* HCI Remote Wake Up Event */ #define UHCHR (0x0064) /* UHC Reset Register */ #define UHCHR_SSEP3 (1 << 11) /* Sleep Standby Enable for Port3 */ #define UHCHR_SSEP2 (1 << 10) /* Sleep Standby Enable for Port2 */ #define UHCHR_SSEP1 (1 << 9) /* Sleep Standby Enable for 
Port1 */ #define UHCHR_PCPL (1 << 7) /* Power control polarity low */ #define UHCHR_PSPL (1 << 6) /* Power sense polarity low */ #define UHCHR_SSE (1 << 5) /* Sleep Standby Enable */ #define UHCHR_UIT (1 << 4) /* USB Interrupt Test */ #define UHCHR_SSDC (1 << 3) /* Simulation Scale Down Clock */ #define UHCHR_CGR (1 << 2) /* Clock Generation Reset */ #define UHCHR_FHR (1 << 1) /* Force Host Controller Reset */ #define UHCHR_FSBIR (1 << 0) /* Force System Bus Iface Reset */ #define UHCHIE (0x0068) /* UHC Interrupt Enable Register*/ #define UHCHIE_UPS3IE (1 << 14) /* Power Sense Port3 IntEn */ #define UHCHIE_UPRIE (1 << 13) /* Port Resume IntEn */ #define UHCHIE_UPS2IE (1 << 12) /* Power Sense Port2 IntEn */ #define UHCHIE_UPS1IE (1 << 11) /* Power Sense Port1 IntEn */ #define UHCHIE_TAIE (1 << 10) /* HCI Interface Transfer Abort Interrupt Enable*/ #define UHCHIE_HBAIE (1 << 8) /* HCI Buffer Active IntEn */ #define UHCHIE_RWIE (1 << 7) /* Remote Wake-up IntEn */ #define UHCHIT (0x006C) /* UHC Interrupt Test register */ #define PXA_UHC_MAX_PORTNUM 3 static const char hcd_name[] = "ohci-pxa27x"; static struct hc_driver __read_mostly ohci_pxa27x_hc_driver; struct pxa27x_ohci { struct clk *clk; void __iomem *mmio_base; struct regulator *vbus[3]; bool vbus_enabled[3]; }; #define to_pxa27x_ohci(hcd) (struct pxa27x_ohci *)(hcd_to_ohci(hcd)->priv) /* PMM_NPS_MODE -- PMM Non-power switching mode Ports are powered continuously. PMM_GLOBAL_MODE -- PMM global switching mode All ports are powered at the same time. PMM_PERPORT_MODE -- PMM per port switching mode Ports are powered individually. 
*/

/*
 * Program the root-hub power-switching mode (NPS / global / per-port)
 * into UHCRHDA/UHCRHDB.  Unknown modes fall back to non-power-switching.
 * Always returns 0.
 */
static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *pxa_ohci, int mode)
{
	uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
	uint32_t uhcrhdb = __raw_readl(pxa_ohci->mmio_base + UHCRHDB);

	switch (mode) {
	case PMM_NPS_MODE:
		uhcrhda |= RH_A_NPS;
		break;
	case PMM_GLOBAL_MODE:
		uhcrhda &= ~(RH_A_NPS & RH_A_PSM);
		break;
	case PMM_PERPORT_MODE:
		uhcrhda &= ~(RH_A_NPS);
		uhcrhda |= RH_A_PSM;

		/* Set port power control mask bits, only 3 ports. */
		uhcrhdb |= (0x7<<17);
		break;
	default:
		printk( KERN_ERR
			"Invalid mode %d, set to non-power switch mode.\n",
			mode );
		uhcrhda |= RH_A_NPS;
	}

	__raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
	__raw_writel(uhcrhdb, pxa_ohci->mmio_base + UHCRHDB);
	return 0;
}

/*
 * Enable/disable the VBUS regulator for one port (0-based index).
 * No-op when no regulator was obtained for that port; tracks state in
 * vbus_enabled[] so enable/disable calls stay balanced.
 */
static int pxa27x_ohci_set_vbus_power(struct pxa27x_ohci *pxa_ohci,
				      unsigned int port, bool enable)
{
	struct regulator *vbus = pxa_ohci->vbus[port];
	int ret = 0;

	if (IS_ERR_OR_NULL(vbus))
		return 0;

	if (enable && !pxa_ohci->vbus_enabled[port])
		ret = regulator_enable(vbus);
	else if (!enable && pxa_ohci->vbus_enabled[port])
		ret = regulator_disable(vbus);

	if (ret < 0)
		return ret;

	pxa_ohci->vbus_enabled[port] = enable;

	return 0;
}

/*
 * Hub-control override: intercept Set/ClearPortFeature(POWER) to switch
 * the per-port VBUS regulator, then delegate everything to the generic
 * ohci_hub_control().  wIndex is the 1-based port number (1..3).
 */
static int pxa27x_ohci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
				   u16 wIndex, char *buf, u16 wLength)
{
	struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
	int ret;

	switch (typeReq) {
	case SetPortFeature:
	case ClearPortFeature:
		if (!wIndex || wIndex > 3)
			return -EPIPE;

		if (wValue != USB_PORT_FEAT_POWER)
			break;

		ret = pxa27x_ohci_set_vbus_power(pxa_ohci, wIndex - 1,
						 typeReq == SetPortFeature);
		if (ret)
			return ret;
		break;
	}

	return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
}

/*-------------------------------------------------------------------------*/

/*
 * Translate the board's platform-data flags into the UHCHR/UHCRHDA
 * register bits: port enables, power control/sense polarity,
 * overcurrent protection mode, and power-on-to-power-good time
 * (POTPGT is in units of 2 ms, hence the /2).
 */
static inline void pxa27x_setup_hc(struct pxa27x_ohci *pxa_ohci,
				   struct pxaohci_platform_data *inf)
{
	uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
	uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);

	if (inf->flags & ENABLE_PORT1)
		uhchr &= ~UHCHR_SSEP1;

	if (inf->flags & ENABLE_PORT2)
		uhchr &= ~UHCHR_SSEP2;

	if (inf->flags & ENABLE_PORT3)
		uhchr &= ~UHCHR_SSEP3;

	if (inf->flags & POWER_CONTROL_LOW)
		uhchr |= UHCHR_PCPL;

	if (inf->flags & POWER_SENSE_LOW)
		uhchr |= UHCHR_PSPL;

	if (inf->flags & NO_OC_PROTECTION)
		uhcrhda |= UHCRHDA_NOCP;
	else
		uhcrhda &= ~UHCRHDA_NOCP;

	if (inf->flags & OC_MODE_PERPORT)
		uhcrhda |= UHCRHDA_OCPM;
	else
		uhcrhda &= ~UHCRHDA_OCPM;

	if (inf->power_on_delay) {
		uhcrhda &= ~UHCRHDA_POTPGT(0xff);
		uhcrhda |= UHCRHDA_POTPGT(inf->power_on_delay / 2);
	}

	__raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
	__raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCHR + 0);
	/* NOTE(review): line above must target UHCRHDA; kept original below */
}

/* Pulse the Force-Host-Reset bit; 11 us satisfies the reset hold time
 * (presumably per the PXA27x developer manual -- TODO confirm). */
static inline void pxa27x_reset_hc(struct pxa27x_ohci *pxa_ohci)
{
	uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);

	__raw_writel(uhchr | UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
	udelay(11);
	__raw_writel(uhchr & ~UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
}

#ifdef CONFIG_PXA27x
extern void pxa27x_clear_otgph(void);
#else
#define pxa27x_clear_otgph()	do {} while (0)
#endif

/*
 * Bring the host controller out of reset and sleep: enable its clock,
 * force-reset it, reset the system-bus interface (busy-wait until the
 * hardware clears UHCHR_FSBIR), apply board configuration, run the
 * board init hook, clear Sleep-Standby-Enable and unmask the resume /
 * remote-wakeup interrupts.  Returns 0 or the board hook's error.
 */
static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
{
	int retval = 0;
	struct pxaohci_platform_data *inf;
	uint32_t uhchr;
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	inf = dev_get_platdata(dev);

	clk_prepare_enable(pxa_ohci->clk);

	pxa27x_reset_hc(pxa_ohci);

	uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) | UHCHR_FSBIR;
	__raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);

	while (__raw_readl(pxa_ohci->mmio_base + UHCHR) & UHCHR_FSBIR)
		cpu_relax();

	pxa27x_setup_hc(pxa_ohci, inf);

	if (inf->init)
		retval = inf->init(dev);

	if (retval < 0)
		return retval;

	if (cpu_is_pxa3xx())
		pxa3xx_u2d_start_hc(&hcd->self);

	uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
	__raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
	__raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, pxa_ohci->mmio_base + UHCHIE);

	/* Clear any OTG Pin Hold */
	pxa27x_clear_otgph();
	return 0;
}

/*
 * Tear down the controller: board exit hook, force reset, Host
 * Controller Reset via UHCCOMS bit 0, then gate the clock.
 */
static void pxa27x_stop_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
{
	struct pxaohci_platform_data *inf;
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	uint32_t uhccoms;

	inf = dev_get_platdata(dev);

	if (cpu_is_pxa3xx())
		pxa3xx_u2d_stop_hc(&hcd->self);

	if (inf->exit)
		inf->exit(dev);

	pxa27x_reset_hc(pxa_ohci);

	/* Host Controller Reset */
	uhccoms = __raw_readl(pxa_ohci->mmio_base + UHCCOMS) | 0x01;
	__raw_writel(uhccoms, pxa_ohci->mmio_base + UHCCOMS);
	udelay(10);

	clk_disable_unprepare(pxa_ohci->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id pxa_ohci_dt_ids[] = {
	{ .compatible = "marvell,pxa-ohci" },
	{ }
};

MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids);

/*
 * Build a struct pxaohci_platform_data from device-tree properties so
 * the rest of the driver can stay platform-data driven.  Returns 0 for
 * non-DT probes (pdev->dev.of_node == NULL).
 */
static int ohci_pxa_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct pxaohci_platform_data *pdata;
	u32 tmp;
	int ret;

	if (!np)
		return 0;

	/* Right now device-tree probed devices don't get dma_mask set.
	 * Since shared usb code relies on it, set it here for now.
	 * Once we have dma capability bindings this can go away.
	 */
	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_property_read_bool(np, "marvell,enable-port1"))
		pdata->flags |= ENABLE_PORT1;
	if (of_property_read_bool(np, "marvell,enable-port2"))
		pdata->flags |= ENABLE_PORT2;
	if (of_property_read_bool(np, "marvell,enable-port3"))
		pdata->flags |= ENABLE_PORT3;
	if (of_property_read_bool(np, "marvell,port-sense-low"))
		pdata->flags |= POWER_SENSE_LOW;
	if (of_property_read_bool(np, "marvell,power-control-low"))
		pdata->flags |= POWER_CONTROL_LOW;
	if (of_property_read_bool(np, "marvell,no-oc-protection"))
		pdata->flags |= NO_OC_PROTECTION;
	if (of_property_read_bool(np, "marvell,oc-mode-perport"))
		pdata->flags |= OC_MODE_PERPORT;
	if (!of_property_read_u32(np, "marvell,power-on-delay", &tmp))
		pdata->power_on_delay = tmp;
	if (!of_property_read_u32(np, "marvell,port-mode", &tmp))
		pdata->port_mode = tmp;
	if (!of_property_read_u32(np, "marvell,power-budget", &tmp))
		pdata->power_budget = tmp;

	pdev->dev.platform_data = pdata;

	return 0;
}
#else
static int ohci_pxa_of_init(struct platform_device *pdev)
{
	return 0;
}
#endif

/*-------------------------------------------------------------------------*/

/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */

/**
 * usb_hcd_pxa27x_probe - initialize pxa27x-based HCDs
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 *
 */
int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device *pdev)
{
	int retval, irq;
	struct usb_hcd *hcd;
	struct pxaohci_platform_data *inf;
	struct pxa27x_ohci *pxa_ohci;
	struct ohci_hcd *ohci;
	struct resource *r;
	struct clk *usb_clk;
	unsigned int i;

	retval = ohci_pxa_of_init(pdev);
	if (retval)
		return retval;

	inf = dev_get_platdata(&pdev->dev);

	if (!inf)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		pr_err("no resource of IORESOURCE_IRQ");
		return -ENXIO;
	}

	usb_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(usb_clk))
		return PTR_ERR(usb_clk);

	hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
	if (!hcd)
		return -ENOMEM;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hcd->regs = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(hcd->regs)) {
		retval = PTR_ERR(hcd->regs);
		goto err;
	}
	hcd->rsrc_start = r->start;
	hcd->rsrc_len = resource_size(r);

	/* initialize "struct pxa27x_ohci" */
	pxa_ohci = to_pxa27x_ohci(hcd);
	pxa_ohci->clk = usb_clk;
	pxa_ohci->mmio_base = (void __iomem *)hcd->regs;

	/* Optional per-port VBUS regulators: "vbus1".."vbus3", only for
	 * ports enabled in the platform flags. */
	for (i = 0; i < 3; ++i) {
		char name[6];

		if (!(inf->flags & (ENABLE_PORT1 << i)))
			continue;

		sprintf(name, "vbus%u", i + 1);
		pxa_ohci->vbus[i] = devm_regulator_get(&pdev->dev, name);
	}

	retval = pxa27x_start_hc(pxa_ohci, &pdev->dev);
	if (retval < 0) {
		pr_debug("pxa27x_start_hc failed");
		goto err;
	}

	/* Select Power Management Mode */
	pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);

	if (inf->power_budget)
		hcd->power_budget = inf->power_budget;

	/* The value of NDP in roothub_a is incorrect on this hardware */
	ohci = hcd_to_ohci(hcd);
	ohci->num_ports = 3;

	retval = usb_add_hcd(hcd, irq, 0);
	if (retval == 0) {
		device_wakeup_enable(hcd->self.controller);
		return retval;
	}

	pxa27x_stop_hc(pxa_ohci, &pdev->dev);
 err:
	usb_put_hcd(hcd);
	return retval;
}

/* may be called without controller electrically present */
/* may be called with controller, bus, and devices active */

/**
 * usb_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
 * @dev: USB Host Controller being removed
 * Context: !in_interrupt()
 *
 * Reverses the effect of usb_hcd_pxa27x_probe(), first invoking
 * the HCD's stop() method.  It is always called from a thread
 * context, normally "rmmod", "apmd", or something similar.
 *
 */
void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
{
	struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
	unsigned int i;

	usb_remove_hcd(hcd);
	pxa27x_stop_hc(pxa_ohci, &pdev->dev);

	/* Make sure all port regulators are off before the HCD goes away. */
	for (i = 0; i < 3; ++i)
		pxa27x_ohci_set_vbus_power(pxa_ohci, i, false);

	usb_put_hcd(hcd);
}

/*-------------------------------------------------------------------------*/

static int ohci_hcd_pxa27x_drv_probe(struct platform_device *pdev)
{
	pr_debug ("In ohci_hcd_pxa27x_drv_probe");

	if (usb_disabled())
		return -ENODEV;

	return usb_hcd_pxa27x_probe(&ohci_pxa27x_hc_driver, pdev);
}

static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);

	usb_hcd_pxa27x_remove(hcd, pdev);
	return 0;
}

#ifdef CONFIG_PM
/* System suspend: let the OHCI core quiesce (honouring wakeup), then
 * power down the controller.  next_statechange throttles back-to-back
 * state changes by up to 5 ms. */
static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	bool do_wakeup = device_may_wakeup(dev);
	int ret;

	if (time_before(jiffies, ohci->next_statechange))
		msleep(5);
	ohci->next_statechange = jiffies;

	ret = ohci_suspend(hcd, do_wakeup);
	if (ret)
		return ret;

	pxa27x_stop_hc(pxa_ohci, dev);
	return ret;
}

/* System resume: re-start the controller, re-apply the power-management
 * mode (lost across the reset in pxa27x_start_hc), then resume the core. */
static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
	struct pxaohci_platform_data *inf = dev_get_platdata(dev);
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	int status;

	if (time_before(jiffies, ohci->next_statechange))
		msleep(5);
	ohci->next_statechange = jiffies;

	status = pxa27x_start_hc(pxa_ohci, dev);
	if (status < 0)
		return status;

	/* Select Power Management Mode */
	pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);

	ohci_resume(hcd, false);
	return 0;
}

static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
	.suspend	= ohci_hcd_pxa27x_drv_suspend,
	.resume		= ohci_hcd_pxa27x_drv_resume,
};
#endif

static struct platform_driver ohci_hcd_pxa27x_driver = {
	.probe		= ohci_hcd_pxa27x_drv_probe,
	.remove		= ohci_hcd_pxa27x_drv_remove,
	.shutdown	= usb_hcd_platform_shutdown,
	.driver		= {
		.name	= "pxa27x-ohci",
		.of_match_table = of_match_ptr(pxa_ohci_dt_ids),
#ifdef CONFIG_PM
		.pm	= &ohci_hcd_pxa27x_pm_ops,
#endif
	},
};

/* Reserve room in ohci_hcd->priv for our struct pxa27x_ohci. */
static const struct ohci_driver_overrides pxa27x_overrides __initconst = {
	.extra_priv_size =	sizeof(struct pxa27x_ohci),
};

static int __init ohci_pxa27x_init(void)
{
	if (usb_disabled())
		return -ENODEV;

	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
	ohci_init_driver(&ohci_pxa27x_hc_driver, &pxa27x_overrides);

	/* Hook in the VBUS-aware hub_control after the core fills in the
	 * default ops. */
	ohci_pxa27x_hc_driver.hub_control = pxa27x_ohci_hub_control;

	return platform_driver_register(&ohci_hcd_pxa27x_driver);
}
module_init(ohci_pxa27x_init);

static void __exit ohci_pxa27x_cleanup(void)
{
	platform_driver_unregister(&ohci_hcd_pxa27x_driver);
}
module_exit(ohci_pxa27x_cleanup);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa27x-ohci");
gpl-2.0
merlinholland/kernel
net/bridge/netfilter/ebt_ulog.c
560
9178
/*
 * netfilter module for userspace bridged Ethernet frames logging daemons
 *
 *	Authors:
 *	Bart De Schuymer <bdschuym@pandora.be>
 *	Harald Welte <laforge@netfilter.org>
 *
 *  November, 2004
 *
 * Based on ipt_ULOG.c, which is
 * (C) 2000-2002 by Harald Welte <laforge@netfilter.org>
 *
 * This module accepts two parameters:
 *
 * nlbufsiz:
 *   The parameter specifies how big the buffer for each netlink multicast
 * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
 * get accumulated in the kernel until they are sent to userspace. It is
 * NOT possible to allocate more than 128kB, and it is strongly discouraged,
 * because atomically allocating 128kB inside the network rx softirq is not
 * reliable. Please also keep in mind that this buffer size is allocated for
 * each nlgroup you are using, so the total kernel memory usage increases
 * by that factor.
 *
 * flushtimeout:
 *   Specify, after how many hundredths of a second the queue should be
 * flushed even if it is not full yet.
 *
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ulog.h>
#include <net/netfilter/nf_log.h>
#include <net/sock.h>
#include "../br_private.h"

/* Rate-limited printk so a packet flood cannot flood the log too. */
#define PRINTR(format, args...) do { if (net_ratelimit()) \
				printk(format , ## args); } while (0)

static unsigned int nlbufsiz = NLMSG_GOODSIZE;
module_param(nlbufsiz, uint, 0600);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
			   "(defaults to 4096)");

static unsigned int flushtimeout = 10;
module_param(flushtimeout, uint, 0600);
MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths ofa second) "
			       "(defaults to 10)");

/* One accumulation buffer per netlink multicast group. */
typedef struct {
	unsigned int qlen;		/* number of nlmsgs' in the skb */
	struct nlmsghdr *lastnlh;	/* netlink header of last msg in skb */
	struct sk_buff *skb;		/* the pre-allocated skb */
	struct timer_list timer;	/* the timer function */
	spinlock_t lock;		/* the per-queue lock */
} ebt_ulog_buff_t;

static ebt_ulog_buff_t ulog_buffers[EBT_ULOG_MAXNLGROUPS];
static struct sock *ebtulognl;	/* kernel-side netlink socket */

/* send one ulog_buff_t to userspace
 * Called with ub->lock held (both call sites below take it first). */
static void ulog_send(unsigned int nlgroup)
{
	ebt_ulog_buff_t *ub = &ulog_buffers[nlgroup];

	if (timer_pending(&ub->timer))
		del_timer(&ub->timer);

	if (!ub->skb)
		return;

	/* last nlmsg needs NLMSG_DONE */
	if (ub->qlen > 1)
		ub->lastnlh->nlmsg_type = NLMSG_DONE;

	NETLINK_CB(ub->skb).dst_group = nlgroup + 1;
	netlink_broadcast(ebtulognl, ub->skb, 0, nlgroup + 1, GFP_ATOMIC);

	/* netlink_broadcast() consumed the skb; start a fresh queue. */
	ub->qlen = 0;
	ub->skb = NULL;
}

/* timer function to flush queue in flushtimeout time */
static void ulog_timer(unsigned long data)
{
	spin_lock_bh(&ulog_buffers[data].lock);
	if (ulog_buffers[data].skb)
		ulog_send(data);
	spin_unlock_bh(&ulog_buffers[data].lock);
}

/* Allocate an accumulation skb: try the full nlbufsiz first, fall back
 * to just the current packet's size.  May return NULL (atomic context). */
static struct sk_buff *ulog_alloc_skb(unsigned int size)
{
	struct sk_buff *skb;
	unsigned int n;

	n = max(size, nlbufsiz);
	skb = alloc_skb(n, GFP_ATOMIC);
	if (!skb) {
		PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
		       "of size %ub!\n", n);
		if (n > size) {
			/* try to allocate only as much as we need for
			 * current packet */
			skb = alloc_skb(size, GFP_ATOMIC);
			if (!skb)
				PRINTR(KERN_ERR "ebt_ulog: can't even allocate "
				       "buffer of size %ub\n", size);
		}
	}

	return skb;
}

/*
 * Queue one packet for delivery to userspace: copy (up to cprange bytes
 * of) the frame plus metadata into the group's accumulation skb, and
 * flush when the queue threshold is reached, otherwise (re)arm the
 * flush timer.  NLMSG_PUT jumps to the nlmsg_failure label on failure.
 */
static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct ebt_ulog_info *uloginfo,
			    const char *prefix)
{
	ebt_ulog_packet_msg_t *pm;
	size_t size, copy_len;
	struct nlmsghdr *nlh;
	unsigned int group = uloginfo->nlgroup;
	ebt_ulog_buff_t *ub = &ulog_buffers[group];
	spinlock_t *lock = &ub->lock;
	ktime_t kt;

	/* cprange == 0 means "copy the whole frame" (incl. MAC header). */
	if ((uloginfo->cprange == 0) ||
	    (uloginfo->cprange > skb->len + ETH_HLEN))
		copy_len = skb->len + ETH_HLEN;
	else
		copy_len = uloginfo->cprange;

	size = NLMSG_SPACE(sizeof(*pm) + copy_len);
	if (size > nlbufsiz) {
		PRINTR("ebt_ulog: Size %Zd needed, but nlbufsiz=%d\n",
		       size, nlbufsiz);
		return;
	}

	spin_lock_bh(lock);

	if (!ub->skb) {
		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	} else if (size > skb_tailroom(ub->skb)) {
		/* No room left: flush what we have and start a new skb. */
		ulog_send(group);

		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	}

	nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0,
			size - NLMSG_ALIGN(sizeof(*nlh)));
	ub->qlen++;

	pm = NLMSG_DATA(nlh);

	/* Fill in the ulog data */
	pm->version = EBT_ULOG_VERSION;
	kt = ktime_get_real();
	pm->stamp = ktime_to_timeval(kt);
	if (ub->qlen == 1)
		ub->skb->tstamp = kt;
	pm->data_len = copy_len;
	pm->mark = skb->mark;
	pm->hook = hooknr;
	if (uloginfo->prefix != NULL)
		strcpy(pm->prefix, uloginfo->prefix);
	else
		*(pm->prefix) = '\0';

	if (in) {
		strcpy(pm->physindev, in->name);
		/* If in isn't a bridge, then physindev==indev */
		if (in->br_port)
			strcpy(pm->indev, in->br_port->br->dev->name);
		else
			strcpy(pm->indev, in->name);
	} else
		pm->indev[0] = pm->physindev[0] = '\0';

	if (out) {
		/* If out exists, then out is a bridge port */
		strcpy(pm->physoutdev, out->name);
		strcpy(pm->outdev, out->br_port->br->dev->name);
	} else
		pm->outdev[0] = pm->physoutdev[0] = '\0';

	/* -ETH_HLEN: copy starting at the MAC header, not the IP payload. */
	if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
		BUG();

	if (ub->qlen > 1)
		ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;

	ub->lastnlh = nlh;

	if (ub->qlen >= uloginfo->qthreshold)
		ulog_send(group);
	else if (!timer_pending(&ub->timer)) {
		ub->timer.expires = jiffies + flushtimeout * HZ / 100;
		add_timer(&ub->timer);
	}

unlock:
	spin_unlock_bh(lock);

	return;

nlmsg_failure:
	printk(KERN_CRIT "ebt_ulog: error during NLMSG_PUT. This should "
	       "not happen, please report to author.\n");
	goto unlock;
alloc_failure:
	goto unlock;
}

/* this function is registered with the netfilter core
 * Bridges nf_log API calls onto ebt_ulog_packet(), supplying defaults
 * when no ULOG-type loginfo is given. */
static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
   const struct sk_buff *skb, const struct net_device *in,
   const struct net_device *out, const struct nf_loginfo *li,
   const char *prefix)
{
	struct ebt_ulog_info loginfo;

	if (!li || li->type != NF_LOG_TYPE_ULOG) {
		loginfo.nlgroup = EBT_ULOG_DEFAULT_NLGROUP;
		loginfo.cprange = 0;
		loginfo.qthreshold = EBT_ULOG_DEFAULT_QTHRESHOLD;
		loginfo.prefix[0] = '\0';
	} else {
		loginfo.nlgroup = li->u.ulog.group;
		loginfo.cprange = li->u.ulog.copy_len;
		loginfo.qthreshold = li->u.ulog.qthreshold;
		strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
	}

	ebt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
}

/* ebtables "--ulog" target: log and continue rule traversal. */
static unsigned int
ebt_ulog_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
	ebt_ulog_packet(par->hooknum, skb, par->in, par->out,
			par->targinfo, NULL);
	return EBT_CONTINUE;
}

/* Validate/normalize user-supplied target info at rule-insert time. */
static bool ebt_ulog_tg_check(const struct xt_tgchk_param *par)
{
	struct ebt_ulog_info *uloginfo = par->targinfo;

	if (uloginfo->nlgroup > 31)
		return false;

	uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0';

	if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN)
		uloginfo->qthreshold = EBT_ULOG_MAX_QLEN;

	return true;
}

static struct xt_target ebt_ulog_tg_reg __read_mostly = {
	.name		= "ulog",
	.revision	= 0,
	.family		= NFPROTO_BRIDGE,
	.target		= ebt_ulog_tg,
	.checkentry	= ebt_ulog_tg_check,
	.targetsize	= XT_ALIGN(sizeof(struct ebt_ulog_info)),
	.me		= THIS_MODULE,
};

static struct nf_logger ebt_ulog_logger __read_mostly = {
	.name		= "ebt_ulog",
	.logfn		= &ebt_log_packet,
	.me		= THIS_MODULE,
};

static int __init ebt_ulog_init(void)
{
	int ret;
	int i;

	if (nlbufsiz >= 128*1024) {
		printk(KERN_NOTICE "ebt_ulog: Netlink buffer has to be <= 128kB,"
		       " please try a smaller nlbufsiz parameter.\n");
		return -EINVAL;
	}

	/* initialize ulog_buffers */
	for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
		setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
		spin_lock_init(&ulog_buffers[i].lock);
	}

	ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
					  EBT_ULOG_MAXNLGROUPS, NULL, NULL,
					  THIS_MODULE);
	if (!ebtulognl) {
		printk(KERN_WARNING KBUILD_MODNAME ": out of memory trying to "
		       "call netlink_kernel_create\n");
		ret = -ENOMEM;
	} else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0) {
		netlink_kernel_release(ebtulognl);
	}

	/* ret is assigned on both branches above: -ENOMEM or the
	 * xt_register_target() result. */
	if (ret == 0)
		nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger);

	return ret;
}

static void __exit ebt_ulog_fini(void)
{
	ebt_ulog_buff_t *ub;
	int i;

	nf_log_unregister(&ebt_ulog_logger);
	xt_unregister_target(&ebt_ulog_tg_reg);
	for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
		ub = &ulog_buffers[i];
		if (timer_pending(&ub->timer))
			del_timer(&ub->timer);
		spin_lock_bh(&ub->lock);
		if (ub->skb) {
			kfree_skb(ub->skb);
			ub->skb = NULL;
		}
		spin_unlock_bh(&ub->lock);
	}
	netlink_kernel_release(ebtulognl);
}

module_init(ebt_ulog_init);
module_exit(ebt_ulog_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("Ebtables: Packet logging to netlink using ULOG");
gpl-2.0
nks15/nks_kernel_j7xeltektt
drivers/net/wireless/rtlwifi/stats.c
560
8555
/******************************************************************************
 *
 * Copyright(c) 2009-2012  Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
 * Hsinchu 300, Taiwan.
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/

#include "wifi.h"
#include "stats.h"
#include <linux/export.h>

/* Map antenna power (presumably dBm -- values are clamped to the
 * (-100, 20) range) onto a 0..100 percentage. */
u8 rtl_query_rxpwrpercentage(char antpower)
{
	if ((antpower <= -100) || (antpower >= 20))
		return 0;
	else if (antpower >= 0)
		return 100;
	else
		return 100 + antpower;
}
EXPORT_SYMBOL(rtl_query_rxpwrpercentage);

/* Map an EVM reading in the -33..0 dB range onto 0..100 percent
 * (3% per dB; 99 is rounded up to 100). */
u8 rtl_evm_db_to_percentage(char value)
{
	char ret_val;

	ret_val = value;

	if (ret_val >= 0)
		ret_val = 0;
	if (ret_val <= -33)
		ret_val = -33;

	ret_val = 0 - ret_val;
	ret_val *= 3;

	if (ret_val == 99)
		ret_val = 100;

	return ret_val;
}
EXPORT_SYMBOL(rtl_evm_db_to_percentage);

u8 rtl_evm_dbm_jaguar(char value)
{
	char ret_val = value;

	/* -33dB~0dB to 33dB ~ 0dB*/
	if (ret_val == -128)
		ret_val = 127;	/* avoid negating the most negative char */
	else if (ret_val < 0)
		ret_val = 0 - ret_val;

	ret_val = ret_val >> 1;
	return ret_val;
}
EXPORT_SYMBOL(rtl_evm_dbm_jaguar);

/* Convert a 0..100 signal-strength index to a dBm-style value
 * (half the index, offset by -95). */
static long rtl_translate_todbm(struct ieee80211_hw *hw,
				u8 signal_strength_index)
{
	long signal_power;

	signal_power = (long)((signal_strength_index + 1) >> 1);
	signal_power -= 95;
	return signal_power;
}

/* Piecewise-linear rescaling of a 0..100 signal value onto the scale
 * reported to mac80211; values outside the table pass through. */
long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
{
	long retsig;

	if (currsig >= 61 && currsig <= 100)
		retsig = 90 + ((currsig - 60) / 4);
	else if (currsig >= 41 && currsig <= 60)
		retsig = 78 + ((currsig - 40) / 2);
	else if (currsig >= 31 && currsig <= 40)
		retsig = 66 + (currsig - 30);
	else if (currsig >= 21 && currsig <= 30)
		retsig = 54 + (currsig - 20);
	else if (currsig >= 5 && currsig <= 20)
		retsig = 42 + (((currsig - 5) * 2) / 3);
	else if (currsig == 4)
		retsig = 36;
	else if (currsig == 3)
		retsig = 27;
	else if (currsig == 2)
		retsig = 18;
	else if (currsig == 1)
		retsig = 9;
	else
		retsig = currsig;

	return retsig;
}
EXPORT_SYMBOL(rtl_signal_scale_mapping);

/*
 * Maintain the sliding-window UI RSSI average and, for non-CCK frames,
 * exponentially smooth the per-RF-path RSSI (RX_SMOOTH_FACTOR) plus
 * SNR/EVM/CFO snapshots.  Only frames addressed to us or beacons count.
 */
static void rtl_process_ui_rssi(struct ieee80211_hw *hw,
				struct rtl_stats *pstatus)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u8 rfpath;
	u32 last_rssi, tmpval;

	if (!pstatus->packet_toself && !pstatus->packet_beacon)
		return;

	rtlpriv->stats.pwdb_all_cnt += pstatus->rx_pwdb_all;
	rtlpriv->stats.rssi_calculate_cnt++;

	/* Sliding window over the last PHY_RSSI_SLID_WIN_MAX samples:
	 * once full, subtract the sample being overwritten. */
	if (rtlpriv->stats.ui_rssi.total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
		rtlpriv->stats.ui_rssi.total_num = PHY_RSSI_SLID_WIN_MAX;
		last_rssi = rtlpriv->stats.ui_rssi.elements[
			rtlpriv->stats.ui_rssi.index];
		rtlpriv->stats.ui_rssi.total_val -= last_rssi;
	}
	rtlpriv->stats.ui_rssi.total_val += pstatus->signalstrength;
	rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.index++] =
	    pstatus->signalstrength;
	if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
		rtlpriv->stats.ui_rssi.index = 0;

	tmpval = rtlpriv->stats.ui_rssi.total_val /
		rtlpriv->stats.ui_rssi.total_num;
	rtlpriv->stats.signal_strength = rtl_translate_todbm(hw, (u8) tmpval);
	pstatus->rssi = rtlpriv->stats.signal_strength;

	if (pstatus->is_cck)
		return;

	for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
	     rfpath++) {
		if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
			rtlpriv->stats.rx_rssi_percentage[rfpath] =
			    pstatus->rx_mimo_signalstrength[rfpath];
		}
		/* Exponential smoothing; the extra +1 nudges the average
		 * upward so rising signals converge faster. */
		if (pstatus->rx_mimo_signalstrength[rfpath] >
		    rtlpriv->stats.rx_rssi_percentage[rfpath]) {
			rtlpriv->stats.rx_rssi_percentage[rfpath] =
			    ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
			      (RX_SMOOTH_FACTOR - 1)) +
			     (pstatus->rx_mimo_signalstrength[rfpath])) /
			    (RX_SMOOTH_FACTOR);
			rtlpriv->stats.rx_rssi_percentage[rfpath] =
			    rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
		} else {
			rtlpriv->stats.rx_rssi_percentage[rfpath] =
			    ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
			      (RX_SMOOTH_FACTOR - 1)) +
			     (pstatus->rx_mimo_signalstrength[rfpath])) /
			    (RX_SMOOTH_FACTOR);
		}
		rtlpriv->stats.rx_snr_db[rfpath] = pstatus->rx_snr[rfpath];
		rtlpriv->stats.rx_evm_dbm[rfpath] =
					pstatus->rx_mimo_evm_dbm[rfpath];
		rtlpriv->stats.rx_cfo_short[rfpath] =
					pstatus->cfo_short[rfpath];
		rtlpriv->stats.rx_cfo_tail[rfpath] = pstatus->cfo_tail[rfpath];
	}
}

/* Weighted moving average of received signal power: the new sample
 * gets 1/6 weight, nudged by +/-5 depending on direction of change. */
static void rtl_update_rxsignalstatistics(struct ieee80211_hw *hw,
					  struct rtl_stats *pstatus)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int weighting = 0;

	if (rtlpriv->stats.recv_signal_power == 0)
		rtlpriv->stats.recv_signal_power = pstatus->recvsignalpower;

	if (pstatus->recvsignalpower > rtlpriv->stats.recv_signal_power)
		weighting = 5;
	else if (pstatus->recvsignalpower < rtlpriv->stats.recv_signal_power)
		weighting = (-5);

	rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
		5 + pstatus->recvsignalpower + weighting) / 6;
}

/*
 * Exponentially smooth the undecorated signal (PWDB).  In AP/ad-hoc
 * modes the per-station value is used (looked up by source address
 * under RCU); in station mode the global dm value is used.
 */
static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_sta_info *drv_priv = NULL;
	struct ieee80211_sta *sta = NULL;
	long undec_sm_pwdb;

	rcu_read_lock();
	if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
		sta = rtl_find_sta(hw, pstatus->psaddr);

	/* adhoc or ap mode */
	if (sta) {
		drv_priv = (struct rtl_sta_info *) sta->drv_priv;
		undec_sm_pwdb = drv_priv->rssi_stat.undec_sm_pwdb;
	} else {
		undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
	}

	if (undec_sm_pwdb < 0)
		undec_sm_pwdb = pstatus->rx_pwdb_all;
	if (pstatus->rx_pwdb_all > (u32) undec_sm_pwdb) {
		undec_sm_pwdb = (((undec_sm_pwdb) *
		      (RX_SMOOTH_FACTOR - 1)) +
		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
		undec_sm_pwdb = undec_sm_pwdb + 1;
	} else {
		undec_sm_pwdb = (((undec_sm_pwdb) *
		      (RX_SMOOTH_FACTOR - 1)) +
		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
	}

	if (sta) {
		drv_priv->rssi_stat.undec_sm_pwdb = undec_sm_pwdb;
	} else {
		rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
	}
	rcu_read_unlock();

	rtl_update_rxsignalstatistics(hw, pstatus);
}

/*
 * Sliding-window average of signal quality plus exponential smoothing
 * of per-stream (max 2) EVM percentages.  Frames reporting quality 0
 * are ignored; -1 marks an unused stream.
 */
static void rtl_process_ui_link_quality(struct ieee80211_hw *hw,
					struct rtl_stats *pstatus)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 last_evm, n_stream, tmpval;

	if (pstatus->signalquality == 0)
		return;

	if (rtlpriv->stats.ui_link_quality.total_num++ >=
	    PHY_LINKQUALITY_SLID_WIN_MAX) {
		rtlpriv->stats.ui_link_quality.total_num =
		    PHY_LINKQUALITY_SLID_WIN_MAX;
		last_evm = rtlpriv->stats.ui_link_quality.elements[
			rtlpriv->stats.ui_link_quality.index];
		rtlpriv->stats.ui_link_quality.total_val -= last_evm;
	}
	rtlpriv->stats.ui_link_quality.total_val += pstatus->signalquality;
	rtlpriv->stats.ui_link_quality.elements[
		rtlpriv->stats.ui_link_quality.index++] =
							pstatus->signalquality;
	if (rtlpriv->stats.ui_link_quality.index >=
	    PHY_LINKQUALITY_SLID_WIN_MAX)
		rtlpriv->stats.ui_link_quality.index = 0;

	tmpval = rtlpriv->stats.ui_link_quality.total_val /
	    rtlpriv->stats.ui_link_quality.total_num;
	rtlpriv->stats.signal_quality = tmpval;
	rtlpriv->stats.last_sigstrength_inpercent = tmpval;

	for (n_stream = 0; n_stream < 2; n_stream++) {
		if (pstatus->rx_mimo_sig_qual[n_stream] != -1) {
			if (rtlpriv->stats.rx_evm_percentage[n_stream] == 0) {
				rtlpriv->stats.rx_evm_percentage[n_stream] =
				    pstatus->rx_mimo_sig_qual[n_stream];
			}
			rtlpriv->stats.rx_evm_percentage[n_stream] =
			    ((rtlpriv->stats.rx_evm_percentage[n_stream]
			      * (RX_SMOOTH_FACTOR - 1)) +
			     (pstatus->rx_mimo_sig_qual[n_stream] * 1)) /
			    (RX_SMOOTH_FACTOR);
		}
	}
}

/* Entry point from the RX path: update all PHY statistics for one
 * frame; frames not matching our BSSID are ignored. */
void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
			 struct rtl_stats *pstatus)
{
	if (!pstatus->packet_matchbssid)
		return;

	rtl_process_ui_rssi(hw, pstatus);
	rtl_process_pwdb(hw, pstatus);
	rtl_process_ui_link_quality(hw, pstatus);
}
EXPORT_SYMBOL(rtl_process_phyinfo);
gpl-2.0
glenlee75/linux-at91
drivers/misc/tsl2550.c
2096
10796
/* * tsl2550.c - Linux kernel modules for ambient light sensor * * Copyright (C) 2007 Rodolfo Giometti <giometti@linux.it> * Copyright (C) 2007 Eurotech S.p.A. <info@eurotech.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> #define TSL2550_DRV_NAME "tsl2550" #define DRIVER_VERSION "1.2" /* * Defines */ #define TSL2550_POWER_DOWN 0x00 #define TSL2550_POWER_UP 0x03 #define TSL2550_STANDARD_RANGE 0x18 #define TSL2550_EXTENDED_RANGE 0x1d #define TSL2550_READ_ADC0 0x43 #define TSL2550_READ_ADC1 0x83 /* * Structs */ struct tsl2550_data { struct i2c_client *client; struct mutex update_lock; unsigned int power_state:1; unsigned int operating_mode:1; }; /* * Global data */ static const u8 TSL2550_MODE_RANGE[2] = { TSL2550_STANDARD_RANGE, TSL2550_EXTENDED_RANGE, }; /* * Management functions */ static int tsl2550_set_operating_mode(struct i2c_client *client, int mode) { struct tsl2550_data *data = i2c_get_clientdata(client); int ret = i2c_smbus_write_byte(client, TSL2550_MODE_RANGE[mode]); data->operating_mode = mode; return ret; } static int tsl2550_set_power_state(struct i2c_client *client, int state) { struct tsl2550_data *data = i2c_get_clientdata(client); int ret; if (state == 0) ret = i2c_smbus_write_byte(client, 
TSL2550_POWER_DOWN); else { ret = i2c_smbus_write_byte(client, TSL2550_POWER_UP); /* On power up we should reset operating mode also... */ tsl2550_set_operating_mode(client, data->operating_mode); } data->power_state = state; return ret; } static int tsl2550_get_adc_value(struct i2c_client *client, u8 cmd) { int ret; ret = i2c_smbus_read_byte_data(client, cmd); if (ret < 0) return ret; if (!(ret & 0x80)) return -EAGAIN; return ret & 0x7f; /* remove the "valid" bit */ } /* * LUX calculation */ #define TSL2550_MAX_LUX 1846 static const u8 ratio_lut[] = { 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 98, 98, 98, 98, 98, 98, 98, 97, 97, 97, 97, 97, 96, 96, 96, 96, 95, 95, 95, 94, 94, 93, 93, 93, 92, 92, 91, 91, 90, 89, 89, 88, 87, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 75, 74, 73, 71, 69, 68, 66, 64, 62, 60, 58, 56, 54, 52, 49, 47, 44, 42, 41, 40, 40, 39, 39, 38, 38, 37, 37, 37, 36, 36, 36, 35, 35, 35, 35, 34, 34, 34, 34, 33, 33, 33, 33, 32, 32, 32, 32, 32, 31, 31, 31, 31, 31, 30, 30, 30, 30, 30, }; static const u16 count_lut[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 115, 123, 131, 139, 147, 155, 163, 171, 179, 187, 195, 203, 211, 219, 227, 235, 247, 263, 279, 295, 311, 327, 343, 359, 375, 391, 407, 423, 439, 455, 471, 487, 511, 543, 575, 607, 639, 671, 703, 735, 767, 799, 831, 863, 895, 927, 959, 991, 1039, 1103, 1167, 1231, 1295, 1359, 1423, 1487, 1551, 1615, 1679, 1743, 1807, 1871, 1935, 1999, 2095, 2223, 2351, 2479, 2607, 2735, 2863, 2991, 3119, 3247, 3375, 3503, 3631, 3759, 3887, 4015, }; /* * This function is described into Taos TSL2550 Designer's Notebook * pages 2, 3. 
*/ static int tsl2550_calculate_lux(u8 ch0, u8 ch1) { unsigned int lux; /* Look up count from channel values */ u16 c0 = count_lut[ch0]; u16 c1 = count_lut[ch1]; /* * Calculate ratio. * Note: the "128" is a scaling factor */ u8 r = 128; /* Avoid division by 0 and count 1 cannot be greater than count 0 */ if (c1 <= c0) if (c0) { r = c1 * 128 / c0; /* Calculate LUX */ lux = ((c0 - c1) * ratio_lut[r]) / 256; } else lux = 0; else return -EAGAIN; /* LUX range check */ return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux; } /* * SysFS support */ static ssize_t tsl2550_show_power_state(struct device *dev, struct device_attribute *attr, char *buf) { struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev)); return sprintf(buf, "%u\n", data->power_state); } static ssize_t tsl2550_store_power_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct tsl2550_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); int ret; if (val < 0 || val > 1) return -EINVAL; mutex_lock(&data->update_lock); ret = tsl2550_set_power_state(client, val); mutex_unlock(&data->update_lock); if (ret < 0) return ret; return count; } static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO, tsl2550_show_power_state, tsl2550_store_power_state); static ssize_t tsl2550_show_operating_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev)); return sprintf(buf, "%u\n", data->operating_mode); } static ssize_t tsl2550_store_operating_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct tsl2550_data *data = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); int ret; if (val < 0 || val > 1) return -EINVAL; if (data->power_state == 0) return -EBUSY; mutex_lock(&data->update_lock); ret = 
tsl2550_set_operating_mode(client, val); mutex_unlock(&data->update_lock); if (ret < 0) return ret; return count; } static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO, tsl2550_show_operating_mode, tsl2550_store_operating_mode); static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf) { struct tsl2550_data *data = i2c_get_clientdata(client); u8 ch0, ch1; int ret; ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC0); if (ret < 0) return ret; ch0 = ret; ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC1); if (ret < 0) return ret; ch1 = ret; /* Do the job */ ret = tsl2550_calculate_lux(ch0, ch1); if (ret < 0) return ret; if (data->operating_mode == 1) ret *= 5; return sprintf(buf, "%d\n", ret); } static ssize_t tsl2550_show_lux1_input(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct tsl2550_data *data = i2c_get_clientdata(client); int ret; /* No LUX data if not operational */ if (!data->power_state) return -EBUSY; mutex_lock(&data->update_lock); ret = __tsl2550_show_lux(client, buf); mutex_unlock(&data->update_lock); return ret; } static DEVICE_ATTR(lux1_input, S_IRUGO, tsl2550_show_lux1_input, NULL); static struct attribute *tsl2550_attributes[] = { &dev_attr_power_state.attr, &dev_attr_operating_mode.attr, &dev_attr_lux1_input.attr, NULL }; static const struct attribute_group tsl2550_attr_group = { .attrs = tsl2550_attributes, }; /* * Initialization function */ static int tsl2550_init_client(struct i2c_client *client) { struct tsl2550_data *data = i2c_get_clientdata(client); int err; /* * Probe the chip. 
To do so we try to power up the device and then to * read back the 0x03 code */ err = i2c_smbus_read_byte_data(client, TSL2550_POWER_UP); if (err < 0) return err; if (err != TSL2550_POWER_UP) return -ENODEV; data->power_state = 1; /* Set the default operating mode */ err = i2c_smbus_write_byte(client, TSL2550_MODE_RANGE[data->operating_mode]); if (err < 0) return err; return 0; } /* * I2C init/probing/exit functions */ static struct i2c_driver tsl2550_driver; static int tsl2550_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct tsl2550_data *data; int *opmode, err = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA)) { err = -EIO; goto exit; } data = kzalloc(sizeof(struct tsl2550_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } data->client = client; i2c_set_clientdata(client, data); /* Check platform data */ opmode = client->dev.platform_data; if (opmode) { if (*opmode < 0 || *opmode > 1) { dev_err(&client->dev, "invalid operating_mode (%d)\n", *opmode); err = -EINVAL; goto exit_kfree; } data->operating_mode = *opmode; } else data->operating_mode = 0; /* default mode is standard */ dev_info(&client->dev, "%s operating mode\n", data->operating_mode ? "extended" : "standard"); mutex_init(&data->update_lock); /* Initialize the TSL2550 chip */ err = tsl2550_init_client(client); if (err) goto exit_kfree; /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &tsl2550_attr_group); if (err) goto exit_kfree; dev_info(&client->dev, "support ver. 
%s enabled\n", DRIVER_VERSION); return 0; exit_kfree: kfree(data); exit: return err; } static int tsl2550_remove(struct i2c_client *client) { sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group); /* Power down the device */ tsl2550_set_power_state(client, 0); kfree(i2c_get_clientdata(client)); return 0; } #ifdef CONFIG_PM_SLEEP static int tsl2550_suspend(struct device *dev) { return tsl2550_set_power_state(to_i2c_client(dev), 0); } static int tsl2550_resume(struct device *dev) { return tsl2550_set_power_state(to_i2c_client(dev), 1); } static SIMPLE_DEV_PM_OPS(tsl2550_pm_ops, tsl2550_suspend, tsl2550_resume); #define TSL2550_PM_OPS (&tsl2550_pm_ops) #else #define TSL2550_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ static const struct i2c_device_id tsl2550_id[] = { { "tsl2550", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tsl2550_id); static struct i2c_driver tsl2550_driver = { .driver = { .name = TSL2550_DRV_NAME, .owner = THIS_MODULE, .pm = TSL2550_PM_OPS, }, .probe = tsl2550_probe, .remove = tsl2550_remove, .id_table = tsl2550_id, }; module_i2c_driver(tsl2550_driver); MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); MODULE_DESCRIPTION("TSL2550 ambient light sensor driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION);
gpl-2.0
hiikezoe/android_kernel_lge_lgl21
drivers/gpu/drm/radeon/radeon_kms.c
2096
14126
/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include "drmP.h" #include "drm_sarea.h" #include "radeon.h" #include "radeon_drm.h" #include <linux/vga_switcheroo.h> #include <linux/slab.h> int radeon_driver_unload_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; if (rdev == NULL) return 0; radeon_modeset_fini(rdev); radeon_device_fini(rdev); kfree(rdev); dev->dev_private = NULL; return 0; } int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) { struct radeon_device *rdev; int r, acpi_status; rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL); if (rdev == NULL) { return -ENOMEM; } dev->dev_private = (void *)rdev; /* update BUS flag */ if (drm_pci_device_is_agp(dev)) { flags |= RADEON_IS_AGP; } else if (drm_pci_device_is_pcie(dev)) { flags |= RADEON_IS_PCIE; } else { flags |= RADEON_IS_PCI; } /* radeon_device_init should report only fatal error * like memory allocation failure or iomapping failure, * or memory manager initialization failure, it must * properly initialize the GPU MC controller and permit * VRAM allocation */ r = radeon_device_init(rdev, dev, dev->pdev, flags); if (r) { dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); goto out; } /* Call ACPI methods */ acpi_status = radeon_acpi_init(rdev); if (acpi_status) dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n"); /* Again modeset_init should fail only on fatal error * otherwise it should provide enough functionalities * for shadowfb to run */ r = radeon_modeset_init(rdev); if (r) dev_err(&dev->pdev->dev, "Fatal error during modeset init\n"); out: if (r) radeon_driver_unload_kms(dev); return r; } static void radeon_set_filp_rights(struct drm_device *dev, struct drm_file **owner, struct drm_file *applier, uint32_t *value) { mutex_lock(&dev->struct_mutex); if (*value == 1) { /* wants rights */ if (!*owner) *owner = applier; } else if (*value == 0) { /* revokes rights */ if (*owner == applier) *owner = NULL; } *value = *owner 
== applier ? 1 : 0; mutex_unlock(&dev->struct_mutex); } /* * Userspace get information ioctl */ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; struct drm_radeon_info *info; struct radeon_mode_info *minfo = &rdev->mode_info; uint32_t *value_ptr; uint32_t value; struct drm_crtc *crtc; int i, found; info = data; value_ptr = (uint32_t *)((unsigned long)info->value); if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) return -EFAULT; switch (info->request) { case RADEON_INFO_DEVICE_ID: value = dev->pci_device; break; case RADEON_INFO_NUM_GB_PIPES: value = rdev->num_gb_pipes; break; case RADEON_INFO_NUM_Z_PIPES: value = rdev->num_z_pipes; break; case RADEON_INFO_ACCEL_WORKING: /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) value = false; else value = rdev->accel_working; break; case RADEON_INFO_CRTC_FROM_ID: for (i = 0, found = 0; i < rdev->num_crtc; i++) { crtc = (struct drm_crtc *)minfo->crtcs[i]; if (crtc && crtc->base.id == value) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); value = radeon_crtc->crtc_id; found = 1; break; } } if (!found) { DRM_DEBUG_KMS("unknown crtc id %d\n", value); return -EINVAL; } break; case RADEON_INFO_ACCEL_WORKING2: value = rdev->accel_working; break; case RADEON_INFO_TILING_CONFIG: if (rdev->family >= CHIP_CAYMAN) value = rdev->config.cayman.tile_config; else if (rdev->family >= CHIP_CEDAR) value = rdev->config.evergreen.tile_config; else if (rdev->family >= CHIP_RV770) value = rdev->config.rv770.tile_config; else if (rdev->family >= CHIP_R600) value = rdev->config.r600.tile_config; else { DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); return -EINVAL; } break; case RADEON_INFO_WANT_HYPERZ: /* The "value" here is both an input and output parameter. * If the input value is 1, filp requests hyper-z access. 
* If the input value is 0, filp revokes its hyper-z access. * * When returning, the value is 1 if filp owns hyper-z access, * 0 otherwise. */ if (value >= 2) { DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value); return -EINVAL; } radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value); break; case RADEON_INFO_WANT_CMASK: /* The same logic as Hyper-Z. */ if (value >= 2) { DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value); return -EINVAL; } radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); break; case RADEON_INFO_CLOCK_CRYSTAL_FREQ: /* return clock value in KHz */ value = rdev->clock.spll.reference_freq * 10; break; case RADEON_INFO_NUM_BACKENDS: if (rdev->family >= CHIP_CAYMAN) value = rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines; else if (rdev->family >= CHIP_CEDAR) value = rdev->config.evergreen.max_backends; else if (rdev->family >= CHIP_RV770) value = rdev->config.rv770.max_backends; else if (rdev->family >= CHIP_R600) value = rdev->config.r600.max_backends; else { return -EINVAL; } break; case RADEON_INFO_NUM_TILE_PIPES: if (rdev->family >= CHIP_CAYMAN) value = rdev->config.cayman.max_tile_pipes; else if (rdev->family >= CHIP_CEDAR) value = rdev->config.evergreen.max_tile_pipes; else if (rdev->family >= CHIP_RV770) value = rdev->config.rv770.max_tile_pipes; else if (rdev->family >= CHIP_R600) value = rdev->config.r600.max_tile_pipes; else { return -EINVAL; } break; case RADEON_INFO_FUSION_GART_WORKING: value = 1; break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; } if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } return 0; } /* * Outdated mess for old drm with Xorg being in charge (void function now). 
*/ int radeon_driver_firstopen_kms(struct drm_device *dev) { return 0; } void radeon_driver_lastclose_kms(struct drm_device *dev) { vga_switcheroo_process_delayed_switch(); } int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) { return 0; } void radeon_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv) { } void radeon_driver_preclose_kms(struct drm_device *dev, struct drm_file *file_priv) { struct radeon_device *rdev = dev->dev_private; if (rdev->hyperz_filp == file_priv) rdev->hyperz_filp = NULL; if (rdev->cmask_filp == file_priv) rdev->cmask_filp = NULL; } /* * VBlank related functions. */ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; if (crtc < 0 || crtc >= rdev->num_crtc) { DRM_ERROR("Invalid crtc %d\n", crtc); return -EINVAL; } return radeon_get_vblank_counter(rdev, crtc); } int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; if (crtc < 0 || crtc >= rdev->num_crtc) { DRM_ERROR("Invalid crtc %d\n", crtc); return -EINVAL; } rdev->irq.crtc_vblank_int[crtc] = true; return radeon_irq_set(rdev); } void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) { struct radeon_device *rdev = dev->dev_private; if (crtc < 0 || crtc >= rdev->num_crtc) { DRM_ERROR("Invalid crtc %d\n", crtc); return; } rdev->irq.crtc_vblank_int[crtc] = false; radeon_irq_set(rdev); } int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, int *max_error, struct timeval *vblank_time, unsigned flags) { struct drm_crtc *drmcrtc; struct radeon_device *rdev = dev->dev_private; if (crtc < 0 || crtc >= dev->num_crtcs) { DRM_ERROR("Invalid crtc %d\n", crtc); return -EINVAL; } /* Get associated drm_crtc: */ drmcrtc = &rdev->mode_info.crtcs[crtc]->base; /* Helper routine in DRM core does all the work: */ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, vblank_time, flags, drmcrtc); } /* * 
IOCTL. */ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, struct drm_file *file_priv) { /* Not valid in KMS. */ return -EINVAL; } #define KMS_INVALID_IOCTL(name) \ int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\ { \ DRM_ERROR("invalid ioctl with kms %s\n", __func__); \ return -EINVAL; \ } /* * All these ioctls are invalid in kms world. */ KMS_INVALID_IOCTL(radeon_cp_init_kms) KMS_INVALID_IOCTL(radeon_cp_start_kms) KMS_INVALID_IOCTL(radeon_cp_stop_kms) KMS_INVALID_IOCTL(radeon_cp_reset_kms) KMS_INVALID_IOCTL(radeon_cp_idle_kms) KMS_INVALID_IOCTL(radeon_cp_resume_kms) KMS_INVALID_IOCTL(radeon_engine_reset_kms) KMS_INVALID_IOCTL(radeon_fullscreen_kms) KMS_INVALID_IOCTL(radeon_cp_swap_kms) KMS_INVALID_IOCTL(radeon_cp_clear_kms) KMS_INVALID_IOCTL(radeon_cp_vertex_kms) KMS_INVALID_IOCTL(radeon_cp_indices_kms) KMS_INVALID_IOCTL(radeon_cp_texture_kms) KMS_INVALID_IOCTL(radeon_cp_stipple_kms) KMS_INVALID_IOCTL(radeon_cp_indirect_kms) KMS_INVALID_IOCTL(radeon_cp_vertex2_kms) KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms) KMS_INVALID_IOCTL(radeon_cp_getparam_kms) KMS_INVALID_IOCTL(radeon_cp_flip_kms) KMS_INVALID_IOCTL(radeon_mem_alloc_kms) KMS_INVALID_IOCTL(radeon_mem_free_kms) KMS_INVALID_IOCTL(radeon_mem_init_heap_kms) KMS_INVALID_IOCTL(radeon_irq_emit_kms) KMS_INVALID_IOCTL(radeon_irq_wait_kms) KMS_INVALID_IOCTL(radeon_cp_setparam_kms) KMS_INVALID_IOCTL(radeon_surface_alloc_kms) KMS_INVALID_IOCTL(radeon_surface_free_kms) struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, 
radeon_cp_resume_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), /* KMS */ DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, 
radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), }; int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
gpl-2.0
Dabug123/owlCore64
drivers/media/radio/wl128x/fmdrv_v4l2.c
2096
15683
/* * FM Driver for Connectivity chip of Texas Instruments. * This file provides interfaces to V4L2 subsystem. * * This module registers with V4L2 subsystem as Radio * data system interface (/dev/radio). During the registration, * it will expose two set of function pointers. * * 1) File operation related API (open, close, read, write, poll...etc). * 2) Set of V4L2 IOCTL complaint API. * * Copyright (C) 2011 Texas Instruments * Author: Raja Mani <raja_mani@ti.com> * Author: Manjunatha Halli <manjunatha_halli@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/export.h> #include "fmdrv.h" #include "fmdrv_v4l2.h" #include "fmdrv_common.h" #include "fmdrv_rx.h" #include "fmdrv_tx.h" static struct video_device *gradio_dev; static u8 radio_disconnected; /* -- V4L2 RADIO (/dev/radioX) device file operation interfaces --- */ /* Read RX RDS data */ static ssize_t fm_v4l2_fops_read(struct file *file, char __user * buf, size_t count, loff_t *ppos) { u8 rds_mode; int ret; struct fmdev *fmdev; fmdev = video_drvdata(file); if (!radio_disconnected) { fmerr("FM device is already disconnected\n"); return -EIO; } if (mutex_lock_interruptible(&fmdev->mutex)) return -ERESTARTSYS; /* Turn on RDS mode if it is disabled */ ret = fm_rx_get_rds_mode(fmdev, &rds_mode); if (ret < 0) { fmerr("Unable to read current rds mode\n"); goto read_unlock; } if (rds_mode == FM_RDS_DISABLE) { ret = 
fmc_set_rds_mode(fmdev, FM_RDS_ENABLE); if (ret < 0) { fmerr("Failed to enable rds mode\n"); goto read_unlock; } } /* Copy RDS data from internal buffer to user buffer */ ret = fmc_transfer_rds_from_internal_buff(fmdev, file, buf, count); read_unlock: mutex_unlock(&fmdev->mutex); return ret; } /* Write TX RDS data */ static ssize_t fm_v4l2_fops_write(struct file *file, const char __user * buf, size_t count, loff_t *ppos) { struct tx_rds rds; int ret; struct fmdev *fmdev; ret = copy_from_user(&rds, buf, sizeof(rds)); rds.text[sizeof(rds.text) - 1] = '\0'; fmdbg("(%d)type: %d, text %s, af %d\n", ret, rds.text_type, rds.text, rds.af_freq); if (ret) return -EFAULT; fmdev = video_drvdata(file); if (mutex_lock_interruptible(&fmdev->mutex)) return -ERESTARTSYS; fm_tx_set_radio_text(fmdev, rds.text, rds.text_type); fm_tx_set_af(fmdev, rds.af_freq); mutex_unlock(&fmdev->mutex); return sizeof(rds); } static u32 fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts) { int ret; struct fmdev *fmdev; fmdev = video_drvdata(file); mutex_lock(&fmdev->mutex); ret = fmc_is_rds_data_available(fmdev, file, pts); mutex_unlock(&fmdev->mutex); if (ret < 0) return POLLIN | POLLRDNORM; return 0; } /* * Handle open request for "/dev/radioX" device. * Start with FM RX mode as default. 
*/ static int fm_v4l2_fops_open(struct file *file) { int ret; struct fmdev *fmdev = NULL; /* Don't allow multiple open */ if (radio_disconnected) { fmerr("FM device is already opened\n"); return -EBUSY; } fmdev = video_drvdata(file); if (mutex_lock_interruptible(&fmdev->mutex)) return -ERESTARTSYS; ret = fmc_prepare(fmdev); if (ret < 0) { fmerr("Unable to prepare FM CORE\n"); goto open_unlock; } fmdbg("Load FM RX firmware..\n"); ret = fmc_set_mode(fmdev, FM_MODE_RX); if (ret < 0) { fmerr("Unable to load FM RX firmware\n"); goto open_unlock; } radio_disconnected = 1; open_unlock: mutex_unlock(&fmdev->mutex); return ret; } static int fm_v4l2_fops_release(struct file *file) { int ret; struct fmdev *fmdev; fmdev = video_drvdata(file); if (!radio_disconnected) { fmdbg("FM device is already closed\n"); return 0; } mutex_lock(&fmdev->mutex); ret = fmc_set_mode(fmdev, FM_MODE_OFF); if (ret < 0) { fmerr("Unable to turn off the chip\n"); goto release_unlock; } ret = fmc_release(fmdev); if (ret < 0) { fmerr("FM CORE release failed\n"); goto release_unlock; } radio_disconnected = 0; release_unlock: mutex_unlock(&fmdev->mutex); return ret; } /* V4L2 RADIO (/dev/radioX) device IOCTL interfaces */ static int fm_v4l2_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *capability) { strlcpy(capability->driver, FM_DRV_NAME, sizeof(capability->driver)); strlcpy(capability->card, FM_DRV_CARD_SHORT_NAME, sizeof(capability->card)); sprintf(capability->bus_info, "UART"); capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_MODULATOR | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE; return 0; } static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct fmdev *fmdev = container_of(ctrl->handler, struct fmdev, ctrl_handler); switch (ctrl->id) { case V4L2_CID_TUNE_ANTENNA_CAPACITOR: ctrl->val = fm_tx_get_tune_cap_val(fmdev); break; default: fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id); break; } return 0; } 
static int fm_v4l2_s_ctrl(struct v4l2_ctrl *ctrl) { struct fmdev *fmdev = container_of(ctrl->handler, struct fmdev, ctrl_handler); switch (ctrl->id) { case V4L2_CID_AUDIO_VOLUME: /* set volume */ return fm_rx_set_volume(fmdev, (u16)ctrl->val); case V4L2_CID_AUDIO_MUTE: /* set mute */ return fmc_set_mute_mode(fmdev, (u8)ctrl->val); case V4L2_CID_TUNE_POWER_LEVEL: /* set TX power level - ext control */ return fm_tx_set_pwr_lvl(fmdev, (u8)ctrl->val); case V4L2_CID_TUNE_PREEMPHASIS: return fm_tx_set_preemph_filter(fmdev, (u8) ctrl->val); default: return -EINVAL; } } static int fm_v4l2_vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *audio) { memset(audio, 0, sizeof(*audio)); strcpy(audio->name, "Radio"); audio->capability = V4L2_AUDCAP_STEREO; return 0; } static int fm_v4l2_vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio *audio) { if (audio->index != 0) return -EINVAL; return 0; } /* Get tuner attributes. If current mode is NOT RX, return error */ static int fm_v4l2_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *tuner) { struct fmdev *fmdev = video_drvdata(file); u32 bottom_freq; u32 top_freq; u16 stereo_mono_mode; u16 rssilvl; int ret; if (tuner->index != 0) return -EINVAL; if (fmdev->curr_fmmode != FM_MODE_RX) return -EPERM; ret = fm_rx_get_band_freq_range(fmdev, &bottom_freq, &top_freq); if (ret != 0) return ret; ret = fm_rx_get_stereo_mono(fmdev, &stereo_mono_mode); if (ret != 0) return ret; ret = fm_rx_get_rssi_level(fmdev, &rssilvl); if (ret != 0) return ret; strcpy(tuner->name, "FM"); tuner->type = V4L2_TUNER_RADIO; /* Store rangelow and rangehigh freq in unit of 62.5 Hz */ tuner->rangelow = bottom_freq * 16; tuner->rangehigh = top_freq * 16; tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO | ((fmdev->rx.rds.flag == FM_RDS_ENABLE) ? 
V4L2_TUNER_SUB_RDS : 0); tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP; tuner->audmode = (stereo_mono_mode ? V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO); /* * Actual rssi value lies in between -128 to +127. * Convert this range from 0 to 255 by adding +128 */ rssilvl += 128; /* * Return signal strength value should be within 0 to 65535. * Find out correct signal radio by multiplying (65535/255) = 257 */ tuner->signal = rssilvl * 257; tuner->afc = 0; return ret; } /* * Set tuner attributes. If current mode is NOT RX, set to RX. * Currently, we set only audio mode (mono/stereo) and RDS state (on/off). * Should we set other tuner attributes, too? */ static int fm_v4l2_vidioc_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *tuner) { struct fmdev *fmdev = video_drvdata(file); u16 aud_mode; u8 rds_mode; int ret; if (tuner->index != 0) return -EINVAL; aud_mode = (tuner->audmode == V4L2_TUNER_MODE_STEREO) ? FM_STEREO_MODE : FM_MONO_MODE; rds_mode = (tuner->rxsubchans & V4L2_TUNER_SUB_RDS) ? 
FM_RDS_ENABLE : FM_RDS_DISABLE; if (fmdev->curr_fmmode != FM_MODE_RX) { ret = fmc_set_mode(fmdev, FM_MODE_RX); if (ret < 0) { fmerr("Failed to set RX mode\n"); return ret; } } ret = fmc_set_stereo_mono(fmdev, aud_mode); if (ret < 0) { fmerr("Failed to set RX stereo/mono mode\n"); return ret; } ret = fmc_set_rds_mode(fmdev, rds_mode); if (ret < 0) fmerr("Failed to set RX RDS mode\n"); return ret; } /* Get tuner or modulator radio frequency */ static int fm_v4l2_vidioc_g_freq(struct file *file, void *priv, struct v4l2_frequency *freq) { struct fmdev *fmdev = video_drvdata(file); int ret; ret = fmc_get_freq(fmdev, &freq->frequency); if (ret < 0) { fmerr("Failed to get frequency\n"); return ret; } /* Frequency unit of 62.5 Hz*/ freq->frequency = (u32) freq->frequency * 16; return 0; } /* Set tuner or modulator radio frequency */ static int fm_v4l2_vidioc_s_freq(struct file *file, void *priv, const struct v4l2_frequency *freq) { struct fmdev *fmdev = video_drvdata(file); /* * As V4L2_TUNER_CAP_LOW is set 1 user sends the frequency * in units of 62.5 Hz. */ return fmc_set_freq(fmdev, freq->frequency / 16); } /* Set hardware frequency seek. If current mode is NOT RX, set it RX. */ static int fm_v4l2_vidioc_s_hw_freq_seek(struct file *file, void *priv, const struct v4l2_hw_freq_seek *seek) { struct fmdev *fmdev = video_drvdata(file); int ret; if (file->f_flags & O_NONBLOCK) return -EWOULDBLOCK; if (fmdev->curr_fmmode != FM_MODE_RX) { ret = fmc_set_mode(fmdev, FM_MODE_RX); if (ret != 0) { fmerr("Failed to set RX mode\n"); return ret; } } ret = fm_rx_seek(fmdev, seek->seek_upward, seek->wrap_around, seek->spacing); if (ret < 0) fmerr("RX seek failed - %d\n", ret); return ret; } /* Get modulator attributes. If mode is not TX, return no attributes. 
*/ static int fm_v4l2_vidioc_g_modulator(struct file *file, void *priv, struct v4l2_modulator *mod) { struct fmdev *fmdev = video_drvdata(file); if (mod->index != 0) return -EINVAL; if (fmdev->curr_fmmode != FM_MODE_TX) return -EPERM; mod->txsubchans = ((fmdev->tx_data.aud_mode == FM_STEREO_MODE) ? V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO) | ((fmdev->tx_data.rds.flag == FM_RDS_ENABLE) ? V4L2_TUNER_SUB_RDS : 0); mod->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_LOW; return 0; } /* Set modulator attributes. If mode is not TX, set to TX. */ static int fm_v4l2_vidioc_s_modulator(struct file *file, void *priv, const struct v4l2_modulator *mod) { struct fmdev *fmdev = video_drvdata(file); u8 rds_mode; u16 aud_mode; int ret; if (mod->index != 0) return -EINVAL; if (fmdev->curr_fmmode != FM_MODE_TX) { ret = fmc_set_mode(fmdev, FM_MODE_TX); if (ret != 0) { fmerr("Failed to set TX mode\n"); return ret; } } aud_mode = (mod->txsubchans & V4L2_TUNER_SUB_STEREO) ? FM_STEREO_MODE : FM_MONO_MODE; rds_mode = (mod->txsubchans & V4L2_TUNER_SUB_RDS) ? 
FM_RDS_ENABLE : FM_RDS_DISABLE; ret = fm_tx_set_stereo_mono(fmdev, aud_mode); if (ret < 0) { fmerr("Failed to set mono/stereo mode for TX\n"); return ret; } ret = fm_tx_set_rds_mode(fmdev, rds_mode); if (ret < 0) fmerr("Failed to set rds mode for TX\n"); return ret; } static const struct v4l2_file_operations fm_drv_fops = { .owner = THIS_MODULE, .read = fm_v4l2_fops_read, .write = fm_v4l2_fops_write, .poll = fm_v4l2_fops_poll, .unlocked_ioctl = video_ioctl2, .open = fm_v4l2_fops_open, .release = fm_v4l2_fops_release, }; static const struct v4l2_ctrl_ops fm_ctrl_ops = { .s_ctrl = fm_v4l2_s_ctrl, .g_volatile_ctrl = fm_g_volatile_ctrl, }; static const struct v4l2_ioctl_ops fm_drv_ioctl_ops = { .vidioc_querycap = fm_v4l2_vidioc_querycap, .vidioc_g_audio = fm_v4l2_vidioc_g_audio, .vidioc_s_audio = fm_v4l2_vidioc_s_audio, .vidioc_g_tuner = fm_v4l2_vidioc_g_tuner, .vidioc_s_tuner = fm_v4l2_vidioc_s_tuner, .vidioc_g_frequency = fm_v4l2_vidioc_g_freq, .vidioc_s_frequency = fm_v4l2_vidioc_s_freq, .vidioc_s_hw_freq_seek = fm_v4l2_vidioc_s_hw_freq_seek, .vidioc_g_modulator = fm_v4l2_vidioc_g_modulator, .vidioc_s_modulator = fm_v4l2_vidioc_s_modulator }; /* V4L2 RADIO device parent structure */ static struct video_device fm_viddev_template = { .fops = &fm_drv_fops, .ioctl_ops = &fm_drv_ioctl_ops, .name = FM_DRV_NAME, .release = video_device_release, /* * To ensure both the tuner and modulator ioctls are accessible we * set the vfl_dir to M2M to indicate this. * * It is not really a mem2mem device of course, but it can both receive * and transmit using the same radio device. It's the only radio driver * that does this and it should really be split in two radio devices, * but that would affect applications using this driver. 
*/ .vfl_dir = VFL_DIR_M2M, }; int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr) { struct v4l2_ctrl *ctrl; int ret; /* Init mutex for core locking */ mutex_init(&fmdev->mutex); /* Allocate new video device */ gradio_dev = video_device_alloc(); if (NULL == gradio_dev) { fmerr("Can't allocate video device\n"); return -ENOMEM; } /* Setup FM driver's V4L2 properties */ memcpy(gradio_dev, &fm_viddev_template, sizeof(fm_viddev_template)); video_set_drvdata(gradio_dev, fmdev); gradio_dev->lock = &fmdev->mutex; /* Register with V4L2 subsystem as RADIO device */ if (video_register_device(gradio_dev, VFL_TYPE_RADIO, radio_nr)) { video_device_release(gradio_dev); fmerr("Could not register video device\n"); return -ENOMEM; } fmdev->radio_dev = gradio_dev; /* Register to v4l2 ctrl handler framework */ fmdev->radio_dev->ctrl_handler = &fmdev->ctrl_handler; ret = v4l2_ctrl_handler_init(&fmdev->ctrl_handler, 5); if (ret < 0) { fmerr("(fmdev): Can't init ctrl handler\n"); v4l2_ctrl_handler_free(&fmdev->ctrl_handler); return -EBUSY; } /* * Following controls are handled by V4L2 control framework. * Added in ascending ID order. 
*/ v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops, V4L2_CID_AUDIO_VOLUME, FM_RX_VOLUME_MIN, FM_RX_VOLUME_MAX, 1, FM_RX_VOLUME_MAX); v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1); v4l2_ctrl_new_std_menu(&fmdev->ctrl_handler, &fm_ctrl_ops, V4L2_CID_TUNE_PREEMPHASIS, V4L2_PREEMPHASIS_75_uS, 0, V4L2_PREEMPHASIS_75_uS); v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops, V4L2_CID_TUNE_POWER_LEVEL, FM_PWR_LVL_LOW, FM_PWR_LVL_HIGH, 1, FM_PWR_LVL_HIGH); ctrl = v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops, V4L2_CID_TUNE_ANTENNA_CAPACITOR, 0, 255, 1, 255); if (ctrl) ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; return 0; } void *fm_v4l2_deinit_video_device(void) { struct fmdev *fmdev; fmdev = video_get_drvdata(gradio_dev); /* Unregister to v4l2 ctrl handler framework*/ v4l2_ctrl_handler_free(&fmdev->ctrl_handler); /* Unregister RADIO device from V4L2 subsystem */ video_unregister_device(gradio_dev); return fmdev; }
gpl-2.0
Pafcholini/Beta_TW
drivers/input/serio/apbps2.c
2608
5809
/*
 * Copyright (C) 2013 Aeroflex Gaisler
 *
 * This driver supports the APBPS2 PS/2 core available in the GRLIB
 * VHDL IP core library.
 *
 * Full documentation of the APBPS2 core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * See "Documentation/devicetree/bindings/input/ps2keyb-mouse-apbps2.txt" for
 * information on open firmware properties.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Contributors: Daniel Hellstrom <daniel@gaisler.com>
 */
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/io.h>

/*
 * APBPS2 register layout. All registers are 32-bit and accessed big-endian
 * (ioread32be/iowrite32be below), as the core sits on a big-endian AMBA bus.
 */
struct apbps2_regs {
	u32 __iomem data;	/* 0x00 - RX/TX data */
	u32 __iomem status;	/* 0x04 - status/error flags */
	u32 __iomem ctrl;	/* 0x08 - RX/TX + interrupt enables */
	u32 __iomem reload;	/* 0x0c - timer reload (core clock scaler) */
};

/* Status register bits */
#define APBPS2_STATUS_DR	(1<<0)	/* data ready (RX FIFO non-empty) */
#define APBPS2_STATUS_PE	(1<<1)	/* parity error */
#define APBPS2_STATUS_FE	(1<<2)	/* framing error */
#define APBPS2_STATUS_KI	(1<<3)
#define APBPS2_STATUS_RF	(1<<4)
#define APBPS2_STATUS_TF	(1<<5)	/* TX FIFO full */
#define APBPS2_STATUS_TCNT	(0x1f<<22)
#define APBPS2_STATUS_RCNT	(0x1f<<27)

/* Control register bits */
#define APBPS2_CTRL_RE		(1<<0)	/* receiver enable */
#define APBPS2_CTRL_TE		(1<<1)	/* transmitter enable */
#define APBPS2_CTRL_RI		(1<<2)	/* RX interrupt enable */
#define APBPS2_CTRL_TI		(1<<3)	/* TX interrupt enable */

/* Per-port driver state: the registered serio port and its register block */
struct apbps2_priv {
	struct serio *io;
	struct apbps2_regs *regs;
};

/* Monotonic index used only to build unique phys names ("apbps2_%d") */
static int apbps2_idx;

/*
 * Interrupt handler: drains the RX FIFO, forwarding each scancode byte to
 * the serio core together with parity/framing error flags.
 */
static irqreturn_t apbps2_isr(int irq, void *dev_id)
{
	struct apbps2_priv *priv = dev_id;
	unsigned long status, data, rxflags;
	irqreturn_t ret = IRQ_NONE;

	while ((status = ioread32be(&priv->regs->status)) & APBPS2_STATUS_DR) {
		data = ioread32be(&priv->regs->data);
		rxflags = (status & APBPS2_STATUS_PE) ? SERIO_PARITY : 0;
		rxflags |= (status & APBPS2_STATUS_FE) ? SERIO_FRAME : 0;

		/*
		 * Writing 0 to the status register appears to clear the
		 * sticky error bits; done only when an error was seen.
		 */
		if (rxflags)
			iowrite32be(0, &priv->regs->status);

		serio_interrupt(priv->io, data, rxflags);

		ret = IRQ_HANDLED;
	}

	return ret;
}

/*
 * serio ->write callback: waits (up to ~100ms, polled in 10us steps) for
 * room in the TX FIFO, then queues the byte and enables the transmitter.
 * Returns 0 on success, -ETIMEDOUT if the FIFO stayed full.
 */
static int apbps2_write(struct serio *io, unsigned char val)
{
	struct apbps2_priv *priv = io->port_data;
	unsigned int tleft = 10000; /* timeout in 100ms */

	/* delay until PS/2 controller has room for more chars */
	while ((ioread32be(&priv->regs->status) & APBPS2_STATUS_TF) && tleft--)
		udelay(10);

	if ((ioread32be(&priv->regs->status) & APBPS2_STATUS_TF) == 0) {
		iowrite32be(val, &priv->regs->data);
		iowrite32be(APBPS2_CTRL_RE | APBPS2_CTRL_RI | APBPS2_CTRL_TE,
				&priv->regs->ctrl);
		return 0;
	}

	return -ETIMEDOUT;
}

/*
 * serio ->open callback: clears stale error flags and any queued RX data
 * (bounded to 1024 reads), then enables the receiver with RX interrupts.
 */
static int apbps2_open(struct serio *io)
{
	struct apbps2_priv *priv = io->port_data;
	int limit;
	unsigned long tmp;

	/* clear error flags */
	iowrite32be(0, &priv->regs->status);

	/* Clear old data if available (unlikely) */
	limit = 1024;
	while ((ioread32be(&priv->regs->status) & APBPS2_STATUS_DR) && --limit)
		tmp = ioread32be(&priv->regs->data);

	/* Enable receiver and its interrupt */
	iowrite32be(APBPS2_CTRL_RE | APBPS2_CTRL_RI, &priv->regs->ctrl);

	return 0;
}

/* serio ->close callback: disables the core (and thus its interrupts) */
static void apbps2_close(struct serio *io)
{
	struct apbps2_priv *priv = io->port_data;

	/* stop interrupts at PS/2 HW level */
	iowrite32be(0, &priv->regs->ctrl);
}

/*
 * Initialize one APBPS2 PS/2 core: map registers, reset the core, hook the
 * IRQ, program the clock-scaler reload value from the "freq" DT property,
 * and register a serio port. Managed (devm_*) resources cover priv, the
 * register mapping and the IRQ; the serio port itself is released by the
 * serio core on unregister.
 */
static int apbps2_of_probe(struct platform_device *ofdev)
{
	struct apbps2_priv *priv;
	int irq, err;
	u32 freq_hz;
	struct resource *res;

	priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&ofdev->dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	/* Find Device Address */
	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	priv->regs = devm_ioremap_resource(&ofdev->dev, res);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	/* Reset hardware, disable interrupt */
	iowrite32be(0, &priv->regs->ctrl);

	/*
	 * IRQ
	 * NOTE(review): irq_of_parse_and_map() returns 0 on failure and that
	 * value is passed to devm_request_irq() unchecked -- presumably the
	 * mapping always succeeds on this platform; verify.
	 */
	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	err = devm_request_irq(&ofdev->dev, irq, apbps2_isr,
				IRQF_SHARED, "apbps2", priv);
	if (err) {
		dev_err(&ofdev->dev, "request IRQ%d failed\n", irq);
		return err;
	}

	/* Get core frequency */
	if (of_property_read_u32(ofdev->dev.of_node, "freq", &freq_hz)) {
		dev_err(&ofdev->dev, "unable to get core frequency\n");
		return -EINVAL;
	}

	/* Set reload register to core freq in kHz/10 */
	iowrite32be(freq_hz / 10000, &priv->regs->reload);

	priv->io = kzalloc(sizeof(struct serio), GFP_KERNEL);
	if (!priv->io)
		return -ENOMEM;

	priv->io->id.type = SERIO_8042;
	priv->io->open = apbps2_open;
	priv->io->close = apbps2_close;
	priv->io->write = apbps2_write;
	priv->io->port_data = priv;
	strlcpy(priv->io->name, "APBPS2 PS/2", sizeof(priv->io->name));
	snprintf(priv->io->phys, sizeof(priv->io->phys),
		 "apbps2_%d", apbps2_idx++);

	dev_info(&ofdev->dev, "irq = %d, base = 0x%p\n", irq, priv->regs);

	serio_register_port(priv->io);

	platform_set_drvdata(ofdev, priv);

	return 0;
}

/* Tear down one port; serio core releases the struct serio allocation */
static int apbps2_of_remove(struct platform_device *of_dev)
{
	struct apbps2_priv *priv = platform_get_drvdata(of_dev);

	serio_unregister_port(priv->io);

	return 0;
}

/* Matched by GRLIB core name rather than "compatible" strings */
static struct of_device_id apbps2_of_match[] = {
	{ .name = "GAISLER_APBPS2", },
	{ .name = "01_060", },
	{}
};

MODULE_DEVICE_TABLE(of, apbps2_of_match);

static struct platform_driver apbps2_of_driver = {
	.driver = {
		.name = "grlib-apbps2",
		.owner = THIS_MODULE,
		.of_match_table = apbps2_of_match,
	},
	.probe = apbps2_of_probe,
	.remove = apbps2_of_remove,
};

module_platform_driver(apbps2_of_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("GRLIB APBPS2 PS/2 serial I/O");
MODULE_LICENSE("GPL");
gpl-2.0
NooNameR/NMK
sound/pci/trident/trident.c
3632
5887
/*
 * Driver for Trident 4DWave DX/NX & SiS SI7018 Audio PCI soundcard
 *
 * Driver was originated by Trident <audio@tridentmicro.com>
 * Fri Feb 19 15:55:28 MST 1999
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/trident.h>
#include <sound/initval.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, <audio@tridentmicro.com>");
MODULE_DESCRIPTION("Trident 4D-WaveDX/NX & SiS SI7018");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Trident,4DWave DX},"
		"{Trident,4DWave NX},"
		"{SiS,SI7018 PCI Audio},"
		"{Best Union,Miss Melody 4DWave PCI},"
		"{HIS,4DWave PCI},"
		"{Warpspeed,ONSpeed 4DWave PCI},"
		"{Aztech Systems,PCI 64-Q3D},"
		"{Addonics,SV 750},"
		"{CHIC,True Sound 4Dwave},"
		"{Shark,Predator4D-PCI},"
		"{Jaton,SonicWave 4D},"
		"{Hoontech,SoundTrack Digital 4DWave NX}}");

/* Standard ALSA per-card module parameters, indexed by probe order */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;	/* Enable this card */
static int pcm_channels[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 32};
static int wavetable_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 8192};

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Trident 4DWave PCI soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Trident 4DWave PCI soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable Trident 4DWave PCI soundcard.");
module_param_array(pcm_channels, int, NULL, 0444);
MODULE_PARM_DESC(pcm_channels, "Number of hardware channels assigned for PCM.");
module_param_array(wavetable_size, int, NULL, 0444);
MODULE_PARM_DESC(wavetable_size, "Maximum memory size in kB for wavetable synth.");

/*
 * PCI IDs. The DX entry additionally matches on class (multimedia audio)
 * because the same vendor/device ID was reused for non-audio functions.
 */
static DEFINE_PCI_DEVICE_TABLE(snd_trident_ids) = {
	{PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_DX),
		PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_NX),
		0, 0, 0},
	{PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7018), 0, 0, 0},
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, snd_trident_ids);

/*
 * Probe one card: create the snd_card and chip instance, then register the
 * PCM device(s), optional foldback/SPDIF PCMs and MPU401 UART depending on
 * the detected chip variant. On any failure the whole card is freed with
 * snd_card_free(), which tears down every device created so far.
 */
static int __devinit snd_trident_probe(struct pci_dev *pci,
				       const struct pci_device_id *pci_id)
{
	static int dev;		/* probe-order card counter, persists across calls */
	struct snd_card *card;
	struct snd_trident *trident;
	const char *str;
	int err, pcm_dev = 0;

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
	if (err < 0)
		return err;

	/* SI7018 supports 1 max voice per channel, others support 2 */
	if ((err = snd_trident_create(card, pci,
				      pcm_channels[dev],
				      ((pci->vendor << 16) | pci->device) == TRIDENT_DEVICE_ID_SI7018 ? 1 : 2,
				      wavetable_size[dev],
				      &trident)) < 0) {
		snd_card_free(card);
		return err;
	}
	card->private_data = trident;

	/* Pick driver/shortname strings from the detected chip variant */
	switch (trident->device) {
	case TRIDENT_DEVICE_ID_DX:
		str = "TRID4DWAVEDX";
		break;
	case TRIDENT_DEVICE_ID_NX:
		str = "TRID4DWAVENX";
		break;
	case TRIDENT_DEVICE_ID_SI7018:
		str = "SI7018";
		break;
	default:
		str = "Unknown";
	}
	strcpy(card->driver, str);
	if (trident->device == TRIDENT_DEVICE_ID_SI7018) {
		strcpy(card->shortname, "SiS ");
	} else {
		strcpy(card->shortname, "Trident ");
	}
	strcat(card->shortname, card->driver);
	sprintf(card->longname, "%s PCI Audio at 0x%lx, irq %d",
		card->shortname, trident->port, trident->irq);

	/* Main PCM device, always present */
	if ((err = snd_trident_pcm(trident, pcm_dev++, NULL)) < 0) {
		snd_card_free(card);
		return err;
	}
	/* Foldback (capture of playback) PCM exists only on DX/NX */
	switch (trident->device) {
	case TRIDENT_DEVICE_ID_DX:
	case TRIDENT_DEVICE_ID_NX:
		if ((err = snd_trident_foldback_pcm(trident, pcm_dev++, NULL)) < 0) {
			snd_card_free(card);
			return err;
		}
		break;
	}
	/* SPDIF PCM exists on NX and SI7018 */
	if (trident->device == TRIDENT_DEVICE_ID_NX || trident->device == TRIDENT_DEVICE_ID_SI7018) {
		if ((err = snd_trident_spdif_pcm(trident, pcm_dev++, NULL)) < 0) {
			snd_card_free(card);
			return err;
		}
	}
	/* MPU401 MIDI UART on everything except SI7018, sharing the chip IRQ */
	if (trident->device != TRIDENT_DEVICE_ID_SI7018 &&
	    (err = snd_mpu401_uart_new(card, 0, MPU401_HW_TRID4DWAVE,
				       trident->midi_port,
				       MPU401_INFO_INTEGRATED,
				       trident->irq, 0, &trident->rmidi)) < 0) {
		snd_card_free(card);
		return err;
	}

	snd_trident_create_gameport(trident);

	if ((err = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return err;
	}
	pci_set_drvdata(pci, card);
	dev++;
	return 0;
}

/* Remove one card; snd_card_free() releases all child devices */
static void __devexit snd_trident_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}

static struct pci_driver driver = {
	.name = "Trident4DWaveAudio",
	.id_table = snd_trident_ids,
	.probe = snd_trident_probe,
	.remove = __devexit_p(snd_trident_remove),
#ifdef CONFIG_PM
	/* Legacy PCI PM hooks; implemented in trident_main.c */
	.suspend = snd_trident_suspend,
	.resume = snd_trident_resume,
#endif
};

static int __init alsa_card_trident_init(void)
{
	return pci_register_driver(&driver);
}

static void __exit alsa_card_trident_exit(void)
{
	pci_unregister_driver(&driver);
}

module_init(alsa_card_trident_init)
module_exit(alsa_card_trident_exit)
gpl-2.0
k2wlxda/3.10
drivers/gpu/drm/omapdrm/tcm-sita.c
4400
18703
/*
 * tcm-sita.c
 *
 * SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm
 *
 * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
 *          Lajos Molnar <molnar@ti.com>
 *
 * Copyright (C) 2009-2010 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "tcm-sita.h"

#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))

/* Individual selection criteria for different scan areas */
static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;

/*********************************************
 *	TCM API - Sita Implementation
 *********************************************/
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
			   struct tcm_area *area);
static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
static void sita_deinit(struct tcm *tcm);

/*********************************************
 *	Main Scanner functions
 *********************************************/
static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
				   struct tcm_area *area);

static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
			struct tcm_area *field, struct tcm_area *area);

static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
			struct tcm_area *field, struct tcm_area *area);

static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
			struct tcm_area *field, struct tcm_area *area);

/*********************************************
 *	Support Infrastructure Methods
 *********************************************/
static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);

static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
			    struct tcm_area *field, s32 criteria,
			    struct score *best);

static void get_nearness_factor(struct tcm_area *field,
				struct tcm_area *candidate,
				struct nearness_factor *nf);

static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
			       struct neighbor_stats *stat);

static void fill_area(struct tcm *tcm,
				struct tcm_area *area,
				struct tcm_area *parent);

/*********************************************/

/*********************************************
 *	Utility Methods
 *********************************************/

/**
 * Create and initialize a SiTA container.
 *
 * The slot map is an array of tcm->width column pointers, each pointing to
 * tcm->height slot entries (i.e. indexed as map[x][y]).  All slots start out
 * free (NULL parent).  @attr, if valid, sets the 2D/1D division point;
 * otherwise a 3:1 split of width and height is used.
 *
 * Returns the container, or NULL on invalid size / allocation failure.
 */
struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
{
	struct tcm *tcm;
	struct sita_pvt *pvt;
	struct tcm_area area = {0};
	s32 i;

	if (width == 0 || height == 0)
		return NULL;

	tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
	pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
	if (!tcm || !pvt)
		goto error;

	memset(tcm, 0, sizeof(*tcm));
	memset(pvt, 0, sizeof(*pvt));

	/* Updating the pointers to SiTA implementation APIs */
	tcm->height = height;
	tcm->width = width;
	tcm->reserve_2d = sita_reserve_2d;
	tcm->reserve_1d = sita_reserve_1d;
	tcm->free = sita_free;
	tcm->deinit = sita_deinit;
	tcm->pvt = (void *)pvt;

	spin_lock_init(&(pvt->lock));

	/* Creating tam map: width column pointers, each with height slots */
	pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
	if (!pvt->map)
		goto error;

	for (i = 0; i < tcm->width; i++) {
		pvt->map[i] =
			kmalloc(sizeof(**pvt->map) * tcm->height,
					GFP_KERNEL);
		if (pvt->map[i] == NULL) {
			while (i--)
				kfree(pvt->map[i]);
			kfree(pvt->map);
			goto error;
		}
	}

	if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
		pvt->div_pt.x = attr->x;
		pvt->div_pt.y = attr->y;
	} else {
		/* Defaulting to 3:1 ratio on width for 2D area split */
		/* Defaulting to 3:1 ratio on height for 2D and 1D split */
		pvt->div_pt.x = (tcm->width * 3) / 4;
		pvt->div_pt.y = (tcm->height * 3) / 4;
	}

	spin_lock(&(pvt->lock));
	assign(&area, 0, 0, width - 1, height - 1);
	fill_area(tcm, &area, NULL);
	spin_unlock(&(pvt->lock));
	return tcm;

error:
	kfree(tcm);
	kfree(pvt);
	return NULL;
}

/**
 * Tear down a container created by sita_init(): clear the whole map, then
 * free the per-column slot arrays and the map itself.
 *
 * Note: the caller owns @tcm itself (sita_init() allocated it but deinit
 * historically did not free it); only the private state is released here.
 */
static void sita_deinit(struct tcm *tcm)
{
	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
	struct tcm_area area = {0};
	s32 i;

	area.p1.x = tcm->width - 1;
	area.p1.y = tcm->height - 1;

	spin_lock(&(pvt->lock));
	fill_area(tcm, &area, NULL);
	spin_unlock(&(pvt->lock));

	/*
	 * The map holds tcm->width column pointers (see sita_init()), so the
	 * free loop must iterate over width, not height.  Iterating height
	 * read past the array (height > width) or leaked columns
	 * (height < width).
	 */
	for (i = 0; i < tcm->width; i++)
		kfree(pvt->map[i]);
	kfree(pvt->map);

	kfree(pvt);
}

/**
 * Reserve a 1D area in the container
 *
 * @param num_slots	size of 1D area
 * @param area		pointer to the area that will be populated with the
 *			reserved area
 *
 * @return 0 on success, non-0 error value on failure.
 */
static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
			   struct tcm_area *area)
{
	s32 ret;
	struct tcm_area field = {0};
	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;

	spin_lock(&(pvt->lock));

	/* Scanning entire container */
	assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);

	ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
	if (!ret)
		/* update map */
		fill_area(tcm, area, area);

	spin_unlock(&(pvt->lock));

	return ret;
}

/**
 * Reserve a 2D area in the container
 *
 * @param w	width
 * @param h	height
 * @param area	pointer to the area that will be populated with the reserved
 *		area
 *
 * @return 0 on success, non-0 error value on failure.
 */
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
			   struct tcm_area *area)
{
	s32 ret;
	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;

	/* not supporting more than 64 as alignment */
	if (align > 64)
		return -EINVAL;

	/* we prefer 1, 32 and 64 as alignment */
	align = align <= 1 ? 1 : align <= 32 ? 32 : 64;

	spin_lock(&(pvt->lock));
	ret = scan_areas_and_find_fit(tcm, w, h, align, area);
	if (!ret)
		/* update map */
		fill_area(tcm, area, area);

	spin_unlock(&(pvt->lock));

	return ret;
}

/**
 * Unreserve a previously allocated 2D or 1D area
 * @param area	area to be freed
 * @return 0 - success
 */
static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
{
	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;

	spin_lock(&(pvt->lock));

	/* check that this is in fact an existing area */
	WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
		pvt->map[area->p1.x][area->p1.y] != area);

	/* Clear the contents of the associated tiles in the map */
	fill_area(tcm, area, NULL);

	spin_unlock(&(pvt->lock));

	return 0;
}

/**
 * Note: In general the cordinates in the scan field area relevant to the can
 * sweep directions. The scan origin (e.g. top-left corner) will always be
 * the p0 member of the field.  Therfore, for a scan from top-left p0.x <= p1.x
 * and p0.y <= p1.y; whereas, for a scan from bottom-right p1.x <= p0.x and p1.y
 * <= p0.y
 */

/**
 * Raster scan horizontally right to left from top to bottom to find a place for
 * a 2D area of given size inside a scan field.
 *
 * @param w	width of desired area
 * @param h	height of desired area
 * @param align	desired area alignment
 * @param area	pointer to the area that will be set to the best position
 * @param field	area to scan (inclusive)
 *
 * @return 0 on success, non-0 error value on failure.
 */
static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
			struct tcm_area *field, struct tcm_area *area)
{
	s32 x, y;
	s16 start_x, end_x, start_y, end_y, found_x = -1;
	struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
	struct score best = {{0}, {0}, {0}, 0};

	start_x = field->p0.x;
	end_x = field->p1.x;
	start_y = field->p0.y;
	end_y = field->p1.y;

	/* check scan area co-ordinates */
	if (field->p0.x < field->p1.x ||
	    field->p1.y < field->p0.y)
		return -EINVAL;

	/* check if allocation would fit in scan area */
	if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
		return -ENOSPC;

	/* adjust start_x and end_y, as allocation would not fit beyond */
	start_x = ALIGN_DOWN(start_x - w + 1, align); /* - 1 to be inclusive */
	end_y = end_y - h + 1;

	/* check if allocation would still fit in scan area */
	if (start_x < end_x)
		return -ENOSPC;

	/* scan field top-to-bottom, right-to-left */
	for (y = start_y; y <= end_y; y++) {
		for (x = start_x; x >= end_x; x -= align) {
			if (is_area_free(map, x, y, w, h)) {
				found_x = x;

				/* update best candidate */
				if (update_candidate(tcm, x, y, w, h, field,
							CR_R2L_T2B, &best))
					goto done;

				/* tighten the scan's lower x bound */
				end_x = x + 1;
				break;
			} else if (map[x][y] && map[x][y]->is2d) {
				/* step over 2D areas */
				x = ALIGN(map[x][y]->p0.x - w + 1, align);
			}
		}

		/* break if you find a free area shouldering the scan field */
		if (found_x == start_x)
			break;
	}

	if (!best.a.tcm)
		return -ENOSPC;
done:
	assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
	return 0;
}

/**
 * Raster scan horizontally left to right from top to bottom to find a place for
 * a 2D area of given size inside a scan field.
 *
 * @param w	width of desired area
 * @param h	height of desired area
 * @param align	desired area alignment
 * @param area	pointer to the area that will be set to the best position
 * @param field	area to scan (inclusive)
 *
 * @return 0 on success, non-0 error value on failure.
 */
static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
			struct tcm_area *field, struct tcm_area *area)
{
	s32 x, y;
	s16 start_x, end_x, start_y, end_y, found_x = -1;
	struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
	struct score best = {{0}, {0}, {0}, 0};

	start_x = field->p0.x;
	end_x = field->p1.x;
	start_y = field->p0.y;
	end_y = field->p1.y;

	/* check scan area co-ordinates */
	if (field->p1.x < field->p0.x ||
	    field->p1.y < field->p0.y)
		return -EINVAL;

	/* check if allocation would fit in scan area */
	if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
		return -ENOSPC;

	start_x = ALIGN(start_x, align);

	/* check if allocation would still fit in scan area */
	if (w > LEN(end_x, start_x))
		return -ENOSPC;

	/* adjust end_x and end_y, as allocation would not fit beyond */
	end_x = end_x - w + 1; /* + 1 to be inclusive */
	end_y = end_y - h + 1;

	/* scan field top-to-bottom, left-to-right */
	for (y = start_y; y <= end_y; y++) {
		for (x = start_x; x <= end_x; x += align) {
			if (is_area_free(map, x, y, w, h)) {
				found_x = x;

				/* update best candidate */
				if (update_candidate(tcm, x, y, w, h, field,
							CR_L2R_T2B, &best))
					goto done;
				/* change upper x bound */
				end_x = x - 1;

				break;
			} else if (map[x][y] && map[x][y]->is2d) {
				/* step over 2D areas */
				x = ALIGN_DOWN(map[x][y]->p1.x, align);
			}
		}

		/* break if you find a free area shouldering the scan field */
		if (found_x == start_x)
			break;
	}

	if (!best.a.tcm)
		return -ENOSPC;
done:
	assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
	return 0;
}

/**
 * Raster scan horizontally right to left from bottom to top to find a place
 * for a 1D area of given size inside a scan field.
 *
 * @param num_slots	size of desired area
 * @param align		desired area alignment
 * @param area		pointer to the area that will be set to the best
 *			position
 * @param field		area to scan (inclusive)
 *
 * @return 0 on success, non-0 error value on failure.
 */
static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
				struct tcm_area *field, struct tcm_area *area)
{
	s32 found = 0;
	s16 x, y;
	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
	struct tcm_area *p;

	/* check scan area co-ordinates */
	if (field->p0.y < field->p1.y)
		return -EINVAL;

	/**
	 * Currently we only support full width 1D scan field, which makes sense
	 * since 1D slot-ordering spans the full container width.
	 */
	if (tcm->width != field->p0.x - field->p1.x + 1)
		return -EINVAL;

	/* check if allocation would fit in scan area */
	if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
		return -ENOSPC;

	x = field->p0.x;
	y = field->p0.y;

	/* find num_slots consecutive free slots to the left */
	while (found < num_slots) {
		if (y < 0)
			return -ENOSPC;

		/* remember bottom-right corner */
		if (found == 0) {
			area->p1.x = x;
			area->p1.y = y;
		}

		/* skip busy regions */
		p = pvt->map[x][y];
		if (p) {
			/* move to left of 2D areas, top left of 1D */
			x = p->p0.x;
			if (!p->is2d)
				y = p->p0.y;

			/* start over */
			found = 0;
		} else {
			/* count consecutive free slots */
			found++;
			if (found == num_slots)
				break;
		}

		/* move to the left */
		if (x == 0)
			y--;
		x = (x ? : tcm->width) - 1;
	}

	/* set top-left corner */
	area->p0.x = x;
	area->p0.y = y;
	return 0;
}

/**
 * Find a place for a 2D area of given size inside a scan field based on its
 * alignment needs.
 *
 * @param w	width of desired area
 * @param h	height of desired area
 * @param align	desired area alignment
 * @param area	pointer to the area that will be set to the best position
 *
 * @return 0 on success, non-0 error value on failure.
 */
static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
				   struct tcm_area *area)
{
	s32 ret = 0;
	struct tcm_area field = {0};
	u16 boundary_x, boundary_y;
	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;

	if (align > 1) {
		/* prefer top-left corner */
		boundary_x = pvt->div_pt.x - 1;
		boundary_y = pvt->div_pt.y - 1;

		/* expand width and height if needed */
		if (w > pvt->div_pt.x)
			boundary_x = tcm->width - 1;
		if (h > pvt->div_pt.y)
			boundary_y = tcm->height - 1;

		assign(&field, 0, 0, boundary_x, boundary_y);
		ret = scan_l2r_t2b(tcm, w, h, align, &field, area);

		/* scan whole container if failed, but do not scan 2x */
		if (ret != 0 && (boundary_x != tcm->width - 1 ||
				 boundary_y != tcm->height - 1)) {
			/* scan the entire container if nothing found */
			assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
			ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
		}
	} else if (align == 1) {
		/* prefer top-right corner */
		boundary_x = pvt->div_pt.x;
		boundary_y = pvt->div_pt.y - 1;

		/* expand width and height if needed */
		if (w > (tcm->width - pvt->div_pt.x))
			boundary_x = 0;
		if (h > pvt->div_pt.y)
			boundary_y = tcm->height - 1;

		assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
		ret = scan_r2l_t2b(tcm, w, h, align, &field, area);

		/* scan whole container if failed, but do not scan 2x */
		if (ret != 0 && (boundary_x != 0 ||
				 boundary_y != tcm->height - 1)) {
			/* scan the entire container if nothing found */
			assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
			ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
		}
	}

	return ret;
}

/* check if an entire area is free */
static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
{
	u16 x = 0, y = 0;
	for (y = y0; y < y0 + h; y++) {
		for (x = x0; x < x0 + w; x++) {
			if (map[x][y])
				return false;
		}
	}
	return true;
}

/* fills an area with a parent tcm_area */
static void fill_area(struct tcm *tcm, struct tcm_area *area,
			struct tcm_area *parent)
{
	s32 x, y;
	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
	struct tcm_area a, a_;

	/* set area's tcm; otherwise, enumerator considers it invalid */
	area->tcm = tcm;

	tcm_for_each_slice(a, *area, a_) {
		for (x = a.p0.x; x <= a.p1.x; ++x)
			for (y = a.p0.y; y <= a.p1.y; ++y)
				pvt->map[x][y] = parent;

	}
}

/**
 * Compares a candidate area to the current best area, and if it is a better
 * fit, it updates the best to this one.
 *
 * @param x0, y0, w, h		top, left, width, height of candidate area
 * @param field			scan field
 * @param criteria		scan criteria
 * @param best			best candidate and its scores
 *
 * @return 1 (true) if the candidate area is known to be the final best, so no
 * more searching should be performed
 */
static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
			    struct tcm_area *field, s32 criteria,
			    struct score *best)
{
	struct score me;	/* score for area */

	/*
	 * NOTE: For horizontal bias we always give the first found, because our
	 * scan is horizontal-raster-based and the first candidate will always
	 * have the horizontal bias.
	 */
	bool first = criteria & CR_BIAS_HORIZONTAL;

	assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);

	/* calculate score for current candidate */
	if (!first) {
		get_neighbor_stats(tcm, &me.a, &me.n);
		me.neighs = me.n.edge + me.n.busy;
		get_nearness_factor(field, &me.a, &me.f);
	}

	/* the 1st candidate is always the best */
	if (!best->a.tcm)
		goto better;

	BUG_ON(first);

	/* diagonal balance check */
	if ((criteria & CR_DIAGONAL_BALANCE) &&
	    best->neighs <= me.neighs &&
	    (best->neighs < me.neighs ||
	     /* this implies that neighs and occupied match */
	     best->n.busy < me.n.busy ||
	     (best->n.busy == me.n.busy &&
	      /* check the nearness factor */
	      best->f.x + best->f.y > me.f.x + me.f.y)))
		goto better;

	/* not better, keep going */
	return 0;

better:
	/* save current area as best */
	memcpy(best, &me, sizeof(me));
	best->a.tcm = tcm;
	return first;
}

/**
 * Calculate the nearness factor of an area in a search field.  The nearness
 * factor is smaller if the area is closer to the search origin.
 *
 * NOTE(review): divides by the field extent; assumes the scan field is at
 * least 2 slots wide and tall in each direction -- confirm with callers.
 */
static void get_nearness_factor(struct tcm_area *field,
				struct tcm_area *area,
				struct nearness_factor *nf)
{
	/**
	 * Using signed math as field coordinates may be reversed if
	 * search direction is right-to-left or bottom-to-top.
	 */
	nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
		(field->p1.x - field->p0.x);
	nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
		(field->p1.y - field->p0.y);
}

/* get neighbor statistics */
static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
			 struct neighbor_stats *stat)
{
	s16 x = 0, y = 0;
	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;

	/* Clearing any exisiting values */
	memset(stat, 0, sizeof(*stat));

	/* process top & bottom edges */
	for (x = area->p0.x; x <= area->p1.x; x++) {
		if (area->p0.y == 0)
			stat->edge++;
		else if (pvt->map[x][area->p0.y - 1])
			stat->busy++;

		if (area->p1.y == tcm->height - 1)
			stat->edge++;
		else if (pvt->map[x][area->p1.y + 1])
			stat->busy++;
	}

	/* process left & right edges */
	for (y = area->p0.y; y <= area->p1.y; ++y) {
		if (area->p0.x == 0)
			stat->edge++;
		else if (pvt->map[area->p0.x - 1][y])
			stat->busy++;

		if (area->p1.x == tcm->width - 1)
			stat->edge++;
		else if (pvt->map[area->p1.x + 1][y])
			stat->busy++;
	}
}
gpl-2.0
ShikharArvind/myriad_eye
drivers/staging/comedi/drivers/ni_at_ao.c
8240
12105
/* comedi/drivers/ni_at_ao.c Driver for NI AT-AO-6/10 boards COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000,2002 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: ni_at_ao Description: National Instruments AT-AO-6/10 Devices: [National Instruments] AT-AO-6 (at-ao-6), AT-AO-10 (at-ao-10) Status: should work Author: ds Updated: Sun Dec 26 12:26:28 EST 2004 Configuration options: [0] - I/O port base address [1] - IRQ (unused) [2] - DMA (unused) [3] - analog output range, set by jumpers on hardware (0 for -10 to 10V bipolar, 1 for 0V to 10V unipolar) */ /* * Register-level programming information can be found in NI * document 320379.pdf. 
*/ #include "../comedidev.h" #include <linux/ioport.h> /* board egisters */ /* registers with _2_ are accessed when GRP2WR is set in CFG1 */ #define ATAO_SIZE 0x20 #define ATAO_2_DMATCCLR 0x00 /* W 16 */ #define ATAO_DIN 0x00 /* R 16 */ #define ATAO_DOUT 0x00 /* W 16 */ #define ATAO_CFG2 0x02 /* W 16 */ #define CALLD1 0x8000 #define CALLD0 0x4000 #define FFRTEN 0x2000 #define DAC2S8 0x1000 #define DAC2S6 0x0800 #define DAC2S4 0x0400 #define DAC2S2 0x0200 #define DAC2S0 0x0100 #define LDAC8 0x0080 #define LDAC6 0x0040 #define LDAC4 0x0020 #define LDAC2 0x0010 #define LDAC0 0x0008 #define PROMEN 0x0004 #define SCLK 0x0002 #define SDATA 0x0001 #define ATAO_2_INT1CLR 0x02 /* W 16 */ #define ATAO_CFG3 0x04 /* W 16 */ #define DMAMODE 0x0040 #define CLKOUT 0x0020 #define RCLKEN 0x0010 #define DOUTEN2 0x0008 #define DOUTEN1 0x0004 #define EN2_5V 0x0002 #define SCANEN 0x0001 #define ATAO_2_INT2CLR 0x04 /* W 16 */ #define ATAO_82C53_BASE 0x06 /* RW 8 */ #define ATAO_82C53_CNTR1 0x06 /* RW 8 */ #define ATAO_82C53_CNTR2 0x07 /* RW 8 */ #define ATAO_82C53_CNTR3 0x08 /* RW 8 */ #define ATAO_82C53_CNTRCMD 0x09 /* W 8 */ #define CNTRSEL1 0x80 #define CNTRSEL0 0x40 #define RWSEL1 0x20 #define RWSEL0 0x10 #define MODESEL2 0x08 #define MODESEL1 0x04 #define MODESEL0 0x02 #define BCDSEL 0x01 /* read-back command */ #define COUNT 0x20 #define STATUS 0x10 #define CNTR3 0x08 #define CNTR2 0x04 #define CNTR1 0x02 /* status */ #define OUT 0x80 #define _NULL 0x40 #define RW1 0x20 #define RW0 0x10 #define MODE2 0x08 #define MODE1 0x04 #define MODE0 0x02 #define BCD 0x01 #define ATAO_2_RTSISHFT 0x06 /* W 8 */ #define RSI 0x01 #define ATAO_2_RTSISTRB 0x07 /* W 8 */ #define ATAO_CFG1 0x0a /* W 16 */ #define EXTINT2EN 0x8000 #define EXTINT1EN 0x4000 #define CNTINT2EN 0x2000 #define CNTINT1EN 0x1000 #define TCINTEN 0x0800 #define CNT1SRC 0x0400 #define CNT2SRC 0x0200 #define FIFOEN 0x0100 #define GRP2WR 0x0080 #define EXTUPDEN 0x0040 #define DMARQ 0x0020 #define DMAEN 0x0010 #define CH_mask 
0x000f #define ATAO_STATUS 0x0a /* R 16 */ #define FH 0x0040 #define FE 0x0020 #define FF 0x0010 #define INT2 0x0008 #define INT1 0x0004 #define TCINT 0x0002 #define PROMOUT 0x0001 #define ATAO_FIFO_WRITE 0x0c /* W 16 */ #define ATAO_FIFO_CLEAR 0x0c /* R 16 */ #define ATAO_DACn(x) (0x0c + 2*(x)) /* W */ /* * Board descriptions for two imaginary boards. Describing the * boards in this way is optional, and completely driver-dependent. * Some drivers use arrays such as this, other do not. */ struct atao_board { const char *name; int n_ao_chans; }; static const struct atao_board atao_boards[] = { { .name = "ai-ao-6", .n_ao_chans = 6, }, { .name = "ai-ao-10", .n_ao_chans = 10, }, }; #define thisboard ((struct atao_board *)dev->board_ptr) struct atao_private { unsigned short cfg1; unsigned short cfg2; unsigned short cfg3; /* Used for AO readback */ unsigned int ao_readback[10]; }; #define devpriv ((struct atao_private *)dev->private) static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int atao_detach(struct comedi_device *dev); static struct comedi_driver driver_atao = { .driver_name = "ni_at_ao", .module = THIS_MODULE, .attach = atao_attach, .detach = atao_detach, .board_name = &atao_boards[0].name, .offset = sizeof(struct atao_board), .num_names = ARRAY_SIZE(atao_boards), }; static int __init driver_atao_init_module(void) { return comedi_driver_register(&driver_atao); } static void __exit driver_atao_cleanup_module(void) { comedi_driver_unregister(&driver_atao); } module_init(driver_atao_init_module); module_exit(driver_atao_cleanup_module); static void atao_reset(struct comedi_device *dev); static int atao_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int atao_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int atao_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct 
comedi_insn *insn, unsigned int *data); static int atao_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int atao_calib_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int atao_calib_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; unsigned long iobase; int ao_unipolar; iobase = it->options[0]; if (iobase == 0) iobase = 0x1c0; ao_unipolar = it->options[3]; printk(KERN_INFO "comedi%d: ni_at_ao: 0x%04lx", dev->minor, iobase); if (!request_region(iobase, ATAO_SIZE, "ni_at_ao")) { printk(" I/O port conflict\n"); return -EIO; } dev->iobase = iobase; /* dev->board_ptr = atao_probe(dev); */ dev->board_name = thisboard->name; if (alloc_private(dev, sizeof(struct atao_private)) < 0) return -ENOMEM; if (alloc_subdevices(dev, 4) < 0) return -ENOMEM; s = dev->subdevices + 0; /* analog output subdevice */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = thisboard->n_ao_chans; s->maxdata = (1 << 12) - 1; if (ao_unipolar) s->range_table = &range_unipolar10; else s->range_table = &range_bipolar10; s->insn_write = &atao_ao_winsn; s->insn_read = &atao_ao_rinsn; s = dev->subdevices + 1; /* digital i/o subdevice */ s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 8; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = atao_dio_insn_bits; s->insn_config = atao_dio_insn_config; s = dev->subdevices + 2; /* caldac subdevice */ s->type = COMEDI_SUBD_CALIB; s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL; s->n_chan = 21; s->maxdata = 0xff; s->insn_read = atao_calib_insn_read; s->insn_write = atao_calib_insn_write; s = dev->subdevices + 3; /* eeprom subdevice */ /* s->type=COMEDI_SUBD_EEPROM; */ s->type = 
COMEDI_SUBD_UNUSED; atao_reset(dev); printk(KERN_INFO "\n"); return 0; } static int atao_detach(struct comedi_device *dev) { printk(KERN_INFO "comedi%d: atao: remove\n", dev->minor); if (dev->iobase) release_region(dev->iobase, ATAO_SIZE); return 0; } static void atao_reset(struct comedi_device *dev) { /* This is the reset sequence described in the manual */ devpriv->cfg1 = 0; outw(devpriv->cfg1, dev->iobase + ATAO_CFG1); outb(RWSEL0 | MODESEL2, dev->iobase + ATAO_82C53_CNTRCMD); outb(0x03, dev->iobase + ATAO_82C53_CNTR1); outb(CNTRSEL0 | RWSEL0 | MODESEL2, dev->iobase + ATAO_82C53_CNTRCMD); devpriv->cfg2 = 0; outw(devpriv->cfg2, dev->iobase + ATAO_CFG2); devpriv->cfg3 = 0; outw(devpriv->cfg3, dev->iobase + ATAO_CFG3); inw(dev->iobase + ATAO_FIFO_CLEAR); devpriv->cfg1 |= GRP2WR; outw(devpriv->cfg1, dev->iobase + ATAO_CFG1); outw(0, dev->iobase + ATAO_2_INT1CLR); outw(0, dev->iobase + ATAO_2_INT2CLR); outw(0, dev->iobase + ATAO_2_DMATCCLR); devpriv->cfg1 &= ~GRP2WR; outw(devpriv->cfg1, dev->iobase + ATAO_CFG1); } static int atao_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); short bits; for (i = 0; i < insn->n; i++) { bits = data[i] - 0x800; if (chan == 0) { devpriv->cfg1 |= GRP2WR; outw(devpriv->cfg1, dev->iobase + ATAO_CFG1); } outw(bits, dev->iobase + ATAO_DACn(chan)); if (chan == 0) { devpriv->cfg1 &= ~GRP2WR; outw(devpriv->cfg1, dev->iobase + ATAO_CFG1); } devpriv->ao_readback[chan] = data[i]; } return i; } static int atao_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) data[i] = devpriv->ao_readback[chan]; return i; } static int atao_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n != 2) return -EINVAL; if (data[0]) { s->state &= ~data[0]; 
s->state |= data[0] & data[1]; outw(s->state, dev->iobase + ATAO_DOUT); } data[1] = inw(dev->iobase + ATAO_DIN); return 2; } static int atao_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int chan = CR_CHAN(insn->chanspec); unsigned int mask, bit; /* The input or output configuration of each digital line is * configured by a special insn_config instruction. chanspec * contains the channel to be changed, and data[0] contains the * value COMEDI_INPUT or COMEDI_OUTPUT. */ mask = (chan < 4) ? 0x0f : 0xf0; bit = (chan < 4) ? DOUTEN1 : DOUTEN2; switch (data[0]) { case INSN_CONFIG_DIO_OUTPUT: s->io_bits |= mask; devpriv->cfg3 |= bit; break; case INSN_CONFIG_DIO_INPUT: s->io_bits &= ~mask; devpriv->cfg3 &= ~bit; break; case INSN_CONFIG_DIO_QUERY: data[1] = (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; break; default: return -EINVAL; break; } outw(devpriv->cfg3, dev->iobase + ATAO_CFG3); return 1; } /* * Figure 2-1 in the manual shows 3 chips labeled DAC8800, which * are 8-channel 8-bit DACs. These are most likely the calibration * DACs. It is not explicitly stated in the manual how to access * the caldacs, but we can guess. */ static int atao_calib_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; for (i = 0; i < insn->n; i++) data[i] = 0; /* XXX */ return insn->n; } static int atao_calib_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int bitstring, bit; unsigned int chan = CR_CHAN(insn->chanspec); bitstring = ((chan & 0x7) << 8) | (data[insn->n - 1] & 0xff); for (bit = 1 << (11 - 1); bit; bit >>= 1) { outw(devpriv->cfg2 | ((bit & bitstring) ? SDATA : 0), dev->iobase + ATAO_CFG2); outw(devpriv->cfg2 | SCLK | ((bit & bitstring) ? 
SDATA : 0), dev->iobase + ATAO_CFG2); } /* strobe the appropriate caldac */ outw(devpriv->cfg2 | (((chan >> 3) + 1) << 14), dev->iobase + ATAO_CFG2); outw(devpriv->cfg2, dev->iobase + ATAO_CFG2); return insn->n; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
bmarko82/GT-I9505_MIUI_kernel
arch/cris/arch-v32/drivers/i2c.c
8496
14551
/*!*************************************************************************** *! *! FILE NAME : i2c.c *! *! DESCRIPTION: implements an interface for IIC/I2C, both directly from other *! kernel modules (i2c_writereg/readreg) and from userspace using *! ioctl()'s *! *! Nov 30 1998 Torbjorn Eliasson Initial version. *! Bjorn Wesen Elinux kernel version. *! Jan 14 2000 Johan Adolfsson Fixed PB shadow register stuff - *! don't use PB_I2C if DS1302 uses same bits, *! use PB. *| June 23 2003 Pieter Grimmerink Added 'i2c_sendnack'. i2c_readreg now *| generates nack on last received byte, *| instead of ack. *| i2c_getack changed data level while clock *| was high, causing DS75 to see a stop condition *! *! --------------------------------------------------------------------------- *! *! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN *! *!***************************************************************************/ /****************** INCLUDE FILES SECTION ***********************************/ #include <linux/module.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/init.h> #include <linux/mutex.h> #include <asm/etraxi2c.h> #include <asm/io.h> #include <asm/delay.h> #include "i2c.h" /****************** I2C DEFINITION SECTION *************************/ #define D(x) #define I2C_MAJOR 123 /* LOCAL/EXPERIMENTAL */ static DEFINE_MUTEX(i2c_mutex); static const char i2c_name[] = "i2c"; #define CLOCK_LOW_TIME 8 #define CLOCK_HIGH_TIME 8 #define START_CONDITION_HOLD_TIME 8 #define STOP_CONDITION_HOLD_TIME 8 #define ENABLE_OUTPUT 0x01 #define ENABLE_INPUT 0x00 #define I2C_CLOCK_HIGH 1 #define I2C_CLOCK_LOW 0 #define I2C_DATA_HIGH 1 #define I2C_DATA_LOW 0 #define i2c_enable() #define i2c_disable() /* enable or disable output-enable, to select output or input on the i2c bus */ #define i2c_dir_out() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_out) #define i2c_dir_in() 
crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_in) /* control the i2c clock and data signals */ #define i2c_clk(x) crisv32_io_set(&cris_i2c_clk, x) #define i2c_data(x) crisv32_io_set(&cris_i2c_data, x) /* read a bit from the i2c interface */ #define i2c_getbit() crisv32_io_rd(&cris_i2c_data) #define i2c_delay(usecs) udelay(usecs) static DEFINE_SPINLOCK(i2c_lock); /* Protect directions etc */ /****************** VARIABLE SECTION ************************************/ static struct crisv32_iopin cris_i2c_clk; static struct crisv32_iopin cris_i2c_data; /****************** FUNCTION DEFINITION SECTION *************************/ /* generate i2c start condition */ void i2c_start(void) { /* * SCL=1 SDA=1 */ i2c_dir_out(); i2c_delay(CLOCK_HIGH_TIME/6); i2c_data(I2C_DATA_HIGH); i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); /* * SCL=1 SDA=0 */ i2c_data(I2C_DATA_LOW); i2c_delay(START_CONDITION_HOLD_TIME); /* * SCL=0 SDA=0 */ i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_LOW_TIME); } /* generate i2c stop condition */ void i2c_stop(void) { i2c_dir_out(); /* * SCL=0 SDA=0 */ i2c_clk(I2C_CLOCK_LOW); i2c_data(I2C_DATA_LOW); i2c_delay(CLOCK_LOW_TIME*2); /* * SCL=1 SDA=0 */ i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME*2); /* * SCL=1 SDA=1 */ i2c_data(I2C_DATA_HIGH); i2c_delay(STOP_CONDITION_HOLD_TIME); i2c_dir_in(); } /* write a byte to the i2c interface */ void i2c_outbyte(unsigned char x) { int i; i2c_dir_out(); for (i = 0; i < 8; i++) { if (x & 0x80) { i2c_data(I2C_DATA_HIGH); } else { i2c_data(I2C_DATA_LOW); } i2c_delay(CLOCK_LOW_TIME/2); i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_LOW_TIME/2); x <<= 1; } i2c_data(I2C_DATA_LOW); i2c_delay(CLOCK_LOW_TIME/2); /* * enable input */ i2c_dir_in(); } /* read a byte from the i2c interface */ unsigned char i2c_inbyte(void) { unsigned char aBitByte = 0; int i; /* Switch off I2C to get bit */ i2c_disable(); i2c_dir_in(); i2c_delay(CLOCK_HIGH_TIME/2); /* Get bit */ aBitByte |= 
i2c_getbit(); /* Enable I2C */ i2c_enable(); i2c_delay(CLOCK_LOW_TIME/2); for (i = 1; i < 8; i++) { aBitByte <<= 1; /* Clock pulse */ i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_LOW_TIME); /* Switch off I2C to get bit */ i2c_disable(); i2c_dir_in(); i2c_delay(CLOCK_HIGH_TIME/2); /* Get bit */ aBitByte |= i2c_getbit(); /* Enable I2C */ i2c_enable(); i2c_delay(CLOCK_LOW_TIME/2); } i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); /* * we leave the clock low, getbyte is usually followed * by sendack/nack, they assume the clock to be low */ i2c_clk(I2C_CLOCK_LOW); return aBitByte; } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: i2c_getack *# *# DESCRIPTION : checks if ack was received from ic2 *# *#--------------------------------------------------------------------------*/ int i2c_getack(void) { int ack = 1; /* * enable output */ i2c_dir_out(); /* * Release data bus by setting * data high */ i2c_data(I2C_DATA_HIGH); /* * enable input */ i2c_dir_in(); i2c_delay(CLOCK_HIGH_TIME/4); /* * generate ACK clock pulse */ i2c_clk(I2C_CLOCK_HIGH); #if 0 /* * Use PORT PB instead of I2C * for input. (I2C not working) */ i2c_clk(1); i2c_data(1); /* * switch off I2C */ i2c_data(1); i2c_disable(); i2c_dir_in(); #endif /* * now wait for ack */ i2c_delay(CLOCK_HIGH_TIME/2); /* * check for ack */ if (i2c_getbit()) ack = 0; i2c_delay(CLOCK_HIGH_TIME/2); if (!ack) { if (!i2c_getbit()) /* receiver pulld SDA low */ ack = 1; i2c_delay(CLOCK_HIGH_TIME/2); } /* * our clock is high now, make sure data is low * before we enable our output. If we keep data high * and enable output, we would generate a stop condition. 
*/ #if 0 i2c_data(I2C_DATA_LOW); /* * end clock pulse */ i2c_enable(); i2c_dir_out(); #endif i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_HIGH_TIME/4); /* * enable output */ i2c_dir_out(); /* * remove ACK clock pulse */ i2c_data(I2C_DATA_HIGH); i2c_delay(CLOCK_LOW_TIME/2); return ack; } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: I2C::sendAck *# *# DESCRIPTION : Send ACK on received data *# *#--------------------------------------------------------------------------*/ void i2c_sendack(void) { /* * enable output */ i2c_delay(CLOCK_LOW_TIME); i2c_dir_out(); /* * set ack pulse high */ i2c_data(I2C_DATA_LOW); /* * generate clock pulse */ i2c_delay(CLOCK_HIGH_TIME/6); i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_LOW_TIME/6); /* * reset data out */ i2c_data(I2C_DATA_HIGH); i2c_delay(CLOCK_LOW_TIME); i2c_dir_in(); } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: i2c_sendnack *# *# DESCRIPTION : Sends NACK on received data *# *#--------------------------------------------------------------------------*/ void i2c_sendnack(void) { /* * enable output */ i2c_delay(CLOCK_LOW_TIME); i2c_dir_out(); /* * set data high */ i2c_data(I2C_DATA_HIGH); /* * generate clock pulse */ i2c_delay(CLOCK_HIGH_TIME/6); i2c_clk(I2C_CLOCK_HIGH); i2c_delay(CLOCK_HIGH_TIME); i2c_clk(I2C_CLOCK_LOW); i2c_delay(CLOCK_LOW_TIME); i2c_dir_in(); } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: i2c_write *# *# DESCRIPTION : Writes a value to an I2C device *# *#--------------------------------------------------------------------------*/ int i2c_write(unsigned char theSlave, void *data, size_t nbytes) { int error, cntr = 3; unsigned char bytes_wrote = 0; unsigned char value; unsigned long flags; spin_lock_irqsave(&i2c_lock, flags); do { error = 0; i2c_start(); /* * send slave address */ i2c_outbyte((theSlave 
& 0xfe)); /* * wait for ack */ if (!i2c_getack()) error = 1; /* * send data */ for (bytes_wrote = 0; bytes_wrote < nbytes; bytes_wrote++) { memcpy(&value, data + bytes_wrote, sizeof value); i2c_outbyte(value); /* * now it's time to wait for ack */ if (!i2c_getack()) error |= 4; } /* * end byte stream */ i2c_stop(); } while (error && cntr--); i2c_delay(CLOCK_LOW_TIME); spin_unlock_irqrestore(&i2c_lock, flags); return -error; } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: i2c_read *# *# DESCRIPTION : Reads a value from an I2C device *# *#--------------------------------------------------------------------------*/ int i2c_read(unsigned char theSlave, void *data, size_t nbytes) { unsigned char b = 0; unsigned char bytes_read = 0; int error, cntr = 3; unsigned long flags; spin_lock_irqsave(&i2c_lock, flags); do { error = 0; memset(data, 0, nbytes); /* * generate start condition */ i2c_start(); /* * send slave address */ i2c_outbyte((theSlave | 0x01)); /* * wait for ack */ if (!i2c_getack()) error = 1; /* * fetch data */ for (bytes_read = 0; bytes_read < nbytes; bytes_read++) { b = i2c_inbyte(); memcpy(data + bytes_read, &b, sizeof b); if (bytes_read < (nbytes - 1)) i2c_sendack(); } /* * last received byte needs to be nacked * instead of acked */ i2c_sendnack(); /* * end sequence */ i2c_stop(); } while (error && cntr--); spin_unlock_irqrestore(&i2c_lock, flags); return -error; } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: i2c_writereg *# *# DESCRIPTION : Writes a value to an I2C device *# *#--------------------------------------------------------------------------*/ int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue) { int error, cntr = 3; unsigned long flags; spin_lock_irqsave(&i2c_lock, flags); do { error = 0; i2c_start(); /* * send slave address */ i2c_outbyte((theSlave & 0xfe)); /* * wait for ack */ if(!i2c_getack()) error 
= 1; /* * now select register */ i2c_dir_out(); i2c_outbyte(theReg); /* * now it's time to wait for ack */ if(!i2c_getack()) error |= 2; /* * send register register data */ i2c_outbyte(theValue); /* * now it's time to wait for ack */ if(!i2c_getack()) error |= 4; /* * end byte stream */ i2c_stop(); } while(error && cntr--); i2c_delay(CLOCK_LOW_TIME); spin_unlock_irqrestore(&i2c_lock, flags); return -error; } /*#--------------------------------------------------------------------------- *# *# FUNCTION NAME: i2c_readreg *# *# DESCRIPTION : Reads a value from the decoder registers. *# *#--------------------------------------------------------------------------*/ unsigned char i2c_readreg(unsigned char theSlave, unsigned char theReg) { unsigned char b = 0; int error, cntr = 3; unsigned long flags; spin_lock_irqsave(&i2c_lock, flags); do { error = 0; /* * generate start condition */ i2c_start(); /* * send slave address */ i2c_outbyte((theSlave & 0xfe)); /* * wait for ack */ if(!i2c_getack()) error = 1; /* * now select register */ i2c_dir_out(); i2c_outbyte(theReg); /* * now it's time to wait for ack */ if(!i2c_getack()) error |= 2; /* * repeat start condition */ i2c_delay(CLOCK_LOW_TIME); i2c_start(); /* * send slave address */ i2c_outbyte(theSlave | 0x01); /* * wait for ack */ if(!i2c_getack()) error |= 4; /* * fetch register */ b = i2c_inbyte(); /* * last received byte needs to be nacked * instead of acked */ i2c_sendnack(); /* * end sequence */ i2c_stop(); } while(error && cntr--); spin_unlock_irqrestore(&i2c_lock, flags); return b; } static int i2c_open(struct inode *inode, struct file *filp) { return 0; } static int i2c_release(struct inode *inode, struct file *filp) { return 0; } /* Main device API. ioctl's to write or read to/from i2c registers. 
*/ static long i2c_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; if(_IOC_TYPE(cmd) != ETRAXI2C_IOCTYPE) { return -ENOTTY; } switch (_IOC_NR(cmd)) { case I2C_WRITEREG: /* write to an i2c slave */ D(printk("i2cw %d %d %d\n", I2C_ARGSLAVE(arg), I2C_ARGREG(arg), I2C_ARGVALUE(arg))); mutex_lock(&i2c_mutex); ret = i2c_writereg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg), I2C_ARGVALUE(arg)); mutex_unlock(&i2c_mutex); return ret; case I2C_READREG: { unsigned char val; /* read from an i2c slave */ D(printk("i2cr %d %d ", I2C_ARGSLAVE(arg), I2C_ARGREG(arg))); mutex_lock(&i2c_mutex); val = i2c_readreg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg)); mutex_unlock(&i2c_mutex); D(printk("= %d\n", val)); return val; } default: return -EINVAL; } return 0; } static const struct file_operations i2c_fops = { .owner = THIS_MODULE, .unlocked_ioctl = i2c_ioctl, .open = i2c_open, .release = i2c_release, .llseek = noop_llseek, }; static int __init i2c_init(void) { static int res; static int first = 1; if (!first) return res; first = 0; /* Setup and enable the DATA and CLK pins */ res = crisv32_io_get_name(&cris_i2c_data, CONFIG_ETRAX_V32_I2C_DATA_PORT); if (res < 0) return res; res = crisv32_io_get_name(&cris_i2c_clk, CONFIG_ETRAX_V32_I2C_CLK_PORT); crisv32_io_set_dir(&cris_i2c_clk, crisv32_io_dir_out); return res; } static int __init i2c_register(void) { int res; res = i2c_init(); if (res < 0) return res; /* register char device */ res = register_chrdev(I2C_MAJOR, i2c_name, &i2c_fops); if (res < 0) { printk(KERN_ERR "i2c: couldn't get a major number.\n"); return res; } printk(KERN_INFO "I2C driver v2.2, (c) 1999-2007 Axis Communications AB\n"); return 0; } /* this makes sure that i2c_init is called during boot */ module_init(i2c_register); /****************** END OF FILE i2c.c ********************************/
gpl-2.0
InstigatorX/skyrocket_ics_kernel
kernel/trace/trace_kdb.c
8496
3087
/* * kdb helper for dumping the ftrace buffer * * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com> * * ftrace_dump_buf based on ftrace_dump: * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * */ #include <linux/init.h> #include <linux/kgdb.h> #include <linux/kdb.h> #include <linux/ftrace.h> #include "trace.h" #include "trace_output.h" static void ftrace_dump_buf(int skip_lines, long cpu_file) { /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; unsigned int old_userobj; int cnt = 0, cpu; trace_init_global_iter(&iter); for_each_tracing_cpu(cpu) { atomic_inc(&iter.tr->data[cpu]->disabled); } old_userobj = trace_flags; /* don't look at user memory in panic mode */ trace_flags &= ~TRACE_ITER_SYM_USEROBJ; kdb_printf("Dumping ftrace buffer:\n"); /* reset all but tr, trace, and overruns */ memset(&iter.seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); iter.iter_flags |= TRACE_FILE_LAT_FMT; iter.pos = -1; if (cpu_file == TRACE_PIPE_ALL_CPU) { for_each_tracing_cpu(cpu) { iter.buffer_iter[cpu] = ring_buffer_read_prepare(iter.tr->buffer, cpu); ring_buffer_read_start(iter.buffer_iter[cpu]); tracing_iter_reset(&iter, cpu); } } else { iter.cpu_file = cpu_file; iter.buffer_iter[cpu_file] = ring_buffer_read_prepare(iter.tr->buffer, cpu_file); ring_buffer_read_start(iter.buffer_iter[cpu_file]); tracing_iter_reset(&iter, cpu_file); } if (!trace_empty(&iter)) trace_find_next_entry_inc(&iter); while (!trace_empty(&iter)) { if (!cnt) kdb_printf("---------------------------------\n"); cnt++; if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines) print_trace_line(&iter); if (!skip_lines) trace_printk_seq(&iter.seq); else skip_lines--; if (KDB_FLAG(CMD_INTERRUPT)) goto out; } if (!cnt) kdb_printf(" (ftrace buffer empty)\n"); else kdb_printf("---------------------------------\n"); out: trace_flags = old_userobj; 
for_each_tracing_cpu(cpu) { atomic_dec(&iter.tr->data[cpu]->disabled); } for_each_tracing_cpu(cpu) if (iter.buffer_iter[cpu]) ring_buffer_read_finish(iter.buffer_iter[cpu]); } /* * kdb_ftdump - Dump the ftrace log buffer */ static int kdb_ftdump(int argc, const char **argv) { int skip_lines = 0; long cpu_file; char *cp; if (argc > 2) return KDB_ARGCOUNT; if (argc) { skip_lines = simple_strtol(argv[1], &cp, 0); if (*cp) skip_lines = 0; } if (argc == 2) { cpu_file = simple_strtol(argv[2], &cp, 0); if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 || !cpu_online(cpu_file)) return KDB_BADINT; } else { cpu_file = TRACE_PIPE_ALL_CPU; } kdb_trap_printk++; ftrace_dump_buf(skip_lines, cpu_file); kdb_trap_printk--; return 0; } static __init int kdb_ftrace_register(void) { kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", "Dump ftrace log", 0, KDB_REPEAT_NONE); return 0; } late_initcall(kdb_ftrace_register);
gpl-2.0
garwynn/L710_MA6_Kernel
arch/powerpc/sysdev/mpic_u3msi.c
9776
5369
/* * Copyright 2006, Segher Boessenkool, IBM Corporation. * Copyright 2006-2007, Michael Ellerman, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 of the * License. * */ #include <linux/irq.h> #include <linux/bootmem.h> #include <linux/msi.h> #include <asm/mpic.h> #include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/msi_bitmap.h> #include "mpic.h" /* A bit ugly, can we get this from the pci_dev somehow? */ static struct mpic *msi_mpic; static void mpic_u3msi_mask_irq(struct irq_data *data) { mask_msi_irq(data); mpic_mask_irq(data); } static void mpic_u3msi_unmask_irq(struct irq_data *data) { mpic_unmask_irq(data); unmask_msi_irq(data); } static struct irq_chip mpic_u3msi_chip = { .irq_shutdown = mpic_u3msi_mask_irq, .irq_mask = mpic_u3msi_mask_irq, .irq_unmask = mpic_u3msi_unmask_irq, .irq_eoi = mpic_end_irq, .irq_set_type = mpic_set_irq_type, .irq_set_affinity = mpic_set_affinity, .name = "MPIC-U3MSI", }; static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos) { u8 flags; u32 tmp; u64 addr; pci_read_config_byte(pdev, pos + HT_MSI_FLAGS, &flags); if (flags & HT_MSI_FLAGS_FIXED) return HT_MSI_FIXED_ADDR; pci_read_config_dword(pdev, pos + HT_MSI_ADDR_LO, &tmp); addr = tmp & HT_MSI_ADDR_LO_MASK; pci_read_config_dword(pdev, pos + HT_MSI_ADDR_HI, &tmp); addr = addr | ((u64)tmp << 32); return addr; } static u64 find_ht_magic_addr(struct pci_dev *pdev, unsigned int hwirq) { struct pci_bus *bus; unsigned int pos; for (bus = pdev->bus; bus && bus->self; bus = bus->parent) { pos = pci_find_ht_capability(bus->self, HT_CAPTYPE_MSI_MAPPING); if (pos) return read_ht_magic_addr(bus->self, pos); } return 0; } static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); /* U4 PCIe MSIs need to write to the special 
register in * the bridge that generates interrupts. There should be * theorically a register at 0xf8005000 where you just write * the MSI number and that triggers the right interrupt, but * unfortunately, this is busted in HW, the bridge endian swaps * the value and hits the wrong nibble in the register. * * So instead we use another register set which is used normally * for converting HT interrupts to MPIC interrupts, which decodes * the interrupt number as part of the low address bits * * This will not work if we ever use more than one legacy MSI in * a block but we never do. For one MSI or multiple MSI-X where * each interrupt address can be specified separately, it works * just fine. */ if (of_device_is_compatible(hose->dn, "u4-pcie") || of_device_is_compatible(hose->dn, "U4-pcie")) return 0xf8004000 | (hwirq << 4); return 0; } static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type) { if (type == PCI_CAP_ID_MSIX) pr_debug("u3msi: MSI-X untested, trying anyway.\n"); /* If we can't find a magic address then MSI ain't gonna work */ if (find_ht_magic_addr(pdev, 0) == 0 && find_u4_magic_addr(pdev, 0) == 0) { pr_debug("u3msi: no magic address found for %s\n", pci_name(pdev)); return -ENXIO; } return 0; } static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) { struct msi_desc *entry; list_for_each_entry(entry, &pdev->msi_list, list) { if (entry->irq == NO_IRQ) continue; irq_set_msi_desc(entry->irq, NULL); msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, virq_to_hw(entry->irq), 1); irq_dispose_mapping(entry->irq); } return; } static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { unsigned int virq; struct msi_desc *entry; struct msi_msg msg; u64 addr; int hwirq; list_for_each_entry(entry, &pdev->msi_list, list) { hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1); if (hwirq < 0) { pr_debug("u3msi: failed allocating hwirq\n"); return hwirq; } addr = find_ht_magic_addr(pdev, hwirq); if (addr == 0) addr = 
find_u4_magic_addr(pdev, hwirq); msg.address_lo = addr & 0xFFFFFFFF; msg.address_hi = addr >> 32; virq = irq_create_mapping(msi_mpic->irqhost, hwirq); if (virq == NO_IRQ) { pr_debug("u3msi: failed mapping hwirq 0x%x\n", hwirq); msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1); return -ENOSPC; } irq_set_msi_desc(virq, entry); irq_set_chip(virq, &mpic_u3msi_chip); irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING); pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", virq, hwirq, (unsigned long)addr); printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", virq, hwirq, (unsigned long)addr); msg.data = hwirq; write_msi_msg(virq, &msg); hwirq++; } return 0; } int mpic_u3msi_init(struct mpic *mpic) { int rc; rc = mpic_msi_init_allocator(mpic); if (rc) { pr_debug("u3msi: Error allocating bitmap!\n"); return rc; } pr_debug("u3msi: Registering MPIC U3 MSI callbacks.\n"); BUG_ON(msi_mpic); msi_mpic = mpic; WARN_ON(ppc_md.setup_msi_irqs); ppc_md.setup_msi_irqs = u3msi_setup_msi_irqs; ppc_md.teardown_msi_irqs = u3msi_teardown_msi_irqs; ppc_md.msi_check_device = u3msi_msi_check_device; return 0; }
gpl-2.0
manveru0/FeaCore_Phoenix_S3_JellyBean
arch/sh/boards/mach-microdev/io.c
12336
4159
/* * linux/arch/sh/boards/superh/microdev/io.c * * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com) * Copyright (C) 2003, 2004 SuperH, Inc. * Copyright (C) 2004 Paul Mundt * * SuperH SH4-202 MicroDev board support. * * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/wait.h> #include <asm/io.h> #include <mach/microdev.h> /* * we need to have a 'safe' address to re-direct all I/O requests * that we do not explicitly wish to handle. This safe address * must have the following properies: * * * writes are ignored (no exception) * * reads are benign (no side-effects) * * accesses of width 1, 2 and 4-bytes are all valid. * * The Processor Version Register (PVR) has these properties. */ #define PVR 0xff000030 /* Processor Version Register */ #define IO_IDE2_BASE 0x170ul /* I/O base for SMSC FDC37C93xAPM IDE #2 */ #define IO_IDE1_BASE 0x1f0ul /* I/O base for SMSC FDC37C93xAPM IDE #1 */ #define IO_ISP1161_BASE 0x290ul /* I/O port for Philips ISP1161x USB chip */ #define IO_SERIAL2_BASE 0x2f8ul /* I/O base for SMSC FDC37C93xAPM Serial #2 */ #define IO_LAN91C111_BASE 0x300ul /* I/O base for SMSC LAN91C111 Ethernet chip */ #define IO_IDE2_MISC 0x376ul /* I/O misc for SMSC FDC37C93xAPM IDE #2 */ #define IO_SUPERIO_BASE 0x3f0ul /* I/O base for SMSC FDC37C93xAPM SuperIO chip */ #define IO_IDE1_MISC 0x3f6ul /* I/O misc for SMSC FDC37C93xAPM IDE #1 */ #define IO_SERIAL1_BASE 0x3f8ul /* I/O base for SMSC FDC37C93xAPM Serial #1 */ #define IO_ISP1161_EXTENT 0x04ul /* I/O extent for Philips ISP1161x USB chip */ #define IO_LAN91C111_EXTENT 0x10ul /* I/O extent for SMSC LAN91C111 Ethernet chip */ #define IO_SUPERIO_EXTENT 0x02ul /* I/O extent for SMSC FDC37C93xAPM SuperIO chip */ #define IO_IDE_EXTENT 0x08ul /* I/O extent for IDE Task Register set */ #define IO_SERIAL_EXTENT 0x10ul #define IO_LAN91C111_PHYS 0xa7500000ul /* Physical address of 
SMSC LAN91C111 Ethernet chip */ #define IO_ISP1161_PHYS 0xa7700000ul /* Physical address of Philips ISP1161x USB chip */ #define IO_SUPERIO_PHYS 0xa7800000ul /* Physical address of SMSC FDC37C93xAPM SuperIO chip */ /* * map I/O ports to memory-mapped addresses */ void __iomem *microdev_ioport_map(unsigned long offset, unsigned int len) { unsigned long result; if ((offset >= IO_LAN91C111_BASE) && (offset < IO_LAN91C111_BASE + IO_LAN91C111_EXTENT)) { /* * SMSC LAN91C111 Ethernet chip */ result = IO_LAN91C111_PHYS + offset - IO_LAN91C111_BASE; } else if ((offset >= IO_SUPERIO_BASE) && (offset < IO_SUPERIO_BASE + IO_SUPERIO_EXTENT)) { /* * SMSC FDC37C93xAPM SuperIO chip * * Configuration Registers */ result = IO_SUPERIO_PHYS + (offset << 1); } else if (((offset >= IO_IDE1_BASE) && (offset < IO_IDE1_BASE + IO_IDE_EXTENT)) || (offset == IO_IDE1_MISC)) { /* * SMSC FDC37C93xAPM SuperIO chip * * IDE #1 */ result = IO_SUPERIO_PHYS + (offset << 1); } else if (((offset >= IO_IDE2_BASE) && (offset < IO_IDE2_BASE + IO_IDE_EXTENT)) || (offset == IO_IDE2_MISC)) { /* * SMSC FDC37C93xAPM SuperIO chip * * IDE #2 */ result = IO_SUPERIO_PHYS + (offset << 1); } else if ((offset >= IO_SERIAL1_BASE) && (offset < IO_SERIAL1_BASE + IO_SERIAL_EXTENT)) { /* * SMSC FDC37C93xAPM SuperIO chip * * Serial #1 */ result = IO_SUPERIO_PHYS + (offset << 1); } else if ((offset >= IO_SERIAL2_BASE) && (offset < IO_SERIAL2_BASE + IO_SERIAL_EXTENT)) { /* * SMSC FDC37C93xAPM SuperIO chip * * Serial #2 */ result = IO_SUPERIO_PHYS + (offset << 1); } else if ((offset >= IO_ISP1161_BASE) && (offset < IO_ISP1161_BASE + IO_ISP1161_EXTENT)) { /* * Philips USB ISP1161x chip */ result = IO_ISP1161_PHYS + offset - IO_ISP1161_BASE; } else { /* * safe default. */ printk("Warning: unexpected port in %s( offset = 0x%lx )\n", __func__, offset); result = PVR; } return (void __iomem *)result; }
gpl-2.0
harsh1247/limbo-android
jni/qemu/disas.c
49
12360
/* General "disassemble this chunk" code. Used for debugging. */ #include "config.h" #include "dis-asm.h" #include "elf.h" #include <errno.h> #include "cpu.h" #include "disas.h" /* Filled in by elfload.c. Simplistic, but will do for now. */ struct syminfo *syminfos = NULL; /* Get LENGTH bytes from info's buffer, at target address memaddr. Transfer them to myaddr. */ int buffer_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length, struct disassemble_info *info) { if (memaddr < info->buffer_vma || memaddr + length > info->buffer_vma + info->buffer_length) /* Out of bounds. Use EIO because GDB uses it. */ return EIO; memcpy (myaddr, info->buffer + (memaddr - info->buffer_vma), length); return 0; } /* Get LENGTH bytes from info's buffer, at target address memaddr. Transfer them to myaddr. */ static int target_read_memory (bfd_vma memaddr, bfd_byte *myaddr, int length, struct disassemble_info *info) { cpu_memory_rw_debug(cpu_single_env, memaddr, myaddr, length, 0); return 0; } /* Print an error message. We can assume that this is in response to an error return from buffer_read_memory. */ void perror_memory (int status, bfd_vma memaddr, struct disassemble_info *info) { if (status != EIO) /* Can't happen. */ (*info->fprintf_func) (info->stream, "Unknown error %d\n", status); else /* Actually, address between memaddr and memaddr + len was out of bounds. */ (*info->fprintf_func) (info->stream, "Address 0x%" PRIx64 " is out of bounds.\n", memaddr); } /* This could be in a separate file, to save minuscule amounts of space in statically linked executables. */ /* Just print the address is hex. This is included for completeness even though both GDB and objdump provide their own (to print symbolic addresses). */ void generic_print_address (bfd_vma addr, struct disassemble_info *info) { (*info->fprintf_func) (info->stream, "0x%" PRIx64, addr); } /* Just return the given address. 
*/ int generic_symbol_at_address (bfd_vma addr, struct disassemble_info *info) { return 1; } bfd_vma bfd_getl64 (const bfd_byte *addr) { unsigned long long v; v = (unsigned long long) addr[0]; v |= (unsigned long long) addr[1] << 8; v |= (unsigned long long) addr[2] << 16; v |= (unsigned long long) addr[3] << 24; v |= (unsigned long long) addr[4] << 32; v |= (unsigned long long) addr[5] << 40; v |= (unsigned long long) addr[6] << 48; v |= (unsigned long long) addr[7] << 56; return (bfd_vma) v; } bfd_vma bfd_getl32 (const bfd_byte *addr) { unsigned long v; v = (unsigned long) addr[0]; v |= (unsigned long) addr[1] << 8; v |= (unsigned long) addr[2] << 16; v |= (unsigned long) addr[3] << 24; return (bfd_vma) v; } bfd_vma bfd_getb32 (const bfd_byte *addr) { unsigned long v; v = (unsigned long) addr[0] << 24; v |= (unsigned long) addr[1] << 16; v |= (unsigned long) addr[2] << 8; v |= (unsigned long) addr[3]; return (bfd_vma) v; } bfd_vma bfd_getl16 (const bfd_byte *addr) { unsigned long v; v = (unsigned long) addr[0]; v |= (unsigned long) addr[1] << 8; return (bfd_vma) v; } bfd_vma bfd_getb16 (const bfd_byte *addr) { unsigned long v; v = (unsigned long) addr[0] << 24; v |= (unsigned long) addr[1] << 16; return (bfd_vma) v; } #ifdef TARGET_ARM static int print_insn_thumb1(bfd_vma pc, disassemble_info *info) { return print_insn_arm(pc | 1, info); } #endif /* Disassemble this for me please... (debugging). 
'flags' has the following values: i386 - 1 means 16 bit code, 2 means 64 bit code arm - bit 0 = thumb, bit 1 = reverse endian ppc - nonzero means little endian other targets - unused */ void target_disas(FILE *out, target_ulong code, target_ulong size, int flags) { target_ulong pc; int count; struct disassemble_info disasm_info; int (*print_insn)(bfd_vma pc, disassemble_info *info); INIT_DISASSEMBLE_INFO(disasm_info, out, fprintf); disasm_info.read_memory_func = target_read_memory; disasm_info.buffer_vma = code; disasm_info.buffer_length = size; #ifdef TARGET_WORDS_BIGENDIAN disasm_info.endian = BFD_ENDIAN_BIG; #else disasm_info.endian = BFD_ENDIAN_LITTLE; #endif #if defined(TARGET_I386) if (flags == 2) disasm_info.mach = bfd_mach_x86_64; else if (flags == 1) disasm_info.mach = bfd_mach_i386_i8086; else disasm_info.mach = bfd_mach_i386_i386; print_insn = print_insn_i386; #elif defined(TARGET_ARM) if (flags & 1) { print_insn = print_insn_thumb1; } else { print_insn = print_insn_arm; } if (flags & 2) { #ifdef TARGET_WORDS_BIGENDIAN disasm_info.endian = BFD_ENDIAN_LITTLE; #else disasm_info.endian = BFD_ENDIAN_BIG; #endif } #elif defined(TARGET_SPARC) print_insn = print_insn_sparc; #ifdef TARGET_SPARC64 disasm_info.mach = bfd_mach_sparc_v9b; #endif #elif defined(TARGET_PPC) if (flags >> 16) disasm_info.endian = BFD_ENDIAN_LITTLE; if (flags & 0xFFFF) { /* If we have a precise definitions of the instructions set, use it */ disasm_info.mach = flags & 0xFFFF; } else { #ifdef TARGET_PPC64 disasm_info.mach = bfd_mach_ppc64; #else disasm_info.mach = bfd_mach_ppc; #endif } print_insn = print_insn_ppc; #elif defined(TARGET_M68K) print_insn = print_insn_m68k; #elif defined(TARGET_MIPS) #ifdef TARGET_WORDS_BIGENDIAN print_insn = print_insn_big_mips; #else print_insn = print_insn_little_mips; #endif #elif defined(TARGET_SH4) disasm_info.mach = bfd_mach_sh4; print_insn = print_insn_sh; #elif defined(TARGET_ALPHA) disasm_info.mach = bfd_mach_alpha_ev6; print_insn = print_insn_alpha; 
#elif defined(TARGET_CRIS) if (flags != 32) { disasm_info.mach = bfd_mach_cris_v0_v10; print_insn = print_insn_crisv10; } else { disasm_info.mach = bfd_mach_cris_v32; print_insn = print_insn_crisv32; } #elif defined(TARGET_S390X) disasm_info.mach = bfd_mach_s390_64; print_insn = print_insn_s390; #elif defined(TARGET_MICROBLAZE) disasm_info.mach = bfd_arch_microblaze; print_insn = print_insn_microblaze; #elif defined(TARGET_LM32) disasm_info.mach = bfd_mach_lm32; print_insn = print_insn_lm32; #else fprintf(out, "0x" TARGET_FMT_lx ": Asm output not supported on this arch\n", code); return; #endif for (pc = code; size > 0; pc += count, size -= count) { fprintf(out, "0x" TARGET_FMT_lx ": ", pc); count = print_insn(pc, &disasm_info); #if 0 { int i; uint8_t b; fprintf(out, " {"); for(i = 0; i < count; i++) { target_read_memory(pc + i, &b, 1, &disasm_info); fprintf(out, " %02x", b); } fprintf(out, " }"); } #endif fprintf(out, "\n"); if (count < 0) break; if (size < count) { fprintf(out, "Disassembler disagrees with translator over instruction " "decoding\n" "Please report this to qemu-devel@nongnu.org\n"); break; } } } /* Disassemble this for me please... (debugging). 
*/ void disas(FILE *out, void *code, unsigned long size) { uintptr_t pc; int count; struct disassemble_info disasm_info; int (*print_insn)(bfd_vma pc, disassemble_info *info); INIT_DISASSEMBLE_INFO(disasm_info, out, fprintf); disasm_info.buffer = code; disasm_info.buffer_vma = (uintptr_t)code; disasm_info.buffer_length = size; #ifdef HOST_WORDS_BIGENDIAN disasm_info.endian = BFD_ENDIAN_BIG; #else disasm_info.endian = BFD_ENDIAN_LITTLE; #endif #if defined(CONFIG_TCG_INTERPRETER) print_insn = print_insn_tci; #elif defined(__i386__) disasm_info.mach = bfd_mach_i386_i386; print_insn = print_insn_i386; #elif defined(__x86_64__) disasm_info.mach = bfd_mach_x86_64; print_insn = print_insn_i386; #elif defined(_ARCH_PPC) print_insn = print_insn_ppc; #elif defined(__alpha__) print_insn = print_insn_alpha; #elif defined(__sparc__) print_insn = print_insn_sparc; #if defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__) disasm_info.mach = bfd_mach_sparc_v9b; #endif #elif defined(__arm__) print_insn = print_insn_arm; #elif defined(__MIPSEB__) print_insn = print_insn_big_mips; #elif defined(__MIPSEL__) print_insn = print_insn_little_mips; #elif defined(__m68k__) print_insn = print_insn_m68k; #elif defined(__s390__) print_insn = print_insn_s390; #elif defined(__hppa__) print_insn = print_insn_hppa; #elif defined(__ia64__) print_insn = print_insn_ia64; #else fprintf(out, "0x%lx: Asm output not supported on this arch\n", (long) code); return; #endif for (pc = (uintptr_t)code; size > 0; pc += count, size -= count) { fprintf(out, "0x%08" PRIxPTR ": ", pc); count = print_insn(pc, &disasm_info); fprintf(out, "\n"); if (count < 0) break; } } /* Look up symbol for debugging purpose. Returns "" if unknown. 
*/ const char *lookup_symbol(target_ulong orig_addr) { const char *symbol = ""; struct syminfo *s; for (s = syminfos; s; s = s->next) { symbol = s->lookup_symbol(s, orig_addr); if (symbol[0] != '\0') { break; } } return symbol; } #if !defined(CONFIG_USER_ONLY) #include "monitor.h" static int monitor_disas_is_physical; static CPUArchState *monitor_disas_env; static int monitor_read_memory (bfd_vma memaddr, bfd_byte *myaddr, int length, struct disassemble_info *info) { if (monitor_disas_is_physical) { cpu_physical_memory_read(memaddr, myaddr, length); } else { cpu_memory_rw_debug(monitor_disas_env, memaddr,myaddr, length, 0); } return 0; } static int GCC_FMT_ATTR(2, 3) monitor_fprintf(FILE *stream, const char *fmt, ...) { va_list ap; va_start(ap, fmt); monitor_vprintf((Monitor *)stream, fmt, ap); va_end(ap); return 0; } void monitor_disas(Monitor *mon, CPUArchState *env, target_ulong pc, int nb_insn, int is_physical, int flags) { int count, i; struct disassemble_info disasm_info; int (*print_insn)(bfd_vma pc, disassemble_info *info); INIT_DISASSEMBLE_INFO(disasm_info, (FILE *)mon, monitor_fprintf); monitor_disas_env = env; monitor_disas_is_physical = is_physical; disasm_info.read_memory_func = monitor_read_memory; disasm_info.buffer_vma = pc; #ifdef TARGET_WORDS_BIGENDIAN disasm_info.endian = BFD_ENDIAN_BIG; #else disasm_info.endian = BFD_ENDIAN_LITTLE; #endif #if defined(TARGET_I386) if (flags == 2) disasm_info.mach = bfd_mach_x86_64; else if (flags == 1) disasm_info.mach = bfd_mach_i386_i8086; else disasm_info.mach = bfd_mach_i386_i386; print_insn = print_insn_i386; #elif defined(TARGET_ARM) print_insn = print_insn_arm; #elif defined(TARGET_ALPHA) print_insn = print_insn_alpha; #elif defined(TARGET_SPARC) print_insn = print_insn_sparc; #ifdef TARGET_SPARC64 disasm_info.mach = bfd_mach_sparc_v9b; #endif #elif defined(TARGET_PPC) #ifdef TARGET_PPC64 disasm_info.mach = bfd_mach_ppc64; #else disasm_info.mach = bfd_mach_ppc; #endif print_insn = print_insn_ppc; #elif 
defined(TARGET_M68K) print_insn = print_insn_m68k; #elif defined(TARGET_MIPS) #ifdef TARGET_WORDS_BIGENDIAN print_insn = print_insn_big_mips; #else print_insn = print_insn_little_mips; #endif #elif defined(TARGET_SH4) disasm_info.mach = bfd_mach_sh4; print_insn = print_insn_sh; #elif defined(TARGET_S390X) disasm_info.mach = bfd_mach_s390_64; print_insn = print_insn_s390; #elif defined(TARGET_LM32) disasm_info.mach = bfd_mach_lm32; print_insn = print_insn_lm32; #else monitor_printf(mon, "0x" TARGET_FMT_lx ": Asm output not supported on this arch\n", pc); return; #endif for(i = 0; i < nb_insn; i++) { monitor_printf(mon, "0x" TARGET_FMT_lx ": ", pc); count = print_insn(pc, &disasm_info); monitor_printf(mon, "\n"); if (count < 0) break; pc += count; } } #endif
gpl-2.0
Aircell/asp-kernel
drivers/scsi/pcmcia/nsp_cs.c
49
51691
/*====================================================================== NinjaSCSI-3 / NinjaSCSI-32Bi PCMCIA SCSI host adapter card driver By: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp> Ver.2.8 Support 32bit MMIO mode Support Synchronous Data Transfer Request (SDTR) mode Ver.2.0 Support 32bit PIO mode Ver.1.1.2 Fix for scatter list buffer exceeds Ver.1.1 Support scatter list Ver.0.1 Initial version This software may be used and distributed according to the terms of the GNU General Public License. ======================================================================*/ /*********************************************************************** This driver is for these PCcards. I-O DATA PCSC-F (Workbit NinjaSCSI-3) "WBT", "NinjaSCSI-3", "R1.0" I-O DATA CBSC-II (Workbit NinjaSCSI-32Bi in 16bit mode) "IO DATA", "CBSC16 ", "1" ***********************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/stat.h> #include <asm/io.h> #include <asm/irq.h> #include <../drivers/scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include <scsi/scsi_ioctl.h> #include <pcmcia/cs_types.h> #include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include "nsp_cs.h" MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>"); MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module"); MODULE_SUPPORTED_DEVICE("sd,sr,sg,st"); #ifdef MODULE_LICENSE MODULE_LICENSE("GPL"); #endif #include "nsp_io.h" /*====================================================================*/ /* Parameters that can be set with 'insmod' */ static int nsp_burst_mode = BURST_MEM32; module_param(nsp_burst_mode, int, 0); 
MODULE_PARM_DESC(nsp_burst_mode, "Burst transfer mode (0=io8, 1=io32, 2=mem32(default))"); /* Release IO ports after configuration? */ static int free_ports = 0; module_param(free_ports, bool, 0); MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))"); static struct scsi_host_template nsp_driver_template = { .proc_name = "nsp_cs", .proc_info = nsp_proc_info, .name = "WorkBit NinjaSCSI-3/32Bi(16bit)", .info = nsp_info, .queuecommand = nsp_queuecommand, /* .eh_abort_handler = nsp_eh_abort,*/ .eh_bus_reset_handler = nsp_eh_bus_reset, .eh_host_reset_handler = nsp_eh_host_reset, .can_queue = 1, .this_id = NSP_INITIATOR_ID, .sg_tablesize = SG_ALL, .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, }; static nsp_hw_data nsp_data_base; /* attach <-> detect glue */ /* * debug, error print */ #ifndef NSP_DEBUG # define NSP_DEBUG_MASK 0x000000 # define nsp_msg(type, args...) nsp_cs_message("", 0, (type), args) # define nsp_dbg(mask, args...) /* */ #else # define NSP_DEBUG_MASK 0xffffff # define nsp_msg(type, args...) \ nsp_cs_message (__func__, __LINE__, (type), args) # define nsp_dbg(mask, args...) 
\ nsp_cs_dmessage(__func__, __LINE__, (mask), args) #endif #define NSP_DEBUG_QUEUECOMMAND BIT(0) #define NSP_DEBUG_REGISTER BIT(1) #define NSP_DEBUG_AUTOSCSI BIT(2) #define NSP_DEBUG_INTR BIT(3) #define NSP_DEBUG_SGLIST BIT(4) #define NSP_DEBUG_BUSFREE BIT(5) #define NSP_DEBUG_CDB_CONTENTS BIT(6) #define NSP_DEBUG_RESELECTION BIT(7) #define NSP_DEBUG_MSGINOCCUR BIT(8) #define NSP_DEBUG_EEPROM BIT(9) #define NSP_DEBUG_MSGOUTOCCUR BIT(10) #define NSP_DEBUG_BUSRESET BIT(11) #define NSP_DEBUG_RESTART BIT(12) #define NSP_DEBUG_SYNC BIT(13) #define NSP_DEBUG_WAIT BIT(14) #define NSP_DEBUG_TARGETFLAG BIT(15) #define NSP_DEBUG_PROC BIT(16) #define NSP_DEBUG_INIT BIT(17) #define NSP_DEBUG_DATA_IO BIT(18) #define NSP_SPECIAL_PRINT_REGISTER BIT(20) #define NSP_DEBUG_BUF_LEN 150 static inline void nsp_inc_resid(struct scsi_cmnd *SCpnt, int residInc) { scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) + residInc); } static void nsp_cs_message(const char *func, int line, char *type, char *fmt, ...) { va_list args; char buf[NSP_DEBUG_BUF_LEN]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); #ifndef NSP_DEBUG printk("%snsp_cs: %s\n", type, buf); #else printk("%snsp_cs: %s (%d): %s\n", type, func, line, buf); #endif } #ifdef NSP_DEBUG static void nsp_cs_dmessage(const char *func, int line, int mask, char *fmt, ...) { va_list args; char buf[NSP_DEBUG_BUF_LEN]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); if (mask & NSP_DEBUG_MASK) { printk("nsp_cs-debug: 0x%x %s (%d): %s\n", mask, func, line, buf); } } #endif /***********************************************************/ /*==================================================== * Clenaup parameters and call done() functions. * You must be set SCpnt->result before call this function. 
*/ static void nsp_scsi_done(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; data->CurrentSC = NULL; SCpnt->scsi_done(SCpnt); } static int nsp_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { #ifdef NSP_DEBUG /*unsigned int host_id = SCpnt->device->host->this_id;*/ /*unsigned int base = SCpnt->device->host->io_port;*/ unsigned char target = scmd_id(SCpnt); #endif nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "SCpnt=0x%p target=%d lun=%d sglist=0x%p bufflen=%d sg_count=%d", SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt), scsi_bufflen(SCpnt), scsi_sg_count(SCpnt)); //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC); SCpnt->scsi_done = done; if (data->CurrentSC != NULL) { nsp_msg(KERN_DEBUG, "CurrentSC!=NULL this can't be happen"); SCpnt->result = DID_BAD_TARGET << 16; nsp_scsi_done(SCpnt); return 0; } #if 0 /* XXX: pcmcia-cs generates SCSI command with "scsi_info" utility. This makes kernel crash when suspending... */ if (data->ScsiInfo->stop != 0) { nsp_msg(KERN_INFO, "suspending device. 
reject command."); SCpnt->result = DID_BAD_TARGET << 16; nsp_scsi_done(SCpnt); return SCSI_MLQUEUE_HOST_BUSY; } #endif show_command(SCpnt); data->CurrentSC = SCpnt; SCpnt->SCp.Status = CHECK_CONDITION; SCpnt->SCp.Message = 0; SCpnt->SCp.have_data_in = IO_UNKNOWN; SCpnt->SCp.sent_command = 0; SCpnt->SCp.phase = PH_UNDETERMINED; scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); /* setup scratch area SCp.ptr : buffer pointer SCp.this_residual : buffer length SCp.buffer : next buffer SCp.buffers_residual : left buffers in list SCp.phase : current state of the command */ if (scsi_bufflen(SCpnt)) { SCpnt->SCp.buffer = scsi_sglist(SCpnt); SCpnt->SCp.ptr = BUFFER_ADDR; SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1; } else { SCpnt->SCp.ptr = NULL; SCpnt->SCp.this_residual = 0; SCpnt->SCp.buffer = NULL; SCpnt->SCp.buffers_residual = 0; } if (nsphw_start_selection(SCpnt) == FALSE) { nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "selection fail"); SCpnt->result = DID_BUS_BUSY << 16; nsp_scsi_done(SCpnt); return 0; } //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "out"); #ifdef NSP_DEBUG data->CmdId++; #endif return 0; } /* * setup PIO FIFO transfer mode and enable/disable to data out */ static void nsp_setup_fifo(nsp_hw_data *data, int enabled) { unsigned int base = data->BaseAddress; unsigned char transfer_mode_reg; //nsp_dbg(NSP_DEBUG_DATA_IO, "enabled=%d", enabled); if (enabled != FALSE) { transfer_mode_reg = TRANSFER_GO | BRAIND; } else { transfer_mode_reg = 0; } transfer_mode_reg |= data->TransferMode; nsp_index_write(base, TRANSFERMODE, transfer_mode_reg); } static void nsphw_init_sync(nsp_hw_data *data) { sync_data tmp_sync = { .SyncNegotiation = SYNC_NOT_YET, .SyncPeriod = 0, .SyncOffset = 0 }; int i; /* setup sync data */ for ( i = 0; i < ARRAY_SIZE(data->Sync); i++ ) { data->Sync[i] = tmp_sync; } } /* * Initialize Ninja hardware */ static int nsphw_init(nsp_hw_data *data) { unsigned int base = data->BaseAddress; nsp_dbg(NSP_DEBUG_INIT, 
"in base=0x%x", base); data->ScsiClockDiv = CLOCK_40M | FAST_20; data->CurrentSC = NULL; data->FifoCount = 0; data->TransferMode = MODE_IO8; nsphw_init_sync(data); /* block all interrupts */ nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK); /* setup SCSI interface */ nsp_write(base, IFSELECT, IF_IFSEL); nsp_index_write(base, SCSIIRQMODE, 0); nsp_index_write(base, TRANSFERMODE, MODE_IO8); nsp_index_write(base, CLOCKDIV, data->ScsiClockDiv); nsp_index_write(base, PARITYCTRL, 0); nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER_CLEAR | REQ_COUNTER_CLEAR | HOST_COUNTER_CLEAR); /* setup fifo asic */ nsp_write(base, IFSELECT, IF_REGSEL); nsp_index_write(base, TERMPWRCTRL, 0); if ((nsp_index_read(base, OTHERCONTROL) & TPWR_SENSE) == 0) { nsp_msg(KERN_INFO, "terminator power on"); nsp_index_write(base, TERMPWRCTRL, POWER_ON); } nsp_index_write(base, TIMERCOUNT, 0); nsp_index_write(base, TIMERCOUNT, 0); /* requires 2 times!! */ nsp_index_write(base, SYNCREG, 0); nsp_index_write(base, ACKWIDTH, 0); /* enable interrupts and ack them */ nsp_index_write(base, SCSIIRQMODE, SCSI_PHASE_CHANGE_EI | RESELECT_EI | SCSI_RESET_IRQ_EI ); nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR); nsp_setup_fifo(data, FALSE); return TRUE; } /* * Start selection phase */ static int nsphw_start_selection(struct scsi_cmnd *SCpnt) { unsigned int host_id = SCpnt->device->host->this_id; unsigned int base = SCpnt->device->host->io_port; unsigned char target = scmd_id(SCpnt); nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; int time_out; unsigned char phase, arbit; //nsp_dbg(NSP_DEBUG_RESELECTION, "in"); phase = nsp_index_read(base, SCSIBUSMON); if(phase != BUSMON_BUS_FREE) { //nsp_dbg(NSP_DEBUG_RESELECTION, "bus busy"); return FALSE; } /* start arbitration */ //nsp_dbg(NSP_DEBUG_RESELECTION, "start arbit"); SCpnt->SCp.phase = PH_ARBSTART; nsp_index_write(base, SETARBIT, ARBIT_GO); time_out = 1000; do { /* XXX: what a stupid chip! 
*/ arbit = nsp_index_read(base, ARBITSTATUS); //nsp_dbg(NSP_DEBUG_RESELECTION, "arbit=%d, wait_count=%d", arbit, wait_count); udelay(1); /* hold 1.2us */ } while((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 && (time_out-- != 0)); if (!(arbit & ARBIT_WIN)) { //nsp_dbg(NSP_DEBUG_RESELECTION, "arbit fail"); nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR); return FALSE; } /* assert select line */ //nsp_dbg(NSP_DEBUG_RESELECTION, "assert SEL line"); SCpnt->SCp.phase = PH_SELSTART; udelay(3); /* wait 2.4us */ nsp_index_write(base, SCSIDATALATCH, BIT(host_id) | BIT(target)); nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_ATN); udelay(2); /* wait >1.2us */ nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_DATAOUT_ENB | SCSI_ATN); nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR); /*udelay(1);*/ /* wait >90ns */ nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_DATAOUT_ENB | SCSI_ATN); /* check selection timeout */ nsp_start_timer(SCpnt, 1000/51); data->SelectionTimeOut = 1; return TRUE; } struct nsp_sync_table { unsigned int min_period; unsigned int max_period; unsigned int chip_period; unsigned int ack_width; }; static struct nsp_sync_table nsp_sync_table_40M[] = { {0x0c, 0x0c, 0x1, 0}, /* 20MB 50ns*/ {0x19, 0x19, 0x3, 1}, /* 10MB 100ns*/ {0x1a, 0x25, 0x5, 2}, /* 7.5MB 150ns*/ {0x26, 0x32, 0x7, 3}, /* 5MB 200ns*/ { 0, 0, 0, 0}, }; static struct nsp_sync_table nsp_sync_table_20M[] = { {0x19, 0x19, 0x1, 0}, /* 10MB 100ns*/ {0x1a, 0x25, 0x2, 0}, /* 7.5MB 150ns*/ {0x26, 0x32, 0x3, 1}, /* 5MB 200ns*/ { 0, 0, 0, 0}, }; /* * setup synchronous data transfer mode */ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt) { unsigned char target = scmd_id(SCpnt); // unsigned char lun = SCpnt->device->lun; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; sync_data *sync = &(data->Sync[target]); struct nsp_sync_table *sync_table; unsigned int period, offset; int i; nsp_dbg(NSP_DEBUG_SYNC, "in"); period = sync->SyncPeriod; offset = 
sync->SyncOffset; nsp_dbg(NSP_DEBUG_SYNC, "period=0x%x, offset=0x%x", period, offset); if ((data->ScsiClockDiv & (BIT(0)|BIT(1))) == CLOCK_20M) { sync_table = nsp_sync_table_20M; } else { sync_table = nsp_sync_table_40M; } for ( i = 0; sync_table->max_period != 0; i++, sync_table++) { if ( period >= sync_table->min_period && period <= sync_table->max_period ) { break; } } if (period != 0 && sync_table->max_period == 0) { /* * No proper period/offset found */ nsp_dbg(NSP_DEBUG_SYNC, "no proper period/offset"); sync->SyncPeriod = 0; sync->SyncOffset = 0; sync->SyncRegister = 0; sync->AckWidth = 0; return FALSE; } sync->SyncRegister = (sync_table->chip_period << SYNCREG_PERIOD_SHIFT) | (offset & SYNCREG_OFFSET_MASK); sync->AckWidth = sync_table->ack_width; nsp_dbg(NSP_DEBUG_SYNC, "sync_reg=0x%x, ack_width=0x%x", sync->SyncRegister, sync->AckWidth); return TRUE; } /* * start ninja hardware timer */ static void nsp_start_timer(struct scsi_cmnd *SCpnt, int time) { unsigned int base = SCpnt->device->host->io_port; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; //nsp_dbg(NSP_DEBUG_INTR, "in SCpnt=0x%p, time=%d", SCpnt, time); data->TimerCount = time; nsp_index_write(base, TIMERCOUNT, time); } /* * wait for bus phase change */ static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask, char *str) { unsigned int base = SCpnt->device->host->io_port; unsigned char reg; int time_out; //nsp_dbg(NSP_DEBUG_INTR, "in"); time_out = 100; do { reg = nsp_index_read(base, SCSIBUSMON); if (reg == 0xff) { break; } } while ((--time_out != 0) && (reg & mask) != 0); if (time_out == 0) { nsp_msg(KERN_DEBUG, " %s signal off timeut", str); } return 0; } /* * expect Ninja Irq */ static int nsp_expect_signal(struct scsi_cmnd *SCpnt, unsigned char current_phase, unsigned char mask) { unsigned int base = SCpnt->device->host->io_port; int time_out; unsigned char phase, i_src; //nsp_dbg(NSP_DEBUG_INTR, "current_phase=0x%x, mask=0x%x", current_phase, mask); time_out = 
100; do { phase = nsp_index_read(base, SCSIBUSMON); if (phase == 0xff) { //nsp_dbg(NSP_DEBUG_INTR, "ret -1"); return -1; } i_src = nsp_read(base, IRQSTATUS); if (i_src & IRQSTATUS_SCSI) { //nsp_dbg(NSP_DEBUG_INTR, "ret 0 found scsi signal"); return 0; } if ((phase & mask) != 0 && (phase & BUSMON_PHASE_MASK) == current_phase) { //nsp_dbg(NSP_DEBUG_INTR, "ret 1 phase=0x%x", phase); return 1; } } while(time_out-- != 0); //nsp_dbg(NSP_DEBUG_INTR, "timeout"); return -1; } /* * transfer SCSI message */ static int nsp_xfer(struct scsi_cmnd *SCpnt, int phase) { unsigned int base = SCpnt->device->host->io_port; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; char *buf = data->MsgBuffer; int len = min(MSGBUF_SIZE, data->MsgLen); int ptr; int ret; //nsp_dbg(NSP_DEBUG_DATA_IO, "in"); for (ptr = 0; len > 0; len--, ptr++) { ret = nsp_expect_signal(SCpnt, phase, BUSMON_REQ); if (ret <= 0) { nsp_dbg(NSP_DEBUG_DATA_IO, "xfer quit"); return 0; } /* if last byte, negate ATN */ if (len == 1 && SCpnt->SCp.phase == PH_MSG_OUT) { nsp_index_write(base, SCSIBUSCTRL, AUTODIRECTION | ACKENB); } /* read & write message */ if (phase & BUSMON_IO) { nsp_dbg(NSP_DEBUG_DATA_IO, "read msg"); buf[ptr] = nsp_index_read(base, SCSIDATAWITHACK); } else { nsp_dbg(NSP_DEBUG_DATA_IO, "write msg"); nsp_index_write(base, SCSIDATAWITHACK, buf[ptr]); } nsp_negate_signal(SCpnt, BUSMON_ACK, "xfer<ack>"); } return len; } /* * get extra SCSI data from fifo */ static int nsp_dataphase_bypass(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; unsigned int count; //nsp_dbg(NSP_DEBUG_DATA_IO, "in"); if (SCpnt->SCp.have_data_in != IO_IN) { return 0; } count = nsp_fifo_count(SCpnt); if (data->FifoCount == count) { //nsp_dbg(NSP_DEBUG_DATA_IO, "not use bypass quirk"); return 0; } /* * XXX: NSP_QUIRK * data phase skip only occures in case of SCSI_LOW_READ */ nsp_dbg(NSP_DEBUG_DATA_IO, "use bypass quirk"); SCpnt->SCp.phase = PH_DATA; nsp_pio_read(SCpnt); 
nsp_setup_fifo(data, FALSE); return 0; } /* * accept reselection */ static int nsp_reselected(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned int host_id = SCpnt->device->host->this_id; //nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; unsigned char bus_reg; unsigned char id_reg, tmp; int target; nsp_dbg(NSP_DEBUG_RESELECTION, "in"); id_reg = nsp_index_read(base, RESELECTID); tmp = id_reg & (~BIT(host_id)); target = 0; while(tmp != 0) { if (tmp & BIT(0)) { break; } tmp >>= 1; target++; } if (scmd_id(SCpnt) != target) { nsp_msg(KERN_ERR, "XXX: reselect ID must be %d in this implementation.", target); } nsp_negate_signal(SCpnt, BUSMON_SEL, "reselect<SEL>"); nsp_nexus(SCpnt); bus_reg = nsp_index_read(base, SCSIBUSCTRL) & ~(SCSI_BSY | SCSI_ATN); nsp_index_write(base, SCSIBUSCTRL, bus_reg); nsp_index_write(base, SCSIBUSCTRL, bus_reg | AUTODIRECTION | ACKENB); return TRUE; } /* * count how many data transferd */ static int nsp_fifo_count(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned int count; unsigned int l, m, h, dummy; nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER); l = nsp_index_read(base, TRANSFERCOUNT); m = nsp_index_read(base, TRANSFERCOUNT); h = nsp_index_read(base, TRANSFERCOUNT); dummy = nsp_index_read(base, TRANSFERCOUNT); /* required this! 
*/ count = (h << 16) | (m << 8) | (l << 0); //nsp_dbg(NSP_DEBUG_DATA_IO, "count=0x%x", count); return count; } /* fifo size */ #define RFIFO_CRIT 64 #define WFIFO_CRIT 64 /* * read data in DATA IN phase */ static void nsp_pio_read(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned long mmio_base = SCpnt->device->host->base; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; long time_out; int ocount, res; unsigned char stat, fifo_stat; ocount = data->FifoCount; nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d", SCpnt, scsi_get_resid(SCpnt), ocount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual); time_out = 1000; while ((time_out-- != 0) && (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0 ) ) { stat = nsp_index_read(base, SCSIBUSMON); stat &= BUSMON_PHASE_MASK; res = nsp_fifo_count(SCpnt) - ocount; //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount, res); if (res == 0) { /* if some data avilable ? */ if (stat == BUSPHASE_DATA_IN) { /* phase changed? 
*/ //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", SCpnt->SCp.this_residual); continue; } else { nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x", stat); break; } } fifo_stat = nsp_read(base, FIFOSTATUS); if ((fifo_stat & FIFOSTATUS_FULL_EMPTY) == 0 && stat == BUSPHASE_DATA_IN) { continue; } res = min(res, SCpnt->SCp.this_residual); switch (data->TransferMode) { case MODE_IO32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_fifo32_read(base, SCpnt->SCp.ptr, res >> 2); break; case MODE_IO8: nsp_fifo8_read (base, SCpnt->SCp.ptr, res ); break; case MODE_MEM32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_mmio_fifo32_read(mmio_base, SCpnt->SCp.ptr, res >> 2); break; default: nsp_dbg(NSP_DEBUG_DATA_IO, "unknown read mode"); return; } nsp_inc_resid(SCpnt, -res); SCpnt->SCp.ptr += res; SCpnt->SCp.this_residual -= res; ocount += res; //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount); /* go to next scatter list if available */ if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.buffers_residual != 0 ) { //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out); SCpnt->SCp.buffers_residual--; SCpnt->SCp.buffer++; SCpnt->SCp.ptr = BUFFER_ADDR; SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; time_out = 1000; //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", SCpnt->SCp.buffer->page, SCpnt->SCp.buffer->offset); } } data->FifoCount = ocount; if (time_out < 0) { nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d", scsi_get_resid(SCpnt), SCpnt->SCp.this_residual, SCpnt->SCp.buffers_residual); } nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount); nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId, scsi_get_resid(SCpnt)); } /* * write data in DATA OUT phase */ static void nsp_pio_write(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned long mmio_base = SCpnt->device->host->base; nsp_hw_data *data = 
(nsp_hw_data *)SCpnt->device->host->hostdata; int time_out; int ocount, res; unsigned char stat; ocount = data->FifoCount; nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x", data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual, SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual, scsi_get_resid(SCpnt)); time_out = 1000; while ((time_out-- != 0) && (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0)) { stat = nsp_index_read(base, SCSIBUSMON); stat &= BUSMON_PHASE_MASK; if (stat != BUSPHASE_DATA_OUT) { res = ocount - nsp_fifo_count(SCpnt); nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res); /* Put back pointer */ nsp_inc_resid(SCpnt, res); SCpnt->SCp.ptr -= res; SCpnt->SCp.this_residual += res; ocount -= res; break; } res = ocount - nsp_fifo_count(SCpnt); if (res > 0) { /* write all data? */ nsp_dbg(NSP_DEBUG_DATA_IO, "wait for all data out. ocount=0x%x res=%d", ocount, res); continue; } res = min(SCpnt->SCp.this_residual, WFIFO_CRIT); //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, res); switch (data->TransferMode) { case MODE_IO32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_fifo32_write(base, SCpnt->SCp.ptr, res >> 2); break; case MODE_IO8: nsp_fifo8_write (base, SCpnt->SCp.ptr, res ); break; case MODE_MEM32: res &= ~(BIT(1)|BIT(0)); /* align 4 */ nsp_mmio_fifo32_write(mmio_base, SCpnt->SCp.ptr, res >> 2); break; default: nsp_dbg(NSP_DEBUG_DATA_IO, "unknown write mode"); break; } nsp_inc_resid(SCpnt, -res); SCpnt->SCp.ptr += res; SCpnt->SCp.this_residual -= res; ocount += res; /* go to next scatter list if available */ if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.buffers_residual != 0 ) { //nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next"); SCpnt->SCp.buffers_residual--; SCpnt->SCp.buffer++; SCpnt->SCp.ptr = BUFFER_ADDR; SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; time_out = 1000; } } data->FifoCount = ocount; if 
(time_out < 0) { nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", scsi_get_resid(SCpnt)); } nsp_dbg(NSP_DEBUG_DATA_IO, "write ocount=0x%x", ocount); nsp_dbg(NSP_DEBUG_DATA_IO, "w cmd=%d resid=0x%x\n", data->CmdId, scsi_get_resid(SCpnt)); } #undef RFIFO_CRIT #undef WFIFO_CRIT /* * setup synchronous/asynchronous data transfer mode */ static int nsp_nexus(struct scsi_cmnd *SCpnt) { unsigned int base = SCpnt->device->host->io_port; unsigned char target = scmd_id(SCpnt); // unsigned char lun = SCpnt->device->lun; nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; sync_data *sync = &(data->Sync[target]); //nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p", SCpnt); /* setup synch transfer registers */ nsp_index_write(base, SYNCREG, sync->SyncRegister); nsp_index_write(base, ACKWIDTH, sync->AckWidth); if (scsi_get_resid(SCpnt) % 4 != 0 || scsi_get_resid(SCpnt) <= PAGE_SIZE ) { data->TransferMode = MODE_IO8; } else if (nsp_burst_mode == BURST_MEM32) { data->TransferMode = MODE_MEM32; } else if (nsp_burst_mode == BURST_IO32) { data->TransferMode = MODE_IO32; } else { data->TransferMode = MODE_IO8; } /* setup pdma fifo */ nsp_setup_fifo(data, TRUE); /* clear ack counter */ data->FifoCount = 0; nsp_index_write(base, POINTERCLR, POINTER_CLEAR | ACK_COUNTER_CLEAR | REQ_COUNTER_CLEAR | HOST_COUNTER_CLEAR); return 0; } #include "nsp_message.c" /* * interrupt handler */ static irqreturn_t nspintr(int irq, void *dev_id) { unsigned int base; unsigned char irq_status, irq_phase, phase; struct scsi_cmnd *tmpSC; unsigned char target, lun; unsigned int *sync_neg; int i, tmp; nsp_hw_data *data; //nsp_dbg(NSP_DEBUG_INTR, "dev_id=0x%p", dev_id); //nsp_dbg(NSP_DEBUG_INTR, "host=0x%p", ((scsi_info_t *)dev_id)->host); if ( dev_id != NULL && ((scsi_info_t *)dev_id)->host != NULL ) { scsi_info_t *info = (scsi_info_t *)dev_id; data = (nsp_hw_data *)info->host->hostdata; } else { nsp_dbg(NSP_DEBUG_INTR, "host data wrong"); return IRQ_NONE; } //nsp_dbg(NSP_DEBUG_INTR, 
"&nsp_data_base=0x%p, dev_id=0x%p", &nsp_data_base, dev_id); base = data->BaseAddress; //nsp_dbg(NSP_DEBUG_INTR, "base=0x%x", base); /* * interrupt check */ nsp_write(base, IRQCONTROL, IRQCONTROL_IRQDISABLE); irq_status = nsp_read(base, IRQSTATUS); //nsp_dbg(NSP_DEBUG_INTR, "irq_status=0x%x", irq_status); if ((irq_status == 0xff) || ((irq_status & IRQSTATUS_MASK) == 0)) { nsp_write(base, IRQCONTROL, 0); //nsp_dbg(NSP_DEBUG_INTR, "no irq/shared irq"); return IRQ_NONE; } /* XXX: IMPORTANT * Do not read an irq_phase register if no scsi phase interrupt. * Unless, you should lose a scsi phase interrupt. */ phase = nsp_index_read(base, SCSIBUSMON); if((irq_status & IRQSTATUS_SCSI) != 0) { irq_phase = nsp_index_read(base, IRQPHASESENCE); } else { irq_phase = 0; } //nsp_dbg(NSP_DEBUG_INTR, "irq_phase=0x%x", irq_phase); /* * timer interrupt handler (scsi vs timer interrupts) */ //nsp_dbg(NSP_DEBUG_INTR, "timercount=%d", data->TimerCount); if (data->TimerCount != 0) { //nsp_dbg(NSP_DEBUG_INTR, "stop timer"); nsp_index_write(base, TIMERCOUNT, 0); nsp_index_write(base, TIMERCOUNT, 0); data->TimerCount = 0; } if ((irq_status & IRQSTATUS_MASK) == IRQSTATUS_TIMER && data->SelectionTimeOut == 0) { //nsp_dbg(NSP_DEBUG_INTR, "timer start"); nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR); return IRQ_HANDLED; } nsp_write(base, IRQCONTROL, IRQCONTROL_TIMER_CLEAR | IRQCONTROL_FIFO_CLEAR); if ((irq_status & IRQSTATUS_SCSI) && (irq_phase & SCSI_RESET_IRQ)) { nsp_msg(KERN_ERR, "bus reset (power off?)"); nsphw_init(data); nsp_bus_reset(data); if(data->CurrentSC != NULL) { tmpSC = data->CurrentSC; tmpSC->result = (DID_RESET << 16) | ((tmpSC->SCp.Message & 0xff) << 8) | ((tmpSC->SCp.Status & 0xff) << 0); nsp_scsi_done(tmpSC); } return IRQ_HANDLED; } if (data->CurrentSC == NULL) { nsp_msg(KERN_ERR, "CurrentSC==NULL irq_status=0x%x phase=0x%x irq_phase=0x%x this can't be happen. 
reset everything", irq_status, phase, irq_phase); nsphw_init(data); nsp_bus_reset(data); return IRQ_HANDLED; } tmpSC = data->CurrentSC; target = tmpSC->device->id; lun = tmpSC->device->lun; sync_neg = &(data->Sync[target].SyncNegotiation); /* * parse hardware SCSI irq reasons register */ if (irq_status & IRQSTATUS_SCSI) { if (irq_phase & RESELECT_IRQ) { nsp_dbg(NSP_DEBUG_INTR, "reselect"); nsp_write(base, IRQCONTROL, IRQCONTROL_RESELECT_CLEAR); if (nsp_reselected(tmpSC) != FALSE) { return IRQ_HANDLED; } } if ((irq_phase & (PHASE_CHANGE_IRQ | LATCHED_BUS_FREE)) == 0) { return IRQ_HANDLED; } } //show_phase(tmpSC); switch(tmpSC->SCp.phase) { case PH_SELSTART: // *sync_neg = SYNC_NOT_YET; if ((phase & BUSMON_BSY) == 0) { //nsp_dbg(NSP_DEBUG_INTR, "selection count=%d", data->SelectionTimeOut); if (data->SelectionTimeOut >= NSP_SELTIMEOUT) { nsp_dbg(NSP_DEBUG_INTR, "selection time out"); data->SelectionTimeOut = 0; nsp_index_write(base, SCSIBUSCTRL, 0); tmpSC->result = DID_TIME_OUT << 16; nsp_scsi_done(tmpSC); return IRQ_HANDLED; } data->SelectionTimeOut += 1; nsp_start_timer(tmpSC, 1000/51); return IRQ_HANDLED; } /* attention assert */ //nsp_dbg(NSP_DEBUG_INTR, "attention assert"); data->SelectionTimeOut = 0; tmpSC->SCp.phase = PH_SELECTED; nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN); udelay(1); nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN | AUTODIRECTION | ACKENB); return IRQ_HANDLED; break; case PH_RESELECT: //nsp_dbg(NSP_DEBUG_INTR, "phase reselect"); // *sync_neg = SYNC_NOT_YET; if ((phase & BUSMON_PHASE_MASK) != BUSPHASE_MESSAGE_IN) { tmpSC->result = DID_ABORT << 16; nsp_scsi_done(tmpSC); return IRQ_HANDLED; } /* fall thru */ default: if ((irq_status & (IRQSTATUS_SCSI | IRQSTATUS_FIFO)) == 0) { return IRQ_HANDLED; } break; } /* * SCSI sequencer */ //nsp_dbg(NSP_DEBUG_INTR, "start scsi seq"); /* normal disconnect */ if (((tmpSC->SCp.phase == PH_MSG_IN) || (tmpSC->SCp.phase == PH_MSG_OUT)) && (irq_phase & LATCHED_BUS_FREE) != 0 ) { nsp_dbg(NSP_DEBUG_INTR, "normal 
disconnect irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase); //*sync_neg = SYNC_NOT_YET; if ((tmpSC->SCp.Message == MSG_COMMAND_COMPLETE)) { /* all command complete and return status */ tmpSC->result = (DID_OK << 16) | ((tmpSC->SCp.Message & 0xff) << 8) | ((tmpSC->SCp.Status & 0xff) << 0); nsp_dbg(NSP_DEBUG_INTR, "command complete result=0x%x", tmpSC->result); nsp_scsi_done(tmpSC); return IRQ_HANDLED; } return IRQ_HANDLED; } /* check unexpected bus free state */ if (phase == 0) { nsp_msg(KERN_DEBUG, "unexpected bus free. irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase); *sync_neg = SYNC_NG; tmpSC->result = DID_ERROR << 16; nsp_scsi_done(tmpSC); return IRQ_HANDLED; } switch (phase & BUSMON_PHASE_MASK) { case BUSPHASE_COMMAND: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_COMMAND"); if ((phase & BUSMON_REQ) == 0) { nsp_dbg(NSP_DEBUG_INTR, "REQ == 0"); return IRQ_HANDLED; } tmpSC->SCp.phase = PH_COMMAND; nsp_nexus(tmpSC); /* write scsi command */ nsp_dbg(NSP_DEBUG_INTR, "cmd_len=%d", tmpSC->cmd_len); nsp_index_write(base, COMMANDCTRL, CLEAR_COMMAND_POINTER); for (i = 0; i < tmpSC->cmd_len; i++) { nsp_index_write(base, COMMANDDATA, tmpSC->cmnd[i]); } nsp_index_write(base, COMMANDCTRL, CLEAR_COMMAND_POINTER | AUTO_COMMAND_GO); break; case BUSPHASE_DATA_OUT: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_OUT"); tmpSC->SCp.phase = PH_DATA; tmpSC->SCp.have_data_in = IO_OUT; nsp_pio_write(tmpSC); break; case BUSPHASE_DATA_IN: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_IN"); tmpSC->SCp.phase = PH_DATA; tmpSC->SCp.have_data_in = IO_IN; nsp_pio_read(tmpSC); break; case BUSPHASE_STATUS: nsp_dataphase_bypass(tmpSC); nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_STATUS"); tmpSC->SCp.phase = PH_STATUS; tmpSC->SCp.Status = nsp_index_read(base, SCSIDATAWITHACK); nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x", tmpSC->SCp.Message, tmpSC->SCp.Status); break; case BUSPHASE_MESSAGE_OUT: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_MESSAGE_OUT"); if ((phase & BUSMON_REQ) == 0) 
{ goto timer_out; } tmpSC->SCp.phase = PH_MSG_OUT; //*sync_neg = SYNC_NOT_YET; data->MsgLen = i = 0; data->MsgBuffer[i] = IDENTIFY(TRUE, lun); i++; if (*sync_neg == SYNC_NOT_YET) { data->Sync[target].SyncPeriod = 0; data->Sync[target].SyncOffset = 0; /**/ data->MsgBuffer[i] = MSG_EXTENDED; i++; data->MsgBuffer[i] = 3; i++; data->MsgBuffer[i] = MSG_EXT_SDTR; i++; data->MsgBuffer[i] = 0x0c; i++; data->MsgBuffer[i] = 15; i++; /**/ } data->MsgLen = i; nsp_analyze_sdtr(tmpSC); show_message(data); nsp_message_out(tmpSC); break; case BUSPHASE_MESSAGE_IN: nsp_dataphase_bypass(tmpSC); nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_MESSAGE_IN"); if ((phase & BUSMON_REQ) == 0) { goto timer_out; } tmpSC->SCp.phase = PH_MSG_IN; nsp_message_in(tmpSC); /**/ if (*sync_neg == SYNC_NOT_YET) { //nsp_dbg(NSP_DEBUG_INTR, "sync target=%d,lun=%d",target,lun); if (data->MsgLen >= 5 && data->MsgBuffer[0] == MSG_EXTENDED && data->MsgBuffer[1] == 3 && data->MsgBuffer[2] == MSG_EXT_SDTR ) { data->Sync[target].SyncPeriod = data->MsgBuffer[3]; data->Sync[target].SyncOffset = data->MsgBuffer[4]; //nsp_dbg(NSP_DEBUG_INTR, "sync ok, %d %d", data->MsgBuffer[3], data->MsgBuffer[4]); *sync_neg = SYNC_OK; } else { data->Sync[target].SyncPeriod = 0; data->Sync[target].SyncOffset = 0; *sync_neg = SYNC_NG; } nsp_analyze_sdtr(tmpSC); } /**/ /* search last messeage byte */ tmp = -1; for (i = 0; i < data->MsgLen; i++) { tmp = data->MsgBuffer[i]; if (data->MsgBuffer[i] == MSG_EXTENDED) { i += (1 + data->MsgBuffer[i+1]); } } tmpSC->SCp.Message = tmp; nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d", tmpSC->SCp.Message, data->MsgLen); show_message(data); break; case BUSPHASE_SELECT: default: nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE other"); break; } //nsp_dbg(NSP_DEBUG_INTR, "out"); return IRQ_HANDLED; timer_out: nsp_start_timer(tmpSC, 1000/102); return IRQ_HANDLED; } #ifdef NSP_DEBUG #include "nsp_debug.c" #endif /* NSP_DEBUG */ /*----------------------------------------------------------------*/ /* look for ninja3 card and init if 
found */ /*----------------------------------------------------------------*/ static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht) { struct Scsi_Host *host; /* registered host structure */ nsp_hw_data *data_b = &nsp_data_base, *data; nsp_dbg(NSP_DEBUG_INIT, "this_id=%d", sht->this_id); host = scsi_host_alloc(&nsp_driver_template, sizeof(nsp_hw_data)); if (host == NULL) { nsp_dbg(NSP_DEBUG_INIT, "host failed"); return NULL; } memcpy(host->hostdata, data_b, sizeof(nsp_hw_data)); data = (nsp_hw_data *)host->hostdata; data->ScsiInfo->host = host; #ifdef NSP_DEBUG data->CmdId = 0; #endif nsp_dbg(NSP_DEBUG_INIT, "irq=%d,%d", data_b->IrqNumber, ((nsp_hw_data *)host->hostdata)->IrqNumber); host->unique_id = data->BaseAddress; host->io_port = data->BaseAddress; host->n_io_port = data->NumAddress; host->irq = data->IrqNumber; host->base = data->MmioAddress; spin_lock_init(&(data->Lock)); snprintf(data->nspinfo, sizeof(data->nspinfo), "NinjaSCSI-3/32Bi Driver $Revision: 1.23 $ IO:0x%04lx-0x%04lx MMIO(virt addr):0x%04lx IRQ:%02d", host->io_port, host->io_port + host->n_io_port - 1, host->base, host->irq); sht->name = data->nspinfo; nsp_dbg(NSP_DEBUG_INIT, "end"); return host; /* detect done. */ } /*----------------------------------------------------------------*/ /* return info string */ /*----------------------------------------------------------------*/ static const char *nsp_info(struct Scsi_Host *shpnt) { nsp_hw_data *data = (nsp_hw_data *)shpnt->hostdata; return data->nspinfo; } #undef SPRINTF #define SPRINTF(args...) 
\ do { \ if(length > (pos - buffer)) { \ pos += snprintf(pos, length - (pos - buffer) + 1, ## args); \ nsp_dbg(NSP_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\ } \ } while(0) static int nsp_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) { int id; char *pos = buffer; int thislength; int speed; unsigned long flags; nsp_hw_data *data; int hostno; if (inout) { return -EINVAL; } hostno = host->host_no; data = (nsp_hw_data *)host->hostdata; SPRINTF("NinjaSCSI status\n\n"); SPRINTF("Driver version: $Revision: 1.23 $\n"); SPRINTF("SCSI host No.: %d\n", hostno); SPRINTF("IRQ: %d\n", host->irq); SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); SPRINTF("sg_tablesize: %d\n", host->sg_tablesize); SPRINTF("burst transfer mode: "); switch (nsp_burst_mode) { case BURST_IO8: SPRINTF("io8"); break; case BURST_IO32: SPRINTF("io32"); break; case BURST_MEM32: SPRINTF("mem32"); break; default: SPRINTF("???"); break; } SPRINTF("\n"); spin_lock_irqsave(&(data->Lock), flags); SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC); spin_unlock_irqrestore(&(data->Lock), flags); SPRINTF("SDTR status\n"); for(id = 0; id < ARRAY_SIZE(data->Sync); id++) { SPRINTF("id %d: ", id); if (id == host->this_id) { SPRINTF("----- NinjaSCSI-3 host adapter\n"); continue; } switch(data->Sync[id].SyncNegotiation) { case SYNC_OK: SPRINTF(" sync"); break; case SYNC_NG: SPRINTF("async"); break; case SYNC_NOT_YET: SPRINTF(" none"); break; default: SPRINTF("?????"); break; } if (data->Sync[id].SyncPeriod != 0) { speed = 1000000 / (data->Sync[id].SyncPeriod * 4); SPRINTF(" transfer %d.%dMB/s, offset %d", speed / 1000, speed % 1000, data->Sync[id].SyncOffset ); } SPRINTF("\n"); } thislength = pos - (buffer + offset); if(thislength < 0) { *start = NULL; return 0; } thislength = min(thislength, 
length); *start = buffer + offset; return thislength; } #undef SPRINTF /*---------------------------------------------------------------*/ /* error handler */ /*---------------------------------------------------------------*/ /* static int nsp_eh_abort(struct scsi_cmnd *SCpnt) { nsp_dbg(NSP_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt); return nsp_eh_bus_reset(SCpnt); }*/ static int nsp_bus_reset(nsp_hw_data *data) { unsigned int base = data->BaseAddress; int i; nsp_write(base, IRQCONTROL, IRQCONTROL_ALLMASK); nsp_index_write(base, SCSIBUSCTRL, SCSI_RST); mdelay(100); /* 100ms */ nsp_index_write(base, SCSIBUSCTRL, 0); for(i = 0; i < 5; i++) { nsp_index_read(base, IRQPHASESENCE); /* dummy read */ } nsphw_init_sync(data); nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR); return SUCCESS; } static int nsp_eh_bus_reset(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_dbg(NSP_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt); return nsp_bus_reset(data); } static int nsp_eh_host_reset(struct scsi_cmnd *SCpnt) { nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata; nsp_dbg(NSP_DEBUG_BUSRESET, "in"); nsphw_init(data); return SUCCESS; } /********************************************************************** PCMCIA functions **********************************************************************/ /*====================================================================== nsp_cs_attach() creates an "instance" of the driver, allocating local data structures for one device. The device is registered with Card Services. The dev_link structure is initialized, but we don't actually configure the card at this point -- we wait until we receive a card insertion event. 
======================================================================*/ static int nsp_cs_probe(struct pcmcia_device *link) { scsi_info_t *info; nsp_hw_data *data = &nsp_data_base; int ret; nsp_dbg(NSP_DEBUG_INIT, "in"); /* Create new SCSI device */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) { return -ENOMEM; } info->p_dev = link; link->priv = info; data->ScsiInfo = info; nsp_dbg(NSP_DEBUG_INIT, "info=0x%p", info); /* The io structure describes IO port mapping */ link->io.NumPorts1 = 0x10; link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; link->io.IOAddrLines = 10; /* not used */ /* Interrupt setup */ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; /* Interrupt handler */ link->irq.Handler = &nspintr; link->irq.Attributes |= IRQF_SHARED; /* General socket configuration */ link->conf.Attributes = CONF_ENABLE_IRQ; link->conf.IntType = INT_MEMORY_AND_IO; ret = nsp_cs_config(link); nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link); return ret; } /* nsp_cs_attach */ /*====================================================================== This deletes a driver "instance". The device is de-registered with Card Services. If it has been released, all local data structures are freed. Otherwise, the structures will be freed when the device is released. ======================================================================*/ static void nsp_cs_detach(struct pcmcia_device *link) { nsp_dbg(NSP_DEBUG_INIT, "in, link=0x%p", link); ((scsi_info_t *)link->priv)->stop = 1; nsp_cs_release(link); kfree(link->priv); link->priv = NULL; } /* nsp_cs_detach */ /*====================================================================== nsp_cs_config() is scheduled to run after a CARD_INSERTION event is received, to configure the PCMCIA socket, and to make the ethernet device available to the system. 
======================================================================*/ struct nsp_cs_configdata { nsp_hw_data *data; win_req_t req; }; static int nsp_cs_config_check(struct pcmcia_device *p_dev, cistpl_cftable_entry_t *cfg, cistpl_cftable_entry_t *dflt, unsigned int vcc, void *priv_data) { struct nsp_cs_configdata *cfg_mem = priv_data; if (cfg->index == 0) return -ENODEV; /* Does this card need audio output? */ if (cfg->flags & CISTPL_CFTABLE_AUDIO) { p_dev->conf.Attributes |= CONF_ENABLE_SPKR; p_dev->conf.Status = CCSR_AUDIO_ENA; } /* Use power settings for Vcc and Vpp if present */ /* Note that the CIS values need to be rescaled */ if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM)) { if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM]/10000) return -ENODEV; else if (dflt->vcc.present & (1<<CISTPL_POWER_VNOM)) { if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM]/10000) return -ENODEV; } if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) { p_dev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; } else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) { p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; } /* Do we need to allocate an interrupt? */ if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) p_dev->conf.Attributes |= CONF_ENABLE_IRQ; /* IO window settings */ p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; if (!(io->flags & CISTPL_IO_8BIT)) p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16; if (!(io->flags & CISTPL_IO_16BIT)) p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; p_dev->io.BasePort1 = io->win[0].base; p_dev->io.NumPorts1 = io->win[0].len; if (io->nwin > 1) { p_dev->io.Attributes2 = p_dev->io.Attributes1; p_dev->io.BasePort2 = io->win[1].base; p_dev->io.NumPorts2 = io->win[1].len; } /* This reserves IO space but doesn't actually enable it */ if (pcmcia_request_io(p_dev, &p_dev->io) != 0) goto next_entry; } if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) { memreq_t map; cistpl_mem_t *mem = (cfg->mem.nwin) ? &cfg->mem : &dflt->mem; cfg_mem->req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM; cfg_mem->req.Attributes |= WIN_ENABLE; cfg_mem->req.Base = mem->win[0].host_addr; cfg_mem->req.Size = mem->win[0].len; if (cfg_mem->req.Size < 0x1000) cfg_mem->req.Size = 0x1000; cfg_mem->req.AccessSpeed = 0; if (pcmcia_request_window(p_dev, &cfg_mem->req, &p_dev->win) != 0) goto next_entry; map.Page = 0; map.CardOffset = mem->win[0].card_addr; if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0) goto next_entry; cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size); cfg_mem->data->MmioLength = cfg_mem->req.Size; } /* If we got this far, we're cool! 
*/ return 0; } next_entry: nsp_dbg(NSP_DEBUG_INIT, "next"); pcmcia_disable_device(p_dev); return -ENODEV; } static int nsp_cs_config(struct pcmcia_device *link) { int ret; scsi_info_t *info = link->priv; struct nsp_cs_configdata *cfg_mem; struct Scsi_Host *host; nsp_hw_data *data = &nsp_data_base; nsp_dbg(NSP_DEBUG_INIT, "in"); cfg_mem = kzalloc(sizeof(*cfg_mem), GFP_KERNEL); if (!cfg_mem) return -ENOMEM; cfg_mem->data = data; ret = pcmcia_loop_config(link, nsp_cs_config_check, cfg_mem); goto cs_failed; if (link->conf.Attributes & CONF_ENABLE_IRQ) { if (pcmcia_request_irq(link, &link->irq)) goto cs_failed; } ret = pcmcia_request_configuration(link, &link->conf); if (ret) goto cs_failed; if (free_ports) { if (link->io.BasePort1) { release_region(link->io.BasePort1, link->io.NumPorts1); } if (link->io.BasePort2) { release_region(link->io.BasePort2, link->io.NumPorts2); } } /* Set port and IRQ */ data->BaseAddress = link->io.BasePort1; data->NumAddress = link->io.NumPorts1; data->IrqNumber = link->irq.AssignedIRQ; nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d", data->BaseAddress, data->NumAddress, data->IrqNumber); if(nsphw_init(data) == FALSE) { goto cs_failed; } host = nsp_detect(&nsp_driver_template); if (host == NULL) { nsp_dbg(NSP_DEBUG_INIT, "detect failed"); goto cs_failed; } ret = scsi_add_host (host, NULL); if (ret) goto cs_failed; scsi_scan_host(host); snprintf(info->node.dev_name, sizeof(info->node.dev_name), "scsi%d", host->host_no); link->dev_node = &info->node; info->host = host; /* Finally, report what we've done */ printk(KERN_INFO "nsp_cs: index 0x%02x: ", link->conf.ConfigIndex); if (link->conf.Vpp) { printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10); } if (link->conf.Attributes & CONF_ENABLE_IRQ) { printk(", irq %d", link->irq.AssignedIRQ); } if (link->io.NumPorts1) { printk(", io 0x%04x-0x%04x", link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1-1); } if (link->io.NumPorts2) printk(" & 0x%04x-0x%04x", link->io.BasePort2, 
link->io.BasePort2+link->io.NumPorts2-1); if (link->win) printk(", mem 0x%06lx-0x%06lx", cfg_mem->req.Base, cfg_mem->req.Base+cfg_mem->req.Size-1); printk("\n"); kfree(cfg_mem); return 0; cs_failed: nsp_dbg(NSP_DEBUG_INIT, "config fail"); nsp_cs_release(link); kfree(cfg_mem); return -ENODEV; } /* nsp_cs_config */ /*====================================================================== After a card is removed, nsp_cs_release() will unregister the net device, and release the PCMCIA configuration. If the device is still open, this will be postponed until it is closed. ======================================================================*/ static void nsp_cs_release(struct pcmcia_device *link) { scsi_info_t *info = link->priv; nsp_hw_data *data = NULL; if (info->host == NULL) { nsp_msg(KERN_DEBUG, "unexpected card release call."); } else { data = (nsp_hw_data *)info->host->hostdata; } nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link); /* Unlink the device chain */ if (info->host != NULL) { scsi_remove_host(info->host); } link->dev_node = NULL; if (link->win) { if (data != NULL) { iounmap((void *)(data->MmioAddress)); } } pcmcia_disable_device(link); if (info->host != NULL) { scsi_host_put(info->host); } } /* nsp_cs_release */ static int nsp_cs_suspend(struct pcmcia_device *link) { scsi_info_t *info = link->priv; nsp_hw_data *data; nsp_dbg(NSP_DEBUG_INIT, "event: suspend"); if (info->host != NULL) { nsp_msg(KERN_INFO, "clear SDTR status"); data = (nsp_hw_data *)info->host->hostdata; nsphw_init_sync(data); } info->stop = 1; return 0; } static int nsp_cs_resume(struct pcmcia_device *link) { scsi_info_t *info = link->priv; nsp_hw_data *data; nsp_dbg(NSP_DEBUG_INIT, "event: resume"); info->stop = 0; if (info->host != NULL) { nsp_msg(KERN_INFO, "reset host and bus"); data = (nsp_hw_data *)info->host->hostdata; nsphw_init (data); nsp_bus_reset(data); } return 0; } /*======================================================================* * module entry point 
*====================================================================*/ static struct pcmcia_device_id nsp_cs_ids[] = { PCMCIA_DEVICE_PROD_ID123("IO DATA", "CBSC16 ", "1", 0x547e66dc, 0x0d63a3fd, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-001", "1", 0x534c02bc, 0x52008408, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-002", "1", 0x534c02bc, 0xcb09d5b2, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-003", "1", 0x534c02bc, 0xbc0ee524, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("KME ", "SCSI-CARD-004", "1", 0x534c02bc, 0x226a7087, 0x51de003a), PCMCIA_DEVICE_PROD_ID123("WBT", "NinjaSCSI-3", "R1.0", 0xc7ba805f, 0xfdc7c97d, 0x6973710e), PCMCIA_DEVICE_PROD_ID123("WORKBIT", "UltraNinja-16", "1", 0x28191418, 0xb70f4b09, 0x51de003a), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, nsp_cs_ids); static struct pcmcia_driver nsp_driver = { .owner = THIS_MODULE, .drv = { .name = "nsp_cs", }, .probe = nsp_cs_probe, .remove = nsp_cs_detach, .id_table = nsp_cs_ids, .suspend = nsp_cs_suspend, .resume = nsp_cs_resume, }; static int __init nsp_cs_init(void) { nsp_msg(KERN_INFO, "loading..."); return pcmcia_register_driver(&nsp_driver); } static void __exit nsp_cs_exit(void) { nsp_msg(KERN_INFO, "unloading..."); pcmcia_unregister_driver(&nsp_driver); } module_init(nsp_cs_init) module_exit(nsp_cs_exit) /* end */
gpl-2.0
Android-Butter/aux_kernel_m7
drivers/bluetooth/ath3k.c
49
11109
/* * Copyright (c) 2008-2009 Atheros Communications Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/usb.h> #include <net/bluetooth/bluetooth.h> #define VERSION "1.0" #define ATH3K_DNLOAD 0x01 #define ATH3K_GETSTATE 0x05 #define ATH3K_SET_NORMAL_MODE 0x07 #define ATH3K_GETVERSION 0x09 #define USB_REG_SWITCH_VID_PID 0x0a #define ATH3K_MODE_MASK 0x3F #define ATH3K_NORMAL_MODE 0x0E #define ATH3K_PATCH_UPDATE 0x80 #define ATH3K_SYSCFG_UPDATE 0x40 #define ATH3K_XTAL_FREQ_26M 0x00 #define ATH3K_XTAL_FREQ_40M 0x01 #define ATH3K_XTAL_FREQ_19P2 0x02 #define ATH3K_NAME_LEN 0xFF struct ath3k_version { unsigned int rom_version; unsigned int build_version; unsigned int ram_version; unsigned char ref_clock; unsigned char reserved[0x07]; }; static struct usb_device_id ath3k_table[] = { /* Atheros AR3011 */ { USB_DEVICE(0x0CF3, 0x3000) }, /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x3002) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, { USB_DEVICE(0x0CF3, 
0x311D) }, { USB_DEVICE(0x0CF3, 0x817a) }, { USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x04CA, 0x3005) }, { USB_DEVICE(0x13d3, 0x3362) }, { USB_DEVICE(0x0CF3, 0xE004) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ath3k_table); #define BTUSB_ATH3012 0x80 /* This table is to load patch and sysconfig files * for AR3012 */ static struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { } /* Terminating entry */ }; #define USB_REQ_DFU_DNLOAD 1 #define BULK_SIZE 4096 #define FW_HDR_SIZE 20 static int ath3k_load_firmware(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; int err, pipe, len, size, sent = 0; int count = firmware->size; BT_DBG("udev %p", udev); pipe = usb_sndctrlpipe(udev, 0); send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } memcpy(send_buf, firmware->data, 20); if ((err = usb_control_msg(udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR, 0, 0, send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) { BT_ERR("Can't change to loading configuration err"); goto error; } sent += 20; count -= 20; while (count) { size = min_t(uint, count, BULK_SIZE); pipe = usb_sndbulkpipe(udev, 0x02); memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || (len != size)) { BT_ERR("Error in firmware loading err = %d," "len = %d, 
size = %d", err, len, size); goto error; } sent += size; count -= size; } error: kfree(send_buf); return err; } static int ath3k_get_state(struct usb_device *udev, unsigned char *state) { int pipe = 0; pipe = usb_rcvctrlpipe(udev, 0); return usb_control_msg(udev, pipe, ATH3K_GETSTATE, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, state, 0x01, USB_CTRL_SET_TIMEOUT); } static int ath3k_get_version(struct usb_device *udev, struct ath3k_version *version) { int pipe = 0; pipe = usb_rcvctrlpipe(udev, 0); return usb_control_msg(udev, pipe, ATH3K_GETVERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, sizeof(struct ath3k_version), USB_CTRL_SET_TIMEOUT); } static int ath3k_load_fwfile(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; int err, pipe, len, size, count, sent = 0; int ret; count = firmware->size; send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } size = min_t(uint, count, FW_HDR_SIZE); memcpy(send_buf, firmware->data, size); pipe = usb_sndctrlpipe(udev, 0); ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD, USB_TYPE_VENDOR, 0, 0, send_buf, size, USB_CTRL_SET_TIMEOUT); if (ret < 0) { BT_ERR("Can't change to loading configuration err"); kfree(send_buf); return ret; } sent += size; count -= size; while (count) { size = min_t(uint, count, BULK_SIZE); pipe = usb_sndbulkpipe(udev, 0x02); memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || (len != size)) { BT_ERR("Error in firmware loading err = %d," "len = %d, size = %d", err, len, size); kfree(send_buf); return err; } sent += size; count -= size; } kfree(send_buf); return 0; } static int ath3k_switch_pid(struct usb_device *udev) { int pipe = 0; pipe = usb_sndctrlpipe(udev, 0); return usb_control_msg(udev, pipe, USB_REG_SWITCH_VID_PID, USB_TYPE_VENDOR, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int ath3k_set_normal_mode(struct usb_device *udev) { unsigned char 
fw_state; int pipe = 0, ret; ret = ath3k_get_state(udev, &fw_state); if (ret < 0) { BT_ERR("Can't get state to change to normal mode err"); return ret; } if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) { BT_DBG("firmware was already in normal mode"); return 0; } pipe = usb_sndctrlpipe(udev, 0); return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE, USB_TYPE_VENDOR, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int ath3k_load_patch(struct usb_device *udev) { unsigned char fw_state; char filename[ATH3K_NAME_LEN] = {0}; const struct firmware *firmware; struct ath3k_version fw_version, pt_version; int ret; ret = ath3k_get_state(udev, &fw_state); if (ret < 0) { BT_ERR("Can't get state to change to load ram patch err"); return ret; } if (fw_state & ATH3K_PATCH_UPDATE) { BT_DBG("Patch was already downloaded"); return 0; } ret = ath3k_get_version(udev, &fw_version); if (ret < 0) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu", fw_version.rom_version); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { BT_ERR("Patch file not found %s", filename); return ret; } pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8); pt_version.build_version = *(int *) (firmware->data + firmware->size - 4); if ((pt_version.rom_version != fw_version.rom_version) || (pt_version.build_version <= fw_version.build_version)) { BT_ERR("Patch file version did not match with firmware"); release_firmware(firmware); return -EINVAL; } ret = ath3k_load_fwfile(udev, firmware); release_firmware(firmware); return ret; } static int ath3k_load_syscfg(struct usb_device *udev) { unsigned char fw_state; char filename[ATH3K_NAME_LEN] = {0}; const struct firmware *firmware; struct ath3k_version fw_version; int clk_value, ret; ret = ath3k_get_state(udev, &fw_state); if (ret < 0) { BT_ERR("Can't get state to change to load configration err"); return -EBUSY; } ret = 
ath3k_get_version(udev, &fw_version); if (ret < 0) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } switch (fw_version.ref_clock) { case ATH3K_XTAL_FREQ_26M: clk_value = 26; break; case ATH3K_XTAL_FREQ_40M: clk_value = 40; break; case ATH3K_XTAL_FREQ_19P2: clk_value = 19; break; default: clk_value = 0; break; } snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s", fw_version.rom_version, clk_value, ".dfu"); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { BT_ERR("Configuration file not found %s", filename); return ret; } ret = ath3k_load_fwfile(udev, firmware); release_firmware(firmware); return ret; } static int ath3k_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); int ret; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; /* match device ID in ath3k blacklist table */ if (!id->driver_info) { const struct usb_device_id *match; match = usb_match_id(intf, ath3k_blist_tbl); if (match) id = match; } /* load patch and sysconfig files for AR3012 */ if (id->driver_info & BTUSB_ATH3012) { /* New firmware with patch and sysconfig files already loaded */ if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001) return -ENODEV; ret = ath3k_load_patch(udev); if (ret < 0) { BT_ERR("Loading patch file failed"); return ret; } ret = ath3k_load_syscfg(udev); if (ret < 0) { BT_ERR("Loading sysconfig file failed"); return ret; } ret = ath3k_set_normal_mode(udev); if (ret < 0) { BT_ERR("Set normal mode failed"); return ret; } ath3k_switch_pid(udev); return 0; } if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { BT_ERR("Error loading firmware"); return -EIO; } ret = ath3k_load_firmware(udev, firmware); release_firmware(firmware); return ret; } static void ath3k_disconnect(struct usb_interface *intf) { BT_DBG("ath3k_disconnect intf %p", intf); } static struct 
usb_driver ath3k_driver = { .name = "ath3k", .probe = ath3k_probe, .disconnect = ath3k_disconnect, .id_table = ath3k_table, }; static int __init ath3k_init(void) { BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION); return usb_register(&ath3k_driver); } static void __exit ath3k_exit(void) { usb_deregister(&ath3k_driver); } module_init(ath3k_init); module_exit(ath3k_exit); MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Atheros AR30xx firmware driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("ath3k-1.fw");
gpl-2.0
NooNameR/qsd8x50-bravo-
drivers/md/dm-snap-persistent.c
1841
20966
/* * Copyright (C) 2001-2002 Sistina Software (UK) Limited. * Copyright (C) 2006-2008 Red Hat GmbH * * This file is released under the GPL. */ #include "dm-exception-store.h" #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/dm-io.h> #define DM_MSG_PREFIX "persistent snapshot" #define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */ /*----------------------------------------------------------------- * Persistent snapshots, by persistent we mean that the snapshot * will survive a reboot. *---------------------------------------------------------------*/ /* * We need to store a record of which parts of the origin have * been copied to the snapshot device. The snapshot code * requires that we copy exception chunks to chunk aligned areas * of the COW store. It makes sense therefore, to store the * metadata in chunk size blocks. * * There is no backward or forward compatibility implemented, * snapshots with different disk versions than the kernel will * not be usable. It is expected that "lvcreate" will blank out * the start of a fresh COW device before calling the snapshot * constructor. * * The first chunk of the COW device just contains the header. * After this there is a chunk filled with exception metadata, * followed by as many exception chunks as can fit in the * metadata areas. * * All on disk structures are in little-endian format. The end * of the exceptions info is indicated by an exception with a * new_chunk of 0, which is invalid since it would point to the * header chunk. */ /* * Magic for persistent snapshots: "SnAp" - Feeble isn't it. */ #define SNAP_MAGIC 0x70416e53 /* * The on-disk version of the metadata. */ #define SNAPSHOT_DISK_VERSION 1 #define NUM_SNAPSHOT_HDR_CHUNKS 1 struct disk_header { uint32_t magic; /* * Is this snapshot valid. There is no way of recovering * an invalid snapshot. */ uint32_t valid; /* * Simple, incrementing version. no backward * compatibility. 
*/ uint32_t version; /* In sectors */ uint32_t chunk_size; }; struct disk_exception { uint64_t old_chunk; uint64_t new_chunk; }; struct commit_callback { void (*callback)(void *, int success); void *context; }; /* * The top level structure for a persistent exception store. */ struct pstore { struct dm_exception_store *store; int version; int valid; uint32_t exceptions_per_area; /* * Now that we have an asynchronous kcopyd there is no * need for large chunk sizes, so it wont hurt to have a * whole chunks worth of metadata in memory at once. */ void *area; /* * An area of zeros used to clear the next area. */ void *zero_area; /* * An area used for header. The header can be written * concurrently with metadata (when invalidating the snapshot), * so it needs a separate buffer. */ void *header_area; /* * Used to keep track of which metadata area the data in * 'chunk' refers to. */ chunk_t current_area; /* * The next free chunk for an exception. * * When creating exceptions, all the chunks here and above are * free. It holds the next chunk to be allocated. On rare * occasions (e.g. after a system crash) holes can be left in * the exception store because chunks can be committed out of * order. * * When merging exceptions, it does not necessarily mean all the * chunks here and above are free. It holds the value it would * have held if all chunks had been committed in order of * allocation. Consequently the value may occasionally be * slightly too low, but since it's only used for 'status' and * it can never reach its minimum value too early this doesn't * matter. */ chunk_t next_free; /* * The index of next free exception in the current * metadata area. 
*/ uint32_t current_committed; atomic_t pending_count; uint32_t callback_count; struct commit_callback *callbacks; struct dm_io_client *io_client; struct workqueue_struct *metadata_wq; }; static int alloc_area(struct pstore *ps) { int r = -ENOMEM; size_t len; len = ps->store->chunk_size << SECTOR_SHIFT; /* * Allocate the chunk_size block of memory that will hold * a single metadata area. */ ps->area = vmalloc(len); if (!ps->area) goto err_area; ps->zero_area = vmalloc(len); if (!ps->zero_area) goto err_zero_area; memset(ps->zero_area, 0, len); ps->header_area = vmalloc(len); if (!ps->header_area) goto err_header_area; return 0; err_header_area: vfree(ps->zero_area); err_zero_area: vfree(ps->area); err_area: return r; } static void free_area(struct pstore *ps) { if (ps->area) vfree(ps->area); ps->area = NULL; if (ps->zero_area) vfree(ps->zero_area); ps->zero_area = NULL; if (ps->header_area) vfree(ps->header_area); ps->header_area = NULL; } struct mdata_req { struct dm_io_region *where; struct dm_io_request *io_req; struct work_struct work; int result; }; static void do_metadata(struct work_struct *work) { struct mdata_req *req = container_of(work, struct mdata_req, work); req->result = dm_io(req->io_req, 1, req->where, NULL); } /* * Read or write a chunk aligned and sized block of data from a device. */ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, int metadata) { struct dm_io_region where = { .bdev = dm_snap_cow(ps->store->snap)->bdev, .sector = ps->store->chunk_size * chunk, .count = ps->store->chunk_size, }; struct dm_io_request io_req = { .bi_rw = rw, .mem.type = DM_IO_VMA, .mem.ptr.vma = area, .client = ps->io_client, .notify.fn = NULL, }; struct mdata_req req; if (!metadata) return dm_io(&io_req, 1, &where, NULL); req.where = &where; req.io_req = &io_req; /* * Issue the synchronous I/O from a different thread * to avoid generic_make_request recursion. 
*/ INIT_WORK_ONSTACK(&req.work, do_metadata); queue_work(ps->metadata_wq, &req.work); flush_work(&req.work); return req.result; } /* * Convert a metadata area index to a chunk index. */ static chunk_t area_location(struct pstore *ps, chunk_t area) { return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area); } /* * Read or write a metadata area. Remembering to skip the first * chunk which holds the header. */ static int area_io(struct pstore *ps, int rw) { int r; chunk_t chunk; chunk = area_location(ps, ps->current_area); r = chunk_io(ps, ps->area, chunk, rw, 0); if (r) return r; return 0; } static void zero_memory_area(struct pstore *ps) { memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT); } static int zero_disk_area(struct pstore *ps, chunk_t area) { return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0); } static int read_header(struct pstore *ps, int *new_snapshot) { int r; struct disk_header *dh; unsigned chunk_size; int chunk_size_supplied = 1; char *chunk_err; /* * Use default chunk size (or logical_block_size, if larger) * if none supplied */ if (!ps->store->chunk_size) { ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, bdev_logical_block_size(dm_snap_cow(ps->store->snap)-> bdev) >> 9); ps->store->chunk_mask = ps->store->chunk_size - 1; ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1; chunk_size_supplied = 0; } ps->io_client = dm_io_client_create(); if (IS_ERR(ps->io_client)) return PTR_ERR(ps->io_client); r = alloc_area(ps); if (r) return r; r = chunk_io(ps, ps->header_area, 0, READ, 1); if (r) goto bad; dh = ps->header_area; if (le32_to_cpu(dh->magic) == 0) { *new_snapshot = 1; return 0; } if (le32_to_cpu(dh->magic) != SNAP_MAGIC) { DMWARN("Invalid or corrupt snapshot"); r = -ENXIO; goto bad; } *new_snapshot = 0; ps->valid = le32_to_cpu(dh->valid); ps->version = le32_to_cpu(dh->version); chunk_size = le32_to_cpu(dh->chunk_size); if (ps->store->chunk_size == chunk_size) return 0; if 
(chunk_size_supplied) DMWARN("chunk size %u in device metadata overrides " "table chunk size of %u.", chunk_size, ps->store->chunk_size); /* We had a bogus chunk_size. Fix stuff up. */ free_area(ps); r = dm_exception_store_set_chunk_size(ps->store, chunk_size, &chunk_err); if (r) { DMERR("invalid on-disk chunk size %u: %s.", chunk_size, chunk_err); return r; } r = alloc_area(ps); return r; bad: free_area(ps); return r; } static int write_header(struct pstore *ps) { struct disk_header *dh; memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT); dh = ps->header_area; dh->magic = cpu_to_le32(SNAP_MAGIC); dh->valid = cpu_to_le32(ps->valid); dh->version = cpu_to_le32(ps->version); dh->chunk_size = cpu_to_le32(ps->store->chunk_size); return chunk_io(ps, ps->header_area, 0, WRITE, 1); } /* * Access functions for the disk exceptions, these do the endian conversions. */ static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) { BUG_ON(index >= ps->exceptions_per_area); return ((struct disk_exception *) ps->area) + index; } static void read_exception(struct pstore *ps, uint32_t index, struct disk_exception *result) { struct disk_exception *e = get_exception(ps, index); /* copy it */ result->old_chunk = le64_to_cpu(e->old_chunk); result->new_chunk = le64_to_cpu(e->new_chunk); } static void write_exception(struct pstore *ps, uint32_t index, struct disk_exception *de) { struct disk_exception *e = get_exception(ps, index); /* copy it */ e->old_chunk = cpu_to_le64(de->old_chunk); e->new_chunk = cpu_to_le64(de->new_chunk); } static void clear_exception(struct pstore *ps, uint32_t index) { struct disk_exception *e = get_exception(ps, index); /* clear it */ e->old_chunk = 0; e->new_chunk = 0; } /* * Registers the exceptions that are present in the current area. * 'full' is filled in to indicate if the area has been * filled. 
*/ static int insert_exceptions(struct pstore *ps, int (*callback)(void *callback_context, chunk_t old, chunk_t new), void *callback_context, int *full) { int r; unsigned int i; struct disk_exception de; /* presume the area is full */ *full = 1; for (i = 0; i < ps->exceptions_per_area; i++) { read_exception(ps, i, &de); /* * If the new_chunk is pointing at the start of * the COW device, where the first metadata area * is we know that we've hit the end of the * exceptions. Therefore the area is not full. */ if (de.new_chunk == 0LL) { ps->current_committed = i; *full = 0; break; } /* * Keep track of the start of the free chunks. */ if (ps->next_free <= de.new_chunk) ps->next_free = de.new_chunk + 1; /* * Otherwise we add the exception to the snapshot. */ r = callback(callback_context, de.old_chunk, de.new_chunk); if (r) return r; } return 0; } static int read_exceptions(struct pstore *ps, int (*callback)(void *callback_context, chunk_t old, chunk_t new), void *callback_context) { int r, full = 1; /* * Keeping reading chunks and inserting exceptions until * we find a partially full area. */ for (ps->current_area = 0; full; ps->current_area++) { r = area_io(ps, READ); if (r) return r; r = insert_exceptions(ps, callback, callback_context, &full); if (r) return r; } ps->current_area--; return 0; } static struct pstore *get_info(struct dm_exception_store *store) { return (struct pstore *) store->context; } static void persistent_usage(struct dm_exception_store *store, sector_t *total_sectors, sector_t *sectors_allocated, sector_t *metadata_sectors) { struct pstore *ps = get_info(store); *sectors_allocated = ps->next_free * store->chunk_size; *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev); /* * First chunk is the fixed header. * Then there are (ps->current_area + 1) metadata chunks, each one * separated from the next by ps->exceptions_per_area data chunks. 
*/ *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) * store->chunk_size; } static void persistent_dtr(struct dm_exception_store *store) { struct pstore *ps = get_info(store); destroy_workqueue(ps->metadata_wq); /* Created in read_header */ if (ps->io_client) dm_io_client_destroy(ps->io_client); free_area(ps); /* Allocated in persistent_read_metadata */ if (ps->callbacks) vfree(ps->callbacks); kfree(ps); } static int persistent_read_metadata(struct dm_exception_store *store, int (*callback)(void *callback_context, chunk_t old, chunk_t new), void *callback_context) { int r, uninitialized_var(new_snapshot); struct pstore *ps = get_info(store); /* * Read the snapshot header. */ r = read_header(ps, &new_snapshot); if (r) return r; /* * Now we know correct chunk_size, complete the initialisation. */ ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) / sizeof(struct disk_exception); ps->callbacks = dm_vcalloc(ps->exceptions_per_area, sizeof(*ps->callbacks)); if (!ps->callbacks) return -ENOMEM; /* * Do we need to setup a new snapshot ? */ if (new_snapshot) { r = write_header(ps); if (r) { DMWARN("write_header failed"); return r; } ps->current_area = 0; zero_memory_area(ps); r = zero_disk_area(ps, 0); if (r) DMWARN("zero_disk_area(0) failed"); return r; } /* * Sanity checks. */ if (ps->version != SNAPSHOT_DISK_VERSION) { DMWARN("unable to handle snapshot disk version %d", ps->version); return -EINVAL; } /* * Metadata are valid, but snapshot is invalidated */ if (!ps->valid) return 1; /* * Read the metadata. */ r = read_exceptions(ps, callback, callback_context); return r; } static int persistent_prepare_exception(struct dm_exception_store *store, struct dm_exception *e) { struct pstore *ps = get_info(store); uint32_t stride; chunk_t next_free; sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); /* Is there enough room ? 
*/ if (size < ((ps->next_free + 1) * store->chunk_size)) return -ENOSPC; e->new_chunk = ps->next_free; /* * Move onto the next free pending, making sure to take * into account the location of the metadata chunks. */ stride = (ps->exceptions_per_area + 1); next_free = ++ps->next_free; if (sector_div(next_free, stride) == 1) ps->next_free++; atomic_inc(&ps->pending_count); return 0; } static void persistent_commit_exception(struct dm_exception_store *store, struct dm_exception *e, void (*callback) (void *, int success), void *callback_context) { unsigned int i; struct pstore *ps = get_info(store); struct disk_exception de; struct commit_callback *cb; de.old_chunk = e->old_chunk; de.new_chunk = e->new_chunk; write_exception(ps, ps->current_committed++, &de); /* * Add the callback to the back of the array. This code * is the only place where the callback array is * manipulated, and we know that it will never be called * multiple times concurrently. */ cb = ps->callbacks + ps->callback_count++; cb->callback = callback; cb->context = callback_context; /* * If there are exceptions in flight and we have not yet * filled this metadata area there's nothing more to do. */ if (!atomic_dec_and_test(&ps->pending_count) && (ps->current_committed != ps->exceptions_per_area)) return; /* * If we completely filled the current area, then wipe the next one. */ if ((ps->current_committed == ps->exceptions_per_area) && zero_disk_area(ps, ps->current_area + 1)) ps->valid = 0; /* * Commit exceptions to disk. */ if (ps->valid && area_io(ps, WRITE_FLUSH_FUA)) ps->valid = 0; /* * Advance to the next area if this one is full. 
*/ if (ps->current_committed == ps->exceptions_per_area) { ps->current_committed = 0; ps->current_area++; zero_memory_area(ps); } for (i = 0; i < ps->callback_count; i++) { cb = ps->callbacks + i; cb->callback(cb->context, ps->valid); } ps->callback_count = 0; } static int persistent_prepare_merge(struct dm_exception_store *store, chunk_t *last_old_chunk, chunk_t *last_new_chunk) { struct pstore *ps = get_info(store); struct disk_exception de; int nr_consecutive; int r; /* * When current area is empty, move back to preceding area. */ if (!ps->current_committed) { /* * Have we finished? */ if (!ps->current_area) return 0; ps->current_area--; r = area_io(ps, READ); if (r < 0) return r; ps->current_committed = ps->exceptions_per_area; } read_exception(ps, ps->current_committed - 1, &de); *last_old_chunk = de.old_chunk; *last_new_chunk = de.new_chunk; /* * Find number of consecutive chunks within the current area, * working backwards. */ for (nr_consecutive = 1; nr_consecutive < ps->current_committed; nr_consecutive++) { read_exception(ps, ps->current_committed - 1 - nr_consecutive, &de); if (de.old_chunk != *last_old_chunk - nr_consecutive || de.new_chunk != *last_new_chunk - nr_consecutive) break; } return nr_consecutive; } static int persistent_commit_merge(struct dm_exception_store *store, int nr_merged) { int r, i; struct pstore *ps = get_info(store); BUG_ON(nr_merged > ps->current_committed); for (i = 0; i < nr_merged; i++) clear_exception(ps, ps->current_committed - 1 - i); r = area_io(ps, WRITE_FLUSH_FUA); if (r < 0) return r; ps->current_committed -= nr_merged; /* * At this stage, only persistent_usage() uses ps->next_free, so * we make no attempt to keep ps->next_free strictly accurate * as exceptions may have been committed out-of-order originally. * Once a snapshot has become merging, we set it to the value it * would have held had all the exceptions been committed in order. 
* * ps->current_area does not get reduced by prepare_merge() until * after commit_merge() has removed the nr_merged previous exceptions. */ ps->next_free = area_location(ps, ps->current_area) + ps->current_committed + 1; return 0; } static void persistent_drop_snapshot(struct dm_exception_store *store) { struct pstore *ps = get_info(store); ps->valid = 0; if (write_header(ps)) DMWARN("write header failed"); } static int persistent_ctr(struct dm_exception_store *store, unsigned argc, char **argv) { struct pstore *ps; /* allocate the pstore */ ps = kzalloc(sizeof(*ps), GFP_KERNEL); if (!ps) return -ENOMEM; ps->store = store; ps->valid = 1; ps->version = SNAPSHOT_DISK_VERSION; ps->area = NULL; ps->zero_area = NULL; ps->header_area = NULL; ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */ ps->current_committed = 0; ps->callback_count = 0; atomic_set(&ps->pending_count, 0); ps->callbacks = NULL; ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0); if (!ps->metadata_wq) { kfree(ps); DMERR("couldn't start header metadata update thread"); return -ENOMEM; } store->context = ps; return 0; } static unsigned persistent_status(struct dm_exception_store *store, status_type_t status, char *result, unsigned maxlen) { unsigned sz = 0; switch (status) { case STATUSTYPE_INFO: break; case STATUSTYPE_TABLE: DMEMIT(" P %llu", (unsigned long long)store->chunk_size); } return sz; } static struct dm_exception_store_type _persistent_type = { .name = "persistent", .module = THIS_MODULE, .ctr = persistent_ctr, .dtr = persistent_dtr, .read_metadata = persistent_read_metadata, .prepare_exception = persistent_prepare_exception, .commit_exception = persistent_commit_exception, .prepare_merge = persistent_prepare_merge, .commit_merge = persistent_commit_merge, .drop_snapshot = persistent_drop_snapshot, .usage = persistent_usage, .status = persistent_status, }; static struct dm_exception_store_type _persistent_compat_type = { .name = "P", .module = THIS_MODULE, .ctr 
= persistent_ctr, .dtr = persistent_dtr, .read_metadata = persistent_read_metadata, .prepare_exception = persistent_prepare_exception, .commit_exception = persistent_commit_exception, .prepare_merge = persistent_prepare_merge, .commit_merge = persistent_commit_merge, .drop_snapshot = persistent_drop_snapshot, .usage = persistent_usage, .status = persistent_status, }; int dm_persistent_snapshot_init(void) { int r; r = dm_exception_store_type_register(&_persistent_type); if (r) { DMERR("Unable to register persistent exception store type"); return r; } r = dm_exception_store_type_register(&_persistent_compat_type); if (r) { DMERR("Unable to register old-style persistent exception " "store type"); dm_exception_store_type_unregister(&_persistent_type); return r; } return r; } void dm_persistent_snapshot_exit(void) { dm_exception_store_type_unregister(&_persistent_type); dm_exception_store_type_unregister(&_persistent_compat_type); }
gpl-2.0
cooks8/android_kernel_samsung_smdk4x12
net/ipv4/netfilter/nf_nat_sip.c
2097
17182
/* SIP extension for NAT alteration. * * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> * based on RR's ip_nat_ftp.c and other modules. * (C) 2007 United Security Providers * (C) 2007, 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <net/ip.h> #include <linux/udp.h> #include <linux/tcp.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> #include <net/netfilter/nf_nat_rule.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_expect.h> #include <linux/netfilter/nf_conntrack_sip.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>"); MODULE_DESCRIPTION("SIP NAT helper"); MODULE_ALIAS("ip_nat_sip"); static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int matchoff, unsigned int matchlen, const char *buffer, unsigned int buflen) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); struct tcphdr *th; unsigned int baseoff; if (nf_ct_protonum(ct) == IPPROTO_TCP) { th = (struct tcphdr *)(skb->data + ip_hdrlen(skb)); baseoff = ip_hdrlen(skb) + th->doff * 4; matchoff += dataoff - baseoff; if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, matchlen, buffer, buflen, false)) return 0; } else { baseoff = ip_hdrlen(skb) + sizeof(struct udphdr); matchoff += dataoff - baseoff; if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, matchoff, matchlen, buffer, buflen)) return 0; } /* Reload data pointer and adjust datalen value */ *dptr = skb->data + dataoff; *datalen += buflen - matchlen; return 1; } static int map_addr(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int 
matchoff, unsigned int matchlen, union nf_inet_addr *addr, __be16 port) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; unsigned int buflen; __be32 newaddr; __be16 newport; if (ct->tuplehash[dir].tuple.src.u3.ip == addr->ip && ct->tuplehash[dir].tuple.src.u.udp.port == port) { newaddr = ct->tuplehash[!dir].tuple.dst.u3.ip; newport = ct->tuplehash[!dir].tuple.dst.u.udp.port; } else if (ct->tuplehash[dir].tuple.dst.u3.ip == addr->ip && ct->tuplehash[dir].tuple.dst.u.udp.port == port) { newaddr = ct->tuplehash[!dir].tuple.src.u3.ip; newport = ct->tuplehash[!dir].tuple.src.u.udp.port; } else return 1; if (newaddr == addr->ip && newport == port) return 1; buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport)); return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen); } static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, enum sip_header_types type) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); unsigned int matchlen, matchoff; union nf_inet_addr addr; __be16 port; if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL, &matchoff, &matchlen, &addr, &port) <= 0) return 1; return map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port); } static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); unsigned int coff, matchoff, matchlen; enum sip_header_types hdr; union nf_inet_addr addr; __be16 port; int request, in_header; /* Basic rules: requests and responses. 
*/ if (strnicmp(*dptr, "SIP/2.0", strlen("SIP/2.0")) != 0) { if (ct_sip_parse_request(ct, *dptr, *datalen, &matchoff, &matchlen, &addr, &port) > 0 && !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port)) return NF_DROP; request = 1; } else request = 0; if (nf_ct_protonum(ct) == IPPROTO_TCP) hdr = SIP_HDR_VIA_TCP; else hdr = SIP_HDR_VIA_UDP; /* Translate topmost Via header and parameters */ if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, hdr, NULL, &matchoff, &matchlen, &addr, &port) > 0) { unsigned int matchend, poff, plen, buflen, n; char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; /* We're only interested in headers related to this * connection */ if (request) { if (addr.ip != ct->tuplehash[dir].tuple.src.u3.ip || port != ct->tuplehash[dir].tuple.src.u.udp.port) goto next; } else { if (addr.ip != ct->tuplehash[dir].tuple.dst.u3.ip || port != ct->tuplehash[dir].tuple.dst.u.udp.port) goto next; } if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port)) return NF_DROP; matchend = matchoff + matchlen; /* The maddr= parameter (RFC 2361) specifies where to send * the reply. */ if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, "maddr=", &poff, &plen, &addr) > 0 && addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { buflen = sprintf(buffer, "%pI4", &ct->tuplehash[!dir].tuple.dst.u3.ip); if (!mangle_packet(skb, dataoff, dptr, datalen, poff, plen, buffer, buflen)) return NF_DROP; } /* The received= parameter (RFC 2361) contains the address * from which the server received the request. 
*/ if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, "received=", &poff, &plen, &addr) > 0 && addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { buflen = sprintf(buffer, "%pI4", &ct->tuplehash[!dir].tuple.src.u3.ip); if (!mangle_packet(skb, dataoff, dptr, datalen, poff, plen, buffer, buflen)) return NF_DROP; } /* The rport= parameter (RFC 3581) contains the port number * from which the server received the request. */ if (ct_sip_parse_numerical_param(ct, *dptr, matchend, *datalen, "rport=", &poff, &plen, &n) > 0 && htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port && htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) { __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port; buflen = sprintf(buffer, "%u", ntohs(p)); if (!mangle_packet(skb, dataoff, dptr, datalen, poff, plen, buffer, buflen)) return NF_DROP; } } next: /* Translate Contact headers */ coff = 0; in_header = 0; while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen, SIP_HDR_CONTACT, &in_header, &matchoff, &matchlen, &addr, &port) > 0) { if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port)) return NF_DROP; } if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) || !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO)) return NF_DROP; return NF_ACCEPT; } static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); const struct tcphdr *th; if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0) return; th = (struct tcphdr *)(skb->data + ip_hdrlen(skb)); nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off); } /* Handles expected signalling connections and media streams */ static void ip_nat_sip_expected(struct nf_conn *ct, struct nf_conntrack_expect *exp) { struct nf_nat_range range; /* This must be a fresh one. */ BUG_ON(ct->status & IPS_NAT_DONE_MASK); /* For DST manip, map port here to where it's expected. 
*/ range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); range.min = range.max = exp->saved_proto; range.min_ip = range.max_ip = exp->saved_ip; nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST); /* Change src to where master sends to, but only if the connection * actually came from the same source. */ if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == ct->master->tuplehash[exp->dir].tuple.src.u3.ip) { range.flags = IP_NAT_RANGE_MAP_IPS; range.min_ip = range.max_ip = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC); } } static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, struct nf_conntrack_expect *exp, unsigned int matchoff, unsigned int matchlen) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); __be32 newip; u_int16_t port; char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; unsigned buflen; /* Connection will come from reply */ if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip) newip = exp->tuple.dst.u3.ip; else newip = ct->tuplehash[!dir].tuple.dst.u3.ip; /* If the signalling port matches the connection's source port in the * original direction, try to use the destination port in the opposite * direction. 
*/ if (exp->tuple.dst.u.udp.port == ct->tuplehash[dir].tuple.src.u.udp.port) port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port); else port = ntohs(exp->tuple.dst.u.udp.port); exp->saved_ip = exp->tuple.dst.u3.ip; exp->tuple.dst.u3.ip = newip; exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port; exp->dir = !dir; exp->expectfn = ip_nat_sip_expected; for (; port != 0; port++) { int ret; exp->tuple.dst.u.udp.port = htons(port); ret = nf_ct_expect_related(exp); if (ret == 0) break; else if (ret != -EBUSY) { port = 0; break; } } if (port == 0) return NF_DROP; if (exp->tuple.dst.u3.ip != exp->saved_ip || exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { buflen = sprintf(buffer, "%pI4:%u", &newip, port); if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen)) goto err; } return NF_ACCEPT; err: nf_ct_unexpect_related(exp); return NF_DROP; } static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); unsigned int matchoff, matchlen; char buffer[sizeof("65536")]; int buflen, c_len; /* Get actual SDP length */ if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen, SDP_HDR_VERSION, SDP_HDR_UNSPEC, &matchoff, &matchlen) <= 0) return 0; c_len = *datalen - matchoff + strlen("v="); /* Now, update SDP length */ if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CONTENT_LENGTH, &matchoff, &matchlen) <= 0) return 0; buflen = sprintf(buffer, "%u", c_len); return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen); } static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int sdpoff, enum sdp_header_types type, enum sdp_header_types term, char *buffer, int buflen) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); unsigned int matchlen, matchoff; if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, 
*datalen, type, term, &matchoff, &matchlen) <= 0) return -ENOENT; return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen) ? 0 : -EINVAL; } static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int sdpoff, enum sdp_header_types type, enum sdp_header_types term, const union nf_inet_addr *addr) { char buffer[sizeof("nnn.nnn.nnn.nnn")]; unsigned int buflen; buflen = sprintf(buffer, "%pI4", &addr->ip); if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, type, term, buffer, buflen)) return 0; return mangle_content_len(skb, dataoff, dptr, datalen); } static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int matchoff, unsigned int matchlen, u_int16_t port) { char buffer[sizeof("nnnnn")]; unsigned int buflen; buflen = sprintf(buffer, "%u", port); if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen, buffer, buflen)) return 0; return mangle_content_len(skb, dataoff, dptr, datalen); } static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, unsigned int sdpoff, const union nf_inet_addr *addr) { char buffer[sizeof("nnn.nnn.nnn.nnn")]; unsigned int buflen; /* Mangle session description owner and contact addresses */ buflen = sprintf(buffer, "%pI4", &addr->ip); if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA, buffer, buflen)) return 0; switch (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA, buffer, buflen)) { case 0: /* * RFC 2327: * * Session description * * c=* (connection information - not required if included in all media) */ case -ENOENT: break; default: return 0; } return mangle_content_len(skb, dataoff, dptr, datalen); } /* So, this packet has hit the connection tracking matching code. 
Mangle it, and change the expectation to match the new version. */ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff, const char **dptr, unsigned int *datalen, struct nf_conntrack_expect *rtp_exp, struct nf_conntrack_expect *rtcp_exp, unsigned int mediaoff, unsigned int medialen, union nf_inet_addr *rtp_addr) { enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); u_int16_t port; /* Connection will come from reply */ if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip) rtp_addr->ip = rtp_exp->tuple.dst.u3.ip; else rtp_addr->ip = ct->tuplehash[!dir].tuple.dst.u3.ip; rtp_exp->saved_ip = rtp_exp->tuple.dst.u3.ip; rtp_exp->tuple.dst.u3.ip = rtp_addr->ip; rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port; rtp_exp->dir = !dir; rtp_exp->expectfn = ip_nat_sip_expected; rtcp_exp->saved_ip = rtcp_exp->tuple.dst.u3.ip; rtcp_exp->tuple.dst.u3.ip = rtp_addr->ip; rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port; rtcp_exp->dir = !dir; rtcp_exp->expectfn = ip_nat_sip_expected; /* Try to get same pair of ports: if not, try to change them. */ for (port = ntohs(rtp_exp->tuple.dst.u.udp.port); port != 0; port += 2) { int ret; rtp_exp->tuple.dst.u.udp.port = htons(port); ret = nf_ct_expect_related(rtp_exp); if (ret == -EBUSY) continue; else if (ret < 0) { port = 0; break; } rtcp_exp->tuple.dst.u.udp.port = htons(port + 1); ret = nf_ct_expect_related(rtcp_exp); if (ret == 0) break; else if (ret != -EBUSY) { nf_ct_unexpect_related(rtp_exp); port = 0; break; } } if (port == 0) goto err1; /* Update media port. 
*/ if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port && !ip_nat_sdp_port(skb, dataoff, dptr, datalen, mediaoff, medialen, port)) goto err2; return NF_ACCEPT; err2: nf_ct_unexpect_related(rtp_exp); nf_ct_unexpect_related(rtcp_exp); err1: return NF_DROP; } static void __exit nf_nat_sip_fini(void) { rcu_assign_pointer(nf_nat_sip_hook, NULL); rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL); rcu_assign_pointer(nf_nat_sip_expect_hook, NULL); rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL); rcu_assign_pointer(nf_nat_sdp_port_hook, NULL); rcu_assign_pointer(nf_nat_sdp_session_hook, NULL); rcu_assign_pointer(nf_nat_sdp_media_hook, NULL); synchronize_rcu(); } static int __init nf_nat_sip_init(void) { BUG_ON(nf_nat_sip_hook != NULL); BUG_ON(nf_nat_sip_seq_adjust_hook != NULL); BUG_ON(nf_nat_sip_expect_hook != NULL); BUG_ON(nf_nat_sdp_addr_hook != NULL); BUG_ON(nf_nat_sdp_port_hook != NULL); BUG_ON(nf_nat_sdp_session_hook != NULL); BUG_ON(nf_nat_sdp_media_hook != NULL); rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust); rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect); rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr); rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port); rcu_assign_pointer(nf_nat_sdp_session_hook, ip_nat_sdp_session); rcu_assign_pointer(nf_nat_sdp_media_hook, ip_nat_sdp_media); return 0; } module_init(nf_nat_sip_init); module_exit(nf_nat_sip_fini);
gpl-2.0
paulalesius/kernel-3.10.20-lenovo-tablet
drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
2097
2864
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <engine/software.h> #include <engine/disp.h> #include <core/class.h> #include "nv50.h" static struct nouveau_oclass nvf0_disp_sclass[] = { { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, {} }; static struct nouveau_oclass nvf0_disp_base_oclass[] = { { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds }, {} }; static int nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv50_disp_priv *priv; int heads = nv_rd32(parent, 0x022448); int ret; ret = nouveau_disp_create(parent, engine, oclass, heads, "PDISP", "display", &priv); *pobject = nv_object(priv); if (ret) return ret; nv_engine(priv)->sclass = nvf0_disp_base_oclass; nv_engine(priv)->cclass = &nv50_disp_cclass; nv_subdev(priv)->intr = nvd0_disp_intr; INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor); priv->sclass = nvf0_disp_sclass; priv->head.nr = heads; priv->dac.nr = 3; priv->sor.nr = 4; priv->dac.power = nv50_dac_power; priv->dac.sense = nv50_dac_sense; priv->sor.power = nv50_sor_power; priv->sor.hda_eld = nvd0_hda_eld; priv->sor.hdmi = nvd0_hdmi_ctrl; priv->sor.dp = &nvd0_sor_dp_func; return 0; } struct nouveau_oclass nvf0_disp_oclass = { .handle = NV_ENGINE(DISP, 0x92), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvf0_disp_ctor, .dtor = _nouveau_disp_dtor, .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, };
gpl-2.0
LiquidSmooth-Devices/android_kernel_htc_flounder
drivers/net/wireless/iwlegacy/3945-mac.c
2097
108309
/****************************************************************************** * * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci-aspm.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <net/ieee80211_radiotap.h> #include <net/mac80211.h> #include <asm/div64.h> #define DRV_NAME "iwl3945" #include "commands.h" #include "common.h" #include "3945.h" #include "iwl-spectrum.h" /* * module name, copyright, version, etc. 
*/ #define DRV_DESCRIPTION \ "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" #ifdef CONFIG_IWLEGACY_DEBUG #define VD "d" #else #define VD #endif /* * add "s" to indicate spectrum measurement included. * we add it here to be consistent with previous releases in which * this was configurable. */ #define DRV_VERSION IWLWIFI_VERSION VD "s" #define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" #define DRV_AUTHOR "<ilw@linux.intel.com>" MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); MODULE_LICENSE("GPL"); /* module parameters */ struct il_mod_params il3945_mod_params = { .sw_crypto = 1, .restart_fw = 1, .disable_hw_scan = 1, /* the rest are 0 by default */ }; /** * il3945_get_antenna_flags - Get antenna flags for RXON command * @il: eeprom and antenna fields are used to determine antenna flags * * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed * il3945_mod_params.antenna specifies the antenna diversity mode: * * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself * IL_ANTENNA_MAIN - Force MAIN antenna * IL_ANTENNA_AUX - Force AUX antenna */ __le32 il3945_get_antenna_flags(const struct il_priv *il) { struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; switch (il3945_mod_params.antenna) { case IL_ANTENNA_DIVERSITY: return 0; case IL_ANTENNA_MAIN: if (eeprom->antenna_switch_type) return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; case IL_ANTENNA_AUX: if (eeprom->antenna_switch_type) return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; } /* bad antenna selector value */ IL_ERR("Bad antenna selector value (0x%x)\n", il3945_mod_params.antenna); return 0; /* "diversity" is default if error */ } static int il3945_set_ccmp_dynamic_key_info(struct il_priv *il, struct ieee80211_key_conf *keyconf, u8 sta_id) { unsigned long flags; __le16 key_flags = 0; 
int ret; key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); if (sta_id == il->hw_params.bcast_id) key_flags |= STA_KEY_MULTICAST_MSK; keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; keyconf->hw_key_idx = keyconf->keyidx; key_flags &= ~STA_KEY_FLG_INVALID; spin_lock_irqsave(&il->sta_lock, flags); il->stations[sta_id].keyinfo.cipher = keyconf->cipher; il->stations[sta_id].keyinfo.keylen = keyconf->keylen; memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen); memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen); if ((il->stations[sta_id].sta.key. key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) il->stations[sta_id].sta.key.key_offset = il_get_free_ucode_key_idx(il); /* else, we are overriding an existing key => no need to allocated room * in uCode. */ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, "no space for a new key"); il->stations[sta_id].sta.key.key_flags = key_flags; il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; D_INFO("hwcrypto: modify ucode station key info\n"); ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC); spin_unlock_irqrestore(&il->sta_lock, flags); return ret; } static int il3945_set_tkip_dynamic_key_info(struct il_priv *il, struct ieee80211_key_conf *keyconf, u8 sta_id) { return -EOPNOTSUPP; } static int il3945_set_wep_dynamic_key_info(struct il_priv *il, struct ieee80211_key_conf *keyconf, u8 sta_id) { return -EOPNOTSUPP; } static int il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id) { unsigned long flags; struct il_addsta_cmd sta_cmd; spin_lock_irqsave(&il->sta_lock, flags); memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key)); memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo)); il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 
il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; memcpy(&sta_cmd, &il->stations[sta_id].sta, sizeof(struct il_addsta_cmd)); spin_unlock_irqrestore(&il->sta_lock, flags); D_INFO("hwcrypto: clear ucode station key info\n"); return il_send_add_sta(il, &sta_cmd, CMD_SYNC); } static int il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf, u8 sta_id) { int ret = 0; keyconf->hw_key_idx = HW_KEY_DYNAMIC; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id); break; case WLAN_CIPHER_SUITE_TKIP: ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id); break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id); break; default: IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher); ret = -EINVAL; } D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret); return ret; } static int il3945_remove_static_key(struct il_priv *il) { int ret = -EOPNOTSUPP; return ret; } static int il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key) { if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || key->cipher == WLAN_CIPHER_SUITE_WEP104) return -EOPNOTSUPP; IL_ERR("Static key invalid: cipher %x\n", key->cipher); return -EINVAL; } static void il3945_clear_free_frames(struct il_priv *il) { struct list_head *element; D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count); while (!list_empty(&il->free_frames)) { element = il->free_frames.next; list_del(element); kfree(list_entry(element, struct il3945_frame, list)); il->frames_count--; } if (il->frames_count) { IL_WARN("%d frames still in use. 
Did we lose one?\n", il->frames_count); il->frames_count = 0; } } static struct il3945_frame * il3945_get_free_frame(struct il_priv *il) { struct il3945_frame *frame; struct list_head *element; if (list_empty(&il->free_frames)) { frame = kzalloc(sizeof(*frame), GFP_KERNEL); if (!frame) { IL_ERR("Could not allocate frame!\n"); return NULL; } il->frames_count++; return frame; } element = il->free_frames.next; list_del(element); return list_entry(element, struct il3945_frame, list); } static void il3945_free_frame(struct il_priv *il, struct il3945_frame *frame) { memset(frame, 0, sizeof(*frame)); list_add(&frame->list, &il->free_frames); } unsigned int il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr, int left) { if (!il_is_associated(il) || !il->beacon_skb) return 0; if (il->beacon_skb->len > left) return 0; memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len); return il->beacon_skb->len; } static int il3945_send_beacon_cmd(struct il_priv *il) { struct il3945_frame *frame; unsigned int frame_size; int rc; u8 rate; frame = il3945_get_free_frame(il); if (!frame) { IL_ERR("Could not obtain free frame buffer for beacon " "command.\n"); return -ENOMEM; } rate = il_get_lowest_plcp(il); frame_size = il3945_hw_get_beacon_cmd(il, frame, rate); rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]); il3945_free_frame(il, frame); return rc; } static void il3945_unset_hw_params(struct il_priv *il) { if (il->_3945.shared_virt) dma_free_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared), il->_3945.shared_virt, il->_3945.shared_phys); } static void il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info, struct il_device_cmd *cmd, struct sk_buff *skb_frag, int sta_id) { struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo; tx_cmd->sec_ctl = 0; switch (keyinfo->cipher) { case WLAN_CIPHER_SUITE_CCMP: tx_cmd->sec_ctl = TX_CMD_SEC_CCM; 
memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); D_TX("tx_cmd with AES hwcrypto\n"); break; case WLAN_CIPHER_SUITE_TKIP: break; case WLAN_CIPHER_SUITE_WEP104: tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; /* fall through */ case WLAN_CIPHER_SUITE_WEP40: tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | (info->control.hw_key-> hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); D_TX("Configuring packet for WEP encryption " "with key %d\n", info->control.hw_key->hw_key_idx); break; default: IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher); break; } } /* * handle build C_TX command notification. */ static void il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd, struct ieee80211_tx_info *info, struct ieee80211_hdr *hdr, u8 std_id) { struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload; __le32 tx_flags = tx_cmd->tx_flags; __le16 fc = hdr->frame_control; tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { tx_flags |= TX_CMD_FLG_ACK_MSK; if (ieee80211_is_mgmt(fc)) tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; if (ieee80211_is_probe_resp(fc) && !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) tx_flags |= TX_CMD_FLG_TSF_MSK; } else { tx_flags &= (~TX_CMD_FLG_ACK_MSK); tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; } tx_cmd->sta_id = std_id; if (ieee80211_has_morefrags(fc)) tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; if (ieee80211_is_data_qos(fc)) { u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; } else { tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; } il_tx_cmd_protection(il, info, fc, &tx_flags); tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); if (ieee80211_is_mgmt(fc)) { if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); else tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); } else { tx_cmd->timeout.pm_frame_timeout = 0; } tx_cmd->driver_txop = 0; tx_cmd->tx_flags = tx_flags; 
tx_cmd->next_frame_len = 0; } /* * start C_TX command process */ static int il3945_tx_skb(struct il_priv *il, struct ieee80211_sta *sta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct il3945_tx_cmd *tx_cmd; struct il_tx_queue *txq = NULL; struct il_queue *q = NULL; struct il_device_cmd *out_cmd; struct il_cmd_meta *out_meta; dma_addr_t phys_addr; dma_addr_t txcmd_phys; int txq_id = skb_get_queue_mapping(skb); u16 len, idx, hdr_len; u16 firstlen, secondlen; u8 id; u8 unicast; u8 sta_id; u8 tid = 0; __le16 fc; u8 wait_write_ptr = 0; unsigned long flags; spin_lock_irqsave(&il->lock, flags); if (il_is_rfkill(il)) { D_DROP("Dropping - RF KILL\n"); goto drop_unlock; } if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) == IL_INVALID_RATE) { IL_ERR("ERROR: No TX rate available.\n"); goto drop_unlock; } unicast = !is_multicast_ether_addr(hdr->addr1); id = 0; fc = hdr->frame_control; #ifdef CONFIG_IWLEGACY_DEBUG if (ieee80211_is_auth(fc)) D_TX("Sending AUTH frame\n"); else if (ieee80211_is_assoc_req(fc)) D_TX("Sending ASSOC frame\n"); else if (ieee80211_is_reassoc_req(fc)) D_TX("Sending REASSOC frame\n"); #endif spin_unlock_irqrestore(&il->lock, flags); hdr_len = ieee80211_hdrlen(fc); /* Find idx into station table for destination station */ sta_id = il_sta_id_or_broadcast(il, sta); if (sta_id == IL_INVALID_STATION) { D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); goto drop; } D_RATE("station Id %d\n", sta_id); if (ieee80211_is_data_qos(fc)) { u8 *qc = ieee80211_get_qos_ctl(hdr); tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; if (unlikely(tid >= MAX_TID_COUNT)) goto drop; } /* Descriptor for chosen Tx queue */ txq = &il->txq[txq_id]; q = &txq->q; if ((il_queue_space(q) < q->high_mark)) goto drop; spin_lock_irqsave(&il->lock, flags); idx = il_get_cmd_idx(q, q->write_ptr, 0); txq->skbs[q->write_ptr] = skb; /* Init first empty entry in queue's array of Tx/cmd 
buffers */ out_cmd = txq->cmd[idx]; out_meta = &txq->meta[idx]; tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload; memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); memset(tx_cmd, 0, sizeof(*tx_cmd)); /* * Set up the Tx-command (not MAC!) header. * Store the chosen Tx queue and TFD idx within the sequence field; * after Tx, uCode's Tx response will return this value so driver can * locate the frame within the tx queue and do post-tx processing. */ out_cmd->hdr.cmd = C_TX; out_cmd->hdr.sequence = cpu_to_le16((u16) (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr))); /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdr_len); if (info->control.hw_key) il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id); /* TODO need this for burst mode later on */ il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id); il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id); /* Total # bytes to be transmitted */ tx_cmd->len = cpu_to_le16((u16) skb->len); tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; /* * Use the first empty entry in this queue's command buffer array * to contain the Tx command and MAC header concatenated together * (payload data will be in another buffer). * Size of this varies, due to varying MAC header length. * If end is not dword aligned, we'll have 2 extra bytes at the end * of the MAC header (device reads on dword boundaries). * We'll tell device about this padding later. */ len = sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len; firstlen = (len + 3) & ~3; /* Physical address of this Tx command's header (not MAC header!), * within command buffer array. */ txcmd_phys = pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, PCI_DMA_TODEVICE); if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) goto drop_unlock; /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). 
*/ secondlen = skb->len - hdr_len; if (secondlen > 0) { phys_addr = pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, PCI_DMA_TODEVICE); if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) goto drop_unlock; } /* Add buffer containing Tx command and MAC(!) header to TFD's * first entry */ il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); dma_unmap_addr_set(out_meta, mapping, txcmd_phys); dma_unmap_len_set(out_meta, len, firstlen); if (secondlen > 0) il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0, U32_PAD(secondlen)); if (!ieee80211_has_morefrags(hdr->frame_control)) { txq->need_update = 1; } else { wait_write_ptr = 1; txq->need_update = 0; } il_update_stats(il, true, fc, skb->len); D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence)); D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd)); il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, ieee80211_hdrlen(fc)); /* Tell device the write idx *just past* this latest filled TFD */ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); il_txq_update_write_ptr(il, txq); spin_unlock_irqrestore(&il->lock, flags); if (il_queue_space(q) < q->high_mark && il->mac80211_registered) { if (wait_write_ptr) { spin_lock_irqsave(&il->lock, flags); txq->need_update = 1; il_txq_update_write_ptr(il, txq); spin_unlock_irqrestore(&il->lock, flags); } il_stop_queue(il, txq); } return 0; drop_unlock: spin_unlock_irqrestore(&il->lock, flags); drop: return -1; } static int il3945_get_measurement(struct il_priv *il, struct ieee80211_measurement_params *params, u8 type) { struct il_spectrum_cmd spectrum; struct il_rx_pkt *pkt; struct il_host_cmd cmd = { .id = C_SPECTRUM_MEASUREMENT, .data = (void *)&spectrum, .flags = CMD_WANT_SKB, }; u32 add_time = le64_to_cpu(params->start_time); int rc; int spectrum_resp_status; int duration = le16_to_cpu(params->duration); if (il_is_associated(il)) add_time = 
il_usecs_to_beacons(il, le64_to_cpu(params->start_time) - il->_3945.last_tsf, le16_to_cpu(il->timing.beacon_interval)); memset(&spectrum, 0, sizeof(spectrum)); spectrum.channel_count = cpu_to_le16(1); spectrum.flags = RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; cmd.len = sizeof(spectrum); spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); if (il_is_associated(il)) spectrum.start_time = il_add_beacon_time(il, il->_3945.last_beacon_time, add_time, le16_to_cpu(il->timing.beacon_interval)); else spectrum.start_time = 0; spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); spectrum.channels[0].channel = params->channel; spectrum.channels[0].type = type; if (il->active.flags & RXON_FLG_BAND_24G_MSK) spectrum.flags |= RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; rc = il_send_cmd_sync(il, &cmd); if (rc) return rc; pkt = (struct il_rx_pkt *)cmd.reply_page; if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { IL_ERR("Bad return from N_RX_ON_ASSOC command\n"); rc = -EIO; } spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); switch (spectrum_resp_status) { case 0: /* Command will be handled */ if (pkt->u.spectrum.id != 0xff) { D_INFO("Replaced existing measurement: %d\n", pkt->u.spectrum.id); il->measurement_status &= ~MEASUREMENT_READY; } il->measurement_status |= MEASUREMENT_ACTIVE; rc = 0; break; case 1: /* Command will not be handled */ rc = -EAGAIN; break; } il_free_pages(il, cmd.reply_page); return rc; } static void il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb) { struct il_rx_pkt *pkt = rxb_addr(rxb); struct il_alive_resp *palive; struct delayed_work *pwork; palive = &pkt->u.alive_frame; D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", palive->is_valid, palive->ver_type, palive->ver_subtype); if (palive->ver_subtype == INITIALIZE_SUBTYPE) { D_INFO("Initialization Alive received.\n"); memcpy(&il->card_alive_init, 
&pkt->u.alive_frame, sizeof(struct il_alive_resp)); pwork = &il->init_alive_start; } else { D_INFO("Runtime Alive received.\n"); memcpy(&il->card_alive, &pkt->u.alive_frame, sizeof(struct il_alive_resp)); pwork = &il->alive_start; il3945_disable_events(il); } /* We delay the ALIVE response by 5ms to * give the HW RF Kill time to activate... */ if (palive->is_valid == UCODE_VALID_OK) queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5)); else IL_WARN("uCode did not respond OK.\n"); } static void il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb) { #ifdef CONFIG_IWLEGACY_DEBUG struct il_rx_pkt *pkt = rxb_addr(rxb); #endif D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status); } static void il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb) { struct il_rx_pkt *pkt = rxb_addr(rxb); struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status); #ifdef CONFIG_IWLEGACY_DEBUG u8 rate = beacon->beacon_notify_hdr.rate; D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n", le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, beacon->beacon_notify_hdr.failure_frame, le32_to_cpu(beacon->ibss_mgr_status), le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate); #endif il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); } /* Handle notification from uCode that card's power state is changing * due to software, hardware, or critical temperature RFKILL */ static void il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb) { struct il_rx_pkt *pkt = rxb_addr(rxb); u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); unsigned long status = il->status; IL_WARN("Card state received: HW:%s SW:%s\n", (flags & HW_CARD_DISABLED) ? "Kill" : "On", (flags & SW_CARD_DISABLED) ? 
"Kill" : "On"); _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); if (flags & HW_CARD_DISABLED) set_bit(S_RFKILL, &il->status); else clear_bit(S_RFKILL, &il->status); il_scan_cancel(il); if ((test_bit(S_RFKILL, &status) != test_bit(S_RFKILL, &il->status))) wiphy_rfkill_set_hw_state(il->hw->wiphy, test_bit(S_RFKILL, &il->status)); else wake_up(&il->wait_command_queue); } /** * il3945_setup_handlers - Initialize Rx handler callbacks * * Setup the RX handlers for each of the reply types sent from the uCode * to the host. * * This function chains into the hardware specific files for them to setup * any hardware specific handlers as well. */ static void il3945_setup_handlers(struct il_priv *il) { il->handlers[N_ALIVE] = il3945_hdl_alive; il->handlers[C_ADD_STA] = il3945_hdl_add_sta; il->handlers[N_ERROR] = il_hdl_error; il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa; il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement; il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep; il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats; il->handlers[N_BEACON] = il3945_hdl_beacon; /* * The same handler is used for both the REPLY to a discrete * stats request from the host as well as for the periodic * stats notifications (after received beacons) from the uCode. */ il->handlers[C_STATS] = il3945_hdl_c_stats; il->handlers[N_STATS] = il3945_hdl_stats; il_setup_rx_scan_handlers(il); il->handlers[N_CARD_STATE] = il3945_hdl_card_state; /* Set up hardware specific Rx handlers */ il3945_hw_handler_setup(il); } /************************** RX-FUNCTIONS ****************************/ /* * Rx theory of operation * * The host allocates 32 DMA target addresses and passes the host address * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is * 0 to 31 * * Rx Queue Indexes * The host/firmware share two idx registers for managing the Rx buffers. 
* * The READ idx maps to the first position that the firmware may be writing * to -- the driver can read up to (but not including) this position and get * good data. * The READ idx is managed by the firmware once the card is enabled. * * The WRITE idx maps to the last position the driver has read from -- the * position preceding WRITE is the last slot the firmware can place a packet. * * The queue is empty (no good data) if WRITE = READ - 1, and is full if * WRITE = READ. * * During initialization, the host sets up the READ queue position to the first * IDX position, and WRITE to the last (READ - 1 wrapped) * * When the firmware places a packet in a buffer, it will advance the READ idx * and fire the RX interrupt. The driver can then query the READ idx and * process as many packets as possible, moving the WRITE idx forward as it * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled * to replenish the iwl->rxq->rx_free. * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the * iwl->rxq is replenished and the READ IDX is updated (updating the * 'processed' and 'read' driver idxes as well) * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' idx is updated. * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ * IDX is not incremented and iwl->status(RX_STALLED) is set. If there * were enough free buffers and RX_STALLED is set it is cleared. 
* * * Driver sequence: * * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls * il3945_rx_queue_restock * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates * the WRITE idx. If insufficient rx_free buffers * are available, schedules il3945_rx_replenish * * -- enable interrupts -- * ISR - il3945_rx() Detach il_rx_bufs from pool up to the * READ IDX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. * Calls il3945_rx_queue_restock to refill any empty * slots. * ... * */ /** * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr */ static inline __le32 il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr) { return cpu_to_le32((u32) dma_addr); } /** * il3945_rx_queue_restock - refill RX queue from pre-allocated pool * * If there are slots in the RX queue that need to be restocked, * and we have free pre-allocated buffers, fill the ranks as much * as we can, pulling from rx_free. * * This moves the 'write' idx forward to catch up with 'processed', and * also updates the memory address in the firmware to reference the new * target buffer. 
*/
static void
il3945_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7) ||
	    abs(rxq->write - rxq->read) > 7) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}

/**
 * il3945_rx_replenish - Move all used packet from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via il3945_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void
il3945_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	dma_addr_t page_dma;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		/* Test emptiness under the lock, but drop it before
		 * allocating: with GFP_KERNEL priority alloc_pages() may
		 * sleep, which is illegal inside a spinlock. */
		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Suppress allocation-failure warnings while the pool is
		 * still comfortably above the low watermark */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		/* multi-page RX buffers must be compound pages */
		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("Failed to allocate SKB buffer.\n");
			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to allocate SKB buffer with %0x."
				       "Only %u free buffers remaining.\n",
				       priority, rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			break;
		}

		/* Get physical address of RB/SKB */
		page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);

		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
			__free_pages(page, il->hw_params.rx_page_order);
			break;
		}

		/* Re-check rx_used under the lock: it may have been drained
		 * while we were allocating unlocked; if so undo the mapping
		 * and allocation before bailing out. */
		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			pci_unmap_page(il->pci_dev, page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}

		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Attach the fresh page to the buffer and make it available */
		rxb->page = page;
		rxb->page_dma = page_dma;
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

void
il3945_rx_queue_reset(struct il_priv *il, struct
il_rx_queue *rxq)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * Replenish rx_free from rx_used (allocating pages; may sleep with
 * GFP_KERNEL) and restock the device queue.  Runs as the scheduled
 * il->rx_replenish work item.
 */
void
il3945_rx_replenish(void *data)
{
	struct il_priv *il = data;
	unsigned long flags;

	il3945_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il3945_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}

/* Atomic-context variant of the above, called from the RX handling path */
static void
il3945_rx_replenish_now(struct il_priv *il)
{
	il3945_rx_allocate(il, GFP_ATOMIC);

	il3945_rx_queue_restock(il);
}

/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
 * This free routine walks the list of POOL entries and if SKB is set to
 * non NULL it is unmapped and freed
 */
static void
il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;

	/* Unmap and release every page still owned by the pool */
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* Release the DMA-coherent RBD ring and the RB status block */
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}

/* Convert linear signal-to-noise ratio into dB */
static u8 ratio2dB[100] = {
/*	 0   1   2   3   4   5   6   7   8   9 */
	 0,  0,  6, 10, 12, 14, 16, 17, 18, 19,	/* 00 - 09 */
	20, 21, 22, 22, 23, 23, 24, 25, 26, 26,	/* 10 - 19 */
	26, 26, 26, 27, 27, 28, 28, 28, 29, 29,	/* 20 - 29 */
	29, 30, 30, 30, 31, 31, 31, 31, 32, 32,	/* 30 - 39 */
	32, 32, 32, 33, 33, 33, 33, 33, 34, 34,	/* 40 - 49 */
	34, 34, 34, 34, 35, 35, 35, 35, 35, 35,	/* 50 - 59 */
	36, 36, 36, 36, 36, 36, 36, 37, 37, 37,	/* 60 - 69 */
	37, 37, 37, 37, 37, 38, 38, 38, 38, 38,	/* 70 - 79 */
	38, 38, 38, 38, 38, 39, 39, 39, 39, 39,	/* 80 - 89 */
	39, 39, 39, 39, 39, 40, 40, 40, 40, 40	/* 90 - 99 */
};

/* Calculates a relative dB value from a ratio of linear
 *   (i.e. not dB) signal levels.
 * Conversion assumes that levels are voltages (20*log), not powers (10*log).
 */
int
il3945_calc_db_from_ratio(int sig_ratio)
{
	/* 1000:1 or higher just report as 60 dB */
	if (sig_ratio >= 1000)
		return 60;

	/* 100:1 or higher, divide by 10 and use table,
	 * add 20 dB to make up for divide by 10 */
	if (sig_ratio >= 100)
		return 20 + (int)ratio2dB[sig_ratio / 10];

	/* We shouldn't see this */
	if (sig_ratio < 1)
		return 0;

	/* Use table for ratios 1:1 - 99:1 */
	return (int)ratio2dB[sig_ratio];
}

/**
 * il3945_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the il->handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void
il3945_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;		/* set when more than half the ring is empty */
	u32 count = 8;
	int total_empty = 0;

	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;	/* idx wrapped around the ring */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;
	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		D_RX("r = %d, i = %d\n", r, i);

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		/* Unmap so the CPU can safely read the DMA'd packet */
		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
		len += sizeof(u32);	/* account for status word */

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
* If the packet (e.g. Rx frame) originated from uCode, * there is no command buffer to reclaim. * Ucode should set SEQ_RX_FRAME bit if ucode-originated, * but apparently a few don't get set; catch them here. */ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX; /* Based on type of command response or notification, * handle those that need handling via function in * handlers table. See il3945_setup_handlers() */ if (il->handlers[pkt->hdr.cmd]) { D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); il->isr_stats.handlers[pkt->hdr.cmd]++; il->handlers[pkt->hdr.cmd] (il, rxb); } else { /* No handling needed */ D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r, i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); } /* * XXX: After here, we should always check rxb->page * against NULL before touching it or its virtual * memory (pkt). Because some handler might have * already taken or freed the pages. */ if (reclaim) { /* Invoke any callbacks, transfer the buffer to caller, * and fire off the (possibly) blocking il_send_cmd() * as we reclaim the driver command queue */ if (rxb->page) il_tx_cmd_complete(il, rxb); else IL_WARN("Claim null rxb?\n"); } /* Reuse the page if possible. For notification packets and * SKBs that fail to Rx correctly, add them back into the * rx_free list for reuse later. */ spin_lock_irqsave(&rxq->lock, flags); if (rxb->page != NULL) { rxb->page_dma = pci_map_page(il->pci_dev, rxb->page, 0, PAGE_SIZE << il->hw_params. 
rx_page_order, PCI_DMA_FROMDEVICE); if (unlikely(pci_dma_mapping_error(il->pci_dev, rxb->page_dma))) { __il_free_pages(il, rxb->page); rxb->page = NULL; list_add_tail(&rxb->list, &rxq->rx_used); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else list_add_tail(&rxb->list, &rxq->rx_used); spin_unlock_irqrestore(&rxq->lock, flags); i = (i + 1) & RX_QUEUE_MASK; /* If there are a lot of unused frames, * restock the Rx queue so ucode won't assert. */ if (fill_rx) { count++; if (count >= 8) { rxq->read = i; il3945_rx_replenish_now(il); count = 0; } } } /* Backtrack one entry */ rxq->read = i; if (fill_rx) il3945_rx_replenish_now(il); else il3945_rx_queue_restock(il); } /* call this function to flush any scheduled tasklet */ static inline void il3945_synchronize_irq(struct il_priv *il) { /* wait to make sure we flush pending tasklet */ synchronize_irq(il->pci_dev->irq); tasklet_kill(&il->irq_tasklet); } static const char * il3945_desc_lookup(int i) { switch (i) { case 1: return "FAIL"; case 2: return "BAD_PARAM"; case 3: return "BAD_CHECKSUM"; case 4: return "NMI_INTERRUPT"; case 5: return "SYSASSERT"; case 6: return "FATAL_ERROR"; } return "UNKNOWN"; } #define ERROR_START_OFFSET (1 * sizeof(u32)) #define ERROR_ELEM_SIZE (7 * sizeof(u32)) void il3945_dump_nic_error_log(struct il_priv *il) { u32 i; u32 desc, time, count, base, data1; u32 blink1, blink2, ilink1, ilink2; base = le32_to_cpu(il->card_alive.error_event_table_ptr); if (!il3945_hw_valid_rtc_data_addr(base)) { IL_ERR("Not valid error log pointer 0x%08X\n", base); return; } count = il_read_targ_mem(il, base); if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { IL_ERR("Start IWL Error Log Dump:\n"); IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count); } IL_ERR("Desc Time asrtPC blink2 " "ilink1 nmiPC Line\n"); for (i = ERROR_START_OFFSET; i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; i += ERROR_ELEM_SIZE) { desc = il_read_targ_mem(il, base + i); time = il_read_targ_mem(il, 
base + i + 1 * sizeof(u32)); blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32)); blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32)); ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32)); ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32)); data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32)); IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", il3945_desc_lookup(desc), desc, time, blink1, blink2, ilink1, ilink2, data1); } } static void il3945_irq_tasklet(struct il_priv *il) { u32 inta, handled = 0; u32 inta_fh; unsigned long flags; #ifdef CONFIG_IWLEGACY_DEBUG u32 inta_mask; #endif spin_lock_irqsave(&il->lock, flags); /* Ack/clear/reset pending uCode interrupts. * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, * and will clear only when CSR_FH_INT_STATUS gets cleared. */ inta = _il_rd(il, CSR_INT); _il_wr(il, CSR_INT, inta); /* Ack/clear/reset pending flow-handler (DMA) interrupts. * Any new interrupts that happen after this, either while we're * in this tasklet, or later, will show up in next ISR/tasklet. */ inta_fh = _il_rd(il, CSR_FH_INT_STATUS); _il_wr(il, CSR_FH_INT_STATUS, inta_fh); #ifdef CONFIG_IWLEGACY_DEBUG if (il_get_debug_level(il) & IL_DL_ISR) { /* just for debug */ inta_mask = _il_rd(il, CSR_INT_MASK); D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask, inta_fh); } #endif spin_unlock_irqrestore(&il->lock, flags); /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not * atomic, make sure that inta covers all the interrupts that * we've discovered, even if FH interrupt came in just after * reading CSR_INT. */ if (inta_fh & CSR39_FH_INT_RX_MASK) inta |= CSR_INT_BIT_FH_RX; if (inta_fh & CSR39_FH_INT_TX_MASK) inta |= CSR_INT_BIT_FH_TX; /* Now service all interrupt bits discovered above. */ if (inta & CSR_INT_BIT_HW_ERR) { IL_ERR("Hardware error detected. 
Restarting.\n"); /* Tell the device to stop sending interrupts */ il_disable_interrupts(il); il->isr_stats.hw++; il_irq_handle_error(il); handled |= CSR_INT_BIT_HW_ERR; return; } #ifdef CONFIG_IWLEGACY_DEBUG if (il_get_debug_level(il) & (IL_DL_ISR)) { /* NIC fires this, but we don't use it, redundant with WAKEUP */ if (inta & CSR_INT_BIT_SCD) { D_ISR("Scheduler finished to transmit " "the frame/frames.\n"); il->isr_stats.sch++; } /* Alive notification via Rx interrupt will do the real work */ if (inta & CSR_INT_BIT_ALIVE) { D_ISR("Alive interrupt\n"); il->isr_stats.alive++; } } #endif /* Safely ignore these bits for debug checks below */ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); /* Error detected by uCode */ if (inta & CSR_INT_BIT_SW_ERR) { IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n", inta); il->isr_stats.sw++; il_irq_handle_error(il); handled |= CSR_INT_BIT_SW_ERR; } /* uCode wakes up after power-down sleep */ if (inta & CSR_INT_BIT_WAKEUP) { D_ISR("Wakeup interrupt\n"); il_rx_queue_update_write_ptr(il, &il->rxq); il_txq_update_write_ptr(il, &il->txq[0]); il_txq_update_write_ptr(il, &il->txq[1]); il_txq_update_write_ptr(il, &il->txq[2]); il_txq_update_write_ptr(il, &il->txq[3]); il_txq_update_write_ptr(il, &il->txq[4]); il_txq_update_write_ptr(il, &il->txq[5]); il->isr_stats.wakeup++; handled |= CSR_INT_BIT_WAKEUP; } /* All uCode command responses, including Tx command responses, * Rx "responses" (frame-received notification), and other * notifications from uCode come through here*/ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { il3945_rx_handle(il); il->isr_stats.rx++; handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); } if (inta & CSR_INT_BIT_FH_TX) { D_ISR("Tx interrupt\n"); il->isr_stats.tx++; _il_wr(il, CSR_FH_INT_STATUS, (1 << 6)); il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0); handled |= CSR_INT_BIT_FH_TX; } if (inta & ~handled) { IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled); il->isr_stats.unhandled++; } if (inta 
& ~il->inta_mask) { IL_WARN("Disabled INTA bits 0x%08x were pending\n", inta & ~il->inta_mask); IL_WARN(" with inta_fh = 0x%08x\n", inta_fh); } /* Re-enable all interrupts */ /* only Re-enable if disabled by irq */ if (test_bit(S_INT_ENABLED, &il->status)) il_enable_interrupts(il); #ifdef CONFIG_IWLEGACY_DEBUG if (il_get_debug_level(il) & (IL_DL_ISR)) { inta = _il_rd(il, CSR_INT); inta_mask = _il_rd(il, CSR_INT_MASK); inta_fh = _il_rd(il, CSR_FH_INT_STATUS); D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); } #endif } static int il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band, u8 is_active, u8 n_probes, struct il3945_scan_channel *scan_ch, struct ieee80211_vif *vif) { struct ieee80211_channel *chan; const struct ieee80211_supported_band *sband; const struct il_channel_info *ch_info; u16 passive_dwell = 0; u16 active_dwell = 0; int added, i; sband = il_get_hw_mode(il, band); if (!sband) return 0; active_dwell = il_get_active_dwell_time(il, band, n_probes); passive_dwell = il_get_passive_dwell_time(il, band, vif); if (passive_dwell <= active_dwell) passive_dwell = active_dwell + 1; for (i = 0, added = 0; i < il->scan_request->n_channels; i++) { chan = il->scan_request->channels[i]; if (chan->band != band) continue; scan_ch->channel = chan->hw_value; ch_info = il_get_channel_info(il, band, scan_ch->channel); if (!il_is_channel_valid(ch_info)) { D_SCAN("Channel %d is INVALID for this band.\n", scan_ch->channel); continue; } scan_ch->active_dwell = cpu_to_le16(active_dwell); scan_ch->passive_dwell = cpu_to_le16(passive_dwell); /* If passive , set up for auto-switch * and use long active_dwell time. 
*/ if (!is_active || il_is_channel_passive(ch_info) || (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { scan_ch->type = 0; /* passive */ if (IL_UCODE_API(il->ucode_ver) == 1) scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1); } else { scan_ch->type = 1; /* active */ } /* Set direct probe bits. These may be used both for active * scan channels (probes gets sent right away), * or for passive channels (probes get se sent only after * hearing clear Rx packet).*/ if (IL_UCODE_API(il->ucode_ver) >= 2) { if (n_probes) scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes); } else { /* uCode v1 does not allow setting direct probe bits on * passive channel. */ if ((scan_ch->type & 1) && n_probes) scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes); } /* Set txpower levels to defaults */ scan_ch->tpc.dsp_atten = 110; /* scan_pwr_info->tpc.dsp_atten; */ /*scan_pwr_info->tpc.tx_gain; */ if (band == IEEE80211_BAND_5GHZ) scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; else { scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); /* NOTE: if we were doing 6Mb OFDM for scans we'd use * power level: * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3; */ } D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel, (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", (scan_ch->type & 1) ? active_dwell : passive_dwell); scan_ch++; added++; } D_SCAN("total channels to scan %d\n", added); return added; } static void il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates) { int i; for (i = 0; i < RATE_COUNT_LEGACY; i++) { rates[i].bitrate = il3945_rates[i].ieee * 5; rates[i].hw_value = i; /* Rate scaling will work on idxes */ rates[i].hw_value_short = i; rates[i].flags = 0; if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) { /* * If CCK != 1M then set short preamble rate flag. */ rates[i].flags |= (il3945_rates[i].plcp == 10) ? 
0 : IEEE80211_RATE_SHORT_PREAMBLE; } } } /****************************************************************************** * * uCode download functions * ******************************************************************************/ static void il3945_dealloc_ucode_pci(struct il_priv *il) { il_free_fw_desc(il->pci_dev, &il->ucode_code); il_free_fw_desc(il->pci_dev, &il->ucode_data); il_free_fw_desc(il->pci_dev, &il->ucode_data_backup); il_free_fw_desc(il->pci_dev, &il->ucode_init); il_free_fw_desc(il->pci_dev, &il->ucode_init_data); il_free_fw_desc(il->pci_dev, &il->ucode_boot); } /** * il3945_verify_inst_full - verify runtime uCode image in card vs. host, * looking at all data. */ static int il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len) { u32 val; u32 save_len = len; int rc = 0; u32 errcnt; D_INFO("ucode inst image size is %u\n", len); il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND); errcnt = 0; for (; len > 0; len -= sizeof(u32), image++) { /* read data comes through single port, auto-incr addr */ /* NOTE: Use the debugless read so we don't flood kernel log * if IL_DL_IO is set */ val = _il_rd(il, HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) { IL_ERR("uCode INST section is invalid at " "offset 0x%x, is 0x%x, s/b 0x%x\n", save_len - len, val, le32_to_cpu(*image)); rc = -EIO; errcnt++; if (errcnt >= 20) break; } } if (!errcnt) D_INFO("ucode image in INSTRUCTION memory is good\n"); return rc; } /** * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host, * using sample data 100 bytes apart. If these sample points are good, * it's a pretty good bet that everything between them is good, too. 
*/ static int il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len) { u32 val; int rc = 0; u32 errcnt = 0; u32 i; D_INFO("ucode inst image size is %u\n", len); for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) { /* read data comes through single port, auto-incr addr */ /* NOTE: Use the debugless read so we don't flood kernel log * if IL_DL_IO is set */ il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND); val = _il_rd(il, HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) { #if 0 /* Enable this if you want to see details */ IL_ERR("uCode INST section is invalid at " "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val, *image); #endif rc = -EIO; errcnt++; if (errcnt >= 3) break; } } return rc; } /** * il3945_verify_ucode - determine which instruction image is in SRAM, * and verify its contents */ static int il3945_verify_ucode(struct il_priv *il) { __le32 *image; u32 len; int rc = 0; /* Try bootstrap */ image = (__le32 *) il->ucode_boot.v_addr; len = il->ucode_boot.len; rc = il3945_verify_inst_sparse(il, image, len); if (rc == 0) { D_INFO("Bootstrap uCode is good in inst SRAM\n"); return 0; } /* Try initialize */ image = (__le32 *) il->ucode_init.v_addr; len = il->ucode_init.len; rc = il3945_verify_inst_sparse(il, image, len); if (rc == 0) { D_INFO("Initialize uCode is good in inst SRAM\n"); return 0; } /* Try runtime/protocol */ image = (__le32 *) il->ucode_code.v_addr; len = il->ucode_code.len; rc = il3945_verify_inst_sparse(il, image, len); if (rc == 0) { D_INFO("Runtime uCode is good in inst SRAM\n"); return 0; } IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); /* Since nothing seems to match, show first several data entries in * instruction SRAM, so maybe visual inspection will give a clue. * Selection of bootstrap image (vs. other images) is arbitrary. 
*/ image = (__le32 *) il->ucode_boot.v_addr; len = il->ucode_boot.len; rc = il3945_verify_inst_full(il, image, len); return rc; } static void il3945_nic_start(struct il_priv *il) { /* Remove all resets to allow NIC to operate */ _il_wr(il, CSR_RESET, 0); } #define IL3945_UCODE_GET(item) \ static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\ { \ return le32_to_cpu(ucode->v1.item); \ } static u32 il3945_ucode_get_header_size(u32 api_ver) { return 24; } static u8 * il3945_ucode_get_data(const struct il_ucode_header *ucode) { return (u8 *) ucode->v1.data; } IL3945_UCODE_GET(inst_size); IL3945_UCODE_GET(data_size); IL3945_UCODE_GET(init_size); IL3945_UCODE_GET(init_data_size); IL3945_UCODE_GET(boot_size); /** * il3945_read_ucode - Read uCode images from disk file. * * Copy into buffers for card to fetch via bus-mastering */ static int il3945_read_ucode(struct il_priv *il) { const struct il_ucode_header *ucode; int ret = -EINVAL, idx; const struct firmware *ucode_raw; /* firmware file name contains uCode/driver compatibility version */ const char *name_pre = il->cfg->fw_name_pre; const unsigned int api_max = il->cfg->ucode_api_max; const unsigned int api_min = il->cfg->ucode_api_min; char buf[25]; u8 *src; size_t len; u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; /* Ask kernel firmware_class module to get the boot firmware off disk. * request_firmware() is synchronous, file is in memory on return. */ for (idx = api_max; idx >= api_min; idx--) { sprintf(buf, "%s%u%s", name_pre, idx, ".ucode"); ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev); if (ret < 0) { IL_ERR("%s firmware file req failed: %d\n", buf, ret); if (ret == -ENOENT) continue; else goto error; } else { if (idx < api_max) IL_ERR("Loaded firmware %s, " "which is deprecated. 
" " Please use API v%u instead.\n", buf, api_max); D_INFO("Got firmware '%s' file " "(%zd bytes) from disk\n", buf, ucode_raw->size); break; } } if (ret < 0) goto error; /* Make sure that we got at least our header! */ if (ucode_raw->size < il3945_ucode_get_header_size(1)) { IL_ERR("File size way too small!\n"); ret = -EINVAL; goto err_release; } /* Data from ucode file: header followed by uCode images */ ucode = (struct il_ucode_header *)ucode_raw->data; il->ucode_ver = le32_to_cpu(ucode->ver); api_ver = IL_UCODE_API(il->ucode_ver); inst_size = il3945_ucode_get_inst_size(ucode); data_size = il3945_ucode_get_data_size(ucode); init_size = il3945_ucode_get_init_size(ucode); init_data_size = il3945_ucode_get_init_data_size(ucode); boot_size = il3945_ucode_get_boot_size(ucode); src = il3945_ucode_get_data(ucode); /* api_ver should match the api version forming part of the * firmware filename ... but we don't check for that and only rely * on the API version read from firmware header from here on forward */ if (api_ver < api_min || api_ver > api_max) { IL_ERR("Driver unable to support your firmware API. " "Driver supports v%u, firmware is v%u.\n", api_max, api_ver); il->ucode_ver = 0; ret = -EINVAL; goto err_release; } if (api_ver != api_max) IL_ERR("Firmware has old API version. Expected %u, " "got %u. 
New firmware can be obtained " "from http://www.intellinuxwireless.org.\n", api_max, api_ver); IL_INFO("loaded firmware version %u.%u.%u.%u\n", IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver)); snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version), "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver)); D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver); D_INFO("f/w package hdr runtime inst size = %u\n", inst_size); D_INFO("f/w package hdr runtime data size = %u\n", data_size); D_INFO("f/w package hdr init inst size = %u\n", init_size); D_INFO("f/w package hdr init data size = %u\n", init_data_size); D_INFO("f/w package hdr boot inst size = %u\n", boot_size); /* Verify size of file vs. image size info in file's header */ if (ucode_raw->size != il3945_ucode_get_header_size(api_ver) + inst_size + data_size + init_size + init_data_size + boot_size) { D_INFO("uCode file size %zd does not match expected size\n", ucode_raw->size); ret = -EINVAL; goto err_release; } /* Verify that uCode images will fit in card's SRAM */ if (inst_size > IL39_MAX_INST_SIZE) { D_INFO("uCode instr len %d too large to fit in\n", inst_size); ret = -EINVAL; goto err_release; } if (data_size > IL39_MAX_DATA_SIZE) { D_INFO("uCode data len %d too large to fit in\n", data_size); ret = -EINVAL; goto err_release; } if (init_size > IL39_MAX_INST_SIZE) { D_INFO("uCode init instr len %d too large to fit in\n", init_size); ret = -EINVAL; goto err_release; } if (init_data_size > IL39_MAX_DATA_SIZE) { D_INFO("uCode init data len %d too large to fit in\n", init_data_size); ret = -EINVAL; goto err_release; } if (boot_size > IL39_MAX_BSM_SIZE) { D_INFO("uCode boot instr len %d too large to fit in\n", boot_size); ret = -EINVAL; goto err_release; } /* Allocate ucode buffers for card's bus-master loading ... 
*/ /* Runtime instructions and 2 copies of data: * 1) unmodified from disk * 2) backup cache for save/restore during power-downs */ il->ucode_code.len = inst_size; il_alloc_fw_desc(il->pci_dev, &il->ucode_code); il->ucode_data.len = data_size; il_alloc_fw_desc(il->pci_dev, &il->ucode_data); il->ucode_data_backup.len = data_size; il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup); if (!il->ucode_code.v_addr || !il->ucode_data.v_addr || !il->ucode_data_backup.v_addr) goto err_pci_alloc; /* Initialization instructions and data */ if (init_size && init_data_size) { il->ucode_init.len = init_size; il_alloc_fw_desc(il->pci_dev, &il->ucode_init); il->ucode_init_data.len = init_data_size; il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data); if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr) goto err_pci_alloc; } /* Bootstrap (instructions only, no data) */ if (boot_size) { il->ucode_boot.len = boot_size; il_alloc_fw_desc(il->pci_dev, &il->ucode_boot); if (!il->ucode_boot.v_addr) goto err_pci_alloc; } /* Copy images into buffers for card's bus-master reads ... 
*/ /* Runtime instructions (first block of data in file) */ len = inst_size; D_INFO("Copying (but not loading) uCode instr len %zd\n", len); memcpy(il->ucode_code.v_addr, src, len); src += len; D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", il->ucode_code.v_addr, (u32) il->ucode_code.p_addr); /* Runtime data (2nd block) * NOTE: Copy into backup buffer will be done in il3945_up() */ len = data_size; D_INFO("Copying (but not loading) uCode data len %zd\n", len); memcpy(il->ucode_data.v_addr, src, len); memcpy(il->ucode_data_backup.v_addr, src, len); src += len; /* Initialization instructions (3rd block) */ if (init_size) { len = init_size; D_INFO("Copying (but not loading) init instr len %zd\n", len); memcpy(il->ucode_init.v_addr, src, len); src += len; } /* Initialization data (4th block) */ if (init_data_size) { len = init_data_size; D_INFO("Copying (but not loading) init data len %zd\n", len); memcpy(il->ucode_init_data.v_addr, src, len); src += len; } /* Bootstrap instructions (5th block) */ len = boot_size; D_INFO("Copying (but not loading) boot instr len %zd\n", len); memcpy(il->ucode_boot.v_addr, src, len); /* We have our copies now, allow OS release its copies */ release_firmware(ucode_raw); return 0; err_pci_alloc: IL_ERR("failed to allocate pci memory\n"); ret = -ENOMEM; il3945_dealloc_ucode_pci(il); err_release: release_firmware(ucode_raw); error: return ret; } /** * il3945_set_ucode_ptrs - Set uCode address location * * Tell initialization uCode where to find runtime uCode. * * BSM registers initially contain pointers to initialization uCode. * We need to replace them to load runtime uCode inst and data, * and to save runtime data when powering down. 
*/ static int il3945_set_ucode_ptrs(struct il_priv *il) { dma_addr_t pinst; dma_addr_t pdata; /* bits 31:0 for 3945 */ pinst = il->ucode_code.p_addr; pdata = il->ucode_data_backup.p_addr; /* Tell bootstrap uCode where to find image to load */ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len); /* Inst byte count must be last to set up, bit 31 signals uCode * that all new ptr/size info is in place */ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, il->ucode_code.len | BSM_DRAM_INST_LOAD); D_INFO("Runtime uCode pointers are set.\n"); return 0; } /** * il3945_init_alive_start - Called after N_ALIVE notification received * * Called after N_ALIVE notification received from "initialize" uCode. * * Tell "initialize" uCode to go ahead and load the runtime uCode. */ static void il3945_init_alive_start(struct il_priv *il) { /* Check alive response for "valid" sign from uCode */ if (il->card_alive_init.is_valid != UCODE_VALID_OK) { /* We had an error bringing up the hardware, so take it * all the way back down so we can try again */ D_INFO("Initialize Alive failed.\n"); goto restart; } /* Bootstrap uCode has loaded initialize uCode ... verify inst image. * This is a paranoid check, because we would not have gotten the * "initialize" alive if code weren't properly loaded. */ if (il3945_verify_ucode(il)) { /* Runtime instruction load was bad; * take it all the way back down so we can try again */ D_INFO("Bad \"initialize\" uCode load.\n"); goto restart; } /* Send pointers to protocol/runtime uCode image ... init code will * load and launch runtime uCode, which will send us another "Alive" * notification. 
	 */
	D_INFO("Initialization Alive received.\n");
	if (il3945_set_ucode_ptrs(il)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		D_INFO("Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	queue_work(il->workqueue, &il->restart);
}

/**
 * il3945_alive_start - called after N_ALIVE notification received
 * from protocol/runtime uCode (initialization uCode's
 * Alive gets handled by il3945_init_alive_start()).
 */
static void
il3945_alive_start(struct il_priv *il)
{
	int thermal_spin = 0;
	u32 rfkill;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded. */
	if (il3945_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	rfkill = il_rd_prph(il, APMG_RFKILL_REG);
	D_INFO("RFKILL status: 0x%x\n", rfkill);

	/* Bit 0 set is treated here as "radio enabled" (RF kill NOT
	 * asserted), hence S_RFKILL is cleared in that branch. */
	if (rfkill & 0x1) {
		clear_bit(S_RFKILL, &il->status);
		/* if RFKILL is not on, then wait for thermal
		 * sensor in adapter to kick in */
		while (il3945_hw_get_temperature(il) == 0) {
			thermal_spin++;
			udelay(10);
		}

		if (thermal_spin)
			D_INFO("Thermal calibration took %dus\n",
			       thermal_spin * 10);
	} else
		set_bit(S_RFKILL, &il->status);

	/* After the ALIVE response, we can send commands to 3945 uCode */
	set_bit(S_ALIVE, &il->status);

	/* Enable watchdog to monitor the driver tx queues */
	il_setup_watchdog(il);

	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK_3945;

	il_power_update_mode(il, true);

	if (il_is_associated(il)) {
		struct il3945_rxon_cmd *active_rxon =
		    (struct il3945_rxon_cmd *)(&il->active);
		/* Restore the association filter flag in staging; drop it
		 * from the active config so a fresh RXON gets committed. */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		il_connection_init_rx_config(il);
	}

	/* Configure Bluetooth device coexistence support */
	il_send_bt_config(il);

	set_bit(S_READY, &il->status);

	/* Configure the adapter for unassociated operation */
	il3945_commit_rxon(il);

	il3945_reg_txpower_periodic(il);

	D_INFO("ALIVE processing complete.\n");
	wake_up(&il->wait_command_queue);

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}

static void il3945_cancel_deferred_work(struct il_priv *il);

/*
 * __il3945_down - tear the NIC down (caller holds il->mutex, see
 * il3945_down()).  The shutdown order matters: scan cancel, watchdog
 * stop, station teardown, interrupt disable, then processor reset.
 */
static void
__il3945_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&il->watchdog);

	/* Station information will now be cleared in device */
	il_clear_ucode_stations(il);
	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any waiting calls */
	wake_up_all(&il->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module (i.e. it was not already set on entry) */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* stop and reset the on-board processor */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il3945_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If we have not previously called il3945_init() then
	 * clear all bits but the RF Kill bits and return */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all
	   the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	il->status &=
	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	/*
	 * We disabled and synchronized interrupt, and priv->mutex is taken, so
	 * here is the only thread which will program device registers, but
	 * still have lockdep assertions, so we are taking reg_lock.
	 */
	spin_lock_irq(&il->reg_lock);
	/* FIXME: il_grab_nic_access if rfkill is off ? */
	il3945_hw_txq_ctx_stop(il);
	il3945_hw_rxq_stop(il);
	/* Power-down device's busmaster DMA clocks */
	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);
	/* Stop the device, and put it in low power state */
	_il_apm_stop(il);
	spin_unlock_irq(&il->reg_lock);

	il3945_hw_txq_ctx_free(il);
exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* clear out any free frames */
	il3945_clear_free_frames(il);
}

/* Locked wrapper around __il3945_down(); also cancels deferred work
 * after the mutex is dropped. */
static void
il3945_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il3945_down(il);
	mutex_unlock(&il->mutex);

	il3945_cancel_deferred_work(il);
}

#define MAX_HW_RESTARTS 5

/* Reserve and mark the broadcast station entry in the driver's
 * station table.  Returns 0 or -EINVAL if no slot is available. */
static int
il3945_alloc_bcast_station(struct il_priv *il)
{
	unsigned long flags;
	u8 sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare broadcast station\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return -EINVAL;
	}

	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
	il->stations[sta_id].used |= IL_STA_BCAST;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}

/*
 * __il3945_up - bring the NIC up (caller holds il->mutex).
 *
 * Checks preconditions (ucode loaded, HW rfkill), initializes the NIC,
 * then tries up to MAX_HW_RESTARTS times to load the bootstrap uCode
 * and start the card.  Returns 0 on success or a negative errno.
 */
static int
__il3945_up(struct il_priv *il)
{
	int rc, i;

	rc = il3945_alloc_bcast_station(il);
	if (rc)
		return rc;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bring up\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RFKILL, &il->status);
	else {
		set_bit(S_RFKILL, &il->status);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return -ENODEV;
	}

	/* Ack/clear all pending interrupts before init */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	rc = il3945_hw_nic_init(il);
	if (rc) {
		IL_ERR("Unable to int nic\n");
		return rc;
	}

	/* make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* really make sure rfkill handshake bits are cleared.
	 * NOTE(review): the same bit is written twice here; the earlier
	 * pair cleared RFKILL then CMD_BLOCKED - confirm whether the
	 * second write was meant to be CMD_BLOCKED. */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	/* We return success when we resume from suspend and rf_kill is on. */
	if (test_bit(S_RFKILL, &il->status))
		return 0;

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		rc = il->ops->load_ucode(il);

		if (rc) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		il3945_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	set_bit(S_EXIT_PENDING, &il->status);
	__il3945_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}

/*****************************************************************************
 *
 * Workqueue callbacks
 *
 *****************************************************************************/

/* Deferred handler for the "initialize" uCode ALIVE notification. */
static void
il3945_bg_init_alive_start(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, init_alive_start.work);

	mutex_lock(&il->mutex);
	if (test_bit(S_EXIT_PENDING, &il->status))
		goto out;

	il3945_init_alive_start(il);
out:
	mutex_unlock(&il->mutex);
}

/* Deferred handler for the runtime uCode ALIVE notification. */
static void
il3945_bg_alive_start(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, alive_start.work);

	mutex_lock(&il->mutex);
	/* skip if we are tearing down or TX queues are already gone */
	if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
		goto out;

	il3945_alive_start(il);
out:
	mutex_unlock(&il->mutex);
}

/*
 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
 * driver must poll CSR_GP_CNTRL_REG register for change. This register
 * *is* readable even when device has been SW_RESET into low power mode
 * (e.g. during RF KILL).
 */
static void
il3945_rfkill_poll(struct work_struct *data)
{
	struct il_priv *il =
	    container_of(data, struct il_priv, _3945.rfkill_poll.work);
	bool old_rfkill = test_bit(S_RFKILL, &il->status);
	bool new_rfkill =
	    !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);

	/* Only report to mac80211 when the switch actually changed state */
	if (new_rfkill != old_rfkill) {
		if (new_rfkill)
			set_bit(S_RFKILL, &il->status);
		else
			clear_bit(S_RFKILL, &il->status);

		wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);

		D_RF_KILL("RF_KILL bit toggled to %s.\n",
			  new_rfkill ? "disable radio" : "enable radio");
	}

	/* Keep this running, even if radio now enabled. This will be
	 * cancelled in mac_start() if system decides to start again */
	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
			   round_jiffies_relative(2 * HZ));
}

/*
 * il3945_request_scan - build and send a C_SCAN host command.
 *
 * Caller must hold il->mutex.  Allocates il->scan_cmd on first use and
 * reuses it afterwards.  Returns 0 on success or a negative errno.
 */
int
il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il3945_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il3945_scan_cmd *scan;
	u8 n_probes = 0;
	enum ieee80211_band band;
	bool is_active = false;
	int ret;
	u16 len;

	lockdep_assert_held(&il->mutex);

	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("Fail to allocate scan memory\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	if (il_is_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");

		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;
		/*
		 * suspend time format:
		 *  0-19: beacon interval in usec (time before exec.)
		 * 20-23: 0
		 * 24-31: number of beacons (suspend between channels)
		 */
		extra = (suspend_time / interval) << 24;
		scan_suspend_time =
		    0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Kicking off passive scan.\n");

	/* We don't build a direct scan probe request; the uCode will do
	 * that based on the direct_mask added to each channel entry */
	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* flags + rate selection */

	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		scan->tx_cmd.rate = RATE_1M_PLCP;
		band = IEEE80211_BAND_2GHZ;
		break;
	case IEEE80211_BAND_5GHZ:
		scan->tx_cmd.rate = RATE_6M_PLCP;
		band = IEEE80211_BAND_5GHZ;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is marked
	 * passive, we can do active scanning if we detect transmissions. For
	 * passive only scanning disable switching to active on any channel.
	 */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;

	/* Probe request IEs go into scan->data; channel list follows them */
	len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(len);

	/* select Rx antennas */
	scan->flags |= il3945_get_antenna_flags(il);

	scan->channel_count =
	    il3945_get_channels_for_scan(il, band, is_active, n_probes,
					 (void *)&scan->data[len], vif);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il3945_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);
	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);
	return ret;
}

void
il3945_post_scan(struct il_priv *il)
{
	/*
	 * Since setting the RXON may have been deferred while
	 * performing the scan, fire one off if needed
	 */
	if (memcmp(&il->staging, &il->active, sizeof(il->staging)))
		il3945_commit_rxon(il);
}

/* Restart worker: on firmware error let mac80211 drive the restart,
 * otherwise bring the NIC down and straight back up. */
static void
il3945_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		il->is_open = 0;
		mutex_unlock(&il->mutex);
		il3945_down(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il3945_down(il);

		mutex_lock(&il->mutex);
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il3945_up(il);
		mutex_unlock(&il->mutex);
	}
}

/* Worker that refills the RX buffer pool. */
static void
il3945_bg_rx_replenish(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, rx_replenish);

	mutex_lock(&il->mutex);
	if (test_bit(S_EXIT_PENDING, &il->status))
		goto out;

	il3945_rx_replenish(il);
out:
	mutex_unlock(&il->mutex);
}

/*
 * il3945_post_associate - program RXON/timing after association.
 *
 * Commits an unassociated RXON first (required before RXON timing),
 * then re-commits with the association id and preamble/slot flags.
 */
void
il3945_post_associate(struct il_priv *il)
{
	int rc = 0;
	/* NOTE(review): conf is fetched below but never read afterwards in
	 * this function - looks like a dead store; confirm before removing. */
	struct ieee80211_conf *conf = NULL;

	if (!il->vif || !il->is_open)
		return;

	D_ASSOC("Associated as %d to: %pM\n", il->vif->bss_conf.aid,
		il->active.bssid_addr);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il_scan_cancel_timeout(il, 200);

	conf = &il->hw->conf;

	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il3945_commit_rxon(il);

	rc = il_send_rxon_timing(il);
	if (rc)
		IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");

	il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	il->staging.assoc_id = cpu_to_le16(il->vif->bss_conf.aid);

	D_ASSOC("assoc id %d beacon interval %d\n", il->vif->bss_conf.aid,
		il->vif->bss_conf.beacon_int);

	if (il->vif->bss_conf.use_short_preamble)
		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* short slot time is only meaningful on 2.4 GHz here */
	if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (il->vif->bss_conf.use_short_slot)
			il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	il3945_commit_rxon(il);

	switch (il->vif->type) {
	case NL80211_IFTYPE_STATION:
		il3945_rate_scale_init(il->hw, IL_AP_ID);
		break;
	case NL80211_IFTYPE_ADHOC:
		il3945_send_beacon_cmd(il);
		break;
	default:
		IL_ERR("%s Should not be called in %d mode\n", __func__,
		       il->vif->type);
		break;
	}
}

/*****************************************************************************
 *
 * mac80211 entry point functions
 *
 *****************************************************************************/

#define UCODE_READY_TIMEOUT	(2 * HZ)

static int
il3945_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
	 * ucode filename and max sizes are card-specific.
	 */
	if (!il->ucode_code.len) {
		ret = il3945_read_ucode(il);
		if (ret) {
			IL_ERR("Could not read microcode: %d\n", ret);
			mutex_unlock(&il->mutex);
			goto out_release_irq;
		}
	}

	ret = __il3945_up(il);

	mutex_unlock(&il->mutex);

	if (ret)
		goto out_release_irq;

	D_INFO("Start UP work.\n");

	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		/* Timeout expired; re-check S_READY in case it raced in
		 * just after the wait gave up - only then treat as failure. */
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			ret = -ETIMEDOUT;
			goto out_release_irq;
		}
	}

	/* ucode is running and will send rfkill notifications,
	 * no need to poll the killswitch state anymore */
	cancel_delayed_work(&il->_3945.rfkill_poll);

	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;

out_release_irq:
	il->is_open = 0;
	D_MAC80211("leave - failed\n");
	return ret;
}

/* mac80211 stop callback: tear the NIC down and resume rfkill polling. */
static void
il3945_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open) {
		D_MAC80211("leave - skip\n");
		return;
	}

	il->is_open = 0;

	il3945_down(il);

	flush_workqueue(il->workqueue);

	/* start polling the killswitch state again */
	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
			   round_jiffies_relative(2 * HZ));

	D_MAC80211("leave\n");
}

/* mac80211 tx callback: hand the frame to the 3945 TX path; on failure
 * the skb is dropped here (mac80211 expects the frame to be consumed). */
static void
il3945_mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
	      struct sk_buff *skb)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (il3945_tx_skb(il, control->sta, skb))
		dev_kfree_skb_any(skb);

	D_MAC80211("leave\n");
}

/*
 * il3945_config_ap - configure the device for AP/IBSS beaconing.
 *
 * On first bring-up (not yet associated) commits an unassociated RXON,
 * sends RXON timing, then restores the assoc filter flag; always ends
 * by (re)sending the beacon command.
 */
void
il3945_config_ap(struct il_priv *il)
{
	struct ieee80211_vif *vif = il->vif;
	int rc = 0;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* The following should be done only at AP bring up */
	if (!(il_is_associated(il))) {

		/* RXON - unassoc (to set timing command) */
		il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		il3945_commit_rxon(il);

		/* RXON Timing */
		rc = il_send_rxon_timing(il);
		if (rc)
			/* non-fatal: timing may be retried on next commit */
			IL_WARN("C_RXON_TIMING failed - "
				"Attempting to continue.\n");

		il->staging.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* short slot only applies on the 2.4 GHz band */
		if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
			else
				il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* restore RXON assoc */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		il3945_commit_rxon(il);
	}
	il3945_send_beacon_cmd(il);
}

/*
 * il3945_mac_set_key - mac80211 set_key callback.
 *
 * Static (group/WEP-style) keys are used before association; per-station
 * dynamic keys afterwards.  Returns 0, -EINVAL, or -EOPNOTSUPP when
 * hardware crypto is disabled or for IBSS group keys.
 */
static int
il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret = 0;
	u8 sta_id = IL_INVALID_STATION;
	u8 static_key;

	D_MAC80211("enter\n");

	if (il3945_mod_params.sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/*
	 * To support IBSS RSN, don't program group keys in IBSS, the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		D_MAC80211("leave - IBSS RSN\n");
		return -EOPNOTSUPP;
	}

	/* use static keys until we are associated */
	static_key = !il_is_associated(il);

	if (!static_key) {
		sta_id = il_sta_id_or_broadcast(il, sta);
		if (sta_id == IL_INVALID_STATION) {
			D_MAC80211("leave - station not found\n");
			return -EINVAL;
		}
	}

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	switch (cmd) {
	case SET_KEY:
		if (static_key)
			ret = il3945_set_static_key(il, key);
		else
			ret = il3945_set_dynamic_key(il, key, sta_id);
		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (static_key)
			ret = il3945_remove_static_key(il);
		else
			ret = il3945_clear_sta_key_info(il, sta_id);
		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);

	return ret;
}

/*
 * il3945_mac_sta_add - mac80211 sta_add callback.
 *
 * Registers the station with the driver/uCode station table and kicks
 * off rate scaling for it.  Returns 0 or a negative errno.
 */
static int
il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
	int ret;
	/* in STA mode the peer being added is our AP */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	u8 sta_id;

	mutex_lock(&il->mutex);
	D_INFO("station %pM\n", sta->addr);
	sta_priv->common.sta_id = IL_INVALID_STATION;

	ret = il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST ?
		 */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il3945_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}

/*
 * il3945_configure_filter - mac80211 configure_filter callback.
 *
 * Translates FIF_* filter flags into RXON filter bits in the staging
 * config.  The RXON is deliberately not committed here (see comment
 * below); *total_flags is masked down to what the device supports.
 */
static void
il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* accumulate RXON bits to set (filter_or) / clear (filter_nand) */
#define CHK(test, flag)	do { \
	if (*total_flags & (test)) \
		filter_or |= (flag); \
	else \
		filter_nand |= (flag); \
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->staging.filter_flags &= ~filter_nand;
	il->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but even if hw is ready, committing here breaks for some reason,
	 * we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}

/*****************************************************************************
 *
 * sysfs attributes
 *
 *****************************************************************************/

#ifdef CONFIG_IWLEGACY_DEBUG

/*
 * The following adds a new attribute to the sysfs representation
 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
 * used for controlling the debug level.
 *
 * See the level definitions in iwl for details.
* * The debug_level being managed using sysfs below is a per device debug * level that is used instead of the global debug level if it (the per * device debug level) is set. */ static ssize_t il3945_show_debug_level(struct device *d, struct device_attribute *attr, char *buf) { struct il_priv *il = dev_get_drvdata(d); return sprintf(buf, "0x%08X\n", il_get_debug_level(il)); } static ssize_t il3945_store_debug_level(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct il_priv *il = dev_get_drvdata(d); unsigned long val; int ret; ret = strict_strtoul(buf, 0, &val); if (ret) IL_INFO("%s is not in hex or decimal form.\n", buf); else il->debug_level = val; return strnlen(buf, count); } static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level, il3945_store_debug_level); #endif /* CONFIG_IWLEGACY_DEBUG */ static ssize_t il3945_show_temperature(struct device *d, struct device_attribute *attr, char *buf) { struct il_priv *il = dev_get_drvdata(d); if (!il_is_alive(il)) return -EAGAIN; return sprintf(buf, "%d\n", il3945_hw_get_temperature(il)); } static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL); static ssize_t il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) { struct il_priv *il = dev_get_drvdata(d); return sprintf(buf, "%d\n", il->tx_power_user_lmt); } static ssize_t il3945_store_tx_power(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct il_priv *il = dev_get_drvdata(d); char *p = (char *)buf; u32 val; val = simple_strtoul(p, &p, 10); if (p == buf) IL_INFO(": %s is not in decimal form.\n", buf); else il3945_hw_reg_set_txpower(il, val); return count; } static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power, il3945_store_tx_power); static ssize_t il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf) { struct il_priv *il = dev_get_drvdata(d); return sprintf(buf, "0x%04X\n", il->active.flags); } 
static ssize_t il3945_store_flags(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct il_priv *il = dev_get_drvdata(d); u32 flags = simple_strtoul(buf, NULL, 0); mutex_lock(&il->mutex); if (le32_to_cpu(il->staging.flags) != flags) { /* Cancel any currently running scans... */ if (il_scan_cancel_timeout(il, 100)) IL_WARN("Could not cancel scan.\n"); else { D_INFO("Committing rxon.flags = 0x%04X\n", flags); il->staging.flags = cpu_to_le32(flags); il3945_commit_rxon(il); } } mutex_unlock(&il->mutex); return count; } static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags, il3945_store_flags); static ssize_t il3945_show_filter_flags(struct device *d, struct device_attribute *attr, char *buf) { struct il_priv *il = dev_get_drvdata(d); return sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags)); } static ssize_t il3945_store_filter_flags(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct il_priv *il = dev_get_drvdata(d); u32 filter_flags = simple_strtoul(buf, NULL, 0); mutex_lock(&il->mutex); if (le32_to_cpu(il->staging.filter_flags) != filter_flags) { /* Cancel any currently running scans... 
*/ if (il_scan_cancel_timeout(il, 100)) IL_WARN("Could not cancel scan.\n"); else { D_INFO("Committing rxon.filter_flags = " "0x%04X\n", filter_flags); il->staging.filter_flags = cpu_to_le32(filter_flags); il3945_commit_rxon(il); } } mutex_unlock(&il->mutex); return count; } static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags, il3945_store_filter_flags); static ssize_t il3945_show_measurement(struct device *d, struct device_attribute *attr, char *buf) { struct il_priv *il = dev_get_drvdata(d); struct il_spectrum_notification measure_report; u32 size = sizeof(measure_report), len = 0, ofs = 0; u8 *data = (u8 *) &measure_report; unsigned long flags; spin_lock_irqsave(&il->lock, flags); if (!(il->measurement_status & MEASUREMENT_READY)) { spin_unlock_irqrestore(&il->lock, flags); return 0; } memcpy(&measure_report, &il->measure_report, size); il->measurement_status = 0; spin_unlock_irqrestore(&il->lock, flags); while (size && PAGE_SIZE - len) { hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, PAGE_SIZE - len, 1); len = strlen(buf); if (PAGE_SIZE - len) buf[len++] = '\n'; ofs += 16; size -= min(size, 16U); } return len; } static ssize_t il3945_store_measurement(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct il_priv *il = dev_get_drvdata(d); struct ieee80211_measurement_params params = { .channel = le16_to_cpu(il->active.channel), .start_time = cpu_to_le64(il->_3945.last_tsf), .duration = cpu_to_le16(1), }; u8 type = IL_MEASURE_BASIC; u8 buffer[32]; u8 channel; if (count) { char *p = buffer; strlcpy(buffer, buf, sizeof(buffer)); channel = simple_strtoul(p, NULL, 0); if (channel) params.channel = channel; p = buffer; while (*p && *p != ' ') p++; if (*p) type = simple_strtoul(p + 1, NULL, 0); } D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n", type, params.channel, buf); il3945_get_measurement(il, &params, type); return count; } static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, 
il3945_show_measurement, il3945_store_measurement); static ssize_t il3945_store_retry_rate(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct il_priv *il = dev_get_drvdata(d); il->retry_rate = simple_strtoul(buf, NULL, 0); if (il->retry_rate <= 0) il->retry_rate = 1; return count; } static ssize_t il3945_show_retry_rate(struct device *d, struct device_attribute *attr, char *buf) { struct il_priv *il = dev_get_drvdata(d); return sprintf(buf, "%d", il->retry_rate); } static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate, il3945_store_retry_rate); static ssize_t il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf) { /* all this shit doesn't belong into sysfs anyway */ return 0; } static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL); static ssize_t il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf) { struct il_priv *il = dev_get_drvdata(d); if (!il_is_alive(il)) return -EAGAIN; return sprintf(buf, "%d\n", il3945_mod_params.antenna); } static ssize_t il3945_store_antenna(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct il_priv *il __maybe_unused = dev_get_drvdata(d); int ant; if (count == 0) return 0; if (sscanf(buf, "%1i", &ant) != 1) { D_INFO("not in hex or decimal form.\n"); return count; } if (ant >= 0 && ant <= 2) { D_INFO("Setting antenna select to %d.\n", ant); il3945_mod_params.antenna = (enum il3945_antenna)ant; } else D_INFO("Bad antenna select value %d.\n", ant); return count; } static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna, il3945_store_antenna); static ssize_t il3945_show_status(struct device *d, struct device_attribute *attr, char *buf) { struct il_priv *il = dev_get_drvdata(d); if (!il_is_alive(il)) return -EAGAIN; return sprintf(buf, "0x%08x\n", (int)il->status); } static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL); static ssize_t il3945_dump_error_log(struct 
device *d, struct device_attribute *attr, const char *buf, size_t count) { struct il_priv *il = dev_get_drvdata(d); char *p = (char *)buf; if (p[0] == '1') il3945_dump_nic_error_log(il); return strnlen(buf, count); } static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log); /***************************************************************************** * * driver setup and tear down * *****************************************************************************/ static void il3945_setup_deferred_work(struct il_priv *il) { il->workqueue = create_singlethread_workqueue(DRV_NAME); init_waitqueue_head(&il->wait_command_queue); INIT_WORK(&il->restart, il3945_bg_restart); INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish); INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start); INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start); INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll); il_setup_scan_deferred_work(il); il3945_hw_setup_deferred_work(il); init_timer(&il->watchdog); il->watchdog.data = (unsigned long)il; il->watchdog.function = il_bg_watchdog; tasklet_init(&il->irq_tasklet, (void (*)(unsigned long))il3945_irq_tasklet, (unsigned long)il); } static void il3945_cancel_deferred_work(struct il_priv *il) { il3945_hw_cancel_deferred_work(il); cancel_delayed_work_sync(&il->init_alive_start); cancel_delayed_work(&il->alive_start); il_cancel_scan_deferred_work(il); } static struct attribute *il3945_sysfs_entries[] = { &dev_attr_antenna.attr, &dev_attr_channels.attr, &dev_attr_dump_errors.attr, &dev_attr_flags.attr, &dev_attr_filter_flags.attr, &dev_attr_measurement.attr, &dev_attr_retry_rate.attr, &dev_attr_status.attr, &dev_attr_temperature.attr, &dev_attr_tx_power.attr, #ifdef CONFIG_IWLEGACY_DEBUG &dev_attr_debug_level.attr, #endif NULL }; static struct attribute_group il3945_attribute_group = { .name = NULL, /* put in device directory */ .attrs = il3945_sysfs_entries, }; static struct ieee80211_ops il3945_mac_ops 
__read_mostly = { .tx = il3945_mac_tx, .start = il3945_mac_start, .stop = il3945_mac_stop, .add_interface = il_mac_add_interface, .remove_interface = il_mac_remove_interface, .change_interface = il_mac_change_interface, .config = il_mac_config, .configure_filter = il3945_configure_filter, .set_key = il3945_mac_set_key, .conf_tx = il_mac_conf_tx, .reset_tsf = il_mac_reset_tsf, .bss_info_changed = il_mac_bss_info_changed, .hw_scan = il_mac_hw_scan, .sta_add = il3945_mac_sta_add, .sta_remove = il_mac_sta_remove, .tx_last_beacon = il_mac_tx_last_beacon, .flush = il_mac_flush, }; static int il3945_init_drv(struct il_priv *il) { int ret; struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom; il->retry_rate = 1; il->beacon_skb = NULL; spin_lock_init(&il->sta_lock); spin_lock_init(&il->hcmd_lock); INIT_LIST_HEAD(&il->free_frames); mutex_init(&il->mutex); il->ieee_channels = NULL; il->ieee_rates = NULL; il->band = IEEE80211_BAND_2GHZ; il->iw_mode = NL80211_IFTYPE_STATION; il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF; /* initialize force reset */ il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD; if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { IL_WARN("Unsupported EEPROM version: 0x%04X\n", eeprom->version); ret = -EINVAL; goto err; } ret = il_init_channel_map(il); if (ret) { IL_ERR("initializing regulatory failed: %d\n", ret); goto err; } /* Set up txpower settings in driver for all channels */ if (il3945_txpower_set_from_eeprom(il)) { ret = -EIO; goto err_free_channel_map; } ret = il_init_geos(il); if (ret) { IL_ERR("initializing geos failed: %d\n", ret); goto err_free_channel_map; } il3945_init_hw_rates(il, il->ieee_rates); return 0; err_free_channel_map: il_free_channel_map(il); err: return ret; } #define IL3945_MAX_PROBE_REQUEST 200 static int il3945_setup_mac(struct il_priv *il) { int ret; struct ieee80211_hw *hw = il->hw; hw->rate_control_algorithm = "iwl-3945-rs"; hw->sta_data_size = sizeof(struct il3945_sta_priv); 
hw->vif_data_size = sizeof(struct il_vif_priv); /* Tell mac80211 our characteristics */ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS | WIPHY_FLAG_IBSS_RSN; hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; /* we create the 802.11 header and a zero-length SSID element */ hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2; /* Default value; 4 EDCA QOS priorities */ hw->queues = 4; if (il->bands[IEEE80211_BAND_2GHZ].n_channels) il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &il->bands[IEEE80211_BAND_2GHZ]; if (il->bands[IEEE80211_BAND_5GHZ].n_channels) il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &il->bands[IEEE80211_BAND_5GHZ]; il_leds_init(il); ret = ieee80211_register_hw(il->hw); if (ret) { IL_ERR("Failed to register hw (error %d)\n", ret); return ret; } il->mac80211_registered = 1; return 0; } static int il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err = 0; struct il_priv *il; struct ieee80211_hw *hw; struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); struct il3945_eeprom *eeprom; unsigned long flags; /*********************** * 1. Allocating HW data * ********************/ hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il3945_mac_ops); if (!hw) { err = -ENOMEM; goto out; } il = hw->priv; il->hw = hw; SET_IEEE80211_DEV(hw, &pdev->dev); il->cmd_queue = IL39_CMD_QUEUE_NUM; /* * Disabling hardware scan means that mac80211 will perform scans * "the hard way", rather than using device's scan. 
*/ if (il3945_mod_params.disable_hw_scan) { D_INFO("Disabling hw_scan\n"); il3945_mac_ops.hw_scan = NULL; } D_INFO("*** LOAD DRIVER ***\n"); il->cfg = cfg; il->ops = &il3945_ops; #ifdef CONFIG_IWLEGACY_DEBUGFS il->debugfs_ops = &il3945_debugfs_ops; #endif il->pci_dev = pdev; il->inta_mask = CSR_INI_SET_MASK; /*************************** * 2. Initializing PCI bus * *************************/ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); if (pci_enable_device(pdev)) { err = -ENODEV; goto out_ieee80211_free_hw; } pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { IL_WARN("No suitable DMA available.\n"); goto out_pci_disable_device; } pci_set_drvdata(pdev, il); err = pci_request_regions(pdev, DRV_NAME); if (err) goto out_pci_disable_device; /*********************** * 3. Read REV Register * ********************/ il->hw_base = pci_ioremap_bar(pdev, 0); if (!il->hw_base) { err = -ENODEV; goto out_pci_release_regions; } D_INFO("pci_resource_len = 0x%08llx\n", (unsigned long long)pci_resource_len(pdev, 0)); D_INFO("pci_resource_base = %p\n", il->hw_base); /* We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, 0x41, 0x00); /* these spin locks will be used in apm_init and EEPROM access * we should init now */ spin_lock_init(&il->reg_lock); spin_lock_init(&il->lock); /* * stop and reset the on-board processor just in case it is in a * strange state ... like being left stranded by a primary kernel * and this is now the kdump kernel trying to start up */ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); /*********************** * 4. 
Read EEPROM * ********************/ /* Read the EEPROM */ err = il_eeprom_init(il); if (err) { IL_ERR("Unable to init EEPROM\n"); goto out_iounmap; } /* MAC Address location in EEPROM same for 3945/4965 */ eeprom = (struct il3945_eeprom *)il->eeprom; D_INFO("MAC address: %pM\n", eeprom->mac_address); SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address); /*********************** * 5. Setup HW Constants * ********************/ /* Device-specific setup */ if (il3945_hw_set_hw_params(il)) { IL_ERR("failed to set hw settings\n"); goto out_eeprom_free; } /*********************** * 6. Setup il * ********************/ err = il3945_init_drv(il); if (err) { IL_ERR("initializing driver failed\n"); goto out_unset_hw_params; } IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name); /*********************** * 7. Setup Services * ********************/ spin_lock_irqsave(&il->lock, flags); il_disable_interrupts(il); spin_unlock_irqrestore(&il->lock, flags); pci_enable_msi(il->pci_dev); err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il); if (err) { IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq); goto out_disable_msi; } err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group); if (err) { IL_ERR("failed to create sysfs device attributes\n"); goto out_release_irq; } il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5]); il3945_setup_deferred_work(il); il3945_setup_handlers(il); il_power_initialize(il); /********************************* * 8. Setup and Register mac80211 * *******************************/ il_enable_interrupts(il); err = il3945_setup_mac(il); if (err) goto out_remove_sysfs; err = il_dbgfs_register(il, DRV_NAME); if (err) IL_ERR("failed to create debugfs files. 
Ignoring error: %d\n", err); /* Start monitoring the killswitch */ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ); return 0; out_remove_sysfs: destroy_workqueue(il->workqueue); il->workqueue = NULL; sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); out_release_irq: free_irq(il->pci_dev->irq, il); out_disable_msi: pci_disable_msi(il->pci_dev); il_free_geos(il); il_free_channel_map(il); out_unset_hw_params: il3945_unset_hw_params(il); out_eeprom_free: il_eeprom_free(il); out_iounmap: iounmap(il->hw_base); out_pci_release_regions: pci_release_regions(pdev); out_pci_disable_device: pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); out_ieee80211_free_hw: ieee80211_free_hw(il->hw); out: return err; } static void il3945_pci_remove(struct pci_dev *pdev) { struct il_priv *il = pci_get_drvdata(pdev); unsigned long flags; if (!il) return; D_INFO("*** UNLOAD DRIVER ***\n"); il_dbgfs_unregister(il); set_bit(S_EXIT_PENDING, &il->status); il_leds_exit(il); if (il->mac80211_registered) { ieee80211_unregister_hw(il->hw); il->mac80211_registered = 0; } else { il3945_down(il); } /* * Make sure device is reset to low power before unloading driver. * This may be redundant with il_down(), but there are paths to * run il_down() without calling apm_ops.stop(), and there are * paths to avoid running il_down() at all before leaving driver. * This (inexpensive) call *makes sure* device is reset. 
*/ il_apm_stop(il); /* make sure we flush any pending irq or * tasklet for the driver */ spin_lock_irqsave(&il->lock, flags); il_disable_interrupts(il); spin_unlock_irqrestore(&il->lock, flags); il3945_synchronize_irq(il); sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); cancel_delayed_work_sync(&il->_3945.rfkill_poll); il3945_dealloc_ucode_pci(il); if (il->rxq.bd) il3945_rx_queue_free(il, &il->rxq); il3945_hw_txq_ctx_free(il); il3945_unset_hw_params(il); /*netif_stop_queue(dev); */ flush_workqueue(il->workqueue); /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes * il->workqueue... so we can't take down the workqueue * until now... */ destroy_workqueue(il->workqueue); il->workqueue = NULL; free_irq(pdev->irq, il); pci_disable_msi(pdev); iounmap(il->hw_base); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); il_free_channel_map(il); il_free_geos(il); kfree(il->scan_cmd); if (il->beacon_skb) dev_kfree_skb(il->beacon_skb); ieee80211_free_hw(il->hw); } /***************************************************************************** * * driver and module entry point * *****************************************************************************/ static struct pci_driver il3945_driver = { .name = DRV_NAME, .id_table = il3945_hw_card_ids, .probe = il3945_pci_probe, .remove = il3945_pci_remove, .driver.pm = IL_LEGACY_PM_OPS, }; static int __init il3945_init(void) { int ret; pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); pr_info(DRV_COPYRIGHT "\n"); ret = il3945_rate_control_register(); if (ret) { pr_err("Unable to register rate control algorithm: %d\n", ret); return ret; } ret = pci_register_driver(&il3945_driver); if (ret) { pr_err("Unable to initialize PCI module\n"); goto error_register; } return ret; error_register: il3945_rate_control_unregister(); return ret; } static void __exit il3945_exit(void) { pci_unregister_driver(&il3945_driver); il3945_rate_control_unregister(); } 
MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX)); module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO); MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO); MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])"); module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int, S_IRUGO); MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)"); #ifdef CONFIG_IWLEGACY_DEBUG module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "debug output mask"); #endif module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); module_exit(il3945_exit); module_init(il3945_init);
gpl-2.0
cattleprod/XCeLL-X69
sound/pci/ctxfi/xfi.c
3633
4470
/* * xfi linux driver. * * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved. * * This source file is released under GPL v2 license (no other versions). * See the COPYING file included in the main directory of this source * distribution for the license terms and conditions. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/moduleparam.h> #include <linux/pci_ids.h> #include <sound/core.h> #include <sound/initval.h> #include "ctatc.h" #include "cthardware.h" MODULE_AUTHOR("Creative Technology Ltd"); MODULE_DESCRIPTION("X-Fi driver version 1.03"); MODULE_LICENSE("GPL v2"); MODULE_SUPPORTED_DEVICE("{{Creative Labs, Sound Blaster X-Fi}"); static unsigned int reference_rate = 48000; static unsigned int multiple = 2; MODULE_PARM_DESC(reference_rate, "Reference rate (default=48000)"); module_param(reference_rate, uint, S_IRUGO); MODULE_PARM_DESC(multiple, "Rate multiplier (default=2)"); module_param(multiple, uint, S_IRUGO); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; static unsigned int subsystem[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Creative X-Fi driver"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Creative X-Fi driver"); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Creative X-Fi driver"); module_param_array(subsystem, int, NULL, 0444); MODULE_PARM_DESC(subsystem, "Override subsystem ID for Creative X-Fi driver"); static DEFINE_PCI_DEVICE_TABLE(ct_pci_dev_ids) = { /* only X-Fi is supported, so... 
*/ { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K1), .driver_data = ATC20K1, }, { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2), .driver_data = ATC20K2, }, { 0, } }; MODULE_DEVICE_TABLE(pci, ct_pci_dev_ids); static int __devinit ct_card_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct ct_atc *atc; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err) return err; if ((reference_rate != 48000) && (reference_rate != 44100)) { printk(KERN_ERR "ctxfi: Invalid reference_rate value %u!!!\n", reference_rate); printk(KERN_ERR "ctxfi: The valid values for reference_rate " "are 48000 and 44100, Value 48000 is assumed.\n"); reference_rate = 48000; } if ((multiple != 1) && (multiple != 2)) { printk(KERN_ERR "ctxfi: Invalid multiple value %u!!!\n", multiple); printk(KERN_ERR "ctxfi: The valid values for multiple are " "1 and 2, Value 2 is assumed.\n"); multiple = 2; } err = ct_atc_create(card, pci, reference_rate, multiple, pci_id->driver_data, subsystem[dev], &atc); if (err < 0) goto error; card->private_data = atc; /* Create alsa devices supported by this card */ err = ct_atc_create_alsa_devs(atc); if (err < 0) goto error; strcpy(card->driver, "SB-XFi"); strcpy(card->shortname, "Creative X-Fi"); snprintf(card->longname, sizeof(card->longname), "%s %s %s", card->shortname, atc->chip_name, atc->model_name); err = snd_card_register(card); if (err < 0) goto error; pci_set_drvdata(pci, card); dev++; return 0; error: snd_card_free(card); return err; } static void __devexit ct_card_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM static int ct_card_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct ct_atc *atc = card->private_data; return atc->suspend(atc, 
state); } static int ct_card_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct ct_atc *atc = card->private_data; return atc->resume(atc); } #endif static struct pci_driver ct_driver = { .name = "SB-XFi", .id_table = ct_pci_dev_ids, .probe = ct_card_probe, .remove = __devexit_p(ct_card_remove), #ifdef CONFIG_PM .suspend = ct_card_suspend, .resume = ct_card_resume, #endif }; static int __init ct_card_init(void) { return pci_register_driver(&ct_driver); } static void __exit ct_card_exit(void) { pci_unregister_driver(&ct_driver); } module_init(ct_card_init) module_exit(ct_card_exit)
gpl-2.0
AICP/kernel_samsung_smdk4412
arch/arm/mach-s3c2440/dma.c
3889
5677
/* linux/arch/arm/mach-s3c2440/dma.c * * Copyright (c) 2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2440 DMA selection * * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sysdev.h> #include <linux/serial_core.h> #include <mach/map.h> #include <mach/dma.h> #include <plat/dma-s3c24xx.h> #include <plat/cpu.h> #include <plat/regs-serial.h> #include <mach/regs-gpio.h> #include <plat/regs-ac97.h> #include <plat/regs-dma.h> #include <mach/regs-mem.h> #include <mach/regs-lcd.h> #include <mach/regs-sdi.h> #include <plat/regs-iis.h> #include <plat/regs-spi.h> static struct s3c24xx_dma_map __initdata s3c2440_dma_mappings[] = { [DMACH_XD0] = { .name = "xdreq0", .channels[0] = S3C2410_DCON_CH0_XDREQ0 | DMA_CH_VALID, }, [DMACH_XD1] = { .name = "xdreq1", .channels[1] = S3C2410_DCON_CH1_XDREQ1 | DMA_CH_VALID, }, [DMACH_SDI] = { .name = "sdi", .channels[0] = S3C2410_DCON_CH0_SDI | DMA_CH_VALID, .channels[1] = S3C2440_DCON_CH1_SDI | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_SDI | DMA_CH_VALID, .channels[3] = S3C2410_DCON_CH3_SDI | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_IIS + S3C2410_IISFIFO, .hw_addr.from = S3C2410_PA_IIS + S3C2410_IISFIFO, }, [DMACH_SPI0] = { .name = "spi0", .channels[1] = S3C2410_DCON_CH1_SPI | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_SPI + S3C2410_SPTDAT, .hw_addr.from = S3C2410_PA_SPI + S3C2410_SPRDAT, }, [DMACH_SPI1] = { .name = "spi1", .channels[3] = S3C2410_DCON_CH3_SPI | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_SPI + 0x20 + S3C2410_SPTDAT, .hw_addr.from = S3C2410_PA_SPI + 0x20 + S3C2410_SPRDAT, }, [DMACH_UART0] = { .name = "uart0", .channels[0] = S3C2410_DCON_CH0_UART0 | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_UART0 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART0 + S3C2410_URXH, }, [DMACH_UART1] = { .name = 
"uart1", .channels[1] = S3C2410_DCON_CH1_UART1 | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_UART1 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART1 + S3C2410_URXH, }, [DMACH_UART2] = { .name = "uart2", .channels[3] = S3C2410_DCON_CH3_UART2 | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_UART2 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART2 + S3C2410_URXH, }, [DMACH_TIMER] = { .name = "timer", .channels[0] = S3C2410_DCON_CH0_TIMER | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_TIMER | DMA_CH_VALID, .channels[3] = S3C2410_DCON_CH3_TIMER | DMA_CH_VALID, }, [DMACH_I2S_IN] = { .name = "i2s-sdi", .channels[1] = S3C2410_DCON_CH1_I2SSDI | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_I2SSDI | DMA_CH_VALID, .hw_addr.from = S3C2410_PA_IIS + S3C2410_IISFIFO, }, [DMACH_I2S_OUT] = { .name = "i2s-sdo", .channels[0] = S3C2440_DCON_CH0_I2SSDO | DMA_CH_VALID, .channels[2] = S3C2410_DCON_CH2_I2SSDO | DMA_CH_VALID, .hw_addr.to = S3C2410_PA_IIS + S3C2410_IISFIFO, }, [DMACH_PCM_IN] = { .name = "pcm-in", .channels[0] = S3C2440_DCON_CH0_PCMIN | DMA_CH_VALID, .channels[2] = S3C2440_DCON_CH2_PCMIN | DMA_CH_VALID, .hw_addr.from = S3C2440_PA_AC97 + S3C_AC97_PCM_DATA, }, [DMACH_PCM_OUT] = { .name = "pcm-out", .channels[1] = S3C2440_DCON_CH1_PCMOUT | DMA_CH_VALID, .channels[3] = S3C2440_DCON_CH3_PCMOUT | DMA_CH_VALID, .hw_addr.to = S3C2440_PA_AC97 + S3C_AC97_PCM_DATA, }, [DMACH_MIC_IN] = { .name = "mic-in", .channels[2] = S3C2440_DCON_CH2_MICIN | DMA_CH_VALID, .channels[3] = S3C2440_DCON_CH3_MICIN | DMA_CH_VALID, .hw_addr.from = S3C2440_PA_AC97 + S3C_AC97_MIC_DATA, }, [DMACH_USB_EP1] = { .name = "usb-ep1", .channels[0] = S3C2410_DCON_CH0_USBEP1 | DMA_CH_VALID, }, [DMACH_USB_EP2] = { .name = "usb-ep2", .channels[1] = S3C2410_DCON_CH1_USBEP2 | DMA_CH_VALID, }, [DMACH_USB_EP3] = { .name = "usb-ep3", .channels[2] = S3C2410_DCON_CH2_USBEP3 | DMA_CH_VALID, }, [DMACH_USB_EP4] = { .name = "usb-ep4", .channels[3] = S3C2410_DCON_CH3_USBEP4 | DMA_CH_VALID, }, }; static void s3c2440_dma_select(struct 
s3c2410_dma_chan *chan, struct s3c24xx_dma_map *map) { chan->dcon = map->channels[chan->number] & ~DMA_CH_VALID; } static struct s3c24xx_dma_selection __initdata s3c2440_dma_sel = { .select = s3c2440_dma_select, .dcon_mask = 7 << 24, .map = s3c2440_dma_mappings, .map_size = ARRAY_SIZE(s3c2440_dma_mappings), }; static struct s3c24xx_dma_order __initdata s3c2440_dma_order = { .channels = { [DMACH_SDI] = { .list = { [0] = 3 | DMA_CH_VALID, [1] = 2 | DMA_CH_VALID, [2] = 1 | DMA_CH_VALID, [3] = 0 | DMA_CH_VALID, }, }, [DMACH_I2S_IN] = { .list = { [0] = 1 | DMA_CH_VALID, [1] = 2 | DMA_CH_VALID, }, }, [DMACH_I2S_OUT] = { .list = { [0] = 2 | DMA_CH_VALID, [1] = 1 | DMA_CH_VALID, }, }, [DMACH_PCM_IN] = { .list = { [0] = 2 | DMA_CH_VALID, [1] = 1 | DMA_CH_VALID, }, }, [DMACH_PCM_OUT] = { .list = { [0] = 1 | DMA_CH_VALID, [1] = 3 | DMA_CH_VALID, }, }, [DMACH_MIC_IN] = { .list = { [0] = 3 | DMA_CH_VALID, [1] = 2 | DMA_CH_VALID, }, }, }, }; static int __init s3c2440_dma_add(struct sys_device *sysdev) { s3c2410_dma_init(); s3c24xx_dma_order_set(&s3c2440_dma_order); return s3c24xx_dma_init_map(&s3c2440_dma_sel); } static struct sysdev_driver s3c2440_dma_driver = { .add = s3c2440_dma_add, }; static int __init s3c2440_dma_init(void) { return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_dma_driver); } arch_initcall(s3c2440_dma_init);
gpl-2.0
CyanogenMod/motorola-kernel-stingray
lib/spinlock_debug.c
3889
7022
/* * Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the General Public License (GPL). * * This file contains the spinlock/rwlock implementations for * DEBUG_SPINLOCK. */ #include <linux/spinlock.h> #include <linux/nmi.h> #include <linux/interrupt.h> #include <linux/debug_locks.h> #include <linux/delay.h> #include <linux/module.h> void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* * Make sure we are not reinitializing a held lock: */ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; lock->magic = SPINLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; } EXPORT_SYMBOL(__raw_spin_lock_init); void __rwlock_init(rwlock_t *lock, const char *name, struct lock_class_key *key) { #ifdef CONFIG_DEBUG_LOCK_ALLOC /* * Make sure we are not reinitializing a held lock: */ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED; lock->magic = RWLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; } EXPORT_SYMBOL(__rwlock_init); static void spin_bug(raw_spinlock_t *lock, const char *msg) { struct task_struct *owner = NULL; if (!debug_locks_off()) return; if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) owner = lock->owner; printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", msg, raw_smp_processor_id(), current->comm, task_pid_nr(current)); printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, " ".owner_cpu: %d\n", lock, lock->magic, owner ? owner->comm : "<none>", owner ? 
/*
 * Tail of spin_bug() -- the opening of the function (the lockup banner
 * printk) lies before this chunk; what remains here is the end of its
 * argument list and the stack dump.
 */
task_pid_nr(owner) : -1, lock->owner_cpu);
	dump_stack();
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

/* Pre-acquire sanity checks: magic intact, no task/CPU self-deadlock. */
static inline void debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

/* Record the new owner (task + CPU) right after a successful acquire. */
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

/*
 * Pre-release sanity checks: the lock must actually be held, by this
 * task, on this CPU.  Ownership info is cleared before the real unlock.
 */
static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

/*
 * Spin until the lock is acquired; after loops_per_jiffy * HZ failed
 * trylock attempts a one-time lockup warning is printed, with a stack
 * dump (and, on SMP, backtraces of all CPUs), then spinning continues.
 */
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				task_pid_nr(current), lock);
			dump_stack();
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
		}
	}
}

/* Debug spinlock acquire: fast trylock, else fall into the spin loop. */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}

/* Debug trylock: record ownership only on success. */
int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

/* Debug release: validate ownership, then do the architecture unlock. */
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}

/* Report an rwlock API violation; fires only while lock debugging is on. */
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif

/* Read side: only the magic is checked; readers carry no owner info. */
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

/*
 * Write side mirrors the spinlock bookkeeping: check for recursion
 * before acquiring, record owner after, validate owner on release.
 */
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

#if 0		/* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif

void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}
gpl-2.0
Badadroid/android_kernel_samsung_wave
arch/arm/mach-s3c2440/dma.c
3889
5677
/* linux/arch/arm/mach-s3c2440/dma.c
 *
 * Copyright (c) 2006 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * S3C2440 DMA selection
 *
 * http://armlinux.simtec.co.uk/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sysdev.h>
#include <linux/serial_core.h>

#include <mach/map.h>
#include <mach/dma.h>

#include <plat/dma-s3c24xx.h>
#include <plat/cpu.h>

#include <plat/regs-serial.h>
#include <mach/regs-gpio.h>
#include <plat/regs-ac97.h>
#include <plat/regs-dma.h>
#include <mach/regs-mem.h>
#include <mach/regs-lcd.h>
#include <mach/regs-sdi.h>
#include <plat/regs-iis.h>
#include <plat/regs-spi.h>

/*
 * Peripheral-request -> DMA-channel map for the S3C2440.  For each
 * virtual DMA request, channels[n] gives the DCON source-select value
 * that routes the peripheral onto hardware channel n; DMA_CH_VALID
 * marks that channel as usable for the request.  hw_addr.to/.from are
 * the peripheral data registers for to-device / from-device transfers.
 *
 * NOTE(review): the DMACH_SDI hw_addr entries point at the IIS FIFO
 * (S3C2410_PA_IIS + S3C2410_IISFIFO), not an SDI register -- looks
 * suspicious but matches the shipped code; confirm against the SDI
 * register map before relying on it.
 */
static struct s3c24xx_dma_map __initdata s3c2440_dma_mappings[] = {
	[DMACH_XD0] = {
		.name		= "xdreq0",
		.channels[0]	= S3C2410_DCON_CH0_XDREQ0 | DMA_CH_VALID,
	},
	[DMACH_XD1] = {
		.name		= "xdreq1",
		.channels[1]	= S3C2410_DCON_CH1_XDREQ1 | DMA_CH_VALID,
	},
	[DMACH_SDI] = {
		.name		= "sdi",
		.channels[0]	= S3C2410_DCON_CH0_SDI | DMA_CH_VALID,
		.channels[1]	= S3C2440_DCON_CH1_SDI | DMA_CH_VALID,
		.channels[2]	= S3C2410_DCON_CH2_SDI | DMA_CH_VALID,
		.channels[3]	= S3C2410_DCON_CH3_SDI | DMA_CH_VALID,
		.hw_addr.to	= S3C2410_PA_IIS + S3C2410_IISFIFO,
		.hw_addr.from	= S3C2410_PA_IIS + S3C2410_IISFIFO,
	},
	[DMACH_SPI0] = {
		.name		= "spi0",
		.channels[1]	= S3C2410_DCON_CH1_SPI | DMA_CH_VALID,
		.hw_addr.to	= S3C2410_PA_SPI + S3C2410_SPTDAT,
		.hw_addr.from	= S3C2410_PA_SPI + S3C2410_SPRDAT,
	},
	[DMACH_SPI1] = {
		.name		= "spi1",
		.channels[3]	= S3C2410_DCON_CH3_SPI | DMA_CH_VALID,
		/* SPI1 register block sits 0x20 above SPI0 */
		.hw_addr.to	= S3C2410_PA_SPI + 0x20 + S3C2410_SPTDAT,
		.hw_addr.from	= S3C2410_PA_SPI + 0x20 + S3C2410_SPRDAT,
	},
	[DMACH_UART0] = {
		.name		= "uart0",
		.channels[0]	= S3C2410_DCON_CH0_UART0 | DMA_CH_VALID,
		.hw_addr.to	= S3C2410_PA_UART0 + S3C2410_UTXH,
		.hw_addr.from	= S3C2410_PA_UART0 + S3C2410_URXH,
	},
	[DMACH_UART1] = {
		.name		= "uart1",
		.channels[1]	= S3C2410_DCON_CH1_UART1 | DMA_CH_VALID,
		.hw_addr.to	= S3C2410_PA_UART1 + S3C2410_UTXH,
		.hw_addr.from	= S3C2410_PA_UART1 + S3C2410_URXH,
	},
	[DMACH_UART2] = {
		.name		= "uart2",
		.channels[3]	= S3C2410_DCON_CH3_UART2 | DMA_CH_VALID,
		.hw_addr.to	= S3C2410_PA_UART2 + S3C2410_UTXH,
		.hw_addr.from	= S3C2410_PA_UART2 + S3C2410_URXH,
	},
	[DMACH_TIMER] = {
		.name		= "timer",
		.channels[0]	= S3C2410_DCON_CH0_TIMER | DMA_CH_VALID,
		.channels[2]	= S3C2410_DCON_CH2_TIMER | DMA_CH_VALID,
		.channels[3]	= S3C2410_DCON_CH3_TIMER | DMA_CH_VALID,
	},
	[DMACH_I2S_IN] = {
		.name		= "i2s-sdi",
		.channels[1]	= S3C2410_DCON_CH1_I2SSDI | DMA_CH_VALID,
		.channels[2]	= S3C2410_DCON_CH2_I2SSDI | DMA_CH_VALID,
		.hw_addr.from	= S3C2410_PA_IIS + S3C2410_IISFIFO,
	},
	[DMACH_I2S_OUT] = {
		.name		= "i2s-sdo",
		.channels[0]	= S3C2440_DCON_CH0_I2SSDO | DMA_CH_VALID,
		.channels[2]	= S3C2410_DCON_CH2_I2SSDO | DMA_CH_VALID,
		.hw_addr.to	= S3C2410_PA_IIS + S3C2410_IISFIFO,
	},
	[DMACH_PCM_IN] = {
		.name		= "pcm-in",
		.channels[0]	= S3C2440_DCON_CH0_PCMIN | DMA_CH_VALID,
		.channels[2]	= S3C2440_DCON_CH2_PCMIN | DMA_CH_VALID,
		.hw_addr.from	= S3C2440_PA_AC97 + S3C_AC97_PCM_DATA,
	},
	[DMACH_PCM_OUT] = {
		.name		= "pcm-out",
		.channels[1]	= S3C2440_DCON_CH1_PCMOUT | DMA_CH_VALID,
		.channels[3]	= S3C2440_DCON_CH3_PCMOUT | DMA_CH_VALID,
		.hw_addr.to	= S3C2440_PA_AC97 + S3C_AC97_PCM_DATA,
	},
	[DMACH_MIC_IN] = {
		.name		= "mic-in",
		.channels[2]	= S3C2440_DCON_CH2_MICIN | DMA_CH_VALID,
		.channels[3]	= S3C2440_DCON_CH3_MICIN | DMA_CH_VALID,
		.hw_addr.from	= S3C2440_PA_AC97 + S3C_AC97_MIC_DATA,
	},
	[DMACH_USB_EP1] = {
		.name		= "usb-ep1",
		.channels[0]	= S3C2410_DCON_CH0_USBEP1 | DMA_CH_VALID,
	},
	[DMACH_USB_EP2] = {
		.name		= "usb-ep2",
		.channels[1]	= S3C2410_DCON_CH1_USBEP2 | DMA_CH_VALID,
	},
	[DMACH_USB_EP3] = {
		.name		= "usb-ep3",
		.channels[2]	= S3C2410_DCON_CH2_USBEP3 | DMA_CH_VALID,
	},
	[DMACH_USB_EP4] = {
		.name		= "usb-ep4",
		.channels[3]	= S3C2410_DCON_CH3_USBEP4 | DMA_CH_VALID,
	},
};

/*
 * Program the channel's DCON source-select from the map entry; the
 * DMA_CH_VALID marker bit is stripped before it reaches the hardware.
 */
static void s3c2440_dma_select(struct s3c2410_dma_chan *chan,
			       struct s3c24xx_dma_map *map)
{
	chan->dcon = map->channels[chan->number] & ~DMA_CH_VALID;
}

/* Selection descriptor: dcon_mask covers the DCON source-select field. */
static struct s3c24xx_dma_selection __initdata s3c2440_dma_sel = {
	.select		= s3c2440_dma_select,
	.dcon_mask	= 7 << 24,
	.map		= s3c2440_dma_mappings,
	.map_size	= ARRAY_SIZE(s3c2440_dma_mappings),
};

/*
 * Preferred channel order per request, tried first-to-last when a
 * peripheral can be serviced by several hardware channels.
 */
static struct s3c24xx_dma_order __initdata s3c2440_dma_order = {
	.channels	= {
		[DMACH_SDI]	= {
			.list	= {
				[0]	= 3 | DMA_CH_VALID,
				[1]	= 2 | DMA_CH_VALID,
				[2]	= 1 | DMA_CH_VALID,
				[3]	= 0 | DMA_CH_VALID,
			},
		},
		[DMACH_I2S_IN]	= {
			.list	= {
				[0]	= 1 | DMA_CH_VALID,
				[1]	= 2 | DMA_CH_VALID,
			},
		},
		[DMACH_I2S_OUT]	= {
			.list	= {
				[0]	= 2 | DMA_CH_VALID,
				[1]	= 1 | DMA_CH_VALID,
			},
		},
		[DMACH_PCM_IN] = {
			.list	= {
				[0]	= 2 | DMA_CH_VALID,
				[1]	= 1 | DMA_CH_VALID,
			},
		},
		[DMACH_PCM_OUT] = {
			.list	= {
				[0]	= 1 | DMA_CH_VALID,
				[1]	= 3 | DMA_CH_VALID,
			},
		},
		[DMACH_MIC_IN] = {
			.list	= {
				[0]	= 3 | DMA_CH_VALID,
				[1]	= 2 | DMA_CH_VALID,
			},
		},
	},
};

/* sysdev hook: initialise the core DMA code, then install order + map. */
static int __init s3c2440_dma_add(struct sys_device *sysdev)
{
	s3c2410_dma_init();
	s3c24xx_dma_order_set(&s3c2440_dma_order);
	return s3c24xx_dma_init_map(&s3c2440_dma_sel);
}

static struct sysdev_driver s3c2440_dma_driver = {
	.add	= s3c2440_dma_add,
};

/* Register against the s3c2440 sysclass so we only run on that SoC. */
static int __init s3c2440_dma_init(void)
{
	return sysdev_driver_register(&s3c2440_sysclass, &s3c2440_dma_driver);
}

arch_initcall(s3c2440_dma_init);
gpl-2.0
htc-mirror/ville-u-ics-3.0.8-e2a40ab
drivers/dma/coh901318_lli.c
4145
6503
/*
 * driver/dma/coh901318_lli.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 * Support functions for handling lli for dma
 * Author: Per Friden <per.friden@stericsson.com>
 */

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/dmapool.h>
#include <linux/memory.h>
#include <linux/gfp.h>
#include <mach/coh901318.h>

#include "coh901318_lli.h"

/* Track the number of outstanding lli allocations when debugfs is on. */
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
#else
#define DEBUGFS_POOL_COUNTER_RESET(pool)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
#endif

/*
 * Step to the next item in a linked list of llis, or NULL at the end
 * (link_addr == 0 marks the terminating item).
 */
static struct coh901318_lli *
coh901318_lli_next(struct coh901318_lli *data)
{
	if (data == NULL || data->link_addr == 0)
		return NULL;

	return (struct coh901318_lli *) data->virt_link_addr;
}

/*
 * Set up an lli pool backed by a dma_pool of @size-byte, @align-aligned
 * items.  Always returns 0; a failed dma_pool_create() leaves
 * pool->dmapool NULL.  NOTE(review): callers appear to assume success --
 * consider propagating -ENOMEM here; left unchanged to preserve the
 * existing contract.
 */
int coh901318_pool_create(struct coh901318_pool *pool,
			  struct device *dev,
			  size_t size, size_t align)
{
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);

	DEBUGFS_POOL_COUNTER_RESET(pool);
	return 0;
}

int coh901318_pool_destroy(struct coh901318_pool *pool)
{
	dma_pool_destroy(pool->dmapool);
	return 0;
}

/*
 * Allocate a chain of @len llis from @pool and link them both
 * physically (link_addr) and virtually (virt_link_addr); the final
 * item carries link_addr == 0.  Returns the chain head, or NULL when
 * @len is zero or an allocation fails (partially built chains are
 * released again).
 */
struct coh901318_lli *
coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
{
	int i;
	struct coh901318_lli *head;
	struct coh901318_lli *lli;
	struct coh901318_lli *lli_prev;
	dma_addr_t phy;

	/*
	 * Bail out before taking pool->lock: the old "goto err" here
	 * jumped to a spin_unlock() for a lock that was never taken.
	 */
	if (len == 0)
		return NULL;

	spin_lock(&pool->lock);

	head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

	if (head == NULL)
		goto err;

	DEBUGFS_POOL_COUNTER_ADD(pool, 1);

	lli = head;
	lli->phy_this = phy;
	lli->link_addr = 0x00000000;
	lli->virt_link_addr = NULL;

	for (i = 1; i < len; i++) {
		lli_prev = lli;

		lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

		if (lli == NULL)
			goto err_clean_up;

		DEBUGFS_POOL_COUNTER_ADD(pool, 1);

		lli->phy_this = phy;
		lli->link_addr = 0x00000000;
		lli->virt_link_addr = NULL;

		lli_prev->link_addr = phy;
		lli_prev->virt_link_addr = lli;
	}

	spin_unlock(&pool->lock);

	return head;

 err:
	spin_unlock(&pool->lock);
	return NULL;

 err_clean_up:
	/* Terminate the chain at the last good item, then free it all. */
	lli_prev->link_addr = 0x00000000U;
	spin_unlock(&pool->lock);
	coh901318_lli_free(pool, &head);
	return NULL;
}

/*
 * Return a whole lli chain to the pool, walking the virtual links
 * until the terminating item (link_addr == 0).  *lli is cleared so the
 * caller's pointer cannot dangle.  NULL / already-freed input is a
 * no-op.
 */
void coh901318_lli_free(struct coh901318_pool *pool,
			struct coh901318_lli **lli)
{
	struct coh901318_lli *l;
	struct coh901318_lli *next;

	if (lli == NULL)
		return;

	l = *lli;

	if (l == NULL)
		return;

	spin_lock(&pool->lock);

	while (l->link_addr) {
		next = l->virt_link_addr;
		dma_pool_free(pool->dmapool, l, l->phy_this);
		DEBUGFS_POOL_COUNTER_ADD(pool, -1);
		l = next;
	}
	dma_pool_free(pool->dmapool, l, l->phy_this);
	DEBUGFS_POOL_COUNTER_ADD(pool, -1);

	spin_unlock(&pool->lock);
	*lli = NULL;
}

/*
 * Fill a pre-allocated lli chain for a memcpy transfer: every chained
 * item moves MAX_DMA_PACKET_SIZE bytes, the terminating item moves the
 * remainder and carries the end-of-message control word @ctrl_eom.
 * The chain is assumed to be long enough for @size.
 */
int
coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t source, unsigned int size,
			  dma_addr_t destination, u32 ctrl_chained,
			  u32 ctrl_eom)
{
	int s = size;
	dma_addr_t src = source;
	dma_addr_t dst = destination;

	lli->src_addr = src;
	lli->dst_addr = dst;

	while (lli->link_addr) {
		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
		lli->src_addr = src;
		lli->dst_addr = dst;

		s -= MAX_DMA_PACKET_SIZE;
		lli = coh901318_lli_next(lli);

		src += MAX_DMA_PACKET_SIZE;
		dst += MAX_DMA_PACKET_SIZE;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	return 0;
}

/*
 * Fill a pre-allocated lli chain for a single contiguous buffer
 * to/from a fixed device address.  Only the memory-side address
 * advances between items; the device address stays put.  Returns
 * -EINVAL for an unsupported transfer direction.
 */
int
coh901318_lli_fill_single(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t buf, unsigned int size,
			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
			  enum dma_data_direction dir)
{
	int s = size;
	dma_addr_t src;
	dma_addr_t dst;

	if (dir == DMA_TO_DEVICE) {
		src = buf;
		dst = dev_addr;
	} else if (dir == DMA_FROM_DEVICE) {
		src = dev_addr;
		dst = buf;
	} else {
		return -EINVAL;
	}

	while (lli->link_addr) {
		size_t block_size = MAX_DMA_PACKET_SIZE;
		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;

		/* If we are on the next-to-final block and there will
		 * be less than half a DMA packet left for the last
		 * block, then we want to make this block a little
		 * smaller to balance the sizes. This is meant to
		 * avoid too small transfers if the buffer size is
		 * (MAX_DMA_PACKET_SIZE*N + 1) */
		if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
			block_size = MAX_DMA_PACKET_SIZE/2;

		s -= block_size;
		lli->src_addr = src;
		lli->dst_addr = dst;

		lli = coh901318_lli_next(lli);

		if (dir == DMA_TO_DEVICE)
			src += block_size;
		else if (dir == DMA_FROM_DEVICE)
			dst += block_size;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	return 0;
}

/*
 * Fill a pre-allocated lli chain from a scatterlist.  Each sg entry is
 * split into MAX_DMA_PACKET_SIZE pieces; chained pieces get
 * @ctrl_chained, the last piece of an entry gets @ctrl (or @ctrl_last
 * when @ctrl is 0, and always @ctrl_last for the final entry).  The
 * device-side address is fixed by @dir.  Returns 0, or -EINVAL on a
 * NULL chain or bad direction.
 */
int
coh901318_lli_fill_sg(struct coh901318_pool *pool,
		      struct coh901318_lli *lli,
		      struct scatterlist *sgl, unsigned int nents,
		      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
		      u32 ctrl_last,
		      enum dma_data_direction dir, u32 ctrl_irq_mask)
{
	int i;
	struct scatterlist *sg;
	u32 ctrl_sg;
	dma_addr_t src = 0;
	dma_addr_t dst = 0;
	u32 bytes_to_transfer;
	u32 elem_size;

	/*
	 * Reject a NULL chain before taking pool->lock: the old
	 * "goto err" here reached a spin_unlock() without a matching
	 * spin_lock().
	 */
	if (lli == NULL)
		return -EINVAL;

	spin_lock(&pool->lock);

	if (dir == DMA_TO_DEVICE)
		dst = dev_addr;
	else if (dir == DMA_FROM_DEVICE)
		src = dev_addr;
	else
		goto err;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_chain(sg)) {
			/* sg continues to the next sg-element don't
			 * send ctrl_finish until the last
			 * sg-element in the chain */
			ctrl_sg = ctrl_chained;
		} else if (i == nents - 1)
			ctrl_sg = ctrl_last;
		else
			ctrl_sg = ctrl ? ctrl : ctrl_last;

		if (dir == DMA_TO_DEVICE)
			/* increment source address */
			src = sg_phys(sg);
		else
			/* increment destination address */
			dst = sg_phys(sg);

		bytes_to_transfer = sg_dma_len(sg);

		while (bytes_to_transfer) {
			u32 val;

			if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
				elem_size = MAX_DMA_PACKET_SIZE;
				val = ctrl_chained;
			} else {
				elem_size = bytes_to_transfer;
				val = ctrl_sg;
			}

			lli->control = val | elem_size;
			lli->src_addr = src;
			lli->dst_addr = dst;

			if (dir == DMA_FROM_DEVICE)
				dst += elem_size;
			else
				src += elem_size;

			BUG_ON(lli->link_addr & 3);

			bytes_to_transfer -= elem_size;
			lli = coh901318_lli_next(lli);
		}

	}
	spin_unlock(&pool->lock);

	return 0;
 err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}
gpl-2.0
floft/rpi-linux
drivers/char/pcmcia/cm4000_cs.c
4401
49392
/* * A driver for the PCMCIA Smartcard Reader "Omnikey CardMan Mobile 4000" * * cm4000_cs.c support.linux@omnikey.com * * Tue Oct 23 11:32:43 GMT 2001 herp - cleaned up header files * Sun Jan 20 10:11:15 MET 2002 herp - added modversion header files * Thu Nov 14 16:34:11 GMT 2002 mh - added PPS functionality * Tue Nov 19 16:36:27 GMT 2002 mh - added SUSPEND/RESUME functionailty * Wed Jul 28 12:55:01 CEST 2004 mh - kernel 2.6 adjustments * * current version: 2.4.0gm4 * * (C) 2000,2001,2002,2003,2004 Omnikey AG * * (C) 2005-2006 Harald Welte <laforge@gnumonks.org> * - Adhere to Kernel CodingStyle * - Port to 2.6.13 "new" style PCMCIA * - Check for copy_{from,to}_user return values * - Use nonseekable_open() * - add class interface for udev device creation * * All rights reserved. Licensed under dual BSD/GPL license. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/bitrev.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <linux/cm4000_cs.h> /* #define ATR_CSUM */ #define reader_to_dev(x) (&x->p_dev->dev) /* n (debug level) is ignored */ /* additional debug output may be enabled by re-compiling with * CM4000_DEBUG set */ /* #define CM4000_DEBUG */ #define DEBUGP(n, rdr, x, args...) 
do { \ dev_dbg(reader_to_dev(rdr), "%s:" x, \ __func__ , ## args); \ } while (0) static DEFINE_MUTEX(cmm_mutex); #define T_1SEC (HZ) #define T_10MSEC msecs_to_jiffies(10) #define T_20MSEC msecs_to_jiffies(20) #define T_40MSEC msecs_to_jiffies(40) #define T_50MSEC msecs_to_jiffies(50) #define T_100MSEC msecs_to_jiffies(100) #define T_500MSEC msecs_to_jiffies(500) static void cm4000_release(struct pcmcia_device *link); static int major; /* major number we get from the kernel */ /* note: the first state has to have number 0 always */ #define M_FETCH_ATR 0 #define M_TIMEOUT_WAIT 1 #define M_READ_ATR_LEN 2 #define M_READ_ATR 3 #define M_ATR_PRESENT 4 #define M_BAD_CARD 5 #define M_CARDOFF 6 #define LOCK_IO 0 #define LOCK_MONITOR 1 #define IS_AUTOPPS_ACT 6 #define IS_PROCBYTE_PRESENT 7 #define IS_INVREV 8 #define IS_ANY_T0 9 #define IS_ANY_T1 10 #define IS_ATR_PRESENT 11 #define IS_ATR_VALID 12 #define IS_CMM_ABSENT 13 #define IS_BAD_LENGTH 14 #define IS_BAD_CSUM 15 #define IS_BAD_CARD 16 #define REG_FLAGS0(x) (x + 0) #define REG_FLAGS1(x) (x + 1) #define REG_NUM_BYTES(x) (x + 2) #define REG_BUF_ADDR(x) (x + 3) #define REG_BUF_DATA(x) (x + 4) #define REG_NUM_SEND(x) (x + 5) #define REG_BAUDRATE(x) (x + 6) #define REG_STOPBITS(x) (x + 7) struct cm4000_dev { struct pcmcia_device *p_dev; unsigned char atr[MAX_ATR]; unsigned char rbuf[512]; unsigned char sbuf[512]; wait_queue_head_t devq; /* when removing cardman must not be zeroed! */ wait_queue_head_t ioq; /* if IO is locked, wait on this Q */ wait_queue_head_t atrq; /* wait for ATR valid */ wait_queue_head_t readq; /* used by write to wake blk.read */ /* warning: do not move this fields. * initialising to zero depends on it - see ZERO_DEV below. */ unsigned char atr_csum; unsigned char atr_len_retry; unsigned short atr_len; unsigned short rlen; /* bytes avail. after write */ unsigned short rpos; /* latest read pos. 
write zeroes */ unsigned char procbyte; /* T=0 procedure byte */ unsigned char mstate; /* state of card monitor */ unsigned char cwarn; /* slow down warning */ unsigned char flags0; /* cardman IO-flags 0 */ unsigned char flags1; /* cardman IO-flags 1 */ unsigned int mdelay; /* variable monitor speeds, in jiffies */ unsigned int baudv; /* baud value for speed */ unsigned char ta1; unsigned char proto; /* T=0, T=1, ... */ unsigned long flags; /* lock+flags (MONITOR,IO,ATR) * for concurrent access */ unsigned char pts[4]; struct timer_list timer; /* used to keep monitor running */ int monitor_running; }; #define ZERO_DEV(dev) \ memset(&dev->atr_csum,0, \ sizeof(struct cm4000_dev) - \ offsetof(struct cm4000_dev, atr_csum)) static struct pcmcia_device *dev_table[CM4000_MAX_DEV]; static struct class *cmm_class; /* This table doesn't use spaces after the comma between fields and thus * violates CodingStyle. However, I don't really think wrapping it around will * make it any clearer to read -HW */ static unsigned char fi_di_table[10][14] = { /*FI 00 01 02 03 04 05 06 07 08 09 10 11 12 13 */ /*DI */ /* 0 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11}, /* 1 */ {0x01,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x91,0x11,0x11,0x11,0x11}, /* 2 */ {0x02,0x12,0x22,0x32,0x11,0x11,0x11,0x11,0x11,0x92,0xA2,0xB2,0x11,0x11}, /* 3 */ {0x03,0x13,0x23,0x33,0x43,0x53,0x63,0x11,0x11,0x93,0xA3,0xB3,0xC3,0xD3}, /* 4 */ {0x04,0x14,0x24,0x34,0x44,0x54,0x64,0x11,0x11,0x94,0xA4,0xB4,0xC4,0xD4}, /* 5 */ {0x00,0x15,0x25,0x35,0x45,0x55,0x65,0x11,0x11,0x95,0xA5,0xB5,0xC5,0xD5}, /* 6 */ {0x06,0x16,0x26,0x36,0x46,0x56,0x66,0x11,0x11,0x96,0xA6,0xB6,0xC6,0xD6}, /* 7 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11}, /* 8 */ {0x08,0x11,0x28,0x38,0x48,0x58,0x68,0x11,0x11,0x98,0xA8,0xB8,0xC8,0xD8}, /* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9} }; #ifndef CM4000_DEBUG #define xoutb outb #define xinb inb #else static inline 
void xoutb(unsigned char val, unsigned short port) { pr_debug("outb(val=%.2x,port=%.4x)\n", val, port); outb(val, port); } static inline unsigned char xinb(unsigned short port) { unsigned char val; val = inb(port); pr_debug("%.2x=inb(%.4x)\n", val, port); return val; } #endif static inline unsigned char invert_revert(unsigned char ch) { return bitrev8(~ch); } static void str_invert_revert(unsigned char *b, int len) { int i; for (i = 0; i < len; i++) b[i] = invert_revert(b[i]); } #define ATRLENCK(dev,pos) \ if (pos>=dev->atr_len || pos>=MAX_ATR) \ goto return_0; static unsigned int calc_baudv(unsigned char fidi) { unsigned int wcrcf, wbrcf, fi_rfu, di_rfu; fi_rfu = 372; di_rfu = 1; /* FI */ switch ((fidi >> 4) & 0x0F) { case 0x00: wcrcf = 372; break; case 0x01: wcrcf = 372; break; case 0x02: wcrcf = 558; break; case 0x03: wcrcf = 744; break; case 0x04: wcrcf = 1116; break; case 0x05: wcrcf = 1488; break; case 0x06: wcrcf = 1860; break; case 0x07: wcrcf = fi_rfu; break; case 0x08: wcrcf = fi_rfu; break; case 0x09: wcrcf = 512; break; case 0x0A: wcrcf = 768; break; case 0x0B: wcrcf = 1024; break; case 0x0C: wcrcf = 1536; break; case 0x0D: wcrcf = 2048; break; default: wcrcf = fi_rfu; break; } /* DI */ switch (fidi & 0x0F) { case 0x00: wbrcf = di_rfu; break; case 0x01: wbrcf = 1; break; case 0x02: wbrcf = 2; break; case 0x03: wbrcf = 4; break; case 0x04: wbrcf = 8; break; case 0x05: wbrcf = 16; break; case 0x06: wbrcf = 32; break; case 0x07: wbrcf = di_rfu; break; case 0x08: wbrcf = 12; break; case 0x09: wbrcf = 20; break; default: wbrcf = di_rfu; break; } return (wcrcf / wbrcf); } static unsigned short io_read_num_rec_bytes(unsigned int iobase, unsigned short *s) { unsigned short tmp; tmp = *s = 0; do { *s = tmp; tmp = inb(REG_NUM_BYTES(iobase)) | (inb(REG_FLAGS0(iobase)) & 4 ? 
0x100 : 0); } while (tmp != *s); return *s; } static int parse_atr(struct cm4000_dev *dev) { unsigned char any_t1, any_t0; unsigned char ch, ifno; int ix, done; DEBUGP(3, dev, "-> parse_atr: dev->atr_len = %i\n", dev->atr_len); if (dev->atr_len < 3) { DEBUGP(5, dev, "parse_atr: atr_len < 3\n"); return 0; } if (dev->atr[0] == 0x3f) set_bit(IS_INVREV, &dev->flags); else clear_bit(IS_INVREV, &dev->flags); ix = 1; ifno = 1; ch = dev->atr[1]; dev->proto = 0; /* XXX PROTO */ any_t1 = any_t0 = done = 0; dev->ta1 = 0x11; /* defaults to 9600 baud */ do { if (ifno == 1 && (ch & 0x10)) { /* read first interface byte and TA1 is present */ dev->ta1 = dev->atr[2]; DEBUGP(5, dev, "Card says FiDi is 0x%.2x\n", dev->ta1); ifno++; } else if ((ifno == 2) && (ch & 0x10)) { /* TA(2) */ dev->ta1 = 0x11; ifno++; } DEBUGP(5, dev, "Yi=%.2x\n", ch & 0xf0); ix += ((ch & 0x10) >> 4) /* no of int.face chars */ +((ch & 0x20) >> 5) + ((ch & 0x40) >> 6) + ((ch & 0x80) >> 7); /* ATRLENCK(dev,ix); */ if (ch & 0x80) { /* TDi */ ch = dev->atr[ix]; if ((ch & 0x0f)) { any_t1 = 1; DEBUGP(5, dev, "card is capable of T=1\n"); } else { any_t0 = 1; DEBUGP(5, dev, "card is capable of T=0\n"); } } else done = 1; } while (!done); DEBUGP(5, dev, "ix=%d noHist=%d any_t1=%d\n", ix, dev->atr[1] & 15, any_t1); if (ix + 1 + (dev->atr[1] & 0x0f) + any_t1 != dev->atr_len) { DEBUGP(5, dev, "length error\n"); return 0; } if (any_t0) set_bit(IS_ANY_T0, &dev->flags); if (any_t1) { /* compute csum */ dev->atr_csum = 0; #ifdef ATR_CSUM for (i = 1; i < dev->atr_len; i++) dev->atr_csum ^= dev->atr[i]; if (dev->atr_csum) { set_bit(IS_BAD_CSUM, &dev->flags); DEBUGP(5, dev, "bad checksum\n"); goto return_0; } #endif if (any_t0 == 0) dev->proto = 1; /* XXX PROTO */ set_bit(IS_ANY_T1, &dev->flags); } return 1; } struct card_fixup { char atr[12]; u_int8_t atr_len; u_int8_t stopbits; }; static struct card_fixup card_fixups[] = { { /* ACOS */ .atr = { 0x3b, 0xb3, 0x11, 0x00, 0x00, 0x41, 0x01 }, .atr_len = 7, .stopbits = 0x03, }, { /* 
Motorola */ .atr = {0x3b, 0x76, 0x13, 0x00, 0x00, 0x80, 0x62, 0x07, 0x41, 0x81, 0x81 }, .atr_len = 11, .stopbits = 0x04, }, }; static void set_cardparameter(struct cm4000_dev *dev) { int i; unsigned int iobase = dev->p_dev->resource[0]->start; u_int8_t stopbits = 0x02; /* ISO default */ DEBUGP(3, dev, "-> set_cardparameter\n"); dev->flags1 = dev->flags1 | (((dev->baudv - 1) & 0x0100) >> 8); xoutb(dev->flags1, REG_FLAGS1(iobase)); DEBUGP(5, dev, "flags1 = 0x%02x\n", dev->flags1); /* set baudrate */ xoutb((unsigned char)((dev->baudv - 1) & 0xFF), REG_BAUDRATE(iobase)); DEBUGP(5, dev, "baudv = %i -> write 0x%02x\n", dev->baudv, ((dev->baudv - 1) & 0xFF)); /* set stopbits */ for (i = 0; i < ARRAY_SIZE(card_fixups); i++) { if (!memcmp(dev->atr, card_fixups[i].atr, card_fixups[i].atr_len)) stopbits = card_fixups[i].stopbits; } xoutb(stopbits, REG_STOPBITS(iobase)); DEBUGP(3, dev, "<- set_cardparameter\n"); } static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) { unsigned long tmp, i; unsigned short num_bytes_read; unsigned char pts_reply[4]; ssize_t rc; unsigned int iobase = dev->p_dev->resource[0]->start; rc = 0; DEBUGP(3, dev, "-> set_protocol\n"); DEBUGP(5, dev, "ptsreq->Protocol = 0x%.8x, ptsreq->Flags=0x%.8x, " "ptsreq->pts1=0x%.2x, ptsreq->pts2=0x%.2x, " "ptsreq->pts3=0x%.2x\n", (unsigned int)ptsreq->protocol, (unsigned int)ptsreq->flags, ptsreq->pts1, ptsreq->pts2, ptsreq->pts3); /* Fill PTS structure */ dev->pts[0] = 0xff; dev->pts[1] = 0x00; tmp = ptsreq->protocol; while ((tmp = (tmp >> 1)) > 0) dev->pts[1]++; dev->proto = dev->pts[1]; /* Set new protocol */ dev->pts[1] = (0x01 << 4) | (dev->pts[1]); /* Correct Fi/Di according to CM4000 Fi/Di table */ DEBUGP(5, dev, "Ta(1) from ATR is 0x%.2x\n", dev->ta1); /* set Fi/Di according to ATR TA(1) */ dev->pts[2] = fi_di_table[dev->ta1 & 0x0F][(dev->ta1 >> 4) & 0x0F]; /* Calculate PCK character */ dev->pts[3] = dev->pts[0] ^ dev->pts[1] ^ dev->pts[2]; DEBUGP(5, dev, "pts0=%.2x, pts1=%.2x, pts2=%.2x, 
pts3=%.2x\n", dev->pts[0], dev->pts[1], dev->pts[2], dev->pts[3]); /* check card convention */ if (test_bit(IS_INVREV, &dev->flags)) str_invert_revert(dev->pts, 4); /* reset SM */ xoutb(0x80, REG_FLAGS0(iobase)); /* Enable access to the message buffer */ DEBUGP(5, dev, "Enable access to the messages buffer\n"); dev->flags1 = 0x20 /* T_Active */ | (test_bit(IS_INVREV, &dev->flags) ? 0x02 : 0x00) /* inv parity */ | ((dev->baudv >> 8) & 0x01); /* MSB-baud */ xoutb(dev->flags1, REG_FLAGS1(iobase)); DEBUGP(5, dev, "Enable message buffer -> flags1 = 0x%.2x\n", dev->flags1); /* write challenge to the buffer */ DEBUGP(5, dev, "Write challenge to buffer: "); for (i = 0; i < 4; i++) { xoutb(i, REG_BUF_ADDR(iobase)); xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */ #ifdef CM4000_DEBUG pr_debug("0x%.2x ", dev->pts[i]); } pr_debug("\n"); #else } #endif /* set number of bytes to write */ DEBUGP(5, dev, "Set number of bytes to write\n"); xoutb(0x04, REG_NUM_SEND(iobase)); /* Trigger CARDMAN CONTROLLER */ xoutb(0x50, REG_FLAGS0(iobase)); /* Monitor progress */ /* wait for xmit done */ DEBUGP(5, dev, "Waiting for NumRecBytes getting valid\n"); for (i = 0; i < 100; i++) { if (inb(REG_FLAGS0(iobase)) & 0x08) { DEBUGP(5, dev, "NumRecBytes is valid\n"); break; } mdelay(10); } if (i == 100) { DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting " "valid\n"); rc = -EIO; goto exit_setprotocol; } DEBUGP(5, dev, "Reading NumRecBytes\n"); for (i = 0; i < 100; i++) { io_read_num_rec_bytes(iobase, &num_bytes_read); if (num_bytes_read >= 4) { DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read); break; } mdelay(10); } /* check whether it is a short PTS reply? 
*/ if (num_bytes_read == 3) i = 0; if (i == 100) { DEBUGP(5, dev, "Timeout reading num_bytes_read\n"); rc = -EIO; goto exit_setprotocol; } DEBUGP(5, dev, "Reset the CARDMAN CONTROLLER\n"); xoutb(0x80, REG_FLAGS0(iobase)); /* Read PPS reply */ DEBUGP(5, dev, "Read PPS reply\n"); for (i = 0; i < num_bytes_read; i++) { xoutb(i, REG_BUF_ADDR(iobase)); pts_reply[i] = inb(REG_BUF_DATA(iobase)); } #ifdef CM4000_DEBUG DEBUGP(2, dev, "PTSreply: "); for (i = 0; i < num_bytes_read; i++) { pr_debug("0x%.2x ", pts_reply[i]); } pr_debug("\n"); #endif /* CM4000_DEBUG */ DEBUGP(5, dev, "Clear Tactive in Flags1\n"); xoutb(0x20, REG_FLAGS1(iobase)); /* Compare ptsreq and ptsreply */ if ((dev->pts[0] == pts_reply[0]) && (dev->pts[1] == pts_reply[1]) && (dev->pts[2] == pts_reply[2]) && (dev->pts[3] == pts_reply[3])) { /* setcardparameter according to PPS */ dev->baudv = calc_baudv(dev->pts[2]); set_cardparameter(dev); } else if ((dev->pts[0] == pts_reply[0]) && ((dev->pts[1] & 0xef) == pts_reply[1]) && ((pts_reply[0] ^ pts_reply[1]) == pts_reply[2])) { /* short PTS reply, set card parameter to default values */ dev->baudv = calc_baudv(0x11); set_cardparameter(dev); } else rc = -EIO; exit_setprotocol: DEBUGP(3, dev, "<- set_protocol\n"); return rc; } static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev) { /* note: statemachine is assumed to be reset */ if (inb(REG_FLAGS0(iobase)) & 8) { clear_bit(IS_ATR_VALID, &dev->flags); set_bit(IS_CMM_ABSENT, &dev->flags); return 0; /* detect CMM = 1 -> failure */ } /* xoutb(0x40, REG_FLAGS1(iobase)); detectCMM */ xoutb(dev->flags1 | 0x40, REG_FLAGS1(iobase)); if ((inb(REG_FLAGS0(iobase)) & 8) == 0) { clear_bit(IS_ATR_VALID, &dev->flags); set_bit(IS_CMM_ABSENT, &dev->flags); return 0; /* detect CMM=0 -> failure */ } /* clear detectCMM again by restoring original flags1 */ xoutb(dev->flags1, REG_FLAGS1(iobase)); return 1; } static void terminate_monitor(struct cm4000_dev *dev) { /* tell the monitor to stop and wait until * it 
terminates. */ DEBUGP(3, dev, "-> terminate_monitor\n"); wait_event_interruptible(dev->devq, test_and_set_bit(LOCK_MONITOR, (void *)&dev->flags)); /* now, LOCK_MONITOR has been set. * allow a last cycle in the monitor. * the monitor will indicate that it has * finished by clearing this bit. */ DEBUGP(5, dev, "Now allow last cycle of monitor!\n"); while (test_bit(LOCK_MONITOR, (void *)&dev->flags)) msleep(25); DEBUGP(5, dev, "Delete timer\n"); del_timer_sync(&dev->timer); #ifdef CM4000_DEBUG dev->monitor_running = 0; #endif DEBUGP(3, dev, "<- terminate_monitor\n"); } /* * monitor the card every 50msec. as a side-effect, retrieve the * atr once a card is inserted. another side-effect of retrieving the * atr is that the card will be powered on, so there is no need to * power on the card explictely from the application: the driver * is already doing that for you. */ static void monitor_card(unsigned long p) { struct cm4000_dev *dev = (struct cm4000_dev *) p; unsigned int iobase = dev->p_dev->resource[0]->start; unsigned short s; struct ptsreq ptsreq; int i, atrc; DEBUGP(7, dev, "-> monitor_card\n"); /* if someone has set the lock for us: we're done! */ if (test_and_set_bit(LOCK_MONITOR, &dev->flags)) { DEBUGP(4, dev, "About to stop monitor\n"); /* no */ dev->rlen = dev->rpos = dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; dev->mstate = M_FETCH_ATR; clear_bit(LOCK_MONITOR, &dev->flags); /* close et al. are sleeping on devq, so wake it */ wake_up_interruptible(&dev->devq); DEBUGP(2, dev, "<- monitor_card (we are done now)\n"); return; } /* try to lock io: if it is already locked, just add another timer */ if (test_and_set_bit(LOCK_IO, (void *)&dev->flags)) { DEBUGP(4, dev, "Couldn't get IO lock\n"); goto return_with_timer; } /* is a card/a reader inserted at all ? */ dev->flags0 = xinb(REG_FLAGS0(iobase)); DEBUGP(7, dev, "dev->flags0 = 0x%2x\n", dev->flags0); DEBUGP(7, dev, "smartcard present: %s\n", dev->flags0 & 1 ? 
"yes" : "no"); DEBUGP(7, dev, "cardman present: %s\n", dev->flags0 == 0xff ? "no" : "yes"); if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ || dev->flags0 == 0xff) { /* no cardman inserted */ /* no */ dev->rlen = dev->rpos = dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; dev->mstate = M_FETCH_ATR; dev->flags &= 0x000000ff; /* only keep IO and MONITOR locks */ if (dev->flags0 == 0xff) { DEBUGP(4, dev, "set IS_CMM_ABSENT bit\n"); set_bit(IS_CMM_ABSENT, &dev->flags); } else if (test_bit(IS_CMM_ABSENT, &dev->flags)) { DEBUGP(4, dev, "clear IS_CMM_ABSENT bit " "(card is removed)\n"); clear_bit(IS_CMM_ABSENT, &dev->flags); } goto release_io; } else if ((dev->flags0 & 1) && test_bit(IS_CMM_ABSENT, &dev->flags)) { /* cardman and card present but cardman was absent before * (after suspend with inserted card) */ DEBUGP(4, dev, "clear IS_CMM_ABSENT bit (card is inserted)\n"); clear_bit(IS_CMM_ABSENT, &dev->flags); } if (test_bit(IS_ATR_VALID, &dev->flags) == 1) { DEBUGP(7, dev, "believe ATR is already valid (do nothing)\n"); goto release_io; } switch (dev->mstate) { unsigned char flags0; case M_CARDOFF: DEBUGP(4, dev, "M_CARDOFF\n"); flags0 = inb(REG_FLAGS0(iobase)); if (flags0 & 0x02) { /* wait until Flags0 indicate power is off */ dev->mdelay = T_10MSEC; } else { /* Flags0 indicate power off and no card inserted now; * Reset CARDMAN CONTROLLER */ xoutb(0x80, REG_FLAGS0(iobase)); /* prepare for fetching ATR again: after card off ATR * is read again automatically */ dev->rlen = dev->rpos = dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; dev->mstate = M_FETCH_ATR; /* minimal gap between CARDOFF and read ATR is 50msec */ dev->mdelay = T_50MSEC; } break; case M_FETCH_ATR: DEBUGP(4, dev, "M_FETCH_ATR\n"); xoutb(0x80, REG_FLAGS0(iobase)); DEBUGP(4, dev, "Reset BAUDV to 9600\n"); dev->baudv = 0x173; /* 9600 */ xoutb(0x02, REG_STOPBITS(iobase)); /* stopbits=2 */ xoutb(0x73, REG_BAUDRATE(iobase)); /* baud value */ xoutb(0x21, REG_FLAGS1(iobase)); /* T_Active=1, baud 
value */ /* warm start vs. power on: */ xoutb(dev->flags0 & 2 ? 0x46 : 0x44, REG_FLAGS0(iobase)); dev->mdelay = T_40MSEC; dev->mstate = M_TIMEOUT_WAIT; break; case M_TIMEOUT_WAIT: DEBUGP(4, dev, "M_TIMEOUT_WAIT\n"); /* numRecBytes */ io_read_num_rec_bytes(iobase, &dev->atr_len); dev->mdelay = T_10MSEC; dev->mstate = M_READ_ATR_LEN; break; case M_READ_ATR_LEN: DEBUGP(4, dev, "M_READ_ATR_LEN\n"); /* infinite loop possible, since there is no timeout */ #define MAX_ATR_LEN_RETRY 100 if (dev->atr_len == io_read_num_rec_bytes(iobase, &s)) { if (dev->atr_len_retry++ >= MAX_ATR_LEN_RETRY) { /* + XX msec */ dev->mdelay = T_10MSEC; dev->mstate = M_READ_ATR; } } else { dev->atr_len = s; dev->atr_len_retry = 0; /* set new timeout */ } DEBUGP(4, dev, "Current ATR_LEN = %i\n", dev->atr_len); break; case M_READ_ATR: DEBUGP(4, dev, "M_READ_ATR\n"); xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ for (i = 0; i < dev->atr_len; i++) { xoutb(i, REG_BUF_ADDR(iobase)); dev->atr[i] = inb(REG_BUF_DATA(iobase)); } /* Deactivate T_Active flags */ DEBUGP(4, dev, "Deactivate T_Active flags\n"); dev->flags1 = 0x01; xoutb(dev->flags1, REG_FLAGS1(iobase)); /* atr is present (which doesn't mean it's valid) */ set_bit(IS_ATR_PRESENT, &dev->flags); if (dev->atr[0] == 0x03) str_invert_revert(dev->atr, dev->atr_len); atrc = parse_atr(dev); if (atrc == 0) { /* atr invalid */ dev->mdelay = 0; dev->mstate = M_BAD_CARD; } else { dev->mdelay = T_50MSEC; dev->mstate = M_ATR_PRESENT; set_bit(IS_ATR_VALID, &dev->flags); } if (test_bit(IS_ATR_VALID, &dev->flags) == 1) { DEBUGP(4, dev, "monitor_card: ATR valid\n"); /* if ta1 == 0x11, no PPS necessary (default values) */ /* do not do PPS with multi protocol cards */ if ((test_bit(IS_AUTOPPS_ACT, &dev->flags) == 0) && (dev->ta1 != 0x11) && !(test_bit(IS_ANY_T0, &dev->flags) && test_bit(IS_ANY_T1, &dev->flags))) { DEBUGP(4, dev, "Perform AUTOPPS\n"); set_bit(IS_AUTOPPS_ACT, &dev->flags); ptsreq.protocol = (0x01 << dev->proto); ptsreq.flags = 0x01; ptsreq.pts1 = 
0x00; ptsreq.pts2 = 0x00; ptsreq.pts3 = 0x00; if (set_protocol(dev, &ptsreq) == 0) { DEBUGP(4, dev, "AUTOPPS ret SUCC\n"); clear_bit(IS_AUTOPPS_ACT, &dev->flags); wake_up_interruptible(&dev->atrq); } else { DEBUGP(4, dev, "AUTOPPS failed: " "repower using defaults\n"); /* prepare for repowering */ clear_bit(IS_ATR_PRESENT, &dev->flags); clear_bit(IS_ATR_VALID, &dev->flags); dev->rlen = dev->rpos = dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; dev->mstate = M_FETCH_ATR; dev->mdelay = T_50MSEC; } } else { /* for cards which use slightly different * params (extra guard time) */ set_cardparameter(dev); if (test_bit(IS_AUTOPPS_ACT, &dev->flags) == 1) DEBUGP(4, dev, "AUTOPPS already active " "2nd try:use default values\n"); if (dev->ta1 == 0x11) DEBUGP(4, dev, "No AUTOPPS necessary " "TA(1)==0x11\n"); if (test_bit(IS_ANY_T0, &dev->flags) && test_bit(IS_ANY_T1, &dev->flags)) DEBUGP(4, dev, "Do NOT perform AUTOPPS " "with multiprotocol cards\n"); clear_bit(IS_AUTOPPS_ACT, &dev->flags); wake_up_interruptible(&dev->atrq); } } else { DEBUGP(4, dev, "ATR invalid\n"); wake_up_interruptible(&dev->atrq); } break; case M_BAD_CARD: DEBUGP(4, dev, "M_BAD_CARD\n"); /* slow down warning, but prompt immediately after insertion */ if (dev->cwarn == 0 || dev->cwarn == 10) { set_bit(IS_BAD_CARD, &dev->flags); dev_warn(&dev->p_dev->dev, MODULE_NAME ": "); if (test_bit(IS_BAD_CSUM, &dev->flags)) { DEBUGP(4, dev, "ATR checksum (0x%.2x, should " "be zero) failed\n", dev->atr_csum); } #ifdef CM4000_DEBUG else if (test_bit(IS_BAD_LENGTH, &dev->flags)) { DEBUGP(4, dev, "ATR length error\n"); } else { DEBUGP(4, dev, "card damaged or wrong way " "inserted\n"); } #endif dev->cwarn = 0; wake_up_interruptible(&dev->atrq); /* wake open */ } dev->cwarn++; dev->mdelay = T_100MSEC; dev->mstate = M_FETCH_ATR; break; default: DEBUGP(7, dev, "Unknown action\n"); break; /* nothing */ } release_io: DEBUGP(7, dev, "release_io\n"); clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); /* 
whoever needs IO */ return_with_timer: DEBUGP(7, dev, "<- monitor_card (returns with timer)\n"); mod_timer(&dev->timer, jiffies + dev->mdelay); clear_bit(LOCK_MONITOR, &dev->flags); } /* Interface to userland (file_operations) */ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count, loff_t *ppos) { struct cm4000_dev *dev = filp->private_data; unsigned int iobase = dev->p_dev->resource[0]->start; ssize_t rc; int i, j, k; DEBUGP(2, dev, "-> cmm_read(%s,%d)\n", current->comm, current->pid); if (count == 0) /* according to manpage */ return 0; if (!pcmcia_dev_present(dev->p_dev) || /* device removed */ test_bit(IS_CMM_ABSENT, &dev->flags)) return -ENODEV; if (test_bit(IS_BAD_CSUM, &dev->flags)) return -EIO; /* also see the note about this in cmm_write */ if (wait_event_interruptible (dev->atrq, ((filp->f_flags & O_NONBLOCK) || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; return -ERESTARTSYS; } if (test_bit(IS_ATR_VALID, &dev->flags) == 0) return -EIO; /* this one implements blocking IO */ if (wait_event_interruptible (dev->readq, ((filp->f_flags & O_NONBLOCK) || (dev->rpos < dev->rlen)))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; return -ERESTARTSYS; } /* lock io */ if (wait_event_interruptible (dev->ioq, ((filp->f_flags & O_NONBLOCK) || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; return -ERESTARTSYS; } rc = 0; dev->flags0 = inb(REG_FLAGS0(iobase)); if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ || dev->flags0 == 0xff) { /* no cardman inserted */ clear_bit(IS_ATR_VALID, &dev->flags); if (dev->flags0 & 1) { set_bit(IS_CMM_ABSENT, &dev->flags); rc = -ENODEV; } else { rc = -EIO; } goto release_io; } DEBUGP(4, dev, "begin read answer\n"); j = min(count, (size_t)(dev->rlen - dev->rpos)); k = dev->rpos; if (k + j > 255) j = 256 - k; DEBUGP(4, dev, "read1 j=%d\n", j); for (i = 0; i < j; i++) { xoutb(k++, 
REG_BUF_ADDR(iobase)); dev->rbuf[i] = xinb(REG_BUF_DATA(iobase)); } j = min(count, (size_t)(dev->rlen - dev->rpos)); if (k + j > 255) { DEBUGP(4, dev, "read2 j=%d\n", j); dev->flags1 |= 0x10; /* MSB buf addr set */ xoutb(dev->flags1, REG_FLAGS1(iobase)); for (; i < j; i++) { xoutb(k++, REG_BUF_ADDR(iobase)); dev->rbuf[i] = xinb(REG_BUF_DATA(iobase)); } } if (dev->proto == 0 && count > dev->rlen - dev->rpos && i) { DEBUGP(4, dev, "T=0 and count > buffer\n"); dev->rbuf[i] = dev->rbuf[i - 1]; dev->rbuf[i - 1] = dev->procbyte; j++; } count = j; dev->rpos = dev->rlen + 1; /* Clear T1Active */ DEBUGP(4, dev, "Clear T1Active\n"); dev->flags1 &= 0xdf; xoutb(dev->flags1, REG_FLAGS1(iobase)); xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */ /* last check before exit */ if (!io_detect_cm4000(iobase, dev)) { rc = -ENODEV; goto release_io; } if (test_bit(IS_INVREV, &dev->flags) && count > 0) str_invert_revert(dev->rbuf, count); if (copy_to_user(buf, dev->rbuf, count)) rc = -EFAULT; release_io: clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); DEBUGP(2, dev, "<- cmm_read returns: rc = %Zi\n", (rc < 0 ? rc : count)); return rc < 0 ? rc : count; } static ssize_t cmm_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct cm4000_dev *dev = filp->private_data; unsigned int iobase = dev->p_dev->resource[0]->start; unsigned short s; unsigned char tmp; unsigned char infolen; unsigned char sendT0; unsigned short nsend; unsigned short nr; ssize_t rc; int i; DEBUGP(2, dev, "-> cmm_write(%s,%d)\n", current->comm, current->pid); if (count == 0) /* according to manpage */ return 0; if (dev->proto == 0 && count < 4) { /* T0 must have at least 4 bytes */ DEBUGP(4, dev, "T0 short write\n"); return -EIO; } nr = count & 0x1ff; /* max bytes to write */ sendT0 = dev->proto ? 0 : nr > 5 ? 
0x08 : 0; if (!pcmcia_dev_present(dev->p_dev) || /* device removed */ test_bit(IS_CMM_ABSENT, &dev->flags)) return -ENODEV; if (test_bit(IS_BAD_CSUM, &dev->flags)) { DEBUGP(4, dev, "bad csum\n"); return -EIO; } /* * wait for atr to become valid. * note: it is important to lock this code. if we dont, the monitor * could be run between test_bit and the call to sleep on the * atr-queue. if *then* the monitor detects atr valid, it will wake up * any process on the atr-queue, *but* since we have been interrupted, * we do not yet sleep on this queue. this would result in a missed * wake_up and the calling process would sleep forever (until * interrupted). also, do *not* restore_flags before sleep_on, because * this could result in the same situation! */ if (wait_event_interruptible (dev->atrq, ((filp->f_flags & O_NONBLOCK) || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; return -ERESTARTSYS; } if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { /* invalid atr */ DEBUGP(4, dev, "invalid ATR\n"); return -EIO; } /* lock io */ if (wait_event_interruptible (dev->ioq, ((filp->f_flags & O_NONBLOCK) || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; return -ERESTARTSYS; } if (copy_from_user(dev->sbuf, buf, ((count > 512) ? 
512 : count))) return -EFAULT; rc = 0; dev->flags0 = inb(REG_FLAGS0(iobase)); if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ || dev->flags0 == 0xff) { /* no cardman inserted */ clear_bit(IS_ATR_VALID, &dev->flags); if (dev->flags0 & 1) { set_bit(IS_CMM_ABSENT, &dev->flags); rc = -ENODEV; } else { DEBUGP(4, dev, "IO error\n"); rc = -EIO; } goto release_io; } xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ if (!io_detect_cm4000(iobase, dev)) { rc = -ENODEV; goto release_io; } /* reflect T=0 send/read mode in flags1 */ dev->flags1 |= (sendT0); set_cardparameter(dev); /* dummy read, reset flag procedure received */ tmp = inb(REG_FLAGS1(iobase)); dev->flags1 = 0x20 /* T_Active */ | (sendT0) | (test_bit(IS_INVREV, &dev->flags) ? 2 : 0)/* inverse parity */ | (((dev->baudv - 1) & 0x0100) >> 8); /* MSB-Baud */ DEBUGP(1, dev, "set dev->flags1 = 0x%.2x\n", dev->flags1); xoutb(dev->flags1, REG_FLAGS1(iobase)); /* xmit data */ DEBUGP(4, dev, "Xmit data\n"); for (i = 0; i < nr; i++) { if (i >= 256) { dev->flags1 = 0x20 /* T_Active */ | (sendT0) /* SendT0 */ /* inverse parity: */ | (test_bit(IS_INVREV, &dev->flags) ? 
2 : 0) | (((dev->baudv - 1) & 0x0100) >> 8) /* MSB-Baud */ | 0x10; /* set address high */ DEBUGP(4, dev, "dev->flags = 0x%.2x - set address " "high\n", dev->flags1); xoutb(dev->flags1, REG_FLAGS1(iobase)); } if (test_bit(IS_INVREV, &dev->flags)) { DEBUGP(4, dev, "Apply inverse convention for 0x%.2x " "-> 0x%.2x\n", (unsigned char)dev->sbuf[i], invert_revert(dev->sbuf[i])); xoutb(i, REG_BUF_ADDR(iobase)); xoutb(invert_revert(dev->sbuf[i]), REG_BUF_DATA(iobase)); } else { xoutb(i, REG_BUF_ADDR(iobase)); xoutb(dev->sbuf[i], REG_BUF_DATA(iobase)); } } DEBUGP(4, dev, "Xmit done\n"); if (dev->proto == 0) { /* T=0 proto: 0 byte reply */ if (nr == 4) { DEBUGP(4, dev, "T=0 assumes 0 byte reply\n"); xoutb(i, REG_BUF_ADDR(iobase)); if (test_bit(IS_INVREV, &dev->flags)) xoutb(0xff, REG_BUF_DATA(iobase)); else xoutb(0x00, REG_BUF_DATA(iobase)); } /* numSendBytes */ if (sendT0) nsend = nr; else { if (nr == 4) nsend = 5; else { nsend = 5 + (unsigned char)dev->sbuf[4]; if (dev->sbuf[4] == 0) nsend += 0x100; } } } else nsend = nr; /* T0: output procedure byte */ if (test_bit(IS_INVREV, &dev->flags)) { DEBUGP(4, dev, "T=0 set Procedure byte (inverse-reverse) " "0x%.2x\n", invert_revert(dev->sbuf[1])); xoutb(invert_revert(dev->sbuf[1]), REG_NUM_BYTES(iobase)); } else { DEBUGP(4, dev, "T=0 set Procedure byte 0x%.2x\n", dev->sbuf[1]); xoutb(dev->sbuf[1], REG_NUM_BYTES(iobase)); } DEBUGP(1, dev, "set NumSendBytes = 0x%.2x\n", (unsigned char)(nsend & 0xff)); xoutb((unsigned char)(nsend & 0xff), REG_NUM_SEND(iobase)); DEBUGP(1, dev, "Trigger CARDMAN CONTROLLER (0x%.2x)\n", 0x40 /* SM_Active */ | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */ |(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */ |(nsend & 0x100) >> 8 /* MSB numSendBytes */ ); xoutb(0x40 /* SM_Active */ | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */ |(dev->proto ? 
0x10 : 0x08) /* T=1/T=0 */ |(nsend & 0x100) >> 8, /* MSB numSendBytes */ REG_FLAGS0(iobase)); /* wait for xmit done */ if (dev->proto == 1) { DEBUGP(4, dev, "Wait for xmit done\n"); for (i = 0; i < 1000; i++) { if (inb(REG_FLAGS0(iobase)) & 0x08) break; msleep_interruptible(10); } if (i == 1000) { DEBUGP(4, dev, "timeout waiting for xmit done\n"); rc = -EIO; goto release_io; } } /* T=1: wait for infoLen */ infolen = 0; if (dev->proto) { /* wait until infoLen is valid */ for (i = 0; i < 6000; i++) { /* max waiting time of 1 min */ io_read_num_rec_bytes(iobase, &s); if (s >= 3) { infolen = inb(REG_FLAGS1(iobase)); DEBUGP(4, dev, "infolen=%d\n", infolen); break; } msleep_interruptible(10); } if (i == 6000) { DEBUGP(4, dev, "timeout waiting for infoLen\n"); rc = -EIO; goto release_io; } } else clear_bit(IS_PROCBYTE_PRESENT, &dev->flags); /* numRecBytes | bit9 of numRecytes */ io_read_num_rec_bytes(iobase, &dev->rlen); for (i = 0; i < 600; i++) { /* max waiting time of 2 sec */ if (dev->proto) { if (dev->rlen >= infolen + 4) break; } msleep_interruptible(10); /* numRecBytes | bit9 of numRecytes */ io_read_num_rec_bytes(iobase, &s); if (s > dev->rlen) { DEBUGP(1, dev, "NumRecBytes inc (reset timeout)\n"); i = 0; /* reset timeout */ dev->rlen = s; } /* T=0: we are done when numRecBytes doesn't * increment any more and NoProcedureByte * is set and numRecBytes == bytes sent + 6 * (header bytes + data + 1 for sw2) * except when the card replies an error * which means, no data will be sent back. 
*/ else if (dev->proto == 0) { if ((inb(REG_BUF_ADDR(iobase)) & 0x80)) { /* no procedure byte received since last read */ DEBUGP(1, dev, "NoProcedure byte set\n"); /* i=0; */ } else { /* procedure byte received since last read */ DEBUGP(1, dev, "NoProcedure byte unset " "(reset timeout)\n"); dev->procbyte = inb(REG_FLAGS1(iobase)); DEBUGP(1, dev, "Read procedure byte 0x%.2x\n", dev->procbyte); i = 0; /* resettimeout */ } if (inb(REG_FLAGS0(iobase)) & 0x08) { DEBUGP(1, dev, "T0Done flag (read reply)\n"); break; } } if (dev->proto) infolen = inb(REG_FLAGS1(iobase)); } if (i == 600) { DEBUGP(1, dev, "timeout waiting for numRecBytes\n"); rc = -EIO; goto release_io; } else { if (dev->proto == 0) { DEBUGP(1, dev, "Wait for T0Done bit to be set\n"); for (i = 0; i < 1000; i++) { if (inb(REG_FLAGS0(iobase)) & 0x08) break; msleep_interruptible(10); } if (i == 1000) { DEBUGP(1, dev, "timeout waiting for T0Done\n"); rc = -EIO; goto release_io; } dev->procbyte = inb(REG_FLAGS1(iobase)); DEBUGP(4, dev, "Read procedure byte 0x%.2x\n", dev->procbyte); io_read_num_rec_bytes(iobase, &dev->rlen); DEBUGP(4, dev, "Read NumRecBytes = %i\n", dev->rlen); } } /* T=1: read offset=zero, T=0: read offset=after challenge */ dev->rpos = dev->proto ? 0 : nr == 4 ? 5 : nr > dev->rlen ? 5 : nr; DEBUGP(4, dev, "dev->rlen = %i, dev->rpos = %i, nr = %i\n", dev->rlen, dev->rpos, nr); release_io: DEBUGP(4, dev, "Reset SM\n"); xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ if (rc < 0) { DEBUGP(4, dev, "Write failed but clear T_Active\n"); dev->flags1 &= 0xdf; xoutb(dev->flags1, REG_FLAGS1(iobase)); } clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); wake_up_interruptible(&dev->readq); /* tell read we have data */ /* ITSEC E2: clear write buffer */ memset((char *)dev->sbuf, 0, 512); /* return error or actually written bytes */ DEBUGP(2, dev, "<- cmm_write\n"); return rc < 0 ? 
rc : nr; } static void start_monitor(struct cm4000_dev *dev) { DEBUGP(3, dev, "-> start_monitor\n"); if (!dev->monitor_running) { DEBUGP(5, dev, "create, init and add timer\n"); setup_timer(&dev->timer, monitor_card, (unsigned long)dev); dev->monitor_running = 1; mod_timer(&dev->timer, jiffies); } else DEBUGP(5, dev, "monitor already running\n"); DEBUGP(3, dev, "<- start_monitor\n"); } static void stop_monitor(struct cm4000_dev *dev) { DEBUGP(3, dev, "-> stop_monitor\n"); if (dev->monitor_running) { DEBUGP(5, dev, "stopping monitor\n"); terminate_monitor(dev); /* reset monitor SM */ clear_bit(IS_ATR_VALID, &dev->flags); clear_bit(IS_ATR_PRESENT, &dev->flags); } else DEBUGP(5, dev, "monitor already stopped\n"); DEBUGP(3, dev, "<- stop_monitor\n"); } static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct cm4000_dev *dev = filp->private_data; unsigned int iobase = dev->p_dev->resource[0]->start; struct inode *inode = file_inode(filp); struct pcmcia_device *link; int size; int rc; void __user *argp = (void __user *)arg; #ifdef CM4000_DEBUG char *ioctl_names[CM_IOC_MAXNR + 1] = { [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS", [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR", [_IOC_NR(CM_IOCARDOFF)] "CM_IOCARDOFF", [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS", [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL", }; DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode), iminor(inode), ioctl_names[_IOC_NR(cmd)]); #endif mutex_lock(&cmm_mutex); rc = -ENODEV; link = dev_table[iminor(inode)]; if (!pcmcia_dev_present(link)) { DEBUGP(4, dev, "DEV_OK false\n"); goto out; } if (test_bit(IS_CMM_ABSENT, &dev->flags)) { DEBUGP(4, dev, "CMM_ABSENT flag set\n"); goto out; } rc = -EINVAL; if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) { DEBUGP(4, dev, "ioctype mismatch\n"); goto out; } if (_IOC_NR(cmd) > CM_IOC_MAXNR) { DEBUGP(4, dev, "iocnr mismatch\n"); goto out; } size = _IOC_SIZE(cmd); rc = -EFAULT; DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n", _IOC_DIR(cmd), 
_IOC_READ, _IOC_WRITE, size, cmd); if (_IOC_DIR(cmd) & _IOC_READ) { if (!access_ok(VERIFY_WRITE, argp, size)) goto out; } if (_IOC_DIR(cmd) & _IOC_WRITE) { if (!access_ok(VERIFY_READ, argp, size)) goto out; } rc = 0; switch (cmd) { case CM_IOCGSTATUS: DEBUGP(4, dev, " ... in CM_IOCGSTATUS\n"); { int status; /* clear other bits, but leave inserted & powered as * they are */ status = dev->flags0 & 3; if (test_bit(IS_ATR_PRESENT, &dev->flags)) status |= CM_ATR_PRESENT; if (test_bit(IS_ATR_VALID, &dev->flags)) status |= CM_ATR_VALID; if (test_bit(IS_CMM_ABSENT, &dev->flags)) status |= CM_NO_READER; if (test_bit(IS_BAD_CARD, &dev->flags)) status |= CM_BAD_CARD; if (copy_to_user(argp, &status, sizeof(int))) rc = -EFAULT; } break; case CM_IOCGATR: DEBUGP(4, dev, "... in CM_IOCGATR\n"); { struct atreq __user *atreq = argp; int tmp; /* allow nonblocking io and being interrupted */ if (wait_event_interruptible (dev->atrq, ((filp->f_flags & O_NONBLOCK) || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { if (filp->f_flags & O_NONBLOCK) rc = -EAGAIN; else rc = -ERESTARTSYS; break; } rc = -EFAULT; if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { tmp = -1; if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int))) break; } else { if (copy_to_user(atreq->atr, dev->atr, dev->atr_len)) break; tmp = dev->atr_len; if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int))) break; } rc = 0; break; } case CM_IOCARDOFF: #ifdef CM4000_DEBUG DEBUGP(4, dev, "... in CM_IOCARDOFF\n"); if (dev->flags0 & 0x01) { DEBUGP(4, dev, " Card inserted\n"); } else { DEBUGP(2, dev, " No card inserted\n"); } if (dev->flags0 & 0x02) { DEBUGP(4, dev, " Card powered\n"); } else { DEBUGP(2, dev, " Card not powered\n"); } #endif /* is a card inserted and powered? 
*/ if ((dev->flags0 & 0x01) && (dev->flags0 & 0x02)) { /* get IO lock */ if (wait_event_interruptible (dev->ioq, ((filp->f_flags & O_NONBLOCK) || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { if (filp->f_flags & O_NONBLOCK) rc = -EAGAIN; else rc = -ERESTARTSYS; break; } /* Set Flags0 = 0x42 */ DEBUGP(4, dev, "Set Flags0=0x42 \n"); xoutb(0x42, REG_FLAGS0(iobase)); clear_bit(IS_ATR_PRESENT, &dev->flags); clear_bit(IS_ATR_VALID, &dev->flags); dev->mstate = M_CARDOFF; clear_bit(LOCK_IO, &dev->flags); if (wait_event_interruptible (dev->atrq, ((filp->f_flags & O_NONBLOCK) || (test_bit(IS_ATR_VALID, (void *)&dev->flags) != 0)))) { if (filp->f_flags & O_NONBLOCK) rc = -EAGAIN; else rc = -ERESTARTSYS; break; } } /* release lock */ clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); rc = 0; break; case CM_IOCSPTS: { struct ptsreq krnptsreq; if (copy_from_user(&krnptsreq, argp, sizeof(struct ptsreq))) { rc = -EFAULT; break; } rc = 0; DEBUGP(4, dev, "... in CM_IOCSPTS\n"); /* wait for ATR to get valid */ if (wait_event_interruptible (dev->atrq, ((filp->f_flags & O_NONBLOCK) || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { if (filp->f_flags & O_NONBLOCK) rc = -EAGAIN; else rc = -ERESTARTSYS; break; } /* get IO lock */ if (wait_event_interruptible (dev->ioq, ((filp->f_flags & O_NONBLOCK) || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { if (filp->f_flags & O_NONBLOCK) rc = -EAGAIN; else rc = -ERESTARTSYS; break; } if ((rc = set_protocol(dev, &krnptsreq)) != 0) { /* auto power_on again */ dev->mstate = M_FETCH_ATR; clear_bit(IS_ATR_VALID, &dev->flags); } /* release lock */ clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); } break; #ifdef CM4000_DEBUG case CM_IOSDBGLVL: rc = -ENOTTY; break; #endif default: DEBUGP(4, dev, "... 
in default (unknown IOCTL code)\n"); rc = -ENOTTY; } out: mutex_unlock(&cmm_mutex); return rc; } static int cmm_open(struct inode *inode, struct file *filp) { struct cm4000_dev *dev; struct pcmcia_device *link; int minor = iminor(inode); int ret; if (minor >= CM4000_MAX_DEV) return -ENODEV; mutex_lock(&cmm_mutex); link = dev_table[minor]; if (link == NULL || !pcmcia_dev_present(link)) { ret = -ENODEV; goto out; } if (link->open) { ret = -EBUSY; goto out; } dev = link->priv; filp->private_data = dev; DEBUGP(2, dev, "-> cmm_open(device=%d.%d process=%s,%d)\n", imajor(inode), minor, current->comm, current->pid); /* init device variables, they may be "polluted" after close * or, the device may never have been closed (i.e. open failed) */ ZERO_DEV(dev); /* opening will always block since the * monitor will be started by open, which * means we have to wait for ATR becoming * valid = block until valid (or card * inserted) */ if (filp->f_flags & O_NONBLOCK) { ret = -EAGAIN; goto out; } dev->mdelay = T_50MSEC; /* start monitoring the cardstatus */ start_monitor(dev); link->open = 1; /* only one open per device */ DEBUGP(2, dev, "<- cmm_open\n"); ret = nonseekable_open(inode, filp); out: mutex_unlock(&cmm_mutex); return ret; } static int cmm_close(struct inode *inode, struct file *filp) { struct cm4000_dev *dev; struct pcmcia_device *link; int minor = iminor(inode); if (minor >= CM4000_MAX_DEV) return -ENODEV; link = dev_table[minor]; if (link == NULL) return -ENODEV; dev = link->priv; DEBUGP(2, dev, "-> cmm_close(maj/min=%d.%d)\n", imajor(inode), minor); stop_monitor(dev); ZERO_DEV(dev); link->open = 0; /* only one open per device */ wake_up(&dev->devq); /* socket removed? */ DEBUGP(2, dev, "cmm_close\n"); return 0; } static void cmm_cm4000_release(struct pcmcia_device * link) { struct cm4000_dev *dev = link->priv; /* dont terminate the monitor, rather rely on * close doing that for us. 
*/ DEBUGP(3, dev, "-> cmm_cm4000_release\n"); while (link->open) { printk(KERN_INFO MODULE_NAME ": delaying release until " "process has terminated\n"); /* note: don't interrupt us: * close the applications which own * the devices _first_ ! */ wait_event(dev->devq, (link->open == 0)); } /* dev->devq=NULL; this cannot be zeroed earlier */ DEBUGP(3, dev, "<- cmm_cm4000_release\n"); return; } /*==== Interface to PCMCIA Layer =======================================*/ static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data) { return pcmcia_request_io(p_dev); } static int cm4000_config(struct pcmcia_device * link, int devno) { struct cm4000_dev *dev; link->config_flags |= CONF_AUTO_SET_IO; /* read the config-tuples */ if (pcmcia_loop_config(link, cm4000_config_check, NULL)) goto cs_release; if (pcmcia_enable_device(link)) goto cs_release; dev = link->priv; return 0; cs_release: cm4000_release(link); return -ENODEV; } static int cm4000_suspend(struct pcmcia_device *link) { struct cm4000_dev *dev; dev = link->priv; stop_monitor(dev); return 0; } static int cm4000_resume(struct pcmcia_device *link) { struct cm4000_dev *dev; dev = link->priv; if (link->open) start_monitor(dev); return 0; } static void cm4000_release(struct pcmcia_device *link) { cmm_cm4000_release(link); /* delay release until device closed */ pcmcia_disable_device(link); } static int cm4000_probe(struct pcmcia_device *link) { struct cm4000_dev *dev; int i, ret; for (i = 0; i < CM4000_MAX_DEV; i++) if (dev_table[i] == NULL) break; if (i == CM4000_MAX_DEV) { printk(KERN_NOTICE MODULE_NAME ": all devices in use\n"); return -ENODEV; } /* create a new cm4000_cs device */ dev = kzalloc(sizeof(struct cm4000_dev), GFP_KERNEL); if (dev == NULL) return -ENOMEM; dev->p_dev = link; link->priv = dev; dev_table[i] = link; init_waitqueue_head(&dev->devq); init_waitqueue_head(&dev->ioq); init_waitqueue_head(&dev->atrq); init_waitqueue_head(&dev->readq); ret = cm4000_config(link, i); if (ret) { 
dev_table[i] = NULL; kfree(dev); return ret; } device_create(cmm_class, NULL, MKDEV(major, i), NULL, "cmm%d", i); return 0; } static void cm4000_detach(struct pcmcia_device *link) { struct cm4000_dev *dev = link->priv; int devno; /* find device */ for (devno = 0; devno < CM4000_MAX_DEV; devno++) if (dev_table[devno] == link) break; if (devno == CM4000_MAX_DEV) return; stop_monitor(dev); cm4000_release(link); dev_table[devno] = NULL; kfree(dev); device_destroy(cmm_class, MKDEV(major, devno)); return; } static const struct file_operations cm4000_fops = { .owner = THIS_MODULE, .read = cmm_read, .write = cmm_write, .unlocked_ioctl = cmm_ioctl, .open = cmm_open, .release= cmm_close, .llseek = no_llseek, }; static const struct pcmcia_device_id cm4000_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002), PCMCIA_DEVICE_PROD_ID12("CardMan", "4000", 0x2FB368CA, 0xA2BD8C39), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, cm4000_ids); static struct pcmcia_driver cm4000_driver = { .owner = THIS_MODULE, .name = "cm4000_cs", .probe = cm4000_probe, .remove = cm4000_detach, .suspend = cm4000_suspend, .resume = cm4000_resume, .id_table = cm4000_ids, }; static int __init cmm_init(void) { int rc; cmm_class = class_create(THIS_MODULE, "cardman_4000"); if (IS_ERR(cmm_class)) return PTR_ERR(cmm_class); major = register_chrdev(0, DEVICE_NAME, &cm4000_fops); if (major < 0) { printk(KERN_WARNING MODULE_NAME ": could not get major number\n"); class_destroy(cmm_class); return major; } rc = pcmcia_register_driver(&cm4000_driver); if (rc < 0) { unregister_chrdev(major, DEVICE_NAME); class_destroy(cmm_class); return rc; } return 0; } static void __exit cmm_exit(void) { pcmcia_unregister_driver(&cm4000_driver); unregister_chrdev(major, DEVICE_NAME); class_destroy(cmm_class); }; module_init(cmm_init); module_exit(cmm_exit); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
CyanHacker-Lollipop/kernel_htc_msm8974
drivers/pci/setup-bus.c
4401
41107
/* * drivers/pci/setup-bus.c * * Extruded from code written by * Dave Rusling (david.rusling@reo.mts.dec.com) * David Mosberger (davidm@cs.arizona.edu) * David Miller (davem@redhat.com) * * Support routines for initializing a PCI subsystem. */ /* * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru> * PCI-PCI bridges cleanup, sorted resource allocation. * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru> * Converted to allocation in 3 passes, which gives * tighter packing. Prefetchable range support. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/cache.h> #include <linux/slab.h> #include <asm-generic/pci-bridge.h> #include "pci.h" unsigned int pci_flags; struct pci_dev_resource { struct list_head list; struct resource *res; struct pci_dev *dev; resource_size_t start; resource_size_t end; resource_size_t add_size; resource_size_t min_align; unsigned long flags; }; static void free_list(struct list_head *head) { struct pci_dev_resource *dev_res, *tmp; list_for_each_entry_safe(dev_res, tmp, head, list) { list_del(&dev_res->list); kfree(dev_res); } } /** * add_to_list() - add a new resource tracker to the list * @head: Head of the list * @dev: device corresponding to which the resource * belongs * @res: The resource to be tracked * @add_size: additional size to be optionally added * to the resource */ static int add_to_list(struct list_head *head, struct pci_dev *dev, struct resource *res, resource_size_t add_size, resource_size_t min_align) { struct pci_dev_resource *tmp; tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) { pr_warning("add_to_list: kmalloc() failed!\n"); return -ENOMEM; } tmp->res = res; tmp->dev = dev; tmp->start = res->start; tmp->end = res->end; tmp->flags = res->flags; tmp->add_size = add_size; tmp->min_align = min_align; list_add(&tmp->list, head); return 0; } static void remove_from_list(struct list_head *head, struct 
resource *res) { struct pci_dev_resource *dev_res, *tmp; list_for_each_entry_safe(dev_res, tmp, head, list) { if (dev_res->res == res) { list_del(&dev_res->list); kfree(dev_res); break; } } } static resource_size_t get_res_add_size(struct list_head *head, struct resource *res) { struct pci_dev_resource *dev_res; list_for_each_entry(dev_res, head, list) { if (dev_res->res == res) { int idx = res - &dev_res->dev->resource[0]; dev_printk(KERN_DEBUG, &dev_res->dev->dev, "res[%d]=%pR get_res_add_size add_size %llx\n", idx, dev_res->res, (unsigned long long)dev_res->add_size); return dev_res->add_size; } } return 0; } /* Sort resources by alignment */ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) { int i; for (i = 0; i < PCI_NUM_RESOURCES; i++) { struct resource *r; struct pci_dev_resource *dev_res, *tmp; resource_size_t r_align; struct list_head *n; r = &dev->resource[i]; if (r->flags & IORESOURCE_PCI_FIXED) continue; if (!(r->flags) || r->parent) continue; r_align = pci_resource_alignment(dev, r); if (!r_align) { dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n", i, r); continue; } tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) panic("pdev_sort_resources(): " "kmalloc() failed!\n"); tmp->res = r; tmp->dev = dev; /* fallback is smallest one or list is empty*/ n = head; list_for_each_entry(dev_res, head, list) { resource_size_t align; align = pci_resource_alignment(dev_res->dev, dev_res->res); if (r_align > align) { n = &dev_res->list; break; } } /* Insert it just before n*/ list_add_tail(&tmp->list, n); } } static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head) { u16 class = dev->class >> 8; /* Don't touch classless devices or host bridges or ioapics. 
*/ if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST) return; /* Don't touch ioapic devices already enabled by firmware */ if (class == PCI_CLASS_SYSTEM_PIC) { u16 command; pci_read_config_word(dev, PCI_COMMAND, &command); if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) return; } pdev_sort_resources(dev, head); } static inline void reset_resource(struct resource *res) { res->start = 0; res->end = 0; res->flags = 0; } /** * reassign_resources_sorted() - satisfy any additional resource requests * * @realloc_head : head of the list tracking requests requiring additional * resources * @head : head of the list tracking requests with allocated * resources * * Walk through each element of the realloc_head and try to procure * additional resources for the element, provided the element * is in the head list. */ static void reassign_resources_sorted(struct list_head *realloc_head, struct list_head *head) { struct resource *res; struct pci_dev_resource *add_res, *tmp; struct pci_dev_resource *dev_res; resource_size_t add_size; int idx; list_for_each_entry_safe(add_res, tmp, realloc_head, list) { bool found_match = false; res = add_res->res; /* skip resource that has been reset */ if (!res->flags) goto out; /* skip this resource if not found in head list */ list_for_each_entry(dev_res, head, list) { if (dev_res->res == res) { found_match = true; break; } } if (!found_match)/* just skip */ continue; idx = res - &add_res->dev->resource[0]; add_size = add_res->add_size; if (!resource_size(res)) { res->start = add_res->start; res->end = res->start + add_size - 1; if (pci_assign_resource(add_res->dev, idx)) reset_resource(res); } else { resource_size_t align = add_res->min_align; res->flags |= add_res->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); if (pci_reassign_resource(add_res->dev, idx, add_size, align)) dev_printk(KERN_DEBUG, &add_res->dev->dev, "failed to add %llx res[%d]=%pR\n", (unsigned long long)add_size, idx, res); } out: 
list_del(&add_res->list);
		kfree(add_res);
	}
}

/**
 * assign_requested_resources_sorted() - satisfy resource requests
 *
 * @head : head of the list tracking requests for resources
 * @fail_head : head of the list tracking requests that could
 *		not be allocated
 *
 * Satisfy resource requests of each element in the list.  Add
 * requests that could not be satisfied to the fail_head list.
 */
static void assign_requested_resources_sorted(struct list_head *head,
				 struct list_head *fail_head)
{
	struct resource *res;
	struct pci_dev_resource *dev_res;
	int idx;

	list_for_each_entry(dev_res, head, list) {
		res = dev_res->res;
		/* index of this resource within the owning device's BAR table */
		idx = res - &dev_res->dev->resource[0];
		if (resource_size(res) &&
		    pci_assign_resource(dev_res->dev, idx)) {
			if (fail_head && !pci_is_root_bus(dev_res->dev->bus)) {
				/*
				 * if the failed res is for ROM BAR, and it will
				 * be enabled later, don't add it to the list
				 */
				if (!((idx == PCI_ROM_RESOURCE) &&
				      (!(res->flags & IORESOURCE_ROM_ENABLE))))
					add_to_list(fail_head,
						    dev_res->dev, res,
						    0 /* dont care */,
						    0 /* dont care */);
			}
			/* assignment failed: zero start/end/flags so the
			 * resource is not mistaken for an allocated one */
			reset_resource(res);
		}
	}
}

static void __assign_resources_sorted(struct list_head *head,
				 struct list_head *realloc_head,
				 struct list_head *fail_head)
{
	/*
	 * Should not assign requested resources at first.
	 * they could be adjacent, so later reassign can not reallocate
	 * them one by one in parent resource window.
	 * Try to assign requested + add_size at beginning
	 * if could do that, could get out early.
	 * if could not do that, we still try to assign requested at first,
	 * then try to reassign add_size for some resources.
*/ LIST_HEAD(save_head); LIST_HEAD(local_fail_head); struct pci_dev_resource *save_res; struct pci_dev_resource *dev_res; /* Check if optional add_size is there */ if (!realloc_head || list_empty(realloc_head)) goto requested_and_reassign; /* Save original start, end, flags etc at first */ list_for_each_entry(dev_res, head, list) { if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) { free_list(&save_head); goto requested_and_reassign; } } /* Update res in head list with add_size in realloc_head list */ list_for_each_entry(dev_res, head, list) dev_res->res->end += get_res_add_size(realloc_head, dev_res->res); /* Try updated head list with add_size added */ assign_requested_resources_sorted(head, &local_fail_head); /* all assigned with add_size ? */ if (list_empty(&local_fail_head)) { /* Remove head list from realloc_head list */ list_for_each_entry(dev_res, head, list) remove_from_list(realloc_head, dev_res->res); free_list(&save_head); free_list(head); return; } free_list(&local_fail_head); /* Release assigned resource */ list_for_each_entry(dev_res, head, list) if (dev_res->res->parent) release_resource(dev_res->res); /* Restore start/end/flags from saved list */ list_for_each_entry(save_res, &save_head, list) { struct resource *res = save_res->res; res->start = save_res->start; res->end = save_res->end; res->flags = save_res->flags; } free_list(&save_head); requested_and_reassign: /* Satisfy the must-have resource requests */ assign_requested_resources_sorted(head, fail_head); /* Try to satisfy any additional optional resource requests */ if (realloc_head) reassign_resources_sorted(realloc_head, head); free_list(head); } static void pdev_assign_resources_sorted(struct pci_dev *dev, struct list_head *add_head, struct list_head *fail_head) { LIST_HEAD(head); __dev_sort_resources(dev, &head); __assign_resources_sorted(&head, add_head, fail_head); } static void pbus_assign_resources_sorted(const struct pci_bus *bus, struct list_head *realloc_head, struct 
list_head *fail_head) { struct pci_dev *dev; LIST_HEAD(head); list_for_each_entry(dev, &bus->devices, bus_list) __dev_sort_resources(dev, &head); __assign_resources_sorted(&head, realloc_head, fail_head); } void pci_setup_cardbus(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n", bus->secondary, bus->subordinate); res = bus->resource[0]; pcibios_resource_to_bus(bridge, &region, res); if (res->flags & IORESOURCE_IO) { /* * The IO resource is allocated a range twice as large as it * would normally need. This allows us to set both IO regs. */ dev_info(&bridge->dev, " bridge window %pR\n", res); pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, region.start); pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0, region.end); } res = bus->resource[1]; pcibios_resource_to_bus(bridge, &region, res); if (res->flags & IORESOURCE_IO) { dev_info(&bridge->dev, " bridge window %pR\n", res); pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, region.start); pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1, region.end); } res = bus->resource[2]; pcibios_resource_to_bus(bridge, &region, res); if (res->flags & IORESOURCE_MEM) { dev_info(&bridge->dev, " bridge window %pR\n", res); pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, region.start); pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0, region.end); } res = bus->resource[3]; pcibios_resource_to_bus(bridge, &region, res); if (res->flags & IORESOURCE_MEM) { dev_info(&bridge->dev, " bridge window %pR\n", res); pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, region.start); pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1, region.end); } } EXPORT_SYMBOL(pci_setup_cardbus); /* Initialize bridges with base/limit values we have collected. PCI-to-PCI Bridge Architecture Specification rev. 
1.1 (1998) requires that if there is no I/O ports or memory behind the bridge, corresponding range must be turned off by writing base value greater than limit to the bridge's base/limit registers. Note: care must be taken when updating I/O base/limit registers of bridges which support 32-bit I/O. This update requires two config space writes, so it's quite possible that an I/O window of the bridge will have some undesirable address (e.g. 0) after the first write. Ditto 64-bit prefetchable MMIO. */ static void pci_setup_bridge_io(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; u32 l, io_upper16; /* Set up the top and bottom of the PCI I/O segment for this bus. */ res = bus->resource[0]; pcibios_resource_to_bus(bridge, &region, res); if (res->flags & IORESOURCE_IO) { pci_read_config_dword(bridge, PCI_IO_BASE, &l); l &= 0xffff0000; l |= (region.start >> 8) & 0x00f0; l |= region.end & 0xf000; /* Set up upper 16 bits of I/O base/limit. */ io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); dev_info(&bridge->dev, " bridge window %pR\n", res); } else { /* Clear upper 16 bits of I/O base/limit. */ io_upper16 = 0; l = 0x00f0; } /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); /* Update lower 16 bits of I/O base/limit. */ pci_write_config_dword(bridge, PCI_IO_BASE, l); /* Update upper 16 bits of I/O base/limit. */ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); } static void pci_setup_bridge_mmio(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; u32 l; /* Set up the top and bottom of the PCI Memory segment for this bus. 
*/ res = bus->resource[1]; pcibios_resource_to_bus(bridge, &region, res); if (res->flags & IORESOURCE_MEM) { l = (region.start >> 16) & 0xfff0; l |= region.end & 0xfff00000; dev_info(&bridge->dev, " bridge window %pR\n", res); } else { l = 0x0000fff0; } pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); } static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; u32 l, bu, lu; /* Clear out the upper 32 bits of PREF limit. If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables PREF range, which is ok. */ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); /* Set up PREF base/limit. */ bu = lu = 0; res = bus->resource[2]; pcibios_resource_to_bus(bridge, &region, res); if (res->flags & IORESOURCE_PREFETCH) { l = (region.start >> 16) & 0xfff0; l |= region.end & 0xfff00000; if (res->flags & IORESOURCE_MEM_64) { bu = upper_32_bits(region.start); lu = upper_32_bits(region.end); } dev_info(&bridge->dev, " bridge window %pR\n", res); } else { l = 0x0000fff0; } pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); /* Set the upper 32 bits of PREF base & limit. */ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); } static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) { struct pci_dev *bridge = bus->self; dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n", bus->secondary, bus->subordinate); if (type & IORESOURCE_IO) pci_setup_bridge_io(bus); if (type & IORESOURCE_MEM) pci_setup_bridge_mmio(bus); if (type & IORESOURCE_PREFETCH) pci_setup_bridge_mmio_pref(bus); pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); } void pci_setup_bridge(struct pci_bus *bus) { unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; __pci_setup_bridge(bus, type); } /* Check whether the bridge supports optional I/O and prefetchable memory ranges. 
If not, the respective base/limit registers must be read-only and read as 0. */ static void pci_bridge_check_ranges(struct pci_bus *bus) { u16 io; u32 pmem; struct pci_dev *bridge = bus->self; struct resource *b_res; b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; b_res[1].flags |= IORESOURCE_MEM; pci_read_config_word(bridge, PCI_IO_BASE, &io); if (!io) { pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0); pci_read_config_word(bridge, PCI_IO_BASE, &io); pci_write_config_word(bridge, PCI_IO_BASE, 0x0); } if (io) b_res[0].flags |= IORESOURCE_IO; /* DECchip 21050 pass 2 errata: the bridge may miss an address disconnect boundary by one PCI data phase. Workaround: do not use prefetching on this device. */ if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) return; pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); if (!pmem) { pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0xfff0fff0); pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); } if (pmem) { b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { b_res[2].flags |= IORESOURCE_MEM_64; b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; } } /* double check if bridge does support 64 bit pref */ if (b_res[2].flags & IORESOURCE_MEM_64) { u32 mem_base_hi, tmp; pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &mem_base_hi); pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, 0xffffffff); pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); if (!tmp) b_res[2].flags &= ~IORESOURCE_MEM_64; pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, mem_base_hi); } } /* Helper function for sizing routines: find first available bus resource of a given type. Note: we intentionally skip the bus resources which have already been assigned (that is, have non-NULL parent resource). 
*/ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type) { int i; struct resource *r; unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; pci_bus_for_each_resource(bus, r, i) { if (r == &ioport_resource || r == &iomem_resource) continue; if (r && (r->flags & type_mask) == type && !r->parent) return r; } return NULL; } static resource_size_t calculate_iosize(resource_size_t size, resource_size_t min_size, resource_size_t size1, resource_size_t old_size, resource_size_t align) { if (size < min_size) size = min_size; if (old_size == 1 ) old_size = 0; /* To be fixed in 2.5: we should have sort of HAVE_ISA flag in the struct pci_bus. */ #if defined(CONFIG_ISA) || defined(CONFIG_EISA) size = (size & 0xff) + ((size & ~0xffUL) << 2); #endif size = ALIGN(size + size1, align); if (size < old_size) size = old_size; return size; } static resource_size_t calculate_memsize(resource_size_t size, resource_size_t min_size, resource_size_t size1, resource_size_t old_size, resource_size_t align) { if (size < min_size) size = min_size; if (old_size == 1 ) old_size = 0; if (size < old_size) size = old_size; size = ALIGN(size + size1, align); return size; } /** * pbus_size_io() - size the io window of a given bus * * @bus : the bus * @min_size : the minimum io window that must to be allocated * @add_size : additional optional io window * @realloc_head : track the additional io window on this list * * Sizing the IO windows of the PCI-PCI bridge is trivial, * since these windows have 4K granularity and the IO ranges * of non-bridge PCI devices are limited to 256 bytes. * We must be careful with the ISA aliasing though. 
*/ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, resource_size_t add_size, struct list_head *realloc_head) { struct pci_dev *dev; struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); unsigned long size = 0, size0 = 0, size1 = 0; resource_size_t children_add_size = 0; if (!b_res) return; list_for_each_entry(dev, &bus->devices, bus_list) { int i; for (i = 0; i < PCI_NUM_RESOURCES; i++) { struct resource *r = &dev->resource[i]; unsigned long r_size; if (r->parent || !(r->flags & IORESOURCE_IO)) continue; r_size = resource_size(r); if (r_size < 0x400) /* Might be re-aligned for ISA */ size += r_size; else size1 += r_size; if (realloc_head) children_add_size += get_res_add_size(realloc_head, r); } } size0 = calculate_iosize(size, min_size, size1, resource_size(b_res), 4096); if (children_add_size > add_size) add_size = children_add_size; size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 : calculate_iosize(size, min_size, add_size + size1, resource_size(b_res), 4096); if (!size0 && !size1) { if (b_res->start || b_res->end) dev_info(&bus->self->dev, "disabling bridge window " "%pR to [bus %02x-%02x] (unused)\n", b_res, bus->secondary, bus->subordinate); b_res->flags = 0; return; } /* Alignment of the IO window is always 4K */ b_res->start = 4096; b_res->end = b_res->start + size0 - 1; b_res->flags |= IORESOURCE_STARTALIGN; if (size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096); dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window " "%pR to [bus %02x-%02x] add_size %lx\n", b_res, bus->secondary, bus->subordinate, size1-size0); } } /** * pbus_size_mem() - size the memory window of a given bus * * @bus : the bus * @min_size : the minimum memory window that must to be allocated * @add_size : additional optional memory window * @realloc_head : track the additional memory window on this list * * Calculate the size of the bus and minimal alignment which * guarantees that all 
child resources fit in this size. */ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type, resource_size_t min_size, resource_size_t add_size, struct list_head *realloc_head) { struct pci_dev *dev; resource_size_t min_align, align, size, size0, size1; resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ int order, max_order; struct resource *b_res = find_free_bus_resource(bus, type); unsigned int mem64_mask = 0; resource_size_t children_add_size = 0; if (!b_res) return 0; memset(aligns, 0, sizeof(aligns)); max_order = 0; size = 0; mem64_mask = b_res->flags & IORESOURCE_MEM_64; b_res->flags &= ~IORESOURCE_MEM_64; list_for_each_entry(dev, &bus->devices, bus_list) { int i; for (i = 0; i < PCI_NUM_RESOURCES; i++) { struct resource *r = &dev->resource[i]; resource_size_t r_size; if (r->parent || (r->flags & mask) != type) continue; r_size = resource_size(r); #ifdef CONFIG_PCI_IOV /* put SRIOV requested res to the optional list */ if (realloc_head && i >= PCI_IOV_RESOURCES && i <= PCI_IOV_RESOURCE_END) { r->end = r->start - 1; add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */); children_add_size += r_size; continue; } #endif /* For bridges size != alignment */ align = pci_resource_alignment(dev, r); order = __ffs(align) - 20; if (order > 11) { dev_warn(&dev->dev, "disabling BAR %d: %pR " "(bad alignment %#llx)\n", i, r, (unsigned long long) align); r->flags = 0; continue; } size += r_size; if (order < 0) order = 0; /* Exclude ranges with size > align from calculation of the alignment. 
*/ if (r_size == align) aligns[order] += align; if (order > max_order) max_order = order; mem64_mask &= r->flags & IORESOURCE_MEM_64; if (realloc_head) children_add_size += get_res_add_size(realloc_head, r); } } align = 0; min_align = 0; for (order = 0; order <= max_order; order++) { resource_size_t align1 = 1; align1 <<= (order + 20); if (!align) min_align = align1; else if (ALIGN(align + min_align, min_align) < align1) min_align = align1 >> 1; align += aligns[order]; } size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); if (children_add_size > add_size) add_size = children_add_size; size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 : calculate_memsize(size, min_size, add_size, resource_size(b_res), min_align); if (!size0 && !size1) { if (b_res->start || b_res->end) dev_info(&bus->self->dev, "disabling bridge window " "%pR to [bus %02x-%02x] (unused)\n", b_res, bus->secondary, bus->subordinate); b_res->flags = 0; return 1; } b_res->start = min_align; b_res->end = size0 + min_align - 1; b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; if (size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window " "%pR to [bus %02x-%02x] add_size %llx\n", b_res, bus->secondary, bus->subordinate, (unsigned long long)size1-size0); } return 1; } unsigned long pci_cardbus_resource_alignment(struct resource *res) { if (res->flags & IORESOURCE_IO) return pci_cardbus_io_size; if (res->flags & IORESOURCE_MEM) return pci_cardbus_mem_size; return 0; } static void pci_bus_size_cardbus(struct pci_bus *bus, struct list_head *realloc_head) { struct pci_dev *bridge = bus->self; struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; resource_size_t b_res_3_size = pci_cardbus_mem_size * 2; u16 ctrl; if (b_res[0].parent) goto handle_b_res_1; /* * Reserve some resources for CardBus. We reserve * a fixed amount of bus space for CardBus bridges. 
*/ b_res[0].start = pci_cardbus_io_size; b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1; b_res[0].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN; if (realloc_head) { b_res[0].end -= pci_cardbus_io_size; add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, pci_cardbus_io_size); } handle_b_res_1: if (b_res[1].parent) goto handle_b_res_2; b_res[1].start = pci_cardbus_io_size; b_res[1].end = b_res[1].start + pci_cardbus_io_size - 1; b_res[1].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN; if (realloc_head) { b_res[1].end -= pci_cardbus_io_size; add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, pci_cardbus_io_size); } handle_b_res_2: /* MEM1 must not be pref mmio */ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) { ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1; pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl); pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); } /* * Check whether prefetchable memory is supported * by this bridge. */ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) { ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0; pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl); pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); } if (b_res[2].parent) goto handle_b_res_3; /* * If we have prefetchable memory support, allocate * two regions. Otherwise, allocate one region of * twice the size. 
*/ if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { b_res[2].start = pci_cardbus_mem_size; b_res[2].end = b_res[2].start + pci_cardbus_mem_size - 1; b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_STARTALIGN; if (realloc_head) { b_res[2].end -= pci_cardbus_mem_size; add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, pci_cardbus_mem_size); } /* reduce that to half */ b_res_3_size = pci_cardbus_mem_size; } handle_b_res_3: if (b_res[3].parent) goto handle_done; b_res[3].start = pci_cardbus_mem_size; b_res[3].end = b_res[3].start + b_res_3_size - 1; b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN; if (realloc_head) { b_res[3].end -= b_res_3_size; add_to_list(realloc_head, bridge, b_res+3, b_res_3_size, pci_cardbus_mem_size); } handle_done: ; } void __ref __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) { struct pci_dev *dev; unsigned long mask, prefmask; resource_size_t additional_mem_size = 0, additional_io_size = 0; list_for_each_entry(dev, &bus->devices, bus_list) { struct pci_bus *b = dev->subordinate; if (!b) continue; switch (dev->class >> 8) { case PCI_CLASS_BRIDGE_CARDBUS: pci_bus_size_cardbus(b, realloc_head); break; case PCI_CLASS_BRIDGE_PCI: default: __pci_bus_size_bridges(b, realloc_head); break; } } /* The root bus? */ if (!bus->self) return; switch (bus->self->class >> 8) { case PCI_CLASS_BRIDGE_CARDBUS: /* don't size cardbuses yet. */ break; case PCI_CLASS_BRIDGE_PCI: pci_bridge_check_ranges(bus); if (bus->self->is_hotplug_bridge) { additional_io_size = pci_hotplug_io_size; additional_mem_size = pci_hotplug_mem_size; } /* * Follow thru */ default: pbus_size_io(bus, realloc_head ? 0 : additional_io_size, additional_io_size, realloc_head); /* If the bridge supports prefetchable range, size it separately. If it doesn't, or its prefetchable window has already been allocated by arch code, try non-prefetchable range for both types of PCI memory resources. 
*/ mask = IORESOURCE_MEM; prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; if (pbus_size_mem(bus, prefmask, prefmask, realloc_head ? 0 : additional_mem_size, additional_mem_size, realloc_head)) mask = prefmask; /* Success, size non-prefetch only. */ else additional_mem_size += additional_mem_size; pbus_size_mem(bus, mask, IORESOURCE_MEM, realloc_head ? 0 : additional_mem_size, additional_mem_size, realloc_head); break; } } void __ref pci_bus_size_bridges(struct pci_bus *bus) { __pci_bus_size_bridges(bus, NULL); } EXPORT_SYMBOL(pci_bus_size_bridges); static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *realloc_head, struct list_head *fail_head) { struct pci_bus *b; struct pci_dev *dev; pbus_assign_resources_sorted(bus, realloc_head, fail_head); list_for_each_entry(dev, &bus->devices, bus_list) { b = dev->subordinate; if (!b) continue; __pci_bus_assign_resources(b, realloc_head, fail_head); switch (dev->class >> 8) { case PCI_CLASS_BRIDGE_PCI: if (!pci_is_enabled(dev)) pci_setup_bridge(b); break; case PCI_CLASS_BRIDGE_CARDBUS: pci_setup_cardbus(b); break; default: dev_info(&dev->dev, "not setting up bridge for bus " "%04x:%02x\n", pci_domain_nr(b), b->number); break; } } } void __ref pci_bus_assign_resources(const struct pci_bus *bus) { __pci_bus_assign_resources(bus, NULL, NULL); } EXPORT_SYMBOL(pci_bus_assign_resources); static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge, struct list_head *add_head, struct list_head *fail_head) { struct pci_bus *b; pdev_assign_resources_sorted((struct pci_dev *)bridge, add_head, fail_head); b = bridge->subordinate; if (!b) return; __pci_bus_assign_resources(b, add_head, fail_head); switch (bridge->class >> 8) { case PCI_CLASS_BRIDGE_PCI: pci_setup_bridge(b); break; case PCI_CLASS_BRIDGE_CARDBUS: pci_setup_cardbus(b); break; default: dev_info(&bridge->dev, "not setting up bridge for bus " "%04x:%02x\n", pci_domain_nr(b), b->number); break; } } static void 
pci_bridge_release_resources(struct pci_bus *bus, unsigned long type) { int idx; bool changed = false; struct pci_dev *dev; struct resource *r; unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; dev = bus->self; for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END; idx++) { r = &dev->resource[idx]; if ((r->flags & type_mask) != type) continue; if (!r->parent) continue; /* * if there are children under that, we should release them * all */ release_child_resources(r); if (!release_resource(r)) { dev_printk(KERN_DEBUG, &dev->dev, "resource %d %pR released\n", idx, r); /* keep the old size */ r->end = resource_size(r) - 1; r->start = 0; r->flags = 0; changed = true; } } if (changed) { /* avoiding touch the one without PREF */ if (type & IORESOURCE_PREFETCH) type = IORESOURCE_PREFETCH; __pci_setup_bridge(bus, type); } } enum release_type { leaf_only, whole_subtree, }; /* * try to release pci bridge resources that is from leaf bridge, * so we can allocate big new one later */ static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus, unsigned long type, enum release_type rel_type) { struct pci_dev *dev; bool is_leaf_bridge = true; list_for_each_entry(dev, &bus->devices, bus_list) { struct pci_bus *b = dev->subordinate; if (!b) continue; is_leaf_bridge = false; if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) continue; if (rel_type == whole_subtree) pci_bus_release_bridge_resources(b, type, whole_subtree); } if (pci_is_root_bus(bus)) return; if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI) return; if ((rel_type == whole_subtree) || is_leaf_bridge) pci_bridge_release_resources(bus, type); } static void pci_bus_dump_res(struct pci_bus *bus) { struct resource *res; int i; pci_bus_for_each_resource(bus, res, i) { if (!res || !res->end || !res->flags) continue; dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); } } static void pci_bus_dump_resources(struct pci_bus *bus) { struct pci_bus *b; struct pci_dev 
*dev; pci_bus_dump_res(bus); list_for_each_entry(dev, &bus->devices, bus_list) { b = dev->subordinate; if (!b) continue; pci_bus_dump_resources(b); } } static int __init pci_bus_get_depth(struct pci_bus *bus) { int depth = 0; struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { int ret; struct pci_bus *b = dev->subordinate; if (!b) continue; ret = pci_bus_get_depth(b); if (ret + 1 > depth) depth = ret + 1; } return depth; } static int __init pci_get_max_depth(void) { int depth = 0; struct pci_bus *bus; list_for_each_entry(bus, &pci_root_buses, node) { int ret; ret = pci_bus_get_depth(bus); if (ret > depth) depth = ret; } return depth; } /* * -1: undefined, will auto detect later * 0: disabled by user * 1: disabled by auto detect * 2: enabled by user * 3: enabled by auto detect */ enum enable_type { undefined = -1, user_disabled, auto_disabled, user_enabled, auto_enabled, }; static enum enable_type pci_realloc_enable __initdata = undefined; void __init pci_realloc_get_opt(char *str) { if (!strncmp(str, "off", 3)) pci_realloc_enable = user_disabled; else if (!strncmp(str, "on", 2)) pci_realloc_enable = user_enabled; } static bool __init pci_realloc_enabled(void) { return pci_realloc_enable >= user_enabled; } static void __init pci_realloc_detect(void) { #if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO) struct pci_dev *dev = NULL; if (pci_realloc_enable != undefined) return; for_each_pci_dev(dev) { int i; for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) { struct resource *r = &dev->resource[i]; /* Not assigned, or rejected by kernel ? 
*/ if (r->flags && !r->start) { pci_realloc_enable = auto_enabled; return; } } } #endif } /* * first try will not touch pci bridge res * second and later try will clear small leaf bridge res * will stop till to the max deepth if can not find good one */ void __init pci_assign_unassigned_resources(void) { struct pci_bus *bus; LIST_HEAD(realloc_head); /* list of resources that want additional resources */ struct list_head *add_list = NULL; int tried_times = 0; enum release_type rel_type = leaf_only; LIST_HEAD(fail_head); struct pci_dev_resource *fail_res; unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; int pci_try_num = 1; /* don't realloc if asked to do so */ pci_realloc_detect(); if (pci_realloc_enabled()) { int max_depth = pci_get_max_depth(); pci_try_num = max_depth + 1; printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", max_depth, pci_try_num); } again: /* * last try will use add_list, otherwise will try good to have as * must have, so can realloc parent bridge resource */ if (tried_times + 1 == pci_try_num) add_list = &realloc_head; /* Depth first, calculate sizes and alignments of all subordinate buses. */ list_for_each_entry(bus, &pci_root_buses, node) __pci_bus_size_bridges(bus, add_list); /* Depth last, allocate resources and update the hardware. */ list_for_each_entry(bus, &pci_root_buses, node) __pci_bus_assign_resources(bus, add_list, &fail_head); if (add_list) BUG_ON(!list_empty(add_list)); tried_times++; /* any device complain? */ if (list_empty(&fail_head)) goto enable_and_dump; if (tried_times >= pci_try_num) { if (pci_realloc_enable == undefined) printk(KERN_INFO "Some PCI device resources are unassigned, try booting with pci=realloc\n"); else if (pci_realloc_enable == auto_enabled) printk(KERN_INFO "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n"); free_list(&fail_head); goto enable_and_dump; } printk(KERN_DEBUG "PCI: No. 
%d try to assign unassigned res\n", tried_times + 1); /* third times and later will not check if it is leaf */ if ((tried_times + 1) > 2) rel_type = whole_subtree; /* * Try to release leaf bridge's resources that doesn't fit resource of * child device under that bridge */ list_for_each_entry(fail_res, &fail_head, list) { bus = fail_res->dev->bus; pci_bus_release_bridge_resources(bus, fail_res->flags & type_mask, rel_type); } /* restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; res->start = fail_res->start; res->end = fail_res->end; res->flags = fail_res->flags; if (fail_res->dev->subordinate) res->flags = 0; } free_list(&fail_head); goto again; enable_and_dump: /* Depth last, update the hardware. */ list_for_each_entry(bus, &pci_root_buses, node) pci_enable_bridges(bus); /* dump the resource on buses */ list_for_each_entry(bus, &pci_root_buses, node) pci_bus_dump_resources(bus); } void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) { struct pci_bus *parent = bridge->subordinate; LIST_HEAD(add_list); /* list of resources that want additional resources */ int tried_times = 0; LIST_HEAD(fail_head); struct pci_dev_resource *fail_res; int retval; unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; again: __pci_bus_size_bridges(parent, &add_list); __pci_bridge_assign_resources(bridge, &add_list, &fail_head); BUG_ON(!list_empty(&add_list)); tried_times++; if (list_empty(&fail_head)) goto enable_all; if (tried_times >= 2) { /* still fail, don't need to try more */ free_list(&fail_head); goto enable_all; } printk(KERN_DEBUG "PCI: No. 
%d try to assign unassigned res\n", tried_times + 1); /* * Try to release leaf bridge's resources that doesn't fit resource of * child device under that bridge */ list_for_each_entry(fail_res, &fail_head, list) { struct pci_bus *bus = fail_res->dev->bus; unsigned long flags = fail_res->flags; pci_bus_release_bridge_resources(bus, flags & type_mask, whole_subtree); } /* restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; res->start = fail_res->start; res->end = fail_res->end; res->flags = fail_res->flags; if (fail_res->dev->subordinate) res->flags = 0; } free_list(&fail_head); goto again; enable_all: retval = pci_reenable_device(bridge); pci_set_master(bridge); pci_enable_bridges(parent); } EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources); #ifdef CONFIG_HOTPLUG /** * pci_rescan_bus - scan a PCI bus for devices. * @bus: PCI bus to scan * * Scan a PCI bus and child buses for new devices, adds them, * and enables them. * * Returns the max number of subordinate bus discovered. */ unsigned int __ref pci_rescan_bus(struct pci_bus *bus) { unsigned int max; struct pci_dev *dev; LIST_HEAD(add_list); /* list of resources that want additional resources */ max = pci_scan_child_bus(bus); down_read(&pci_bus_sem); list_for_each_entry(dev, &bus->devices, bus_list) if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) if (dev->subordinate) __pci_bus_size_bridges(dev->subordinate, &add_list); up_read(&pci_bus_sem); __pci_bus_assign_resources(bus, &add_list, NULL); BUG_ON(!list_empty(&add_list)); pci_enable_bridges(bus); pci_bus_add_devices(bus); return max; } EXPORT_SYMBOL_GPL(pci_rescan_bus); #endif
gpl-2.0
skritchz/android_kernel_wiko_peaxjb
drivers/media/video/s5p-tv/mixer_vp_layer.c
7473
6396
/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version
 */

#include "mixer.h"
#include "regs-vp.h"

#include <media/videobuf2-dma-contig.h>

/* FORMAT DEFINITIONS */

/*
 * NV12 in a single buffer: full-size luma plane followed by a
 * half-size (2x2 subsampled) interleaved CbCr plane.
 * .plane entries give the per-format width/height/size divisors
 * used by mxr_get_plane_size(); .cookie is the raw VP register mode.
 */
static const struct mxr_format mxr_fmt_nv12 = {
	.name = "NV12",
	.fourcc = V4L2_PIX_FMT_NV12,
	.colorspace = V4L2_COLORSPACE_JPEG,
	.num_planes = 2,
	.plane = {
		{ .width = 1, .height = 1, .size = 1 },
		{ .width = 2, .height = 2, .size = 2 },
	},
	.num_subframes = 1,
	.cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
};

/* Same layout as NV12 but with Cr/Cb swapped in the chroma plane. */
static const struct mxr_format mxr_fmt_nv21 = {
	.name = "NV21",
	.fourcc = V4L2_PIX_FMT_NV21,
	.colorspace = V4L2_COLORSPACE_JPEG,
	.num_planes = 2,
	.plane = {
		{ .width = 1, .height = 1, .size = 1 },
		{ .width = 2, .height = 2, .size = 2 },
	},
	.num_subframes = 1,
	.cookie = VP_MODE_NV21 | VP_MODE_MEM_LINEAR,
};

/* NV12 with each plane in its own buffer (multi-planar API). */
static const struct mxr_format mxr_fmt_nv12m = {
	.name = "NV12 (mplane)",
	.fourcc = V4L2_PIX_FMT_NV12M,
	.colorspace = V4L2_COLORSPACE_JPEG,
	.num_planes = 2,
	.plane = {
		{ .width = 1, .height = 1, .size = 1 },
		{ .width = 2, .height = 2, .size = 2 },
	},
	.num_subframes = 2,
	.plane2subframe = {0, 1},
	.cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
};

/* Samsung 64x32 macroblock-tiled NV12, two buffers. */
static const struct mxr_format mxr_fmt_nv12mt = {
	.name = "NV12 tiled (mplane)",
	.fourcc = V4L2_PIX_FMT_NV12MT,
	.colorspace = V4L2_COLORSPACE_JPEG,
	.num_planes = 2,
	.plane = {
		{ .width = 128, .height = 32, .size = 4096 },
		{ .width = 128, .height = 32, .size = 2048 },
	},
	.num_subframes = 2,
	.plane2subframe = {0, 1},
	.cookie = VP_MODE_NV12 | VP_MODE_MEM_TILED,
};

/* Formats supported by the video processor (VP) layer. */
static const struct mxr_format *mxr_video_format[] = {
	&mxr_fmt_nv12,
	&mxr_fmt_nv21,
	&mxr_fmt_nv12m,
	&mxr_fmt_nv12mt,
};

/* AUXILIARY CALLBACKS */

/* Unregister the layer from V4L2 and drop its base resources. */
static void mxr_vp_layer_release(struct mxr_layer *layer)
{
	mxr_base_layer_unregister(layer);
	mxr_base_layer_release(layer);
}

/*
 * Program the VP DMA addresses for the given buffer.
 * A NULL buffer writes zeroed addresses (disables fetching).
 * luma/chroma_addr[1] are the second-field addresses used for
 * interlaced scanning; for tiled mode they start 0x40 bytes in.
 */
static void mxr_vp_buffer_set(struct mxr_layer *layer,
	struct mxr_buffer *buf)
{
	dma_addr_t luma_addr[2] = {0, 0};
	dma_addr_t chroma_addr[2] = {0, 0};

	if (buf == NULL) {
		mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
		return;
	}
	luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 0);
	if (layer->fmt->num_subframes == 2) {
		chroma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb, 1);
	} else {
		/* FIXME: mxr_get_plane_size compute integer division,
		 * which is slow and should not be performed in interrupt */
		chroma_addr[0] = luma_addr[0] + mxr_get_plane_size(
			&layer->fmt->plane[0], layer->geo.src.full_width,
			layer->geo.src.full_height);
	}
	if (layer->fmt->cookie & VP_MODE_MEM_TILED) {
		luma_addr[1] = luma_addr[0] + 0x40;
		chroma_addr[1] = chroma_addr[0] + 0x40;
	} else {
		luma_addr[1] = luma_addr[0] + layer->geo.src.full_width;
		chroma_addr[1] = chroma_addr[0];
	}
	mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
}

/* Start/stop streaming on the VP layer (en is a boolean). */
static void mxr_vp_stream_set(struct mxr_layer *layer, int en)
{
	mxr_reg_vp_layer_stream(layer->mdev, en);
}

/* Push the current format and geometry into the VP registers. */
static void mxr_vp_format_set(struct mxr_layer *layer)
{
	mxr_reg_vp_format(layer->mdev, layer->fmt, &layer->geo);
}

/*
 * Recompute an offset so that a window of 'size' stays centered on
 * 'center' while remaining inside [0, upper]. MXR_NO_OFFSET forces
 * the offset to zero.
 */
static inline unsigned int do_center(unsigned int center,
	unsigned int size, unsigned int upper, unsigned int flags)
{
	unsigned int lower;

	if (flags & MXR_NO_OFFSET)
		return 0;

	lower = center - min(center, size / 2);
	return min(lower, upper - size);
}

/*
 * Clamp the requested geometry to what the VP hardware can do.
 * The cases deliberately fall through: fixing an earlier stage
 * re-validates every later one (compose -> crop -> source).
 */
static void mxr_vp_fix_geometry(struct mxr_layer *layer,
	enum mxr_geometry_stage stage, unsigned long flags)
{
	struct mxr_geometry *geo = &layer->geo;
	struct mxr_crop *src = &geo->src;
	struct mxr_crop *dst = &geo->dst;
	unsigned long x_center, y_center;

	switch (stage) {

	case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
		/* fall through */
	case MXR_GEOMETRY_COMPOSE:
		/* remember center of the area */
		x_center = dst->x_offset + dst->width / 2;
		y_center = dst->y_offset + dst->height / 2;
		/* ensure that compose is reachable using 16x scaling */
		dst->width = clamp(dst->width, 8U, 16 * src->full_width);
		dst->height = clamp(dst->height, 1U, 16 * src->full_height);
		/* setup offsets */
		dst->x_offset = do_center(x_center, dst->width,
			dst->full_width, flags);
		dst->y_offset = do_center(y_center, dst->height,
			dst->full_height, flags);
		flags = 0; /* remove possible MXR_NO_OFFSET flag */
		/* fall through */
	case MXR_GEOMETRY_CROP:
		/* remember center of the area */
		x_center = src->x_offset + src->width / 2;
		y_center = src->y_offset + src->height / 2;
		/* ensure scaling is between 0.25x .. 16x */
		src->width = clamp(src->width, round_up(dst->width / 16, 4),
			dst->width * 4);
		src->height = clamp(src->height, round_up(dst->height / 16, 4),
			dst->height * 4);
		/* hardware limits */
		src->width = clamp(src->width, 32U, 2047U);
		src->height = clamp(src->height, 4U, 2047U);
		/* setup offsets */
		src->x_offset = do_center(x_center, src->width,
			src->full_width, flags);
		src->y_offset = do_center(y_center, src->height,
			src->full_height, flags);
		/* setting scaling ratio; 16.16 fixed point */
		geo->x_ratio = (src->width << 16) / dst->width;
		geo->y_ratio = (src->height << 16) / dst->height;
		/* fall through */
	case MXR_GEOMETRY_SOURCE:
		src->full_width = clamp(src->full_width,
			ALIGN(src->width + src->x_offset, 8), 8192U);
		src->full_height = clamp(src->full_height,
			src->height + src->y_offset, 8192U);
	};
}

/* PUBLIC API */

/*
 * Allocate and register the VP layer 'videoN' for mixer device mdev.
 * Returns the layer, or NULL on failure (errors are logged).
 */
struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
{
	struct mxr_layer *layer;
	int ret;
	struct mxr_layer_ops ops = {
		.release = mxr_vp_layer_release,
		.buffer_set = mxr_vp_buffer_set,
		.stream_set = mxr_vp_stream_set,
		.format_set = mxr_vp_format_set,
		.fix_geometry = mxr_vp_fix_geometry,
	};
	char name[32];

	sprintf(name, "video%d", idx);

	layer = mxr_base_layer_create(mdev, idx, name, &ops);
	if (layer == NULL) {
		mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
		goto fail;
	}

	layer->fmt_array = mxr_video_format;
	layer->fmt_array_size = ARRAY_SIZE(mxr_video_format);

	ret = mxr_base_layer_register(layer);
	if (ret)
		goto fail_layer;

	return layer;

fail_layer:
	mxr_base_layer_release(layer);

fail:
	return NULL;
}
gpl-2.0
TeslaProject/android_kernel_moto_shamu
kernel/trace/ring_buffer_benchmark.c
7473
10723
/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>
#include <asm/local.h>

/* Mirror of the ring buffer's per-page header used by read_page(). */
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static struct completion read_start;
static struct completion read_done;

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

/*
 * Nice values may be negative and the fifo priorities default to -1
 * (meaning "not SCHED_FIFO"), so these module parameters must be
 * declared as signed ints; 'uint' would parse/print -1 as 4294967295.
 */
static int producer_nice = 19;
static int consumer_nice = 19;

static int producer_fifo = -1;
static int consumer_fifo = -1;

module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");

static int read_events;

static int kill_test;

/* Flag a fatal test inconsistency once and WARN on first occurrence. */
#define KILL_TEST()				\
	do {					\
		if (!kill_test) {		\
			kill_test = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

/*
 * Consume a single event from @cpu and sanity-check its payload
 * (the producer writes the producing CPU id into each event).
 */
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		KILL_TEST();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

/*
 * Swap out a full page from @cpu's buffer and walk its raw events,
 * validating each one. Exercises the page-level read API.
 */
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (!bpage)
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !kill_test; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				KILL_TEST();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					KILL_TEST();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				if (!event->array[0]) {
					KILL_TEST();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (kill_test)
				break;

			if (inc <= 0) {
				KILL_TEST();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

/*
 * Reader main loop: drain all online CPUs (alternating between the
 * event API and the page API each run) until the producer signals
 * reader_finish, then acknowledge via read_done.
 */
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	while (!reader_finish && !kill_test) {
		int found;

		do {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (kill_test)
					break;
				if (stat == EVENT_FOUND)
					found = 1;
			}
		} while (found && !kill_test);

		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	reader_finish = 0;
	complete(&read_done);
}

/*
 * Producer main loop: hammer the buffer for RUN_TIME seconds,
 * periodically waking the consumer, then report statistics via
 * trace_printk.
 */
static void ring_buffer_producer(void)
{
	struct timeval start_tv;
	struct timeval end_tv;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	do_gettimeofday(&start_tv);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		do_gettimeofday(&end_tv);

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * If we are a non preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif

	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		/* finish var visible before waking up the consumer */
		smp_wmb();
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = end_tv.tv_sec - start_tv.tv_sec;
	time *= USEC_PER_SEC;
	time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (kill_test)
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo < 0)
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
		else
			trace_printk("Running Consumer at SCHED_FIFO %d\n",
				     consumer_fifo);
	}
	if (producer_fifo < 0)
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);
	else
		trace_printk("Running Producer at SCHED_FIFO %d\n",
			     producer_fifo);

	/* Let the user know that the test is running at low priority */
	if (producer_fifo < 0 && consumer_fifo < 0 &&
	    producer_nice == 19 && consumer_nice == 19)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

	trace_printk("Time: %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read: (reader disabled)\n");
	else
		trace_printk("Read: %ld (by %s)\n", read,
			read_events ? "events" : "pages");
	trace_printk("Entries: %lld\n", entries);
	trace_printk("Total: %lld\n", entries + overruns + read);
	trace_printk("Missed: %ld\n", missed);
	trace_printk("Hit: %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}

/* Park the current thread until kthread_stop() is called. */
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

/* Consumer kthread: hand-shakes with the producer via read_start. */
static int ring_buffer_consumer_thread(void *arg)
{
	while (!kthread_should_stop() && !kill_test) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop() || kill_test)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	__set_current_state(TASK_RUNNING);

	if (kill_test)
		wait_to_die();

	return 0;
}

/* Producer kthread: run a benchmark pass, then sleep SLEEP_TIME secs. */
static int ring_buffer_producer_thread(void *arg)
{
	init_completion(&read_start);

	while (!kthread_should_stop() && !kill_test) {
		ring_buffer_reset(buffer);

		if (consumer) {
			smp_wmb();
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ * SLEEP_TIME);
		__set_current_state(TASK_RUNNING);
	}

	if (kill_test)
		wait_to_die();

	return 0;
}

/*
 * Allocate the buffer, spawn the threads and apply the requested
 * scheduling policy/priority to each.
 *
 * Fix: the producer's SCHED_FIFO priority was previously initialized
 * from consumer_fifo instead of producer_fifo.
 */
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 0) {
			struct sched_param param = {
				.sched_priority = consumer_fifo
			};
			sched_setscheduler(consumer, SCHED_FIFO, &param);
		} else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 0) {
		struct sched_param param = {
			.sched_priority = producer_fifo
		};
		sched_setscheduler(producer, SCHED_FIFO, &param);
	} else
		set_user_nice(producer, producer_nice);

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");
gpl-2.0
tdm/kernel_huawei_msm8928
drivers/s390/char/keyboard.c
7985
12529
/*
 * drivers/s390/char/keyboard.c
 *    ebcdic keycode functions for s390 console drivers
 *
 *  S390 version
 *    Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <linux/consolemap.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include <asm/uaccess.h>

#include "keyboard.h"

/*
 * Handler Tables.
 * Indexed by key type (KTYP); only latin, function, special and
 * dead keys are handled here, everything else is ignored.
 */
#define K_HANDLERS\
	k_self,		k_fn,		k_spec,		k_ignore,\
	k_dead,		k_ignore,	k_ignore,	k_ignore,\
	k_ignore,	k_ignore,	k_ignore,	k_ignore,\
	k_ignore,	k_ignore,	k_ignore,	k_ignore

typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
static k_handler_fn K_HANDLERS;
static k_handler_fn *k_handler[16] = { K_HANDLERS };

/* maximum values each key_handler can handle */
static const int kbd_max_vals[] = {
	255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
	NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);

/* ascii character for each dead (diacritical) key value */
static unsigned char ret_diacr[NR_DEAD] = {
	'`', '\'', '^', '~', '"', ','
};

/*
 * Alloc/free of kbd_data structures.
 */
/*
 * Allocate a kbd_data with deep copies of the global key maps,
 * function-key strings and accent table, so each console can be
 * remapped independently. Returns NULL on allocation failure;
 * all partially allocated pieces are released on the error paths.
 */
struct kbd_data *
kbd_alloc(void) {
	struct kbd_data *kbd;
	int i;

	kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
	if (!kbd)
		goto out;
	kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL);
	if (!kbd->key_maps)
		goto out_kbd;
	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
		if (key_maps[i]) {
			kbd->key_maps[i] = kmemdup(key_maps[i],
						   sizeof(u_short) * NR_KEYS,
						   GFP_KERNEL);
			if (!kbd->key_maps[i])
				goto out_maps;
		}
	}
	kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
	if (!kbd->func_table)
		goto out_maps;
	for (i = 0; i < ARRAY_SIZE(func_table); i++) {
		if (func_table[i]) {
			kbd->func_table[i] = kstrdup(func_table[i],
						     GFP_KERNEL);
			if (!kbd->func_table[i])
				goto out_func;
		}
	}
	kbd->fn_handler =
		kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
	if (!kbd->fn_handler)
		goto out_func;
	kbd->accent_table = kmemdup(accent_table,
				    sizeof(struct kbdiacruc) * MAX_DIACR,
				    GFP_KERNEL);
	if (!kbd->accent_table)
		goto out_fn_handler;
	kbd->accent_table_size = accent_table_size;
	return kbd;

out_fn_handler:
	kfree(kbd->fn_handler);
out_func:
	for (i = 0; i < ARRAY_SIZE(func_table); i++)
		kfree(kbd->func_table[i]);
	kfree(kbd->func_table);
out_maps:
	for (i = 0; i < ARRAY_SIZE(key_maps); i++)
		kfree(kbd->key_maps[i]);
	kfree(kbd->key_maps);
out_kbd:
	kfree(kbd);
out:
	return NULL;
}

/* Free a kbd_data and every table kbd_alloc() duplicated into it. */
void
kbd_free(struct kbd_data *kbd)
{
	int i;

	kfree(kbd->accent_table);
	kfree(kbd->fn_handler);
	for (i = 0; i < ARRAY_SIZE(func_table); i++)
		kfree(kbd->func_table[i]);
	kfree(kbd->func_table);
	for (i = 0; i < ARRAY_SIZE(key_maps); i++)
		kfree(kbd->key_maps[i]);
	kfree(kbd->key_maps);
	kfree(kbd);
}

/*
 * Generate ascii -> ebcdic translation table from kbd_data.
 */
void
kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
{
	unsigned short *keymap, keysym;
	int i, j, k;

	memset(ascebc, 0x40, 256);
	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
		keymap = kbd->key_maps[i];
		if (!keymap)
			continue;
		for (j = 0; j < NR_KEYS; j++) {
			/* bit 7 of the keycode encodes the shift state */
			k = ((i & 1) << 7) + j;
			keysym = keymap[j];
			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
			    KTYP(keysym) == (KT_LETTER | 0xf0))
				ascebc[KVAL(keysym)] = k;
			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
				ascebc[ret_diacr[KVAL(keysym)]] = k;
		}
	}
}

#if 0
/*
 * Generate ebcdic -> ascii translation table from kbd_data.
 */
void
kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
{
	unsigned short *keymap, keysym;
	int i, j, k;

	memset(ebcasc, ' ', 256);
	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
		keymap = kbd->key_maps[i];
		if (!keymap)
			continue;
		for (j = 0; j < NR_KEYS; j++) {
			keysym = keymap[j];
			k = ((i & 1) << 7) + j;
			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
			    KTYP(keysym) == (KT_LETTER | 0xf0))
				ebcasc[k] = KVAL(keysym);
			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
				ebcasc[k] = ret_diacr[KVAL(keysym)];
		}
	}
}
#endif

/*
 * We have a combining character DIACR here, followed by the character CH.
 * If the combination occurs in the table, return the corresponding value.
 * Otherwise, if CH is a space or equals DIACR, return DIACR.
 * Otherwise, conclude that DIACR was not combining after all,
 * queue it and return CH.
 */
static unsigned int
handle_diacr(struct kbd_data *kbd, unsigned int ch)
{
	int i, d;

	d = kbd->diacr;
	kbd->diacr = 0;

	for (i = 0; i < kbd->accent_table_size; i++) {
		if (kbd->accent_table[i].diacr == d &&
		    kbd->accent_table[i].base == ch)
			return kbd->accent_table[i].result;
	}

	if (ch == ' ' || ch == d)
		return d;

	kbd_put_queue(kbd->tty, d);
	return ch;
}

/*
 * Handle dead key.
 */
static void
k_dead(struct kbd_data *kbd, unsigned char value)
{
	value = ret_diacr[value];
	/* two identical dead keys in a row resolve via the accent table */
	kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
}

/*
 * Normal character handler.
 */
static void
k_self(struct kbd_data *kbd, unsigned char value)
{
	if (kbd->diacr)
		value = handle_diacr(kbd, value);
	kbd_put_queue(kbd->tty, value);
}

/*
 * Special key handlers
 */
static void
k_ignore(struct kbd_data *kbd, unsigned char value)
{
}

/*
 * Function key handler.
 */
static void
k_fn(struct kbd_data *kbd, unsigned char value)
{
	if (kbd->func_table[value])
		kbd_puts_queue(kbd->tty, kbd->func_table[value]);
}

/* Dispatch a special key to its registered fn_handler, if any. */
static void
k_spec(struct kbd_data *kbd, unsigned char value)
{
	if (value >= NR_FN_HANDLER)
		return;
	if (kbd->fn_handler[value])
		kbd->fn_handler[value](kbd);
}

/*
 * Put utf8 character to tty flip buffer.
 * UTF-8 is defined for words of up to 31 bits,
 * but we need only 16 bits here
 */
static void
to_utf8(struct tty_struct *tty, ushort c)
{
	if (c < 0x80)
		/*  0******* */
		kbd_put_queue(tty, c);
	else if (c < 0x800) {
		/* 110***** 10****** */
		kbd_put_queue(tty, 0xc0 | (c >> 6));
		kbd_put_queue(tty, 0x80 | (c & 0x3f));
	} else {
		/* 1110**** 10****** 10****** */
		kbd_put_queue(tty, 0xe0 | (c >> 12));
		kbd_put_queue(tty, 0x80 | ((c >> 6) & 0x3f));
		kbd_put_queue(tty, 0x80 | (c & 0x3f));
	}
}

/*
 * Process keycode.
 */
/*
 * Translate a raw keycode via the per-console keymaps (maps 0/1/4/5
 * select the 128-entry bank) and dispatch to the matching handler.
 * Keysyms without the 0xf0 type marker are emitted as UTF-8.
 * The "^" prefix implements the s390 SysRq sequence (^ - <key>).
 */
void
kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
{
	unsigned short keysym;
	unsigned char type, value;

	if (!kbd || !kbd->tty)
		return;

	if (keycode >= 384)
		keysym = kbd->key_maps[5][keycode - 384];
	else if (keycode >= 256)
		keysym = kbd->key_maps[4][keycode - 256];
	else if (keycode >= 128)
		keysym = kbd->key_maps[1][keycode - 128];
	else
		keysym = kbd->key_maps[0][keycode];

	type = KTYP(keysym);
	if (type >= 0xf0) {
		type -= 0xf0;
		if (type == KT_LETTER)
			type = KT_LATIN;
		value = KVAL(keysym);
#ifdef CONFIG_MAGIC_SYSRQ	       /* Handle the SysRq Hack */
		if (kbd->sysrq) {
			if (kbd->sysrq == K(KT_LATIN, '-')) {
				kbd->sysrq = 0;
				handle_sysrq(value);
				return;
			}
			if (value == '-') {
				kbd->sysrq = K(KT_LATIN, '-');
				return;
			}
			/* Incomplete sysrq sequence. */
			(*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
			kbd->sysrq = 0;
		} else if ((type == KT_LATIN && value == '^') ||
			   (type == KT_DEAD && ret_diacr[value] == '^')) {
			kbd->sysrq = K(type, value);
			return;
		}
#endif
		(*k_handler[type])(kbd, value);
	} else
		to_utf8(kbd->tty, keysym);
}

/*
 * Ioctl stuff.
 */
/*
 * KDGKBENT/KDSKBENT: read or modify a single keymap entry.
 * @perm must be set for writes; setting K_NOSUCHMAP on index 0
 * deallocates a whole map, and changing the SAK entry requires
 * CAP_SYS_ADMIN.
 */
static int
do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
	      int cmd, int perm)
{
	struct kbentry tmp;
	ushort *key_map, val, ov;

	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
		return -EFAULT;
#if NR_KEYS < 256
	if (tmp.kb_index >= NR_KEYS)
		return -EINVAL;
#endif
#if MAX_NR_KEYMAPS < 256
	if (tmp.kb_table >= MAX_NR_KEYMAPS)
		return -EINVAL;
#endif

	switch (cmd) {
	case KDGKBENT:
		key_map = kbd->key_maps[tmp.kb_table];
		if (key_map) {
			val = U(key_map[tmp.kb_index]);
			if (KTYP(val) >= KBD_NR_TYPES)
				val = K_HOLE;
		} else
			val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
		return put_user(val, &user_kbe->kb_value);
	case KDSKBENT:
		if (!perm)
			return -EPERM;
		if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
			/* disallocate map */
			key_map = kbd->key_maps[tmp.kb_table];
			if (key_map) {
				kbd->key_maps[tmp.kb_table] = NULL;
				kfree(key_map);
			}
			break;
		}

		if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
			return -EINVAL;
		if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
			return -EINVAL;

		if (!(key_map = kbd->key_maps[tmp.kb_table])) {
			int j;

			key_map = kmalloc(sizeof(plain_map),
					  GFP_KERNEL);
			if (!key_map)
				return -ENOMEM;
			kbd->key_maps[tmp.kb_table] = key_map;
			for (j = 0; j < NR_KEYS; j++)
				key_map[j] = U(K_HOLE);
		}
		ov = U(key_map[tmp.kb_index]);
		if (tmp.kb_value == ov)
			break;	/* nothing to do */
		/*
		 * Attention Key.
		 */
		if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
		    !capable(CAP_SYS_ADMIN))
			return -EPERM;
		key_map[tmp.kb_index] = U(tmp.kb_value);
		break;
	}
	return 0;
}

/*
 * KDGKBSENT/KDSKBSENT: read or replace the string bound to a
 * function key. The stored copy is heap allocated; the old string
 * is freed on replacement.
 */
static int
do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
	       int cmd, int perm)
{
	unsigned char kb_func;
	char *p;
	int len;

	/* Get u_kbs->kb_func. */
	if (get_user(kb_func, &u_kbs->kb_func))
		return -EFAULT;
#if MAX_NR_FUNC < 256
	if (kb_func >= MAX_NR_FUNC)
		return -EINVAL;
#endif

	switch (cmd) {
	case KDGKBSENT:
		p = kbd->func_table[kb_func];
		if (p) {
			len = strlen(p);
			if (len >= sizeof(u_kbs->kb_string))
				len = sizeof(u_kbs->kb_string) - 1;
			if (copy_to_user(u_kbs->kb_string, p, len))
				return -EFAULT;
		} else
			len = 0;
		if (put_user('\0', u_kbs->kb_string + len))
			return -EFAULT;
		break;
	case KDSKBSENT:
		if (!perm)
			return -EPERM;
		len = strnlen_user(u_kbs->kb_string,
				   sizeof(u_kbs->kb_string) - 1);
		if (!len)
			return -EFAULT;
		if (len > sizeof(u_kbs->kb_string) - 1)
			return -EINVAL;
		p = kmalloc(len + 1, GFP_KERNEL);
		if (!p)
			return -ENOMEM;
		if (copy_from_user(p, u_kbs->kb_string, len)) {
			kfree(p);
			return -EFAULT;
		}
		p[len] = 0;
		kfree(kbd->func_table[kb_func]);
		kbd->func_table[kb_func] = p;
		break;
	}
	return 0;
}

/*
 * Main keyboard ioctl dispatcher: keymap entries, function-key
 * strings and accent (diacritical) tables. Write access requires
 * tty ownership or CAP_SYS_TTY_CONFIG.
 */
int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg)
{
	void __user *argp;
	unsigned int ct;
	int perm;

	argp = (void __user *)arg;

	/*
	 * To have permissions to do most of the vt ioctls, we either have
	 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
	 */
	perm = current->signal->tty == kbd->tty || capable(CAP_SYS_TTY_CONFIG);
	switch (cmd) {
	case KDGKBTYPE:
		return put_user(KB_101, (char __user *)argp);
	case KDGKBENT:
	case KDSKBENT:
		return do_kdsk_ioctl(kbd, argp, cmd, perm);
	case KDGKBSENT:
	case KDSKBSENT:
		return do_kdgkb_ioctl(kbd, argp, cmd, perm);
	case KDGKBDIACR:
	{
		struct kbdiacrs __user *a = argp;
		struct kbdiacr diacr;
		int i;

		if (put_user(kbd->accent_table_size, &a->kb_cnt))
			return -EFAULT;
		for (i = 0; i < kbd->accent_table_size; i++) {
			diacr.diacr = kbd->accent_table[i].diacr;
			diacr.base = kbd->accent_table[i].base;
			diacr.result = kbd->accent_table[i].result;
			if (copy_to_user(a->kbdiacr + i, &diacr,
					 sizeof(struct kbdiacr)))
				return -EFAULT;
		}
		return 0;
	}
	case KDGKBDIACRUC:
	{
		struct kbdiacrsuc __user *a = argp;

		ct = kbd->accent_table_size;
		if (put_user(ct, &a->kb_cnt))
			return -EFAULT;
		if (copy_to_user(a->kbdiacruc, kbd->accent_table,
				 ct * sizeof(struct kbdiacruc)))
			return -EFAULT;
		return 0;
	}
	case KDSKBDIACR:
	{
		struct kbdiacrs __user *a = argp;
		struct kbdiacr diacr;
		int i;

		if (!perm)
			return -EPERM;
		if (get_user(ct, &a->kb_cnt))
			return -EFAULT;
		if (ct >= MAX_DIACR)
			return -EINVAL;
		kbd->accent_table_size = ct;
		for (i = 0; i < ct; i++) {
			if (copy_from_user(&diacr, a->kbdiacr + i,
					   sizeof(struct kbdiacr)))
				return -EFAULT;
			kbd->accent_table[i].diacr = diacr.diacr;
			kbd->accent_table[i].base = diacr.base;
			kbd->accent_table[i].result = diacr.result;
		}
		return 0;
	}
	case KDSKBDIACRUC:
	{
		struct kbdiacrsuc __user *a = argp;

		if (!perm)
			return -EPERM;
		if (get_user(ct, &a->kb_cnt))
			return -EFAULT;
		if (ct >= MAX_DIACR)
			return -EINVAL;
		kbd->accent_table_size = ct;
		if (copy_from_user(kbd->accent_table, a->kbdiacruc,
				   ct * sizeof(struct kbdiacruc)))
			return -EFAULT;
		return 0;
	}
	default:
		return -ENOIOCTLCMD;
	}
}

EXPORT_SYMBOL(kbd_ioctl);
EXPORT_SYMBOL(kbd_ascebc);
EXPORT_SYMBOL(kbd_free);
EXPORT_SYMBOL(kbd_alloc);
EXPORT_SYMBOL(kbd_keycode);
gpl-2.0
StelixROM/kernel_lge_msm8974
drivers/staging/comedi/drivers/pcmad.c
8241
4707
/*
    comedi/drivers/pcmad.c
    Hardware driver for Winsystems PCM-A/D12 and PCM-A/D16

    COMEDI - Linux Control and Measurement Device Interface
    Copyright (C) 2000,2001 David A. Schleef <ds@schleef.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/
/*
Driver: pcmad
Description: Winsystems PCM-A/D12, PCM-A/D16
Author: ds
Devices: [Winsystems] PCM-A/D12 (pcmad12), PCM-A/D16 (pcmad16)
Status: untested

This driver was written on a bet that I couldn't write a driver
in less than 2 hours.  I won the bet, but never got paid.  =(

Configuration options:
  [0] - I/O port base
  [1] - unused
  [2] - Analog input reference
	0 = single ended
	1 = differential
  [3] - Analog input encoding (must match jumpers)
	0 = straight binary
	1 = two's complement
*/

#include <linux/interrupt.h>
#include "../comedidev.h"

#include <linux/ioport.h>

/* register map: 4 I/O ports starting at the configured base */
#define PCMAD_SIZE		4

#define PCMAD_STATUS		0
#define PCMAD_LSB		1
#define PCMAD_MSB		2
#define PCMAD_CONVERT		1

/* per-board description: only the ADC resolution differs */
struct pcmad_board_struct {
	const char *name;
	int n_ai_bits;
};
static const struct pcmad_board_struct pcmad_boards[] = {
	{
	 .name = "pcmad12",
	 .n_ai_bits = 12,
	 },
	{
	 .name = "pcmad16",
	 .n_ai_bits = 16,
	 },
};

#define this_board ((const struct pcmad_board_struct *)(dev->board_ptr))
#define n_pcmad_boards ARRAY_SIZE(pcmad_boards)

/* private state filled from the config options at attach time */
struct pcmad_priv_struct {
	int differential;	/* option [2]: 1 = differential inputs */
	int twos_comp;		/* option [3]: 1 = two's complement encoding */
};
#define devpriv ((struct pcmad_priv_struct *)dev->private)

static int pcmad_attach(struct comedi_device *dev,
			struct comedi_devconfig *it);
static int pcmad_detach(struct comedi_device *dev);
static struct comedi_driver driver_pcmad = {
	.driver_name = "pcmad",
	.module = THIS_MODULE,
	.attach = pcmad_attach,
	.detach = pcmad_detach,
	.board_name = &pcmad_boards[0].name,
	.num_names = n_pcmad_boards,
	.offset = sizeof(pcmad_boards[0]),
};

static int __init driver_pcmad_init_module(void)
{
	return comedi_driver_register(&driver_pcmad);
}

static void __exit driver_pcmad_cleanup_module(void)
{
	comedi_driver_unregister(&driver_pcmad);
}

module_init(driver_pcmad_init_module);
module_exit(driver_pcmad_cleanup_module);

#define TIMEOUT	100

/*
 * Read insn->n samples from one AI channel: trigger a conversion,
 * busy-wait for both status bits, then read LSB/MSB.
 * NOTE(review): a timed-out conversion is not reported; the loop
 * falls through and reads whatever is in the data registers.
 */
static int pcmad_ai_insn_read(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data)
{
	int i;
	int chan;
	int n;

	chan = CR_CHAN(insn->chanspec);

	for (n = 0; n < insn->n; n++) {
		outb(chan, dev->iobase + PCMAD_CONVERT);

		for (i = 0; i < TIMEOUT; i++) {
			if ((inb(dev->iobase + PCMAD_STATUS) & 0x3) == 0x3)
				break;
		}
		data[n] = inb(dev->iobase + PCMAD_LSB);
		data[n] |= (inb(dev->iobase + PCMAD_MSB) << 8);

		/* convert two's complement to offset binary for comedi */
		if (devpriv->twos_comp)
			data[n] ^= (1 << (this_board->n_ai_bits - 1));
	}

	return n;
}

/*
 * options:
 * 0	i/o base
 * 1	unused
 * 2	0=single ended 1=differential
 * 3	0=straight binary 1=two's comp
 */
static int pcmad_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	int ret;
	struct comedi_subdevice *s;
	unsigned long iobase;

	iobase = it->options[0];
	printk(KERN_INFO "comedi%d: pcmad: 0x%04lx ", dev->minor, iobase);
	if (!request_region(iobase, PCMAD_SIZE, "pcmad")) {
		printk(KERN_CONT "I/O port conflict\n");
		return -EIO;
	}
	printk(KERN_CONT "\n");
	dev->iobase = iobase;

	ret = alloc_subdevices(dev, 1);
	if (ret < 0)
		return ret;

	ret = alloc_private(dev, sizeof(struct pcmad_priv_struct));
	if (ret < 0)
		return ret;

	dev->board_name = this_board->name;

	s = dev->subdevices + 0;
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | AREF_GROUND;
	s->n_chan = 16;		/* XXX */
	s->len_chanlist = 1;
	s->insn_read = pcmad_ai_insn_read;
	s->maxdata = (1 << this_board->n_ai_bits) - 1;
	s->range_table = &range_unknown;

	return 0;
}

/* Release the IRQ (if any) and the I/O region claimed at attach. */
static int pcmad_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: pcmad: remove\n", dev->minor);

	if (dev->irq)
		free_irq(dev->irq, dev);
	if (dev->iobase)
		release_region(dev->iobase, PCMAD_SIZE);

	return 0;
}

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
boa19861105/B2_UHL
drivers/md/dm-sysfs.c
8753
2213
/*
 * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/sysfs.h>
#include <linux/dm-ioctl.h>
#include "dm.h"

/* sysfs attribute bound to a mapped_device show/store callback */
struct dm_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(struct mapped_device *, char *);
	ssize_t (*store)(struct mapped_device *, char *);
};

#define DM_ATTR_RO(_name) \
struct dm_sysfs_attr dm_attr_##_name = \
	__ATTR(_name, S_IRUGO, dm_attr_##_name##_show, NULL)

/*
 * Generic sysfs show: resolve the mapped_device from the embedded
 * kobject (taking a reference), call the attribute's show hook,
 * then drop the reference.
 */
static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr,
			    char *page)
{
	struct dm_sysfs_attr *dm_attr;
	struct mapped_device *md;
	ssize_t ret;

	dm_attr = container_of(attr, struct dm_sysfs_attr, attr);
	if (!dm_attr->show)
		return -EIO;

	md = dm_get_from_kobject(kobj);
	if (!md)
		return -EINVAL;

	ret = dm_attr->show(md, page);
	dm_put(md);

	return ret;
}

/* /sys/.../dm/name: the device's dm name, newline terminated */
static ssize_t dm_attr_name_show(struct mapped_device *md, char *buf)
{
	if (dm_copy_name_and_uuid(md, buf, NULL))
		return -EIO;

	strcat(buf, "\n");
	return strlen(buf);
}

/* /sys/.../dm/uuid: the device's dm uuid, newline terminated */
static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf)
{
	if (dm_copy_name_and_uuid(md, NULL, buf))
		return -EIO;

	strcat(buf, "\n");
	return strlen(buf);
}

/* /sys/.../dm/suspended: 1 if the device is suspended, else 0 */
static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
{
	sprintf(buf, "%d\n", dm_suspended_md(md));

	return strlen(buf);
}

static DM_ATTR_RO(name);
static DM_ATTR_RO(uuid);
static DM_ATTR_RO(suspended);

static struct attribute *dm_attrs[] = {
	&dm_attr_name.attr,
	&dm_attr_uuid.attr,
	&dm_attr_suspended.attr,
	NULL,
};

static const struct sysfs_ops dm_sysfs_ops = {
	.show	= dm_attr_show,
};

/*
 * dm kobject is embedded in mapped_device structure
 * no need to define release function here
 */
static struct kobj_type dm_ktype = {
	.sysfs_ops	= &dm_sysfs_ops,
	.default_attrs	= dm_attrs,
};

/*
 * Initialize kobj
 * because nobody using md yet, no need to call explicit dm_get/put
 */
int dm_sysfs_init(struct mapped_device *md)
{
	return kobject_init_and_add(dm_kobject(md), &dm_ktype,
				    &disk_to_dev(dm_disk(md))->kobj,
				    "%s", "dm");
}

/*
 * Remove kobj, called after all references removed
 */
void dm_sysfs_exit(struct mapped_device *md)
{
	kobject_put(dm_kobject(md));
}
gpl-2.0
fire855/android_kernel_wiko_l5510
drivers/md/dm-sysfs.c
8753
2213
/* * Copyright (C) 2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include <linux/sysfs.h> #include <linux/dm-ioctl.h> #include "dm.h" struct dm_sysfs_attr { struct attribute attr; ssize_t (*show)(struct mapped_device *, char *); ssize_t (*store)(struct mapped_device *, char *); }; #define DM_ATTR_RO(_name) \ struct dm_sysfs_attr dm_attr_##_name = \ __ATTR(_name, S_IRUGO, dm_attr_##_name##_show, NULL) static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct dm_sysfs_attr *dm_attr; struct mapped_device *md; ssize_t ret; dm_attr = container_of(attr, struct dm_sysfs_attr, attr); if (!dm_attr->show) return -EIO; md = dm_get_from_kobject(kobj); if (!md) return -EINVAL; ret = dm_attr->show(md, page); dm_put(md); return ret; } static ssize_t dm_attr_name_show(struct mapped_device *md, char *buf) { if (dm_copy_name_and_uuid(md, buf, NULL)) return -EIO; strcat(buf, "\n"); return strlen(buf); } static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf) { if (dm_copy_name_and_uuid(md, NULL, buf)) return -EIO; strcat(buf, "\n"); return strlen(buf); } static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf) { sprintf(buf, "%d\n", dm_suspended_md(md)); return strlen(buf); } static DM_ATTR_RO(name); static DM_ATTR_RO(uuid); static DM_ATTR_RO(suspended); static struct attribute *dm_attrs[] = { &dm_attr_name.attr, &dm_attr_uuid.attr, &dm_attr_suspended.attr, NULL, }; static const struct sysfs_ops dm_sysfs_ops = { .show = dm_attr_show, }; /* * dm kobject is embedded in mapped_device structure * no need to define release function here */ static struct kobj_type dm_ktype = { .sysfs_ops = &dm_sysfs_ops, .default_attrs = dm_attrs, }; /* * Initialize kobj * because nobody using md yet, no need to call explicit dm_get/put */ int dm_sysfs_init(struct mapped_device *md) { return kobject_init_and_add(dm_kobject(md), &dm_ktype, &disk_to_dev(dm_disk(md))->kobj, "%s", "dm"); } /* * Remove 
kobj, called after all references removed */ void dm_sysfs_exit(struct mapped_device *md) { kobject_put(dm_kobject(md)); }
gpl-2.0
SaberMod/lge-kernel-mako
arch/mips/txx9/jmr3927/setup.c
12849
6221
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * * Copyright 2001 MontaVista Software Inc. * Author: MontaVista Software, Inc. 
* ahennessy@mvista.com * * Copyright (C) 2000-2001 Toshiba Corporation * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <asm/reboot.h> #include <asm/txx9pio.h> #include <asm/txx9/generic.h> #include <asm/txx9/pci.h> #include <asm/txx9/jmr3927.h> #include <asm/mipsregs.h> static void jmr3927_machine_restart(char *command) { local_irq_disable(); #if 1 /* Resetting PCI bus */ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR); jmr3927_ioc_reg_out(JMR3927_IOC_RESET_PCI, JMR3927_IOC_RESET_ADDR); (void)jmr3927_ioc_reg_in(JMR3927_IOC_RESET_ADDR); /* flush WB */ mdelay(1); jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR); #endif jmr3927_ioc_reg_out(JMR3927_IOC_RESET_CPU, JMR3927_IOC_RESET_ADDR); /* fallback */ (*_machine_halt)(); } static void __init jmr3927_time_init(void) { tx3927_time_init(0, 1); } #define DO_WRITE_THROUGH static void jmr3927_board_init(void); static void __init jmr3927_mem_setup(void) { set_io_port_base(JMR3927_PORT_BASE + JMR3927_PCIIO); _machine_restart = jmr3927_machine_restart; /* cache setup */ { unsigned int conf; #ifdef DO_WRITE_THROUGH int mips_config_cwfon = 0; int mips_config_wbon = 0; #else int mips_config_cwfon = 1; int mips_config_wbon = 1; #endif conf = read_c0_conf(); conf &= ~(TX39_CONF_WBON | TX39_CONF_CWFON); conf |= mips_config_wbon ? TX39_CONF_WBON : 0; conf |= mips_config_cwfon ? 
TX39_CONF_CWFON : 0; write_c0_conf(conf); write_c0_cache(0); } /* initialize board */ jmr3927_board_init(); tx3927_sio_init(0, 1 << 1); /* ch1: noCTS */ } static void __init jmr3927_pci_setup(void) { #ifdef CONFIG_PCI int extarb = !(tx3927_ccfgptr->ccfg & TX3927_CCFG_PCIXARB); struct pci_controller *c; c = txx9_alloc_pci_controller(&txx9_primary_pcic, JMR3927_PCIMEM, JMR3927_PCIMEM_SIZE, JMR3927_PCIIO, JMR3927_PCIIO_SIZE); register_pci_controller(c); if (!extarb) { /* Reset PCI Bus */ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR); udelay(100); jmr3927_ioc_reg_out(JMR3927_IOC_RESET_PCI, JMR3927_IOC_RESET_ADDR); udelay(100); jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR); } tx3927_pcic_setup(c, JMR3927_SDRAM_SIZE, extarb); tx3927_setup_pcierr_irq(); #endif /* CONFIG_PCI */ } static void __init jmr3927_board_init(void) { txx9_cpu_clock = JMR3927_CORECLK; /* SDRAMC are configured by PROM */ /* ROMC */ tx3927_romcptr->cr[1] = JMR3927_ROMCE1 | 0x00030048; tx3927_romcptr->cr[2] = JMR3927_ROMCE2 | 0x000064c8; tx3927_romcptr->cr[3] = JMR3927_ROMCE3 | 0x0003f698; tx3927_romcptr->cr[5] = JMR3927_ROMCE5 | 0x0000f218; /* Pin selection */ tx3927_ccfgptr->pcfg &= ~TX3927_PCFG_SELALL; tx3927_ccfgptr->pcfg |= TX3927_PCFG_SELSIOC(0) | TX3927_PCFG_SELSIO_ALL | (TX3927_PCFG_SELDMA_ALL & ~TX3927_PCFG_SELDMA(1)); tx3927_setup(); /* PIO[15:12] connected to LEDs */ __raw_writel(0x0000f000, &tx3927_pioptr->dir); gpio_request(11, "dipsw1"); gpio_request(10, "dipsw2"); jmr3927_pci_setup(); /* SIO0 DTR on */ jmr3927_ioc_reg_out(0, JMR3927_IOC_DTR_ADDR); jmr3927_led_set(0); printk(KERN_INFO "JMR-TX3927 (Rev %d) --- IOC(Rev %d) DIPSW:%d,%d,%d,%d\n", jmr3927_ioc_reg_in(JMR3927_IOC_BREV_ADDR) & JMR3927_REV_MASK, jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR) & JMR3927_REV_MASK, jmr3927_dipsw1(), jmr3927_dipsw2(), jmr3927_dipsw3(), jmr3927_dipsw4()); } /* This trick makes rtc-ds1742 driver usable as is. 
*/ static unsigned long jmr3927_swizzle_addr_b(unsigned long port) { if ((port & 0xffff0000) != JMR3927_IOC_NVRAMB_ADDR) return port; port = (port & 0xffff0000) | (port & 0x7fff << 1); #ifdef __BIG_ENDIAN return port; #else return port | 1; #endif } static void __init jmr3927_rtc_init(void) { static struct resource __initdata res = { .start = JMR3927_IOC_NVRAMB_ADDR - IO_BASE, .end = JMR3927_IOC_NVRAMB_ADDR - IO_BASE + 0x800 - 1, .flags = IORESOURCE_MEM, }; platform_device_register_simple("rtc-ds1742", -1, &res, 1); } static void __init jmr3927_mtd_init(void) { int i; for (i = 0; i < 2; i++) tx3927_mtd_init(i); } static void __init jmr3927_device_init(void) { unsigned long iocled_base = JMR3927_IOC_LED_ADDR - IO_BASE; #ifdef __LITTLE_ENDIAN iocled_base |= 1; #endif __swizzle_addr_b = jmr3927_swizzle_addr_b; jmr3927_rtc_init(); tx3927_wdt_init(); jmr3927_mtd_init(); txx9_iocled_init(iocled_base, -1, 8, 1, "green", NULL); } struct txx9_board_vec jmr3927_vec __initdata = { .system = "Toshiba JMR_TX3927", .prom_init = jmr3927_prom_init, .mem_setup = jmr3927_mem_setup, .irq_setup = jmr3927_irq_setup, .time_init = jmr3927_time_init, .device_init = jmr3927_device_init, #ifdef CONFIG_PCI .pci_map_irq = jmr3927_pci_map_irq, #endif };
gpl-2.0
Cl3Kener/UBER-L
arch/mips/txx9/jmr3927/setup.c
12849
6221
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * * Copyright 2001 MontaVista Software Inc. * Author: MontaVista Software, Inc. 
* ahennessy@mvista.com * * Copyright (C) 2000-2001 Toshiba Corporation * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <asm/reboot.h> #include <asm/txx9pio.h> #include <asm/txx9/generic.h> #include <asm/txx9/pci.h> #include <asm/txx9/jmr3927.h> #include <asm/mipsregs.h> static void jmr3927_machine_restart(char *command) { local_irq_disable(); #if 1 /* Resetting PCI bus */ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR); jmr3927_ioc_reg_out(JMR3927_IOC_RESET_PCI, JMR3927_IOC_RESET_ADDR); (void)jmr3927_ioc_reg_in(JMR3927_IOC_RESET_ADDR); /* flush WB */ mdelay(1); jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR); #endif jmr3927_ioc_reg_out(JMR3927_IOC_RESET_CPU, JMR3927_IOC_RESET_ADDR); /* fallback */ (*_machine_halt)(); } static void __init jmr3927_time_init(void) { tx3927_time_init(0, 1); } #define DO_WRITE_THROUGH static void jmr3927_board_init(void); static void __init jmr3927_mem_setup(void) { set_io_port_base(JMR3927_PORT_BASE + JMR3927_PCIIO); _machine_restart = jmr3927_machine_restart; /* cache setup */ { unsigned int conf; #ifdef DO_WRITE_THROUGH int mips_config_cwfon = 0; int mips_config_wbon = 0; #else int mips_config_cwfon = 1; int mips_config_wbon = 1; #endif conf = read_c0_conf(); conf &= ~(TX39_CONF_WBON | TX39_CONF_CWFON); conf |= mips_config_wbon ? TX39_CONF_WBON : 0; conf |= mips_config_cwfon ? 
TX39_CONF_CWFON : 0; write_c0_conf(conf); write_c0_cache(0); } /* initialize board */ jmr3927_board_init(); tx3927_sio_init(0, 1 << 1); /* ch1: noCTS */ } static void __init jmr3927_pci_setup(void) { #ifdef CONFIG_PCI int extarb = !(tx3927_ccfgptr->ccfg & TX3927_CCFG_PCIXARB); struct pci_controller *c; c = txx9_alloc_pci_controller(&txx9_primary_pcic, JMR3927_PCIMEM, JMR3927_PCIMEM_SIZE, JMR3927_PCIIO, JMR3927_PCIIO_SIZE); register_pci_controller(c); if (!extarb) { /* Reset PCI Bus */ jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR); udelay(100); jmr3927_ioc_reg_out(JMR3927_IOC_RESET_PCI, JMR3927_IOC_RESET_ADDR); udelay(100); jmr3927_ioc_reg_out(0, JMR3927_IOC_RESET_ADDR); } tx3927_pcic_setup(c, JMR3927_SDRAM_SIZE, extarb); tx3927_setup_pcierr_irq(); #endif /* CONFIG_PCI */ } static void __init jmr3927_board_init(void) { txx9_cpu_clock = JMR3927_CORECLK; /* SDRAMC are configured by PROM */ /* ROMC */ tx3927_romcptr->cr[1] = JMR3927_ROMCE1 | 0x00030048; tx3927_romcptr->cr[2] = JMR3927_ROMCE2 | 0x000064c8; tx3927_romcptr->cr[3] = JMR3927_ROMCE3 | 0x0003f698; tx3927_romcptr->cr[5] = JMR3927_ROMCE5 | 0x0000f218; /* Pin selection */ tx3927_ccfgptr->pcfg &= ~TX3927_PCFG_SELALL; tx3927_ccfgptr->pcfg |= TX3927_PCFG_SELSIOC(0) | TX3927_PCFG_SELSIO_ALL | (TX3927_PCFG_SELDMA_ALL & ~TX3927_PCFG_SELDMA(1)); tx3927_setup(); /* PIO[15:12] connected to LEDs */ __raw_writel(0x0000f000, &tx3927_pioptr->dir); gpio_request(11, "dipsw1"); gpio_request(10, "dipsw2"); jmr3927_pci_setup(); /* SIO0 DTR on */ jmr3927_ioc_reg_out(0, JMR3927_IOC_DTR_ADDR); jmr3927_led_set(0); printk(KERN_INFO "JMR-TX3927 (Rev %d) --- IOC(Rev %d) DIPSW:%d,%d,%d,%d\n", jmr3927_ioc_reg_in(JMR3927_IOC_BREV_ADDR) & JMR3927_REV_MASK, jmr3927_ioc_reg_in(JMR3927_IOC_REV_ADDR) & JMR3927_REV_MASK, jmr3927_dipsw1(), jmr3927_dipsw2(), jmr3927_dipsw3(), jmr3927_dipsw4()); } /* This trick makes rtc-ds1742 driver usable as is. 
*/ static unsigned long jmr3927_swizzle_addr_b(unsigned long port) { if ((port & 0xffff0000) != JMR3927_IOC_NVRAMB_ADDR) return port; port = (port & 0xffff0000) | (port & 0x7fff << 1); #ifdef __BIG_ENDIAN return port; #else return port | 1; #endif } static void __init jmr3927_rtc_init(void) { static struct resource __initdata res = { .start = JMR3927_IOC_NVRAMB_ADDR - IO_BASE, .end = JMR3927_IOC_NVRAMB_ADDR - IO_BASE + 0x800 - 1, .flags = IORESOURCE_MEM, }; platform_device_register_simple("rtc-ds1742", -1, &res, 1); } static void __init jmr3927_mtd_init(void) { int i; for (i = 0; i < 2; i++) tx3927_mtd_init(i); } static void __init jmr3927_device_init(void) { unsigned long iocled_base = JMR3927_IOC_LED_ADDR - IO_BASE; #ifdef __LITTLE_ENDIAN iocled_base |= 1; #endif __swizzle_addr_b = jmr3927_swizzle_addr_b; jmr3927_rtc_init(); tx3927_wdt_init(); jmr3927_mtd_init(); txx9_iocled_init(iocled_base, -1, 8, 1, "green", NULL); } struct txx9_board_vec jmr3927_vec __initdata = { .system = "Toshiba JMR_TX3927", .prom_init = jmr3927_prom_init, .mem_setup = jmr3927_mem_setup, .irq_setup = jmr3927_irq_setup, .time_init = jmr3927_time_init, .device_init = jmr3927_device_init, #ifdef CONFIG_PCI .pci_map_irq = jmr3927_pci_map_irq, #endif };
gpl-2.0
koalo/linux
drivers/media/common/b2c2/flexcop-hw-filter.c
13617
6638
/* * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III * flexcop-hw-filter.c - pid and mac address filtering and control functions * see flexcop.c for copyright information */ #include "flexcop.h" static void flexcop_rcv_data_ctrl(struct flexcop_device *fc, int onoff) { flexcop_set_ibi_value(ctrl_208, Rcv_Data_sig, onoff); deb_ts("rcv_data is now: '%s'\n", onoff ? "on" : "off"); } void flexcop_smc_ctrl(struct flexcop_device *fc, int onoff) { flexcop_set_ibi_value(ctrl_208, SMC_Enable_sig, onoff); } static void flexcop_null_filter_ctrl(struct flexcop_device *fc, int onoff) { flexcop_set_ibi_value(ctrl_208, Null_filter_sig, onoff); } void flexcop_set_mac_filter(struct flexcop_device *fc, u8 mac[6]) { flexcop_ibi_value v418, v41c; v41c = fc->read_ibi_reg(fc, mac_address_41c); v418.mac_address_418.MAC1 = mac[0]; v418.mac_address_418.MAC2 = mac[1]; v418.mac_address_418.MAC3 = mac[2]; v418.mac_address_418.MAC6 = mac[3]; v41c.mac_address_41c.MAC7 = mac[4]; v41c.mac_address_41c.MAC8 = mac[5]; fc->write_ibi_reg(fc, mac_address_418, v418); fc->write_ibi_reg(fc, mac_address_41c, v41c); } void flexcop_mac_filter_ctrl(struct flexcop_device *fc, int onoff) { flexcop_set_ibi_value(ctrl_208, MAC_filter_Mode_sig, onoff); } static void flexcop_pid_group_filter(struct flexcop_device *fc, u16 pid, u16 mask) { /* index_reg_310.extra_index_reg need to 0 or 7 to work */ flexcop_ibi_value v30c; v30c.pid_filter_30c_ext_ind_0_7.Group_PID = pid; v30c.pid_filter_30c_ext_ind_0_7.Group_mask = mask; fc->write_ibi_reg(fc, pid_filter_30c, v30c); } static void flexcop_pid_group_filter_ctrl(struct flexcop_device *fc, int onoff) { flexcop_set_ibi_value(ctrl_208, Mask_filter_sig, onoff); } /* this fancy define reduces the code size of the quite similar PID controlling of * the first 6 PIDs */ #define pid_ctrl(vregname,field,enablefield,trans_field,transval) \ flexcop_ibi_value vpid = fc->read_ibi_reg(fc, vregname), \ v208 = fc->read_ibi_reg(fc, ctrl_208); \ vpid.vregname.field = 
onoff ? pid : 0x1fff; \ vpid.vregname.trans_field = transval; \ v208.ctrl_208.enablefield = onoff; \ fc->write_ibi_reg(fc, vregname, vpid); \ fc->write_ibi_reg(fc, ctrl_208, v208); static void flexcop_pid_Stream1_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_300, Stream1_PID, Stream1_filter_sig, Stream1_trans, 0); } static void flexcop_pid_Stream2_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_300, Stream2_PID, Stream2_filter_sig, Stream2_trans, 0); } static void flexcop_pid_PCR_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_304, PCR_PID, PCR_filter_sig, PCR_trans, 0); } static void flexcop_pid_PMT_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_304, PMT_PID, PMT_filter_sig, PMT_trans, 0); } static void flexcop_pid_EMM_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_308, EMM_PID, EMM_filter_sig, EMM_trans, 0); } static void flexcop_pid_ECM_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_308, ECM_PID, ECM_filter_sig, ECM_trans, 0); } static void flexcop_pid_control(struct flexcop_device *fc, int index, u16 pid, int onoff) { if (pid == 0x2000) return; deb_ts("setting pid: %5d %04x at index %d '%s'\n", pid, pid, index, onoff ? "on" : "off"); /* We could use bit magic here to reduce source code size. 
* I decided against it, but to use the real register names */ switch (index) { case 0: flexcop_pid_Stream1_PID_ctrl(fc, pid, onoff); break; case 1: flexcop_pid_Stream2_PID_ctrl(fc, pid, onoff); break; case 2: flexcop_pid_PCR_PID_ctrl(fc, pid, onoff); break; case 3: flexcop_pid_PMT_PID_ctrl(fc, pid, onoff); break; case 4: flexcop_pid_EMM_PID_ctrl(fc, pid, onoff); break; case 5: flexcop_pid_ECM_PID_ctrl(fc, pid, onoff); break; default: if (fc->has_32_hw_pid_filter && index < 38) { flexcop_ibi_value vpid, vid; /* set the index */ vid = fc->read_ibi_reg(fc, index_reg_310); vid.index_reg_310.index_reg = index - 6; fc->write_ibi_reg(fc, index_reg_310, vid); vpid = fc->read_ibi_reg(fc, pid_n_reg_314); vpid.pid_n_reg_314.PID = onoff ? pid : 0x1fff; vpid.pid_n_reg_314.PID_enable_bit = onoff; fc->write_ibi_reg(fc, pid_n_reg_314, vpid); } break; } } static int flexcop_toggle_fullts_streaming(struct flexcop_device *fc, int onoff) { if (fc->fullts_streaming_state != onoff) { deb_ts("%s full TS transfer\n",onoff ? "enabling" : "disabling"); flexcop_pid_group_filter(fc, 0, 0x1fe0 * (!onoff)); flexcop_pid_group_filter_ctrl(fc, onoff); fc->fullts_streaming_state = onoff; } return 0; } int flexcop_pid_feed_control(struct flexcop_device *fc, struct dvb_demux_feed *dvbdmxfeed, int onoff) { int max_pid_filter = 6 + fc->has_32_hw_pid_filter*32; fc->feedcount += onoff ? 1 : -1; /* the number of PIDs/Feed currently requested */ if (dvbdmxfeed->index >= max_pid_filter) fc->extra_feedcount += onoff ? 
1 : -1; /* toggle complete-TS-streaming when: * - pid_filtering is not enabled and it is the first or last feed requested * - pid_filtering is enabled, * - but the number of requested feeds is exceeded * - or the requested pid is 0x2000 */ if (!fc->pid_filtering && fc->feedcount == onoff) flexcop_toggle_fullts_streaming(fc, onoff); if (fc->pid_filtering) { flexcop_pid_control \ (fc, dvbdmxfeed->index, dvbdmxfeed->pid, onoff); if (fc->extra_feedcount > 0) flexcop_toggle_fullts_streaming(fc, 1); else if (dvbdmxfeed->pid == 0x2000) flexcop_toggle_fullts_streaming(fc, onoff); else flexcop_toggle_fullts_streaming(fc, 0); } /* if it was the first or last feed request change the stream-status */ if (fc->feedcount == onoff) { flexcop_rcv_data_ctrl(fc, onoff); if (fc->stream_control) /* device specific stream control */ fc->stream_control(fc, onoff); /* feeding stopped -> reset the flexcop filter*/ if (onoff == 0) { flexcop_reset_block_300(fc); flexcop_hw_filter_init(fc); } } return 0; } EXPORT_SYMBOL(flexcop_pid_feed_control); void flexcop_hw_filter_init(struct flexcop_device *fc) { int i; flexcop_ibi_value v; for (i = 0; i < 6 + 32*fc->has_32_hw_pid_filter; i++) flexcop_pid_control(fc, i, 0x1fff, 0); flexcop_pid_group_filter(fc, 0, 0x1fe0); flexcop_pid_group_filter_ctrl(fc, 0); v = fc->read_ibi_reg(fc, pid_filter_308); v.pid_filter_308.EMM_filter_4 = 1; v.pid_filter_308.EMM_filter_6 = 0; fc->write_ibi_reg(fc, pid_filter_308, v); flexcop_null_filter_ctrl(fc, 1); }
gpl-2.0
fedya/aircam-openwrt
build_dir/linux-gm812x/linux-2.6.28.fa2/arch/mn10300/boot/tools/build.c
13873
4729
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1997 Martin Mares */ /* * This file builds a disk-image from three different files: * * - bootsect: exactly 512 bytes of 8086 machine code, loads the rest * - setup: 8086 machine code, sets up system parm * - system: 80386 code for actual system * * It does some checking that all files are of the correct type, and * just writes the result to stdout, removing headers and padding to * the right amount. It also writes some system data to stderr. */ /* * Changes by tytso to allow root device specification * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 * Cross compiling fixes by Gertjan van Wingerde, July 1996 * Rewritten by Martin Mares, April 1997 */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <stdarg.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/sysmacros.h> #include <unistd.h> #include <fcntl.h> #include <asm/boot.h> #define DEFAULT_MAJOR_ROOT 0 #define DEFAULT_MINOR_ROOT 0 /* Minimal number of setup sectors (see also bootsect.S) */ #define SETUP_SECTS 4 uint8_t buf[1024]; int fd; int is_big_kernel; __attribute__((noreturn)) void die(const char *str, ...) 
{ va_list args; va_start(args, str); vfprintf(stderr, str, args); fputc('\n', stderr); exit(1); } void file_open(const char *name) { fd = open(name, O_RDONLY, 0); if (fd < 0) die("Unable to open `%s': %m", name); } __attribute__((noreturn)) void usage(void) { die("Usage: build [-b] bootsect setup system [rootdev] [> image]"); } int main(int argc, char **argv) { unsigned int i, c, sz, setup_sectors; uint32_t sys_size; uint8_t major_root, minor_root; struct stat sb; if (argc > 2 && !strcmp(argv[1], "-b")) { is_big_kernel = 1; argc--, argv++; } if ((argc < 4) || (argc > 5)) usage(); if (argc > 4) { if (!strcmp(argv[4], "CURRENT")) { if (stat("/", &sb)) { perror("/"); die("Couldn't stat /"); } major_root = major(sb.st_dev); minor_root = minor(sb.st_dev); } else if (strcmp(argv[4], "FLOPPY")) { if (stat(argv[4], &sb)) { perror(argv[4]); die("Couldn't stat root device."); } major_root = major(sb.st_rdev); minor_root = minor(sb.st_rdev); } else { major_root = 0; minor_root = 0; } } else { major_root = DEFAULT_MAJOR_ROOT; minor_root = DEFAULT_MINOR_ROOT; } fprintf(stderr, "Root device is (%d, %d)\n", major_root, minor_root); file_open(argv[1]); i = read(fd, buf, sizeof(buf)); fprintf(stderr, "Boot sector %d bytes.\n", i); if (i != 512) die("Boot block must be exactly 512 bytes"); if (buf[510] != 0x55 || buf[511] != 0xaa) die("Boot block hasn't got boot flag (0xAA55)"); buf[508] = minor_root; buf[509] = major_root; if (write(1, buf, 512) != 512) die("Write call failed"); close(fd); /* Copy the setup code */ file_open(argv[2]); for (i = 0; (c = read(fd, buf, sizeof(buf))) > 0; i += c) if (write(1, buf, c) != c) die("Write call failed"); if (c != 0) die("read-error on `setup'"); close(fd); /* Pad unused space with zeros */ setup_sectors = (i + 511) / 512; /* for compatibility with ancient versions of LILO. 
*/ if (setup_sectors < SETUP_SECTS) setup_sectors = SETUP_SECTS; fprintf(stderr, "Setup is %d bytes.\n", i); memset(buf, 0, sizeof(buf)); while (i < setup_sectors * 512) { c = setup_sectors * 512 - i; if (c > sizeof(buf)) c = sizeof(buf); if (write(1, buf, c) != c) die("Write call failed"); i += c; } file_open(argv[3]); if (fstat(fd, &sb)) die("Unable to stat `%s': %m", argv[3]); sz = sb.st_size; fprintf(stderr, "System is %d kB\n", sz / 1024); sys_size = (sz + 15) / 16; /* 0x28000*16 = 2.5 MB, conservative estimate for the current maximum */ if (sys_size > (is_big_kernel ? 0x28000 : DEF_SYSSIZE)) die("System is too big. Try using %smodules.", is_big_kernel ? "" : "bzImage or "); if (sys_size > 0xffff) fprintf(stderr, "warning: kernel is too big for standalone boot " "from floppy\n"); while (sz > 0) { int l, n; l = (sz > sizeof(buf)) ? sizeof(buf) : sz; n = read(fd, buf, l); if (n != l) { if (n < 0) die("Error reading %s: %m", argv[3]); else die("%s: Unexpected EOF", argv[3]); } if (write(1, buf, l) != l) die("Write failed"); sz -= l; } close(fd); /* Write sizes to the bootsector */ if (lseek(1, 497, SEEK_SET) != 497) die("Output: seek failed"); buf[0] = setup_sectors; if (write(1, buf, 1) != 1) die("Write of setup sector count failed"); if (lseek(1, 500, SEEK_SET) != 500) die("Output: seek failed"); buf[0] = (sys_size & 0xff); buf[1] = ((sys_size >> 8) & 0xff); if (write(1, buf, 2) != 2) die("Write of image length failed"); return 0; }
gpl-2.0
32bitmicro/newlib-nano-1.0
newlib/libc/stdlib/wcsnrtombs.c
50
4418
/* FUNCTION <<wcsrtombs>>, <<wcsnrtombs>>---convert a wide-character string to a character string INDEX wcsrtombs INDEX _wcsrtombs_r INDEX wcsnrtombs INDEX _wcsnrtombs_r ANSI_SYNOPSIS #include <wchar.h> size_t wcsrtombs(char *<[dst]>, const wchar_t **<[src]>, size_t <[len]>, mbstate_t *<[ps]>); #include <wchar.h> size_t _wcsrtombs_r(struct _reent *<[ptr]>, char *<[dst]>, const wchar_t **<[src]>, size_t <[len]>, mbstate_t *<[ps]>); #include <wchar.h> size_t wcsnrtombs(char *<[dst]>, const wchar_t **<[src]>, size_t <[nwc]>, size_t <[len]>, mbstate_t *<[ps]>); #include <wchar.h> size_t _wcsnrtombs_r(struct _reent *<[ptr]>, char *<[dst]>, const wchar_t **<[src]>, size_t <[nwc]>, size_t <[len]>, mbstate_t *<[ps]>); TRAD_SYNOPSIS #include <wchar.h> size_t wcsrtombs(<[dst]>, <[src]>, <[len]>, <[ps]>) char *<[dst]>; const wchar_t **<[src]>; size_t <[len]>; mbstate_t *<[ps]>; #include <wchar.h> size_t _wcsrtombs_r(<[ptr]>, <[dst]>, <[src]>, <[len]>, <[ps]>) struct _rent *<[ptr]>; char *<[dst]>; const wchar_t **<[src]>; size_t <[len]>; mbstate_t *<[ps]>; #include <wchar.h> size_t wcsnrtombs(<[dst]>, <[src]>, <[nwc]>, <[len]>, <[ps]>) char *<[dst]>; const wchar_t **<[src]>; size_t <[nwc]>; size_t <[len]>; mbstate_t *<[ps]>; #include <wchar.h> size_t _wcsnrtombs_r(<[ptr]>, <[dst]>, <[src]>, <[nwc]>, <[len]>, <[ps]>) struct _rent *<[ptr]>; char *<[dst]>; const wchar_t **<[src]>; size_t <[nwc]>; size_t <[len]>; mbstate_t *<[ps]>; DESCRIPTION The <<wcsrtombs>> function converts a string of wide characters indirectly pointed to by <[src]> to a corresponding multibyte character string stored in the array pointed to by <[dst}>. No more than <[len]> bytes are written to <[dst}>. If <[dst}> is NULL, no characters are stored. If <[dst}> is not NULL, the pointer pointed to by <[src]> is updated to point to the character after the one that conversion stopped at. If conversion stops because a null character is encountered, *<[src]> is set to NULL. 
The mbstate_t argument, <[ps]>, is used to keep track of the shift state. If it is NULL, <<wcsrtombs>> uses an internal, static mbstate_t object, which is initialized to the initial conversion state at program startup. The <<wcsnrtombs>> function behaves identically to <<wcsrtombs>>, except that conversion stops after reading at most <[nwc]> characters from the buffer pointed to by <[src]>. RETURNS The <<wcsrtombs>> and <<wcsnrtombs>> functions return the number of bytes stored in the array pointed to by <[dst]> (not including any terminating null), if successful, otherwise it returns (size_t)-1. PORTABILITY <<wcsrtombs>> is defined by C99 standard. <<wcsnrtombs>> is defined by the POSIX.1-2008 standard. */ #include <reent.h> #include <newlib.h> #include <wchar.h> #include <stdlib.h> #include <stdio.h> #include <errno.h> #include "local.h" size_t _DEFUN (_wcsnrtombs_r, (r, dst, src, nwc, len, ps), struct _reent *r _AND char *dst _AND const wchar_t **src _AND size_t nwc _AND size_t len _AND mbstate_t *ps) { char *ptr = dst; char buff[10]; wchar_t *pwcs; size_t n; int i; #ifdef _MB_CAPABLE if (ps == NULL) { _REENT_CHECK_MISC(r); ps = &(_REENT_WCSRTOMBS_STATE(r)); } #endif /* If no dst pointer, treat len as maximum possible value. 
*/ if (dst == NULL) len = (size_t)-1; n = 0; pwcs = (wchar_t *)(*src); while (n < len && nwc-- > 0) { int count = ps->__count; wint_t wch = ps->__value.__wch; int bytes = __wctomb (r, buff, *pwcs, __locale_charset (), ps); if (bytes == -1) { r->_errno = EILSEQ; ps->__count = 0; return (size_t)-1; } if (n + bytes <= len) { n += bytes; if (dst) { for (i = 0; i < bytes; ++i) *ptr++ = buff[i]; ++(*src); } if (*pwcs++ == 0x00) { if (dst) *src = NULL; ps->__count = 0; return n - 1; } } else { /* not enough room, we must back up state to before __wctomb call */ ps->__count = count; ps->__value.__wch = wch; len = 0; } } return n; } #ifndef _REENT_ONLY size_t _DEFUN (wcsnrtombs, (dst, src, nwc, len, ps), char *dst _AND const wchar_t **src _AND size_t nwc _AND size_t len _AND mbstate_t *ps) { return _wcsnrtombs_r (_REENT, dst, src, nwc, len, ps); } #endif /* !_REENT_ONLY */
gpl-2.0
KangBangKreations/KangBangKore-Kernel
drivers/staging/iio/adc/ad7793.c
50
24430
/*
 * AD7792/AD7793 SPI ADC driver
 *
 * Copyright 2011-2012 Analog Devices Inc.
 *
 * Licensed under the GPL-2.
 */

#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>

#include "ad7793.h"

/* NOTE:
 * The AD7792/AD7793 features a dual use data out ready DOUT/RDY output.
 * In order to avoid contentions on the SPI bus, it's therefore necessary
 * to use spi bus locking.
 *
 * The DOUT/RDY output must also be wired to an interrupt capable GPIO.
 */

struct ad7793_chip_info {
    struct iio_chan_spec channel[7];
};

struct ad7793_state {
    struct spi_device *spi;
    struct iio_trigger *trig;
    const struct ad7793_chip_info *chip_info;
    struct regulator *reg;
    struct ad7793_platform_data *pdata;
    wait_queue_head_t wq_data_avail;
    bool done;      /* set by the DRDY interrupt handler */
    bool irq_dis;   /* true while the DRDY irq is masked */
    u16 int_vref_mv;
    u16 mode;       /* shadow of the MODE register */
    u16 conf;       /* shadow of the CONF register */
    u32 scale_avail[8][2];
    /* Note this uses fact that 8 the mask always fits in a long */
    unsigned long available_scan_masks[7];
    /*
     * DMA (thus cache coherency maintenance) requires the
     * transfer buffers to live in their own cache lines.
     */
    u8 data[4] ____cacheline_aligned;
};

enum ad7793_supported_device_ids {
    ID_AD7792,
    ID_AD7793,
};

/*
 * Write a 1..3 byte register. When @locked is true the caller holds the
 * SPI bus lock (see NOTE above); @cs_change keeps CS asserted so the
 * DOUT/RDY line can be monitored afterwards.
 */
static int __ad7793_write_reg(struct ad7793_state *st, bool locked,
                              bool cs_change, unsigned char reg,
                              unsigned size, unsigned val)
{
    u8 *data = st->data;
    struct spi_transfer t = {
        .tx_buf = data,
        .len = size + 1,
        .cs_change = cs_change,
    };
    struct spi_message m;

    data[0] = AD7793_COMM_WRITE | AD7793_COMM_ADDR(reg);

    switch (size) {
    case 3:
        data[1] = val >> 16;
        data[2] = val >> 8;
        data[3] = val;
        break;
    case 2:
        data[1] = val >> 8;
        data[2] = val;
        break;
    case 1:
        data[1] = val;
        break;
    default:
        return -EINVAL;
    }

    spi_message_init(&m);
    spi_message_add_tail(&t, &m);

    if (locked)
        return spi_sync_locked(st->spi, &m);
    else
        return spi_sync(st->spi, &m);
}

/* Unlocked register write helper. */
static int ad7793_write_reg(struct ad7793_state *st,
                            unsigned reg, unsigned size, unsigned val)
{
    return __ad7793_write_reg(st, false, false, reg, size, val);
}

/*
 * Read a 1..3 byte register into *val (big-endian on the wire).
 * Locking semantics as for __ad7793_write_reg().
 */
static int __ad7793_read_reg(struct ad7793_state *st, bool locked,
                             bool cs_change, unsigned char reg,
                             int *val, unsigned size)
{
    u8 *data = st->data;
    int ret;
    struct spi_transfer t[] = {
        {
            .tx_buf = data,
            .len = 1,
        }, {
            .rx_buf = data,
            .len = size,
            .cs_change = cs_change,
        },
    };
    struct spi_message m;

    data[0] = AD7793_COMM_READ | AD7793_COMM_ADDR(reg);

    spi_message_init(&m);
    spi_message_add_tail(&t[0], &m);
    spi_message_add_tail(&t[1], &m);

    if (locked)
        ret = spi_sync_locked(st->spi, &m);
    else
        ret = spi_sync(st->spi, &m);

    if (ret < 0)
        return ret;

    switch (size) {
    case 3:
        *val = data[0] << 16 | data[1] << 8 | data[2];
        break;
    case 2:
        *val = data[0] << 8 | data[1];
        break;
    case 1:
        *val = data[0];
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

/* Unlocked register read helper. */
static int ad7793_read_reg(struct ad7793_state *st,
                           unsigned reg, int *val, unsigned size)
{
    return __ad7793_read_reg(st, false, false, reg, val, size);
}

/*
 * Perform one single conversion on channel @ch and read back @len bytes
 * of result. Takes the SPI bus lock for the duration so the shared
 * DOUT/RDY line is not disturbed by other bus traffic.
 */
static int ad7793_read(struct ad7793_state *st, unsigned ch,
                       unsigned len, int *val)
{
    int ret;

    st->conf = (st->conf & ~AD7793_CONF_CHAN(-1)) | AD7793_CONF_CHAN(ch);
    st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) |
               AD7793_MODE_SEL(AD7793_MODE_SINGLE);

    ad7793_write_reg(st, AD7793_REG_CONF, sizeof(st->conf), st->conf);

    spi_bus_lock(st->spi->master);
    st->done = false;

    ret = __ad7793_write_reg(st, 1, 1, AD7793_REG_MODE,
                             sizeof(st->mode), st->mode);
    if (ret < 0)
        goto out;

    st->irq_dis = false;
    enable_irq(st->spi->irq);
    wait_event_interruptible(st->wq_data_avail, st->done);

    ret = __ad7793_read_reg(st, 1, 0, AD7793_REG_DATA, val, len);
out:
    spi_bus_unlock(st->spi->master);

    return ret;
}

/*
 * Run one calibration cycle (@mode selects zero/full-scale, internal/
 * system) on channel @ch, then return the part to idle mode.
 */
static int ad7793_calibrate(struct ad7793_state *st, unsigned mode,
                            unsigned ch)
{
    int ret;

    st->conf = (st->conf & ~AD7793_CONF_CHAN(-1)) | AD7793_CONF_CHAN(ch);
    st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) | AD7793_MODE_SEL(mode);

    ad7793_write_reg(st, AD7793_REG_CONF, sizeof(st->conf), st->conf);

    spi_bus_lock(st->spi->master);
    st->done = false;

    ret = __ad7793_write_reg(st, 1, 1, AD7793_REG_MODE,
                             sizeof(st->mode), st->mode);
    if (ret < 0)
        goto out;

    st->irq_dis = false;
    enable_irq(st->spi->irq);
    wait_event_interruptible(st->wq_data_avail, st->done);

    st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) |
               AD7793_MODE_SEL(AD7793_MODE_IDLE);

    ret = __ad7793_write_reg(st, 1, 0, AD7793_REG_MODE,
                             sizeof(st->mode), st->mode);
out:
    spi_bus_unlock(st->spi->master);

    return ret;
}

/* Internal zero/full-scale calibration pairs for AIN1..AIN3. */
static const u8 ad7793_calib_arr[6][2] = {
    {AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN1P_AIN1M},
    {AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN1P_AIN1M},
    {AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN2P_AIN2M},
    {AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN2P_AIN2M},
    {AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN3P_AIN3M},
    {AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN3P_AIN3M}
};

/* Calibrate every differential input channel; log on first failure. */
static int ad7793_calibrate_all(struct ad7793_state *st)
{
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(ad7793_calib_arr); i++) {
        ret = ad7793_calibrate(st, ad7793_calib_arr[i][0],
                               ad7793_calib_arr[i][1]);
        if (ret)
            goto out;
    }

    return 0;
out:
    dev_err(&st->spi->dev, "Calibration failed\n");
    return ret;
}

/*
 * Reset and configure the converter, verify its ID, calibrate, and
 * precompute the table of available scales.
 */
static int ad7793_setup(struct ad7793_state *st)
{
    int i, ret = -1;
    unsigned long long scale_uv;
    u32 id;

    /*
     * Reset the serial interface: writing 32 consecutive 1s (the four
     * 0xff bytes of ret == -1) returns the part to its default state.
     */
    ret = spi_write(st->spi, (u8 *)&ret, sizeof(ret));
    if (ret < 0)
        goto out;

    msleep(1); /* Wait for at least 500us */

    /* write/read test for device presence */
    ret = ad7793_read_reg(st, AD7793_REG_ID, &id, 1);
    if (ret)
        goto out;

    id &= AD7793_ID_MASK;

    if (!((id == AD7792_ID) || (id == AD7793_ID))) {
        dev_err(&st->spi->dev, "device ID query failed\n");
        goto out;
    }

    st->mode = (st->pdata->mode & ~AD7793_MODE_SEL(-1)) |
               AD7793_MODE_SEL(AD7793_MODE_IDLE);
    st->conf = st->pdata->conf & ~AD7793_CONF_CHAN(-1);

    ret = ad7793_write_reg(st, AD7793_REG_MODE, sizeof(st->mode), st->mode);
    if (ret)
        goto out;

    ret = ad7793_write_reg(st, AD7793_REG_CONF, sizeof(st->conf), st->conf);
    if (ret)
        goto out;

    ret = ad7793_write_reg(st, AD7793_REG_IO,
                           sizeof(st->pdata->io), st->pdata->io);
    if (ret)
        goto out;

    ret = ad7793_calibrate_all(st);
    if (ret)
        goto out;

    /* Populate available ADC input ranges: one entry per PGA gain. */
    for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
        scale_uv = ((u64)st->int_vref_mv * 100000000) >>
                   (st->chip_info->channel[0].scan_type.realbits -
                    (!!(st->conf & AD7793_CONF_UNIPOLAR) ? 0 : 1));
        scale_uv >>= i;

        st->scale_avail[i][1] = do_div(scale_uv, 100000000) * 10;
        st->scale_avail[i][0] = scale_uv;
    }

    return 0;
out:
    dev_err(&st->spi->dev, "setup failed\n");
    return ret;
}

/* Put the device into continuous conversion mode for buffered capture. */
static int ad7793_ring_preenable(struct iio_dev *indio_dev)
{
    struct ad7793_state *st = iio_priv(indio_dev);
    unsigned channel;
    int ret;

    if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
        return -EINVAL;

    ret = iio_sw_buffer_preenable(indio_dev);
    if (ret < 0)
        return ret;

    channel = find_first_bit(indio_dev->active_scan_mask,
                             indio_dev->masklength);

    st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) |
               AD7793_MODE_SEL(AD7793_MODE_CONT);
    st->conf = (st->conf & ~AD7793_CONF_CHAN(-1)) |
               AD7793_CONF_CHAN(indio_dev->channels[channel].address);

    ad7793_write_reg(st, AD7793_REG_CONF, sizeof(st->conf), st->conf);

    /* Bus stays locked until ad7793_ring_postdisable(). */
    spi_bus_lock(st->spi->master);
    __ad7793_write_reg(st, 1, 1, AD7793_REG_MODE,
                       sizeof(st->mode), st->mode);

    st->irq_dis = false;
    enable_irq(st->spi->irq);

    return 0;
}

/* Return the device to idle mode and release the SPI bus lock. */
static int ad7793_ring_postdisable(struct iio_dev *indio_dev)
{
    struct ad7793_state *st = iio_priv(indio_dev);

    st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) |
               AD7793_MODE_SEL(AD7793_MODE_IDLE);

    st->done = false;
    wait_event_interruptible(st->wq_data_avail, st->done);

    if (!st->irq_dis)
        disable_irq_nosync(st->spi->irq);

    __ad7793_write_reg(st, 1, 0, AD7793_REG_MODE,
                       sizeof(st->mode), st->mode);

    return spi_bus_unlock(st->spi->master);
}

/**
 * ad7793_trigger_handler() bh of trigger launched polling to ring buffer
 **/
static irqreturn_t ad7793_trigger_handler(int irq, void *p)
{
    struct iio_poll_func *pf = p;
    struct iio_dev *indio_dev = pf->indio_dev;
    struct iio_buffer *ring = indio_dev->buffer;
    struct ad7793_state *st = iio_priv(indio_dev);
    s64 dat64[2];
    s32 *dat32 = (s32 *)dat64;

    if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
        __ad7793_read_reg(st, 1, 1, AD7793_REG_DATA, dat32,
                          indio_dev->channels[0].scan_type.realbits / 8);

    /* Guaranteed to be aligned with 8 byte boundary */
    if (indio_dev->scan_timestamp)
        dat64[1] = pf->timestamp;

    ring->access->store_to(ring, (u8 *)dat64, pf->timestamp);

    iio_trigger_notify_done(indio_dev->trig);
    st->irq_dis = false;
    enable_irq(st->spi->irq);

    return IRQ_HANDLED;
}

static const struct iio_buffer_setup_ops ad7793_ring_setup_ops = {
    .preenable = &ad7793_ring_preenable,
    .postenable = &iio_triggered_buffer_postenable,
    .predisable = &iio_triggered_buffer_predisable,
    .postdisable = &ad7793_ring_postdisable,
};

/* Allocate the kfifo buffer and pollfunc for triggered capture. */
static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
    int ret;

    indio_dev->buffer = iio_kfifo_allocate(indio_dev);
    if (!indio_dev->buffer) {
        ret = -ENOMEM;
        goto error_ret;
    }
    indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
                                             &ad7793_trigger_handler,
                                             IRQF_ONESHOT,
                                             indio_dev,
                                             "ad7793_consumer%d",
                                             indio_dev->id);
    if (indio_dev->pollfunc == NULL) {
        ret = -ENOMEM;
        goto error_deallocate_kfifo;
    }

    /* Ring buffer functions - here trigger setup related */
    indio_dev->setup_ops = &ad7793_ring_setup_ops;

    /* Flag that polled ring buffering is possible */
    indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
    return 0;

error_deallocate_kfifo:
    iio_kfifo_free(indio_dev->buffer);
error_ret:
    return ret;
}

/* Undo ad7793_register_ring_funcs_and_init(). */
static void ad7793_ring_cleanup(struct iio_dev *indio_dev)
{
    iio_dealloc_pollfunc(indio_dev->pollfunc);
    iio_kfifo_free(indio_dev->buffer);
}

/**
 * ad7793_data_rdy_trig_poll() the event handler for the data rdy trig
 **/
static irqreturn_t ad7793_data_rdy_trig_poll(int irq, void *private)
{
    struct ad7793_state *st = iio_priv(private);

    st->done = true;
    wake_up_interruptible(&st->wq_data_avail);
    /* DOUT/RDY stays low until the data register is read, so mask the
     * level-triggered irq until the consumer has drained the sample. */
    disable_irq_nosync(irq);
    st->irq_dis = true;
    iio_trigger_poll(st->trig, iio_get_time_ns());

    return IRQ_HANDLED;
}

static struct iio_trigger_ops ad7793_trigger_ops = {
    .owner = THIS_MODULE,
};

/* Register the DRDY interrupt as an IIO trigger. */
static int ad7793_probe_trigger(struct iio_dev *indio_dev)
{
    struct ad7793_state *st = iio_priv(indio_dev);
    int ret;

    st->trig = iio_trigger_alloc("%s-dev%d",
                                 spi_get_device_id(st->spi)->name,
                                 indio_dev->id);
    if (st->trig == NULL) {
        ret = -ENOMEM;
        goto error_ret;
    }

    st->trig->ops = &ad7793_trigger_ops;

    ret = request_irq(st->spi->irq,
                      ad7793_data_rdy_trig_poll,
                      IRQF_TRIGGER_LOW,
                      spi_get_device_id(st->spi)->name,
                      indio_dev);
    if (ret)
        goto error_free_trig;

    disable_irq_nosync(st->spi->irq);
    st->irq_dis = true;
    st->trig->dev.parent = &st->spi->dev;
    st->trig->private_data = indio_dev;

    ret = iio_trigger_register(st->trig);

    /* select default trigger */
    indio_dev->trig = st->trig;
    if (ret)
        goto error_free_irq;

    return 0;

error_free_irq:
    free_irq(st->spi->irq, indio_dev);
error_free_trig:
    iio_trigger_free(st->trig);
error_ret:
    return ret;
}

/* Tear down the trigger registered by ad7793_probe_trigger(). */
static void ad7793_remove_trigger(struct iio_dev *indio_dev)
{
    struct ad7793_state *st = iio_priv(indio_dev);

    iio_trigger_unregister(st->trig);
    free_irq(st->spi->irq, indio_dev);
    iio_trigger_free(st->trig);
}

/* Output data rates in Hz, indexed by the MODE register FS field. */
static const u16 sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39, 33,
                                          19, 17, 16, 12, 10, 8, 6, 4};

static ssize_t ad7793_read_frequency(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
    struct iio_dev *indio_dev = dev_to_iio_dev(dev);
    struct ad7793_state *st = iio_priv(indio_dev);

    return sprintf(buf, "%d\n",
                   sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
}

static ssize_t ad7793_write_frequency(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t len)
{
    struct iio_dev *indio_dev = dev_to_iio_dev(dev);
    struct ad7793_state *st = iio_priv(indio_dev);
    long lval;
    int i, ret;

    mutex_lock(&indio_dev->mlock);
    if (iio_buffer_enabled(indio_dev)) {
        mutex_unlock(&indio_dev->mlock);
        return -EBUSY;
    }
    mutex_unlock(&indio_dev->mlock);

    ret = strict_strtol(buf, 10, &lval);
    if (ret)
        return ret;

    ret = -EINVAL;

    for (i = 0; i < ARRAY_SIZE(sample_freq_avail); i++)
        if (lval == sample_freq_avail[i]) {
            mutex_lock(&indio_dev->mlock);
            st->mode &= ~AD7793_MODE_RATE(-1);
            st->mode |= AD7793_MODE_RATE(i);
            ad7793_write_reg(st, AD7793_REG_MODE,
                             sizeof(st->mode), st->mode);
            mutex_unlock(&indio_dev->mlock);
            ret = 0;
        }

    return ret ? ret : len;
}

static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
                              ad7793_read_frequency,
                              ad7793_write_frequency);

static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
    "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");

static ssize_t ad7793_show_scale_available(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
    struct iio_dev *indio_dev = dev_to_iio_dev(dev);
    struct ad7793_state *st = iio_priv(indio_dev);
    int i, len = 0;

    for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
        len += sprintf(buf + len, "%d.%09u ", st->scale_avail[i][0],
                       st->scale_avail[i][1]);

    len += sprintf(buf + len, "\n");

    return len;
}

static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
                             in-in_scale_available, S_IRUGO,
                             ad7793_show_scale_available, NULL, 0);

static struct attribute *ad7793_attributes[] = {
    &iio_dev_attr_sampling_frequency.dev_attr.attr,
    &iio_const_attr_sampling_frequency_available.dev_attr.attr,
    &iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
    NULL
};

static const struct attribute_group ad7793_attribute_group = {
    .attrs = ad7793_attributes,
};

static int ad7793_read_raw(struct iio_dev *indio_dev,
                           struct iio_chan_spec const *chan,
                           int *val,
                           int *val2,
                           long m)
{
    struct ad7793_state *st = iio_priv(indio_dev);
    int ret, smpl = 0;
    unsigned long long scale_uv;
    bool unipolar = !!(st->conf & AD7793_CONF_UNIPOLAR);

    switch (m) {
    case IIO_CHAN_INFO_RAW:
        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_enabled(indio_dev))
            ret = -EBUSY;
        else
            ret = ad7793_read(st, chan->address,
                              chan->scan_type.realbits / 8, &smpl);
        mutex_unlock(&indio_dev->mlock);

        if (ret < 0)
            return ret;

        *val = (smpl >> chan->scan_type.shift) &
               ((1 << (chan->scan_type.realbits)) - 1);

        if (!unipolar)
            *val -= (1 << (chan->scan_type.realbits - 1));

        return IIO_VAL_INT;

    case IIO_CHAN_INFO_SCALE:
        switch (chan->type) {
        case IIO_VOLTAGE:
            if (chan->differential) {
                *val = st->scale_avail[(st->conf >> 8) & 0x7][0];
                *val2 = st->scale_avail[(st->conf >> 8) & 0x7][1];
                return IIO_VAL_INT_PLUS_NANO;
            } else {
                /* 1170mV / 2^23 * 6 */
                scale_uv = (1170ULL * 100000000ULL * 6ULL) >>
                           (chan->scan_type.realbits -
                            (unipolar ? 0 : 1));
            }
            break;
        case IIO_TEMP:
            /* Always uses unity gain and internal ref */
            scale_uv = (2500ULL * 100000000ULL) >>
                       (chan->scan_type.realbits -
                        (unipolar ? 0 : 1));
            break;
        default:
            return -EINVAL;
        }

        *val2 = do_div(scale_uv, 100000000) * 10;
        *val = scale_uv;

        return IIO_VAL_INT_PLUS_NANO;
    }
    return -EINVAL;
}

static int ad7793_write_raw(struct iio_dev *indio_dev,
                            struct iio_chan_spec const *chan,
                            int val,
                            int val2,
                            long mask)
{
    struct ad7793_state *st = iio_priv(indio_dev);
    int ret, i;
    unsigned int tmp;

    mutex_lock(&indio_dev->mlock);
    if (iio_buffer_enabled(indio_dev)) {
        mutex_unlock(&indio_dev->mlock);
        return -EBUSY;
    }

    switch (mask) {
    case IIO_CHAN_INFO_SCALE:
        ret = -EINVAL;
        /*
         * NOTE(review): only the fractional part (val2) is matched
         * against the scale table; the integer part of every entry is
         * 0 for these reference voltages, so this is sufficient here.
         */
        for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
            if (val2 == st->scale_avail[i][1]) {
                tmp = st->conf;
                st->conf &= ~AD7793_CONF_GAIN(-1);
                st->conf |= AD7793_CONF_GAIN(i);

                if (tmp != st->conf) {
                    ad7793_write_reg(st, AD7793_REG_CONF,
                                     sizeof(st->conf), st->conf);
                    ad7793_calibrate_all(st);
                }
                ret = 0;
            }
        /*
         * FIX: the original fell through into the default case here,
         * clobbering a successful result with -EINVAL.
         */
        break;
    default:
        ret = -EINVAL;
    }

    mutex_unlock(&indio_dev->mlock);
    return ret;
}

/* Only our own data-ready trigger can drive this device. */
static int ad7793_validate_trigger(struct iio_dev *indio_dev,
                                   struct iio_trigger *trig)
{
    if (indio_dev->trig != trig)
        return -EINVAL;

    return 0;
}

static int ad7793_write_raw_get_fmt(struct iio_dev *indio_dev,
                                    struct iio_chan_spec const *chan,
                                    long mask)
{
    return IIO_VAL_INT_PLUS_NANO;
}

static const struct iio_info ad7793_info = {
    .read_raw = &ad7793_read_raw,
    .write_raw = &ad7793_write_raw,
    .write_raw_get_fmt = &ad7793_write_raw_get_fmt,
    .attrs = &ad7793_attribute_group,
    .validate_trigger = ad7793_validate_trigger,
    .driver_module = THIS_MODULE,
};

/* Channel maps: AD7793 is 24-bit, AD7792 is 16-bit. */
static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
    [ID_AD7793] = {
        .channel[0] = {
            .type = IIO_VOLTAGE,
            .differential = 1,
            .indexed = 1,
            .channel = 0,
            .channel2 = 0,
            .address = AD7793_CH_AIN1P_AIN1M,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SHARED_BIT,
            .scan_index = 0,
            .scan_type = IIO_ST('s', 24, 32, 0)
        },
        .channel[1] = {
            .type = IIO_VOLTAGE,
            .differential = 1,
            .indexed = 1,
            .channel = 1,
            .channel2 = 1,
            .address = AD7793_CH_AIN2P_AIN2M,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SHARED_BIT,
            .scan_index = 1,
            .scan_type = IIO_ST('s', 24, 32, 0)
        },
        .channel[2] = {
            .type = IIO_VOLTAGE,
            .differential = 1,
            .indexed = 1,
            .channel = 2,
            .channel2 = 2,
            .address = AD7793_CH_AIN3P_AIN3M,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SHARED_BIT,
            .scan_index = 2,
            .scan_type = IIO_ST('s', 24, 32, 0)
        },
        .channel[3] = {
            .type = IIO_VOLTAGE,
            .differential = 1,
            .extend_name = "shorted",
            .indexed = 1,
            .channel = 2,
            .channel2 = 2,
            .address = AD7793_CH_AIN1M_AIN1M,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SHARED_BIT,
            .scan_index = 2,
            .scan_type = IIO_ST('s', 24, 32, 0)
        },
        .channel[4] = {
            .type = IIO_TEMP,
            .indexed = 1,
            .channel = 0,
            .address = AD7793_CH_TEMP,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
            .scan_index = 4,
            .scan_type = IIO_ST('s', 24, 32, 0),
        },
        .channel[5] = {
            .type = IIO_VOLTAGE,
            .extend_name = "supply",
            .indexed = 1,
            .channel = 4,
            .address = AD7793_CH_AVDD_MONITOR,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
            .scan_index = 5,
            .scan_type = IIO_ST('s', 24, 32, 0),
        },
        .channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),
    },
    [ID_AD7792] = {
        .channel[0] = {
            .type = IIO_VOLTAGE,
            .differential = 1,
            .indexed = 1,
            .channel = 0,
            .channel2 = 0,
            .address = AD7793_CH_AIN1P_AIN1M,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SHARED_BIT,
            .scan_index = 0,
            .scan_type = IIO_ST('s', 16, 32, 0)
        },
        .channel[1] = {
            .type = IIO_VOLTAGE,
            .differential = 1,
            .indexed = 1,
            .channel = 1,
            .channel2 = 1,
            .address = AD7793_CH_AIN2P_AIN2M,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SHARED_BIT,
            .scan_index = 1,
            .scan_type = IIO_ST('s', 16, 32, 0)
        },
        .channel[2] = {
            .type = IIO_VOLTAGE,
            .differential = 1,
            .indexed = 1,
            .channel = 2,
            .channel2 = 2,
            .address = AD7793_CH_AIN3P_AIN3M,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SHARED_BIT,
            .scan_index = 2,
            .scan_type = IIO_ST('s', 16, 32, 0)
        },
        .channel[3] = {
            .type = IIO_VOLTAGE,
            .differential = 1,
            .extend_name = "shorted",
            .indexed = 1,
            .channel = 2,
            .channel2 = 2,
            .address = AD7793_CH_AIN1M_AIN1M,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SHARED_BIT,
            .scan_index = 2,
            .scan_type = IIO_ST('s', 16, 32, 0)
        },
        .channel[4] = {
            .type = IIO_TEMP,
            .indexed = 1,
            .channel = 0,
            .address = AD7793_CH_TEMP,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
            .scan_index = 4,
            .scan_type = IIO_ST('s', 16, 32, 0),
        },
        .channel[5] = {
            .type = IIO_VOLTAGE,
            .extend_name = "supply",
            .indexed = 1,
            .channel = 4,
            .address = AD7793_CH_AVDD_MONITOR,
            .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
                IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
            .scan_index = 5,
            .scan_type = IIO_ST('s', 16, 32, 0),
        },
        .channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),
    },
};

static int __devinit ad7793_probe(struct spi_device *spi)
{
    struct ad7793_platform_data *pdata = spi->dev.platform_data;
    struct ad7793_state *st;
    struct iio_dev *indio_dev;
    int ret, i, voltage_uv = 0;

    if (!pdata) {
        dev_err(&spi->dev, "no platform data?\n");
        return -ENODEV;
    }

    if (!spi->irq) {
        dev_err(&spi->dev, "no IRQ?\n");
        return -ENODEV;
    }

    indio_dev = iio_device_alloc(sizeof(*st));
    if (indio_dev == NULL)
        return -ENOMEM;

    st = iio_priv(indio_dev);

    st->reg = regulator_get(&spi->dev, "vcc");
    if (!IS_ERR(st->reg)) {
        ret = regulator_enable(st->reg);
        if (ret)
            goto error_put_reg;

        voltage_uv = regulator_get_voltage(st->reg);
    }

    st->chip_info =
        &ad7793_chip_info_tbl[spi_get_device_id(spi)->driver_data];

    st->pdata = pdata;

    /* Reference priority: platform data, then regulator, then internal. */
    if (pdata && pdata->vref_mv)
        st->int_vref_mv = pdata->vref_mv;
    else if (voltage_uv)
        st->int_vref_mv = voltage_uv / 1000;
    else
        st->int_vref_mv = 2500; /* Build-in ref */

    spi_set_drvdata(spi, indio_dev);

    st->spi = spi;

    indio_dev->dev.parent = &spi->dev;
    indio_dev->name = spi_get_device_id(spi)->name;
    indio_dev->modes = INDIO_DIRECT_MODE;
    indio_dev->channels = st->chip_info->channel;
    indio_dev->available_scan_masks = st->available_scan_masks;
    indio_dev->num_channels = 7;
    indio_dev->info = &ad7793_info;

    /* Each mask: one data channel plus the timestamp channel. */
    for (i = 0; i < indio_dev->num_channels; i++) {
        set_bit(i, &st->available_scan_masks[i]);
        set_bit(indio_dev->
                channels[indio_dev->num_channels - 1].scan_index,
                &st->available_scan_masks[i]);
    }

    init_waitqueue_head(&st->wq_data_avail);

    ret = ad7793_register_ring_funcs_and_init(indio_dev);
    if (ret)
        goto error_disable_reg;

    ret = ad7793_probe_trigger(indio_dev);
    if (ret)
        goto error_unreg_ring;

    ret = iio_buffer_register(indio_dev,
                              indio_dev->channels,
                              indio_dev->num_channels);
    if (ret)
        goto error_remove_trigger;

    ret = ad7793_setup(st);
    if (ret)
        goto error_uninitialize_ring;

    ret = iio_device_register(indio_dev);
    if (ret)
        goto error_uninitialize_ring;

    return 0;

error_uninitialize_ring:
    iio_buffer_unregister(indio_dev);
error_remove_trigger:
    ad7793_remove_trigger(indio_dev);
error_unreg_ring:
    ad7793_ring_cleanup(indio_dev);
error_disable_reg:
    if (!IS_ERR(st->reg))
        regulator_disable(st->reg);
error_put_reg:
    if (!IS_ERR(st->reg))
        regulator_put(st->reg);

    iio_device_free(indio_dev);

    return ret;
}

static int ad7793_remove(struct spi_device *spi)
{
    struct iio_dev *indio_dev = spi_get_drvdata(spi);
    struct ad7793_state *st = iio_priv(indio_dev);

    iio_device_unregister(indio_dev);
    iio_buffer_unregister(indio_dev);
    ad7793_remove_trigger(indio_dev);
    ad7793_ring_cleanup(indio_dev);

    if (!IS_ERR(st->reg)) {
        regulator_disable(st->reg);
        regulator_put(st->reg);
    }

    iio_device_free(indio_dev);

    return 0;
}

static const struct spi_device_id ad7793_id[] = {
    {"ad7792", ID_AD7792},
    {"ad7793", ID_AD7793},
    {}
};
MODULE_DEVICE_TABLE(spi, ad7793_id);

static struct spi_driver ad7793_driver = {
    .driver = {
        .name = "ad7793",
        .owner = THIS_MODULE,
    },
    .probe = ad7793_probe,
    .remove = __devexit_p(ad7793_remove),
    .id_table = ad7793_id,
};
module_spi_driver(ad7793_driver);

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7792/3 ADC");
MODULE_LICENSE("GPL v2");
gpl-2.0
exynos4-sdk/kernel
net/wireless/mlme.c
50
26749
/*
 * cfg80211 MLME SAP interface
 *
 * Copyright (c) 2009, Jouni Malinen <j@w1.fi>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/nl80211.h>
#include <linux/slab.h>
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include <net/iw_handler.h>
#include "core.h"
#include "nl80211.h"

/* Driver reports a received Authentication frame; forward it to nl80211
 * and the in-kernel SME under the wdev lock. */
void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct wiphy *wiphy = wdev->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

    wdev_lock(wdev);

    nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
    cfg80211_sme_rx_auth(dev, buf, len);

    wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_rx_auth);

/* Driver reports a received (Re)Association Response. The @bss
 * reference is consumed on every path (put directly on the early-out
 * paths, otherwise handed to __cfg80211_connect_result()). */
void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
                            const u8 *buf, size_t len)
{
    u16 status_code;
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct wiphy *wiphy = wdev->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
    struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
    u8 *ie = mgmt->u.assoc_resp.variable;
    int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);

    wdev_lock(wdev);

    status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);

    /*
     * This is a bit of a hack, we don't notify userspace of
     * a (re-)association reply if we tried to send a reassoc
     * and got a reject -- we only try again with an assoc
     * frame instead of reassoc.
     */
    if (status_code != WLAN_STATUS_SUCCESS && wdev->conn &&
        cfg80211_sme_failed_reassoc(wdev)) {
        cfg80211_put_bss(bss);
        goto out;
    }

    nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL);

    if (status_code != WLAN_STATUS_SUCCESS && wdev->conn) {
        cfg80211_sme_failed_assoc(wdev);
        /*
         * do not call connect_result() now because the
         * sme will schedule work that does it later.
         */
        cfg80211_put_bss(bss);
        goto out;
    }

    if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) {
        /*
         * This is for the userspace SME, the CONNECTING
         * state will be changed to CONNECTED by
         * __cfg80211_connect_result() below.
         */
        wdev->sme_state = CFG80211_SME_CONNECTING;
    }

    /* this consumes the bss reference */
    __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
                              status_code,
                              status_code == WLAN_STATUS_SUCCESS, bss);
out:
    wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_rx_assoc);

/* Process a Deauthentication frame (locked variant — caller holds the
 * wdev lock). Drops current_bss if it matches and notifies the SME. */
void __cfg80211_send_deauth(struct net_device *dev,
                            const u8 *buf, size_t len)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct wiphy *wiphy = wdev->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
    struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
    const u8 *bssid = mgmt->bssid;
    bool was_current = false;

    ASSERT_WDEV_LOCK(wdev);

    if (wdev->current_bss &&
        ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
        cfg80211_unhold_bss(wdev->current_bss);
        cfg80211_put_bss(&wdev->current_bss->pub);
        wdev->current_bss = NULL;
        was_current = true;
    }

    nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);

    if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) {
        u16 reason_code;
        bool from_ap;

        reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);

        /* frames not from ourselves are treated as coming from the AP */
        from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
        __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
    } else if (wdev->sme_state == CFG80211_SME_CONNECTING) {
        __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
                                  WLAN_STATUS_UNSPECIFIED_FAILURE,
                                  false, NULL);
    }
}
EXPORT_SYMBOL(__cfg80211_send_deauth);

/* Locking wrapper around __cfg80211_send_deauth(). */
void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;

    wdev_lock(wdev);
    __cfg80211_send_deauth(dev, buf, len);
    wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_deauth);

/* Process a Disassociation frame (caller holds the wdev lock). */
void __cfg80211_send_disassoc(struct net_device *dev,
                              const u8 *buf, size_t len)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct wiphy *wiphy = wdev->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
    struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
    const u8 *bssid = mgmt->bssid;
    u16 reason_code;
    bool from_ap;

    ASSERT_WDEV_LOCK(wdev);

    nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL);

    if (wdev->sme_state != CFG80211_SME_CONNECTED)
        return;

    if (wdev->current_bss &&
        ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
        cfg80211_sme_disassoc(dev, wdev->current_bss);
        cfg80211_unhold_bss(wdev->current_bss);
        cfg80211_put_bss(&wdev->current_bss->pub);
        wdev->current_bss = NULL;
    } else
        WARN_ON(1);

    reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);

    from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
    __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
}
EXPORT_SYMBOL(__cfg80211_send_disassoc);

/* Locking wrapper around __cfg80211_send_disassoc(). */
void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf,
                            size_t len)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;

    wdev_lock(wdev);
    __cfg80211_send_disassoc(dev, buf, len);
    wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_disassoc);

/* Unprotected deauth (802.11w): only reported to userspace. GFP_ATOMIC
 * since this may be called from the driver's RX path. */
void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf,
                                 size_t len)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct wiphy *wiphy = wdev->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

    nl80211_send_unprot_deauth(rdev, dev, buf, len, GFP_ATOMIC);
}
EXPORT_SYMBOL(cfg80211_send_unprot_deauth);

/* Unprotected disassoc (802.11w): only reported to userspace. */
void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
                                   size_t len)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct wiphy *wiphy = wdev->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

    nl80211_send_unprot_disassoc(rdev, dev, buf, len, GFP_ATOMIC);
}
EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);

/* Authentication attempt timed out; fail a pending connect attempt. */
void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct wiphy *wiphy = wdev->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

    wdev_lock(wdev);

    nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL);
    if (wdev->sme_state == CFG80211_SME_CONNECTING)
        __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0,
                                  WLAN_STATUS_UNSPECIFIED_FAILURE,
                                  false, NULL);

    wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_auth_timeout);

/* Association attempt timed out; fail a pending connect attempt. */
void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct wiphy *wiphy = wdev->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

    wdev_lock(wdev);

    nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL);
    if (wdev->sme_state == CFG80211_SME_CONNECTING)
        __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0,
                                  WLAN_STATUS_UNSPECIFIED_FAILURE,
                                  false, NULL);

    wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_assoc_timeout);

/* Report a Michael MIC failure to userspace, both via the legacy
 * wireless-extensions custom event (if enabled) and via nl80211. */
void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
                                  enum nl80211_key_type key_type, int key_id,
                                  const u8 *tsc, gfp_t gfp)
{
    struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
#ifdef CONFIG_CFG80211_WEXT
    union iwreq_data wrqu;
    char *buf = kmalloc(128, gfp);

    if (buf) {
        sprintf(buf, "MLME-MICHAELMICFAILURE.indication("
                "keyid=%d %scast addr=%pM)", key_id,
                key_type == NL80211_KEYTYPE_GROUP ? "broad" : "uni",
                addr);
        memset(&wrqu, 0, sizeof(wrqu));
        wrqu.data.length = strlen(buf);
        wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
        kfree(buf);
    }
#endif

    nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp);
}
EXPORT_SYMBOL(cfg80211_michael_mic_failure);

/* some MLME handling for userspace SME */
/* Issue an authentication request to the driver (wdev lock held).
 * A WEP key is required for shared-key auth. Temporarily takes a
 * reference on the target BSS for the duration of the ->auth() call. */
int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
                         struct net_device *dev,
                         struct ieee80211_channel *chan,
                         enum nl80211_auth_type auth_type,
                         const u8 *bssid,
                         const u8 *ssid, int ssid_len,
                         const u8 *ie, int ie_len,
                         const u8 *key, int key_len, int key_idx)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct cfg80211_auth_request req;
    int err;

    ASSERT_WDEV_LOCK(wdev);

    if (auth_type == NL80211_AUTHTYPE_SHARED_KEY)
        if (!key || !key_len || key_idx < 0 || key_idx > 4)
            return -EINVAL;

    if (wdev->current_bss &&
        ether_addr_equal(bssid, wdev->current_bss->pub.bssid))
        return -EALREADY;

    memset(&req, 0, sizeof(req));

    req.ie = ie;
    req.ie_len = ie_len;
    req.auth_type = auth_type;
    req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
                               WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
    req.key = key;
    req.key_len = key_len;
    req.key_idx = key_idx;
    if (!req.bss)
        return -ENOENT;

    err = rdev->ops->auth(&rdev->wiphy, dev, &req);

    cfg80211_put_bss(req.bss);
    return err;
}

/* Locking wrapper around __cfg80211_mlme_auth(). */
int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
                       struct net_device *dev, struct ieee80211_channel *chan,
                       enum nl80211_auth_type auth_type, const u8 *bssid,
                       const u8 *ssid, int ssid_len,
                       const u8 *ie, int ie_len,
                       const u8 *key, int key_len, int key_idx)
{
    int err;

    wdev_lock(dev->ieee80211_ptr);
    err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
                               ssid, ssid_len, ie, ie_len,
                               key, key_len, key_idx);
    wdev_unlock(dev->ieee80211_ptr);

    return err;
}

/*  Do a logical ht_capa &= ht_capa_mask.  */
void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
                               const struct ieee80211_ht_cap *ht_capa_mask)
{
    int i;
    u8 *p1, *p2;

    /* no mask means nothing may be overridden: clear everything */
    if (!ht_capa_mask) {
        memset(ht_capa, 0, sizeof(*ht_capa));
        return;
    }

    p1 = (u8 *)(ht_capa);
    p2 = (u8 *)(ht_capa_mask);

    for (i = 0; i < sizeof(*ht_capa); i++)
        p1[i] &= p2[i];
}

/* Issue an association request (wdev lock held). On reassociation the
 * SME state is temporarily flipped to CONNECTING and restored on
 * failure; the BSS reference is released only on error (the driver
 * keeps it on success). */
int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
                          struct net_device *dev,
                          struct ieee80211_channel *chan,
                          const u8 *bssid, const u8 *prev_bssid,
                          const u8 *ssid, int ssid_len,
                          const u8 *ie, int ie_len, bool use_mfp,
                          struct cfg80211_crypto_settings *crypt,
                          u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
                          struct ieee80211_ht_cap *ht_capa_mask)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct cfg80211_assoc_request req;
    int err;
    bool was_connected = false;

    ASSERT_WDEV_LOCK(wdev);

    memset(&req, 0, sizeof(req));

    if (wdev->current_bss && prev_bssid &&
        ether_addr_equal(wdev->current_bss->pub.bssid, prev_bssid)) {
        /*
         * Trying to reassociate: Allow this to proceed and let the old
         * association to be dropped when the new one is completed.
         */
        if (wdev->sme_state == CFG80211_SME_CONNECTED) {
            was_connected = true;
            wdev->sme_state = CFG80211_SME_CONNECTING;
        }
    } else if (wdev->current_bss)
        return -EALREADY;

    req.ie = ie;
    req.ie_len = ie_len;
    memcpy(&req.crypto, crypt, sizeof(req.crypto));
    req.use_mfp = use_mfp;
    req.prev_bssid = prev_bssid;
    req.flags = assoc_flags;
    if (ht_capa)
        memcpy(&req.ht_capa, ht_capa, sizeof(req.ht_capa));
    if (ht_capa_mask)
        memcpy(&req.ht_capa_mask, ht_capa_mask,
               sizeof(req.ht_capa_mask));
    cfg80211_oper_and_ht_capa(&req.ht_capa_mask,
                              rdev->wiphy.ht_capa_mod_mask);

    req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
                               WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
    if (!req.bss) {
        if (was_connected)
            wdev->sme_state = CFG80211_SME_CONNECTED;
        return -ENOENT;
    }

    err = rdev->ops->assoc(&rdev->wiphy, dev, &req);

    if (err) {
        if (was_connected)
            wdev->sme_state = CFG80211_SME_CONNECTED;
        cfg80211_put_bss(req.bss);
    }

    return err;
}

/* Locking wrapper around __cfg80211_mlme_assoc(). */
int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
                        struct net_device *dev,
                        struct ieee80211_channel *chan,
                        const u8 *bssid, const u8 *prev_bssid,
                        const u8 *ssid, int ssid_len,
                        const u8 *ie, int ie_len, bool use_mfp,
                        struct cfg80211_crypto_settings *crypt,
                        u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
                        struct ieee80211_ht_cap *ht_capa_mask)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    int err;

    wdev_lock(wdev);
    err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
                                ssid, ssid_len, ie, ie_len, use_mfp, crypt,
                                assoc_flags, ht_capa, ht_capa_mask);
    wdev_unlock(wdev);

    return err;
}

/* Issue a deauth request (wdev lock held). With @local_state_change
 * only the local association state is torn down; no frame is sent. */
int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
                           struct net_device *dev, const u8 *bssid,
                           const u8 *ie, int ie_len, u16 reason,
                           bool local_state_change)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct cfg80211_deauth_request req = {
        .bssid = bssid,
        .reason_code = reason,
        .ie = ie,
        .ie_len = ie_len,
    };

    ASSERT_WDEV_LOCK(wdev);

    if (local_state_change) {
        if (wdev->current_bss &&
            ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
            cfg80211_unhold_bss(wdev->current_bss);
            cfg80211_put_bss(&wdev->current_bss->pub);
            wdev->current_bss = NULL;
        }

        return 0;
    }

    return rdev->ops->deauth(&rdev->wiphy, dev, &req);
}

/* Locking wrapper around __cfg80211_mlme_deauth(). */
int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
                         struct net_device *dev, const u8 *bssid,
                         const u8 *ie, int ie_len, u16 reason,
                         bool local_state_change)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    int err;

    wdev_lock(wdev);
    err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason,
                                 local_state_change);
    wdev_unlock(wdev);

    return err;
}

/* Issue a disassoc request against the current BSS (wdev lock held);
 * fails with -ENOTCONN unless connected to @bssid. */
static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
                                    struct net_device *dev, const u8 *bssid,
                                    const u8 *ie, int ie_len, u16 reason,
                                    bool local_state_change)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct cfg80211_disassoc_request req;

    ASSERT_WDEV_LOCK(wdev);

    if (wdev->sme_state != CFG80211_SME_CONNECTED)
        return -ENOTCONN;

    if (WARN_ON(!wdev->current_bss))
        return -ENOTCONN;

    memset(&req, 0, sizeof(req));
    req.reason_code = reason;
    req.local_state_change = local_state_change;
    req.ie = ie;
    req.ie_len = ie_len;
    if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid))
        req.bss = &wdev->current_bss->pub;
    else
        return -ENOTCONN;

    return rdev->ops->disassoc(&rdev->wiphy, dev, &req);
}

/* Locking wrapper around __cfg80211_mlme_disassoc(). */
int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
                           struct net_device *dev, const u8 *bssid,
                           const u8 *ie, int ie_len, u16 reason,
                           bool local_state_change)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    int err;

    wdev_lock(wdev);
    err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason,
                                   local_state_change);
    wdev_unlock(wdev);

    return err;
}

/* Forcefully deauth from the current BSS (e.g. on interface down) and
 * drop the local reference to it. */
void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
                        struct net_device *dev)
{
    struct wireless_dev *wdev = dev->ieee80211_ptr;
    struct cfg80211_deauth_request req;
    u8 bssid[ETH_ALEN];

    ASSERT_WDEV_LOCK(wdev);

    if (!rdev->ops->deauth)
        return;

    memset(&req, 0, sizeof(req));
    req.reason_code = WLAN_REASON_DEAUTH_LEAVING;
    req.ie = NULL;
    req.ie_len = 0;

    if (!wdev->current_bss)
        return;

    memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
    req.bssid = bssid;
    rdev->ops->deauth(&rdev->wiphy, dev, &req);

    if (wdev->current_bss) {
        cfg80211_unhold_bss(wdev->current_bss);
        cfg80211_put_bss(&wdev->current_bss->pub);
        wdev->current_bss = NULL;
    }
}

/* Driver notification: remain-on-channel period has started. */
void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
                               struct ieee80211_channel *chan,
                               enum nl80211_channel_type channel_type,
                               unsigned int duration, gfp_t gfp)
{
    struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

    nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type,
                                   duration, gfp);
}
EXPORT_SYMBOL(cfg80211_ready_on_channel);

/* Driver notification: remain-on-channel period has ended. */
void cfg80211_remain_on_channel_expired(struct net_device *dev,
                                        u64 cookie,
                                        struct ieee80211_channel *chan,
                                        enum nl80211_channel_type channel_type,
                                        gfp_t gfp)
{
    struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

    nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan,
                                          channel_type, gfp);
}
EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);

/* Driver notification: a station joined (AP/mesh mode). */
void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
                      struct station_info *sinfo, gfp_t gfp)
{
    struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

    nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
}
EXPORT_SYMBOL(cfg80211_new_sta);

/* Driver notification: a station left. */
void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
{
    struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

    nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp);
}
EXPORT_SYMBOL(cfg80211_del_sta);

/* One userspace registration for receiving a management frame subtype;
 * @match is a variable-length prefix matched against the frame body. */
struct cfg80211_mgmt_registration {
    struct list_head list;

    u32 nlpid;              /* netlink port that registered */

    int match_len;

    __le16 frame_type;

    u8 match[];             /* flexible array: frame-body prefix */
};

/* Register @snd_pid to receive @frame_type management frames whose body
 * starts with @match_data. Rejects duplicate/overlapping prefixes with
 * -EALREADY. */
int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
                                u16 frame_type, const u8 *match_data,
                                int match_len)
{
    struct wiphy *wiphy = wdev->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
    struct cfg80211_mgmt_registration *reg, *nreg;
    int err = 0;
    u16 mgmt_type;

    if (!wdev->wiphy->mgmt_stypes)
        return -EOPNOTSUPP;

    if ((frame_type & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT)
        return -EINVAL;

    if (frame_type & ~(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE))
        return -EINVAL;

    mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
    if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].rx & BIT(mgmt_type)))
        return -EINVAL;

    nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL);
    if (!nreg)
        return -ENOMEM;

    spin_lock_bh(&wdev->mgmt_registrations_lock);

    list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
        int mlen = min(match_len, reg->match_len);

        if (frame_type != le16_to_cpu(reg->frame_type))
            continue;

        if (memcmp(reg->match, match_data, mlen) == 0) {
            err = -EALREADY;
            break;
        }
    }

    if (err) {
        kfree(nreg);
        goto out;
    }

    memcpy(nreg->match, match_data, match_len);
    nreg->match_len = match_len;
    nreg->nlpid = snd_pid;
    nreg->frame_type = cpu_to_le16(frame_type);
    list_add(&nreg->list, &wdev->mgmt_registrations);

    if (rdev->ops->mgmt_frame_register)
        rdev->ops->mgmt_frame_register(wiphy, wdev->netdev,
                                       frame_type, true);

out:
    spin_unlock_bh(&wdev->mgmt_registrations_lock);

    return err;
}

/* Drop all management-frame registrations belonging to netlink port
 * @nlpid (called when the userspace socket goes away). */
void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
{
    struct wiphy *wiphy = wdev->wiphy;
    struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
    struct cfg80211_mgmt_registration *reg, *tmp;

    spin_lock_bh(&wdev->mgmt_registrations_lock);

    list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
        if (reg->nlpid != nlpid)
            continue;

        if (rdev->ops->mgmt_frame_register) {
            u16 frame_type = le16_to_cpu(reg->frame_type);
            rdev->ops->mgmt_frame_register(wiphy, wdev->netdev,
                                           frame_type, false);
        }

        list_del(&reg->list);
        kfree(reg);
    }

    spin_unlock_bh(&wdev->mgmt_registrations_lock);

    if (nlpid == wdev->ap_unexpected_nlpid)
        wdev->ap_unexpected_nlpid = 0;
}

/* body continues beyond this chunk */
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
{ struct cfg80211_mgmt_registration *reg, *tmp; spin_lock_bh(&wdev->mgmt_registrations_lock); list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) { list_del(&reg->list); kfree(reg); } spin_unlock_bh(&wdev->mgmt_registrations_lock); } int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, bool offchan, enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, const u8 *buf, size_t len, bool no_cck, bool dont_wait_for_ack, u64 *cookie) { struct wireless_dev *wdev = dev->ieee80211_ptr; const struct ieee80211_mgmt *mgmt; u16 stype; if (!wdev->wiphy->mgmt_stypes) return -EOPNOTSUPP; if (!rdev->ops->mgmt_tx) return -EOPNOTSUPP; if (len < 24 + 1) return -EINVAL; mgmt = (const struct ieee80211_mgmt *) buf; if (!ieee80211_is_mgmt(mgmt->frame_control)) return -EINVAL; stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].tx & BIT(stype >> 4))) return -EINVAL; if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) { int err = 0; wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (!wdev->current_bss) { err = -ENOTCONN; break; } if (!ether_addr_equal(wdev->current_bss->pub.bssid, mgmt->bssid)) { err = -ENOTCONN; break; } /* * check for IBSS DA must be done by driver as * cfg80211 doesn't track the stations */ if (wdev->iftype == NL80211_IFTYPE_ADHOC) break; /* for station, check that DA is the AP */ if (!ether_addr_equal(wdev->current_bss->pub.bssid, mgmt->da)) { err = -ENOTCONN; break; } break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP_VLAN: if (!ether_addr_equal(mgmt->bssid, dev->dev_addr)) err = -EINVAL; break; case NL80211_IFTYPE_MESH_POINT: if (!ether_addr_equal(mgmt->sa, mgmt->bssid)) { err = -EINVAL; break; } /* * check for mesh DA must be done by driver 
as * cfg80211 doesn't track the stations */ break; default: err = -EOPNOTSUPP; break; } wdev_unlock(wdev); if (err) return err; } if (!ether_addr_equal(mgmt->sa, dev->dev_addr)) return -EINVAL; /* Transmit the Action frame as requested by user space */ return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan, channel_type, channel_type_valid, wait, buf, len, no_cck, dont_wait_for_ack, cookie); } bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm, const u8 *buf, size_t len, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); struct cfg80211_mgmt_registration *reg; const struct ieee80211_txrx_stypes *stypes = &wiphy->mgmt_stypes[wdev->iftype]; struct ieee80211_mgmt *mgmt = (void *)buf; const u8 *data; int data_len; bool result = false; __le16 ftype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); u16 stype; stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4; if (!(stypes->rx & BIT(stype))) return false; data = buf + ieee80211_hdrlen(mgmt->frame_control); data_len = len - ieee80211_hdrlen(mgmt->frame_control); spin_lock_bh(&wdev->mgmt_registrations_lock); list_for_each_entry(reg, &wdev->mgmt_registrations, list) { if (reg->frame_type != ftype) continue; if (reg->match_len > data_len) continue; if (memcmp(reg->match, data, reg->match_len)) continue; /* found match! 
*/ /* Indicate the received Action frame to user space */ if (nl80211_send_mgmt(rdev, dev, reg->nlpid, freq, sig_mbm, buf, len, gfp)) continue; result = true; break; } spin_unlock_bh(&wdev->mgmt_registrations_lock); return result; } EXPORT_SYMBOL(cfg80211_rx_mgmt); void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie, const u8 *buf, size_t len, bool ack, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); /* Indicate TX status of the Action frame to user space */ nl80211_send_mgmt_tx_status(rdev, dev, cookie, buf, len, ack, gfp); } EXPORT_SYMBOL(cfg80211_mgmt_tx_status); void cfg80211_cqm_rssi_notify(struct net_device *dev, enum nl80211_cqm_rssi_threshold_event rssi_event, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); /* Indicate roaming trigger event to user space */ nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp); } EXPORT_SYMBOL(cfg80211_cqm_rssi_notify); void cfg80211_cqm_pktloss_notify(struct net_device *dev, const u8 *peer, u32 num_packets, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); /* Indicate roaming trigger event to user space */ nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp); } EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify); void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp); } EXPORT_SYMBOL(cfg80211_gtk_rekey_notify); void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, const u8 *bssid, bool preauth, gfp_t gfp) { 
struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); } EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); void cfg80211_ch_switch_notify(struct net_device *dev, int freq, enum nl80211_channel_type type) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); struct ieee80211_channel *chan; wdev_lock(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO)) goto out; chan = rdev_freq_to_chan(rdev, freq, type); if (WARN_ON(!chan)) goto out; wdev->channel = chan; nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL); out: wdev_unlock(wdev); return; } EXPORT_SYMBOL(cfg80211_ch_switch_notify); bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO)) return false; return nl80211_unexpected_frame(dev, addr, gfp); } EXPORT_SYMBOL(cfg80211_rx_spurious_frame); bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO && wdev->iftype != NL80211_IFTYPE_AP_VLAN)) return false; return nl80211_unexpected_4addr_frame(dev, addr, gfp); } EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
gpl-2.0
akca/android_kernel_xiaomi_msm8996
drivers/platform/msm/msm_bus/msm_bus_arb.c
818
31983
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/radix-tree.h> #include <linux/clk.h> #include <linux/msm-bus.h> #include "msm_bus_core.h" #include <trace/events/trace_msm_bus.h> #define INDEX_MASK 0x0000FFFF #define PNODE_MASK 0xFFFF0000 #define SHIFT_VAL 16 #define CREATE_PNODE_ID(n, i) (((n) << SHIFT_VAL) | (i)) #define GET_INDEX(n) ((n) & INDEX_MASK) #define GET_NODE(n) ((n) >> SHIFT_VAL) #define IS_NODE(n) ((n) % FABRIC_ID_KEY) #define SEL_FAB_CLK 1 #define SEL_SLAVE_CLK 0 /* * To get to BIMC BW convert Hz to bytes by multiplying bus width(8), * double-data-rate(2) * ddr-channels(2). */ #define GET_BIMC_BW(clk) (clk * 8 * 2 * 2) #define BW_TO_CLK_FREQ_HZ(width, bw) \ msm_bus_div64(width, bw) #define IS_MASTER_VALID(mas) \ (((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \ ? 1 : 0) #define IS_SLAVE_VALID(slv) \ (((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0) static DEFINE_MUTEX(msm_bus_lock); /* This function uses shift operations to divide 64 bit value for higher * efficiency. The divisor expected are number of ports or bus-width. * These are expected to be 1, 2, 4, 8, 16 and 32 in most cases. * * To account for exception to the above divisor values, the standard * do_div function is used. 
* */ uint64_t msm_bus_div64(unsigned int w, uint64_t bw) { uint64_t *b = &bw; if ((bw > 0) && (bw < w)) return 1; switch (w) { case 0: WARN(1, "AXI: Divide by 0 attempted\n"); case 1: return bw; case 2: return (bw >> 1); case 4: return (bw >> 2); case 8: return (bw >> 3); case 16: return (bw >> 4); case 32: return (bw >> 5); } do_div(*b, w); return *b; } /** * add_path_node: Adds the path information to the current node * @info: Internal node info structure * @next: Combination of the id and index of the next node * Function returns: Number of pnodes (path_nodes) on success, * error on failure. * * Every node maintains the list of path nodes. A path node is * reached by finding the node-id and index stored at the current * node. This makes updating the paths with requested bw and clock * values efficient, as it avoids lookup for each update-path request. */ static int add_path_node(struct msm_bus_inode_info *info, int next) { struct path_node *pnode; int i; if (ZERO_OR_NULL_PTR(info)) { MSM_BUS_ERR("Cannot find node info!: id :%d\n", info->node_info->priv_id); return -ENXIO; } for (i = 0; i <= info->num_pnodes; i++) { if (info->pnode[i].next == -2) { MSM_BUS_DBG("Reusing pnode for info: %d at index: %d\n", info->node_info->priv_id, i); info->pnode[i].clk[DUAL_CTX] = 0; info->pnode[i].clk[ACTIVE_CTX] = 0; info->pnode[i].bw[DUAL_CTX] = 0; info->pnode[i].bw[ACTIVE_CTX] = 0; info->pnode[i].next = next; MSM_BUS_DBG("%d[%d] : (%d, %d)\n", info->node_info->priv_id, i, GET_NODE(next), GET_INDEX(next)); return i; } } info->num_pnodes++; pnode = krealloc(info->pnode, ((info->num_pnodes + 1) * sizeof(struct path_node)) , GFP_KERNEL); if (ZERO_OR_NULL_PTR(pnode)) { MSM_BUS_ERR("Error creating path node!\n"); info->num_pnodes--; return -ENOMEM; } info->pnode = pnode; info->pnode[info->num_pnodes].clk[DUAL_CTX] = 0; info->pnode[info->num_pnodes].clk[ACTIVE_CTX] = 0; info->pnode[info->num_pnodes].bw[DUAL_CTX] = 0; info->pnode[info->num_pnodes].bw[ACTIVE_CTX] = 0; 
info->pnode[info->num_pnodes].next = next; MSM_BUS_DBG("%d[%d] : (%d, %d)\n", info->node_info->priv_id, info->num_pnodes, GET_NODE(next), GET_INDEX(next)); return info->num_pnodes; } static int clearvisitedflag(struct device *dev, void *data) { struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev); fabdev->visited = false; return 0; } /** * getpath() - Finds the path from the topology between src and dest * @src: Source. This is the master from which the request originates * @dest: Destination. This is the slave to which we're trying to reach * * Function returns: next_pnode_id. The higher 16 bits of the next_pnode_id * represent the src id of the next node on path. The lower 16 bits of the * next_pnode_id represent the "index", which is the next entry in the array * of pnodes for that node to fill in clk and bw values. This is created using * CREATE_PNODE_ID. The return value is stored in ret_pnode, and this is added * to the list of path nodes. * * This function recursively finds the path by updating the src to the * closest possible node to dest. */ static int getpath(int src, int dest) { int pnode_num = -1, i; struct msm_bus_fabnodeinfo *fabnodeinfo; struct msm_bus_fabric_device *fabdev; int next_pnode_id = -1; struct msm_bus_inode_info *info = NULL; int _src = src/FABRIC_ID_KEY; int _dst = dest/FABRIC_ID_KEY; int ret_pnode = -1; int fabid = GET_FABID(src); /* Find the location of fabric for the src */ MSM_BUS_DBG("%d --> %d\n", src, dest); fabdev = msm_bus_get_fabric_device(fabid); if (!fabdev) { MSM_BUS_WARN("Fabric Not yet registered. Try again\n"); return -ENXIO; } /* Are we there yet? 
*/ if (src == dest) { info = fabdev->algo->find_node(fabdev, src); if (ZERO_OR_NULL_PTR(info)) { MSM_BUS_ERR("Node %d not found\n", dest); return -ENXIO; } for (i = 0; i <= info->num_pnodes; i++) { if (info->pnode[i].next == -2) { MSM_BUS_DBG("src = dst Reusing pnode for" " info: %d at index: %d\n", info->node_info->priv_id, i); next_pnode_id = CREATE_PNODE_ID(src, i); info->pnode[i].clk[DUAL_CTX] = 0; info->pnode[i].bw[DUAL_CTX] = 0; info->pnode[i].next = next_pnode_id; MSM_BUS_DBG("returning: %d, %d\n", GET_NODE (next_pnode_id), GET_INDEX(next_pnode_id)); return next_pnode_id; } } next_pnode_id = CREATE_PNODE_ID(src, (info->num_pnodes + 1)); pnode_num = add_path_node(info, next_pnode_id); if (pnode_num < 0) { MSM_BUS_ERR("Error adding path node\n"); return -ENXIO; } MSM_BUS_DBG("returning: %d, %d\n", GET_NODE(next_pnode_id), GET_INDEX(next_pnode_id)); return next_pnode_id; } else if (_src == _dst) { /* * src and dest belong to same fabric, find the destination * from the radix tree */ info = fabdev->algo->find_node(fabdev, dest); if (ZERO_OR_NULL_PTR(info)) { MSM_BUS_ERR("Node %d not found\n", dest); return -ENXIO; } ret_pnode = getpath(info->node_info->priv_id, dest); next_pnode_id = ret_pnode; } else { /* find the dest fabric */ int trynextgw = true; struct list_head *gateways = fabdev->algo->get_gw_list(fabdev); list_for_each_entry(fabnodeinfo, gateways, list) { /* see if the destination is at a connected fabric */ if (_dst == (fabnodeinfo->info->node_info->priv_id / FABRIC_ID_KEY)) { /* Found the fab on which the device exists */ info = fabnodeinfo->info; trynextgw = false; ret_pnode = getpath(info->node_info->priv_id, dest); pnode_num = add_path_node(info, ret_pnode); if (pnode_num < 0) { MSM_BUS_ERR("Error adding path node\n"); return -ENXIO; } next_pnode_id = CREATE_PNODE_ID( info->node_info->priv_id, pnode_num); break; } } /* find the gateway */ if (trynextgw) { gateways = fabdev->algo->get_gw_list(fabdev); list_for_each_entry(fabnodeinfo, gateways, list) 
{ struct msm_bus_fabric_device *gwfab = msm_bus_get_fabric_device(fabnodeinfo-> info->node_info->priv_id); if (!gwfab) { MSM_BUS_ERR("Err: No gateway found\n"); return -ENXIO; } if (!gwfab->visited) { MSM_BUS_DBG("VISITED ID: %d\n", gwfab->id); gwfab->visited = true; info = fabnodeinfo->info; ret_pnode = getpath(info-> node_info->priv_id, dest); pnode_num = add_path_node(info, ret_pnode); if (pnode_num < 0) { MSM_BUS_ERR("Malloc failure in" " adding path node\n"); return -ENXIO; } next_pnode_id = CREATE_PNODE_ID( info->node_info->priv_id, pnode_num); break; } } if (next_pnode_id < 0) return -ENXIO; } } if (!IS_NODE(src)) { MSM_BUS_DBG("Returning next_pnode_id:%d[%d]\n", GET_NODE( next_pnode_id), GET_INDEX(next_pnode_id)); return next_pnode_id; } info = fabdev->algo->find_node(fabdev, src); if (!info) { MSM_BUS_ERR("Node info not found.\n"); return -ENXIO; } pnode_num = add_path_node(info, next_pnode_id); MSM_BUS_DBG(" Last: %d[%d] = (%d, %d)\n", src, info->num_pnodes, GET_NODE(next_pnode_id), GET_INDEX(next_pnode_id)); MSM_BUS_DBG("returning: %d, %d\n", src, pnode_num); return CREATE_PNODE_ID(src, pnode_num); } static uint64_t get_node_maxib(struct msm_bus_inode_info *info) { int i, ctx; uint64_t maxib = 0; for (i = 0; i <= info->num_pnodes; i++) { for (ctx = 0; ctx < NUM_CTX; ctx++) maxib = max(info->pnode[i].clk[ctx], maxib); } MSM_BUS_DBG("%s: Node %d numpnodes %d maxib %llu", __func__, info->num_pnodes, info->node_info->id, maxib); return maxib; } static uint64_t get_node_sumab(struct msm_bus_inode_info *info) { int i; uint64_t maxab = 0; for (i = 0; i <= info->num_pnodes; i++) maxab += info->pnode[i].bw[DUAL_CTX]; MSM_BUS_DBG("%s: Node %d numpnodes %d maxib %llu", __func__, info->num_pnodes, info->node_info->id, maxab); return maxab; } static uint64_t get_vfe_bw(void) { int vfe_id = MSM_BUS_MASTER_VFE; int iid = msm_bus_board_get_iid(vfe_id); int fabid; struct msm_bus_fabric_device *fabdev; struct msm_bus_inode_info *info; uint64_t vfe_bw = 0; fabid = 
GET_FABID(iid); fabdev = msm_bus_get_fabric_device(fabid); info = fabdev->algo->find_node(fabdev, iid); if (!info) { MSM_BUS_ERR("%s: Can't find node %d", __func__, vfe_id); goto exit_get_vfe_bw; } vfe_bw = get_node_sumab(info); MSM_BUS_DBG("vfe_ab %llu", vfe_bw); exit_get_vfe_bw: return vfe_bw; } static uint64_t get_mdp_bw(void) { int ids[] = {MSM_BUS_MASTER_MDP_PORT0, MSM_BUS_MASTER_MDP_PORT1}; int i; uint64_t mdp_ab = 0; uint32_t ff = 0; for (i = 0; i < ARRAY_SIZE(ids); i++) { int iid = msm_bus_board_get_iid(ids[i]); int fabid; struct msm_bus_fabric_device *fabdev; struct msm_bus_inode_info *info; fabid = GET_FABID(iid); fabdev = msm_bus_get_fabric_device(fabid); info = fabdev->algo->find_node(fabdev, iid); if (!info) { MSM_BUS_ERR("%s: Can't find node %d", __func__, ids[i]); continue; } mdp_ab += get_node_sumab(info); MSM_BUS_DBG("mdp_ab %llu", mdp_ab); ff = info->node_info->ff; } if (ff) { mdp_ab = msm_bus_div64(2 * ff, 100 * mdp_ab); } else { MSM_BUS_ERR("MDP FF is 0"); mdp_ab = 0; } MSM_BUS_DBG("MDP BW %llu\n", mdp_ab); return mdp_ab; } static uint64_t get_rt_bw(void) { uint64_t rt_bw = 0; rt_bw += get_mdp_bw(); rt_bw += get_vfe_bw(); return rt_bw; } static uint64_t get_avail_bw(struct msm_bus_fabric_device *fabdev) { uint64_t fabclk_rate = 0; int i; uint64_t avail_bw = 0; uint64_t rt_bw = get_rt_bw(); struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); if (!rt_bw) goto exit_get_avail_bw; for (i = 0; i < NUM_CTX; i++) { uint64_t ctx_rate; ctx_rate = fabric->info.nodeclk[i].rate; fabclk_rate = max(ctx_rate, fabclk_rate); } if (!fabdev->eff_fact || !fabdev->nr_lim_thresh) { MSM_BUS_ERR("Error: Eff-fact %d; nr_thresh %llu", fabdev->eff_fact, fabdev->nr_lim_thresh); return 0; } avail_bw = msm_bus_div64(100, (GET_BIMC_BW(fabclk_rate) * fabdev->eff_fact)); if (avail_bw >= fabdev->nr_lim_thresh) return 0; MSM_BUS_DBG("%s: Total_avail_bw %llu, rt_bw %llu\n", __func__, avail_bw, rt_bw); trace_bus_avail_bw(avail_bw, rt_bw); if (avail_bw < rt_bw) { 
MSM_BUS_ERR("\n%s: ERROR avail BW %llu < MDP %llu", __func__, avail_bw, rt_bw); avail_bw = 0; goto exit_get_avail_bw; } avail_bw -= rt_bw; exit_get_avail_bw: return avail_bw; } static void program_nr_limits(struct msm_bus_fabric_device *fabdev) { int num_nr_lim = 0; int i; struct msm_bus_inode_info *info[fabdev->num_nr_lim]; struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); num_nr_lim = radix_tree_gang_lookup_tag(&fabric->fab_tree, (void **)&info, fabric->fabdev.id, fabdev->num_nr_lim, MASTER_NODE); for (i = 0; i < num_nr_lim; i++) fabdev->algo->config_limiter(fabdev, info[i]); } static int msm_bus_commit_limiter(struct device *dev, void *data) { int ret = 0; struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev); MSM_BUS_DBG("fabid: %d\n", fabdev->id); program_nr_limits(fabdev); return ret; } static void compute_nr_limits(struct msm_bus_fabric_device *fabdev, int pnode) { uint64_t total_ib = 0; int num_nr_lim = 0; uint64_t avail_bw = 0; struct msm_bus_inode_info *info[fabdev->num_nr_lim]; struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); int i; num_nr_lim = radix_tree_gang_lookup_tag(&fabric->fab_tree, (void **)&info, fabric->fabdev.id, fabdev->num_nr_lim, MASTER_NODE); MSM_BUS_DBG("%s: Found %d NR LIM nodes", __func__, num_nr_lim); for (i = 0; i < num_nr_lim; i++) total_ib += get_node_maxib(info[i]); avail_bw = get_avail_bw(fabdev); MSM_BUS_DBG("\n %s: Avail BW %llu", __func__, avail_bw); for (i = 0; i < num_nr_lim; i++) { uint32_t node_pct = 0; uint64_t new_lim_bw = 0; uint64_t node_max_ib = 0; uint32_t node_max_ib_kB = 0; uint32_t total_ib_kB = 0; uint64_t bw_node; node_max_ib = get_node_maxib(info[i]); node_max_ib_kB = msm_bus_div64(1024, node_max_ib); total_ib_kB = msm_bus_div64(1024, total_ib); node_pct = (node_max_ib_kB * 100) / total_ib_kB; bw_node = node_pct * avail_bw; new_lim_bw = msm_bus_div64(100, bw_node); /* * if limiter bw is more than the requested IB clip to requested IB. 
*/ if (new_lim_bw >= node_max_ib) new_lim_bw = node_max_ib; /* * if there is a floor bw for this nr lim node and * if there is available bw to divy up among the nr masters * and if the nr lim masters have a non zero vote and * if the limited bw is below the floor for this node. * then limit this node to the floor bw. */ if (info[i]->node_info->floor_bw && node_max_ib && avail_bw && (new_lim_bw <= info[i]->node_info->floor_bw)) { MSM_BUS_ERR("\nNode %d:Limiting BW:%llu < floor:%llu", info[i]->node_info->id, new_lim_bw, info[i]->node_info->floor_bw); new_lim_bw = info[i]->node_info->floor_bw; } if (new_lim_bw != info[i]->cur_lim_bw) { info[i]->cur_lim_bw = new_lim_bw; MSM_BUS_DBG("NodeId %d: Requested IB %llu", info[i]->node_info->id, node_max_ib); MSM_BUS_DBG("Limited to %llu(%d pct of Avail %llu )\n", new_lim_bw, node_pct, avail_bw); } else { MSM_BUS_DBG("NodeId %d: No change Limited to %llu\n", info[i]->node_info->id, info[i]->cur_lim_bw); } } } static void setup_nr_limits(int curr, int pnode) { struct msm_bus_fabric_device *fabdev = msm_bus_get_fabric_device(GET_FABID(curr)); struct msm_bus_inode_info *info; if (!fabdev) { MSM_BUS_WARN("Fabric Not yet registered. 
Try again\n"); goto exit_setup_nr_limits; } /* This logic is currently applicable to BIMC masters only */ if (fabdev->id != MSM_BUS_FAB_DEFAULT) { MSM_BUS_ERR("Static limiting of NR masters only for BIMC\n"); goto exit_setup_nr_limits; } info = fabdev->algo->find_node(fabdev, curr); if (!info) { MSM_BUS_ERR("Cannot find node info!\n"); goto exit_setup_nr_limits; } compute_nr_limits(fabdev, pnode); exit_setup_nr_limits: return; } static bool is_nr_lim(int id) { struct msm_bus_fabric_device *fabdev = msm_bus_get_fabric_device (GET_FABID(id)); struct msm_bus_inode_info *info; bool ret = false; if (!fabdev) { MSM_BUS_ERR("Bus device for bus ID: %d not found!\n", GET_FABID(id)); goto exit_is_nr_lim; } info = fabdev->algo->find_node(fabdev, id); if (!info) MSM_BUS_ERR("Cannot find node info %d!\n", id); else if ((info->node_info->nr_lim || info->node_info->rt_mas)) ret = true; exit_is_nr_lim: return ret; } /** * update_path() - Update the path with the bandwidth and clock values, as * requested by the client. * * @curr: Current source node, as specified in the client vector (master) * @pnode: The first-hop node on the path, stored in the internal client struct * @req_clk: Requested clock value from the vector * @req_bw: Requested bandwidth value from the vector * @curr_clk: Current clock frequency * @curr_bw: Currently allocated bandwidth * * This function updates the nodes on the path calculated using getpath(), with * clock and bandwidth values. The sum of bandwidths, and the max of clock * frequencies is calculated at each node on the path. Commit data to be sent * to RPM for each master and slave is also calculated here. 
*/ static int update_path(int curr, int pnode, uint64_t req_clk, uint64_t req_bw, uint64_t curr_clk, uint64_t curr_bw, unsigned int ctx, unsigned int cl_active_flag) { int index, ret = 0; struct msm_bus_inode_info *info; struct msm_bus_inode_info *src_info; int next_pnode; int64_t add_bw = req_bw - curr_bw; uint64_t bwsum = 0; uint64_t req_clk_hz, curr_clk_hz, bwsum_hz; int *master_tiers; struct msm_bus_fabric_device *fabdev = msm_bus_get_fabric_device (GET_FABID(curr)); if (!fabdev) { MSM_BUS_ERR("Bus device for bus ID: %d not found!\n", GET_FABID(curr)); return -ENXIO; } MSM_BUS_DBG("args: %d %d %d %llu %llu %llu %llu %u\n", curr, GET_NODE(pnode), GET_INDEX(pnode), req_clk, req_bw, curr_clk, curr_bw, ctx); index = GET_INDEX(pnode); MSM_BUS_DBG("Client passed index :%d\n", index); info = fabdev->algo->find_node(fabdev, curr); if (!info) { MSM_BUS_ERR("Cannot find node info!\n"); return -ENXIO; } src_info = info; info->link_info.sel_bw = &info->link_info.bw[ctx]; info->link_info.sel_clk = &info->link_info.clk[ctx]; *info->link_info.sel_bw += add_bw; info->pnode[index].sel_bw = &info->pnode[index].bw[ctx]; /** * To select the right clock, AND the context with * client active flag. 
*/ info->pnode[index].sel_clk = &info->pnode[index].clk[ctx & cl_active_flag]; *info->pnode[index].sel_bw += add_bw; *info->pnode[index].sel_clk = req_clk; /** * If master supports dual configuration, check if * the configuration needs to be changed based on * incoming requests */ if (info->node_info->dual_conf) { uint64_t node_maxib = 0; node_maxib = get_node_maxib(info); fabdev->algo->config_master(fabdev, info, node_maxib, req_bw); } info->link_info.num_tiers = info->node_info->num_tiers; info->link_info.tier = info->node_info->tier; master_tiers = info->node_info->tier; do { struct msm_bus_inode_info *hop; fabdev = msm_bus_get_fabric_device(GET_FABID(curr)); if (!fabdev) { MSM_BUS_ERR("Fabric not found\n"); return -ENXIO; } MSM_BUS_DBG("id: %d\n", info->node_info->priv_id); /* find next node and index */ next_pnode = info->pnode[index].next; curr = GET_NODE(next_pnode); index = GET_INDEX(next_pnode); MSM_BUS_DBG("id:%d, next: %d\n", info-> node_info->priv_id, curr); /* Get hop */ /* check if we are here as gateway, or does the hop belong to * this fabric */ if (IS_NODE(curr)) hop = fabdev->algo->find_node(fabdev, curr); else hop = fabdev->algo->find_gw_node(fabdev, curr); if (!hop) { MSM_BUS_ERR("Null Info found for hop\n"); return -ENXIO; } hop->link_info.sel_bw = &hop->link_info.bw[ctx]; hop->link_info.sel_clk = &hop->link_info.clk[ctx]; *hop->link_info.sel_bw += add_bw; hop->pnode[index].sel_bw = &hop->pnode[index].bw[ctx]; hop->pnode[index].sel_clk = &hop->pnode[index].clk[ctx & cl_active_flag]; if (!hop->node_info->buswidth) { MSM_BUS_WARN("No bus width found. 
Using default\n"); hop->node_info->buswidth = 8; } *hop->pnode[index].sel_clk = BW_TO_CLK_FREQ_HZ(hop->node_info-> buswidth, req_clk); *hop->pnode[index].sel_bw += add_bw; MSM_BUS_DBG("fabric: %d slave: %d, slave-width: %d info: %d\n", fabdev->id, hop->node_info->priv_id, hop->node_info-> buswidth, info->node_info->priv_id); /* Update Bandwidth */ fabdev->algo->update_bw(fabdev, hop, info, add_bw, master_tiers, ctx); bwsum = *hop->link_info.sel_bw; /* Update Fabric clocks */ curr_clk_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth, curr_clk); req_clk_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth, req_clk); bwsum_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth, bwsum); /* Account for multiple channels if any */ if (hop->node_info->num_sports > 1) bwsum_hz = msm_bus_div64(hop->node_info->num_sports, bwsum_hz); MSM_BUS_DBG("AXI: Hop: %d, ports: %d, bwsum_hz: %llu\n", hop->node_info->id, hop->node_info->num_sports, bwsum_hz); MSM_BUS_DBG("up-clk: curr_hz: %llu, req_hz: %llu, bw_hz %llu\n", curr_clk, req_clk, bwsum_hz); ret = fabdev->algo->update_clks(fabdev, hop, index, curr_clk_hz, req_clk_hz, bwsum_hz, SEL_FAB_CLK, ctx, cl_active_flag); if (ret) MSM_BUS_WARN("Failed to update clk\n"); info = hop; } while (GET_NODE(info->pnode[index].next) != info->node_info->priv_id); /* Update BW, clk after exiting the loop for the last one */ if (!info) { MSM_BUS_ERR("Cannot find node info!\n"); return -ENXIO; } /* Update slave clocks */ ret = fabdev->algo->update_clks(fabdev, info, index, curr_clk_hz, req_clk_hz, bwsum_hz, SEL_SLAVE_CLK, ctx, cl_active_flag); if (ret) MSM_BUS_ERR("Failed to update clk\n"); if ((ctx == cl_active_flag) && ((src_info->node_info->nr_lim || src_info->node_info->rt_mas))) setup_nr_limits(curr, pnode); /* If freq is going down , apply the changes now before * we commit clk data. 
*/ if ((req_clk < curr_clk) || (req_bw < curr_bw)) bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_limiter); return ret; } /** * msm_bus_commit_fn() - Commits the data for fabric to rpm * @dev: fabric device * @data: NULL */ static int msm_bus_commit_fn(struct device *dev, void *data) { int ret = 0; struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev); MSM_BUS_DBG("Committing: fabid: %d\n", fabdev->id); ret = fabdev->algo->commit(fabdev); return ret; } static uint32_t register_client_legacy(struct msm_bus_scale_pdata *pdata) { struct msm_bus_client *client = NULL; int i; int src, dest, nfab; struct msm_bus_fabric_device *deffab; deffab = msm_bus_get_fabric_device(MSM_BUS_FAB_DEFAULT); if (!deffab) { MSM_BUS_ERR("Error finding default fabric\n"); return 0; } nfab = msm_bus_get_num_fab(); if (nfab < deffab->board_algo->board_nfab) { MSM_BUS_ERR("Can't register client!\n" "Num of fabrics up: %d\n", nfab); return 0; } if ((!pdata) || (pdata->usecase->num_paths == 0) || IS_ERR(pdata)) { MSM_BUS_ERR("Cannot register client with null data\n"); return 0; } client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL); if (!client) { MSM_BUS_ERR("Error allocating client\n"); return 0; } mutex_lock(&msm_bus_lock); client->pdata = pdata; client->curr = -1; for (i = 0; i < pdata->usecase->num_paths; i++) { int *pnode; struct msm_bus_fabric_device *srcfab; pnode = krealloc(client->src_pnode, ((i + 1) * sizeof(int)), GFP_KERNEL); if (ZERO_OR_NULL_PTR(pnode)) { MSM_BUS_ERR("Invalid Pnode ptr!\n"); continue; } else client->src_pnode = pnode; if (!IS_MASTER_VALID(pdata->usecase->vectors[i].src)) { MSM_BUS_ERR("Invalid Master ID %d in request!\n", pdata->usecase->vectors[i].src); goto err; } if (!IS_SLAVE_VALID(pdata->usecase->vectors[i].dst)) { MSM_BUS_ERR("Invalid Slave ID %d in request!\n", pdata->usecase->vectors[i].dst); goto err; } src = msm_bus_board_get_iid(pdata->usecase->vectors[i].src); if (src == -ENXIO) { MSM_BUS_ERR("Master %d not supported. 
Client cannot be" " registered\n", pdata->usecase->vectors[i].src); goto err; } dest = msm_bus_board_get_iid(pdata->usecase->vectors[i].dst); if (dest == -ENXIO) { MSM_BUS_ERR("Slave %d not supported. Client cannot be" " registered\n", pdata->usecase->vectors[i].dst); goto err; } srcfab = msm_bus_get_fabric_device(GET_FABID(src)); if (!srcfab) { MSM_BUS_ERR("Fabric not found\n"); goto err; } srcfab->visited = true; pnode[i] = getpath(src, dest); bus_for_each_dev(&msm_bus_type, NULL, NULL, clearvisitedflag); if (pnode[i] == -ENXIO) { MSM_BUS_ERR("Cannot register client now! Try again!\n"); goto err; } } msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER, (uint32_t)client); mutex_unlock(&msm_bus_lock); MSM_BUS_DBG("ret: %u num_paths: %d\n", (uint32_t)client, pdata->usecase->num_paths); return (uint32_t)(client); err: kfree(client->src_pnode); kfree(client); mutex_unlock(&msm_bus_lock); return 0; } static int update_request_legacy(uint32_t cl, unsigned index) { int i, ret = 0; struct msm_bus_scale_pdata *pdata; int pnode, src = 0, curr, ctx; uint64_t req_clk = 0, req_bw = 0, curr_clk = 0, curr_bw = 0; struct msm_bus_client *client = (struct msm_bus_client *)cl; if (IS_ERR_OR_NULL(client)) { MSM_BUS_ERR("msm_bus_scale_client update req error %d\n", (uint32_t)client); return -ENXIO; } mutex_lock(&msm_bus_lock); if (client->curr == index) goto err; curr = client->curr; pdata = client->pdata; if (!pdata) { MSM_BUS_ERR("Null pdata passed to update-request\n"); ret = -ENXIO; goto err; } if (index >= pdata->num_usecases) { MSM_BUS_ERR("Client %u passed invalid index: %d\n", (uint32_t)client, index); ret = -ENXIO; goto err; } MSM_BUS_DBG("cl: %u index: %d curr: %d num_paths: %d\n", cl, index, client->curr, client->pdata->usecase->num_paths); for (i = 0; i < pdata->usecase->num_paths; i++) { src = msm_bus_board_get_iid(client->pdata->usecase[index]. vectors[i].src); if (src == -ENXIO) { MSM_BUS_ERR("Master %d not supported. 
Request cannot" " be updated\n", client->pdata->usecase-> vectors[i].src); goto err; } if (msm_bus_board_get_iid(client->pdata->usecase[index]. vectors[i].dst) == -ENXIO) { MSM_BUS_ERR("Slave %d not supported. Request cannot" " be updated\n", client->pdata->usecase-> vectors[i].dst); } pnode = client->src_pnode[i]; req_clk = client->pdata->usecase[index].vectors[i].ib; req_bw = client->pdata->usecase[index].vectors[i].ab; if (curr < 0) { curr_clk = 0; curr_bw = 0; } else { curr_clk = client->pdata->usecase[curr].vectors[i].ib; curr_bw = client->pdata->usecase[curr].vectors[i].ab; MSM_BUS_DBG("ab: %llu ib: %llu\n", curr_bw, curr_clk); } if (!pdata->active_only) { ret = update_path(src, pnode, req_clk, req_bw, curr_clk, curr_bw, 0, pdata->active_only); if (ret) { MSM_BUS_ERR("Update path failed! %d\n", ret); goto err; } } ret = update_path(src, pnode, req_clk, req_bw, curr_clk, curr_bw, ACTIVE_CTX, pdata->active_only); if (ret) { MSM_BUS_ERR("Update Path failed! %d\n", ret); goto err; } } client->curr = index; ctx = ACTIVE_CTX; msm_bus_dbg_client_data(client->pdata, index, cl); bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_fn); /* For NR/RT limited masters, if freq is going up , apply the changes * after we commit clk data. 
*/ if (is_nr_lim(src) && ((req_clk > curr_clk) || (req_bw > curr_bw))) bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_limiter); err: mutex_unlock(&msm_bus_lock); return ret; } static int reset_pnodes(int curr, int pnode) { struct msm_bus_inode_info *info; struct msm_bus_fabric_device *fabdev; int index, next_pnode; fabdev = msm_bus_get_fabric_device(GET_FABID(curr)); if (!fabdev) { MSM_BUS_ERR("Fabric not found for: %d\n", (GET_FABID(curr))); return -ENXIO; } index = GET_INDEX(pnode); info = fabdev->algo->find_node(fabdev, curr); if (!info) { MSM_BUS_ERR("Cannot find node info!\n"); return -ENXIO; } MSM_BUS_DBG("Starting the loop--remove\n"); do { struct msm_bus_inode_info *hop; fabdev = msm_bus_get_fabric_device(GET_FABID(curr)); if (!fabdev) { MSM_BUS_ERR("Fabric not found\n"); return -ENXIO; } next_pnode = info->pnode[index].next; info->pnode[index].next = -2; curr = GET_NODE(next_pnode); index = GET_INDEX(next_pnode); if (IS_NODE(curr)) hop = fabdev->algo->find_node(fabdev, curr); else hop = fabdev->algo->find_gw_node(fabdev, curr); if (!hop) { MSM_BUS_ERR("Null Info found for hop\n"); return -ENXIO; } MSM_BUS_DBG("%d[%d] = %d\n", info->node_info->priv_id, index, info->pnode[index].next); MSM_BUS_DBG("num_pnodes: %d: %d\n", info->node_info->priv_id, info->num_pnodes); info = hop; } while (GET_NODE(info->pnode[index].next) != info->node_info->priv_id); info->pnode[index].next = -2; MSM_BUS_DBG("%d[%d] = %d\n", info->node_info->priv_id, index, info->pnode[index].next); MSM_BUS_DBG("num_pnodes: %d: %d\n", info->node_info->priv_id, info->num_pnodes); return 0; } int msm_bus_board_get_iid(int id) { struct msm_bus_fabric_device *deffab; deffab = msm_bus_get_fabric_device(MSM_BUS_FAB_DEFAULT); if (!deffab) { MSM_BUS_ERR("Error finding default fabric\n"); return -ENXIO; } return deffab->board_algo->get_iid(id); } void msm_bus_scale_client_reset_pnodes(uint32_t cl) { int i, src, pnode, index; struct msm_bus_client *client = (struct msm_bus_client *)(cl); if 
(IS_ERR_OR_NULL(client)) { MSM_BUS_ERR("msm_bus_scale_reset_pnodes error\n"); return; } index = 0; for (i = 0; i < client->pdata->usecase->num_paths; i++) { src = msm_bus_board_get_iid( client->pdata->usecase[index].vectors[i].src); pnode = client->src_pnode[i]; MSM_BUS_DBG("(%d, %d)\n", GET_NODE(pnode), GET_INDEX(pnode)); reset_pnodes(src, pnode); } } static void unregister_client_legacy(uint32_t cl) { int i; struct msm_bus_client *client = (struct msm_bus_client *)(cl); bool warn = false; if (IS_ERR_OR_NULL(client)) return; for (i = 0; i < client->pdata->usecase->num_paths; i++) { if ((client->pdata->usecase[0].vectors[i].ab) || (client->pdata->usecase[0].vectors[i].ib)) { warn = true; break; } } if (warn) { int num_paths = client->pdata->usecase->num_paths; int ab[num_paths], ib[num_paths]; WARN(1, "%s called unregister with non-zero vectors\n", client->pdata->name); /* * Save client values and zero them out to * cleanly unregister */ for (i = 0; i < num_paths; i++) { ab[i] = client->pdata->usecase[0].vectors[i].ab; ib[i] = client->pdata->usecase[0].vectors[i].ib; client->pdata->usecase[0].vectors[i].ab = 0; client->pdata->usecase[0].vectors[i].ib = 0; } msm_bus_scale_client_update_request(cl, 0); /* Restore client vectors if required for re-registering. */ for (i = 0; i < num_paths; i++) { client->pdata->usecase[0].vectors[i].ab = ab[i]; client->pdata->usecase[0].vectors[i].ib = ib[i]; } } else if (client->curr != 0) msm_bus_scale_client_update_request(cl, 0); MSM_BUS_DBG("Unregistering client %d\n", cl); mutex_lock(&msm_bus_lock); msm_bus_scale_client_reset_pnodes(cl); msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl); mutex_unlock(&msm_bus_lock); kfree(client->src_pnode); kfree(client); } void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops) { arb_ops->register_client = register_client_legacy; arb_ops->update_request = update_request_legacy; arb_ops->unregister_client = unregister_client_legacy; }
gpl-2.0
Jovy23/N920TUVU2COJ5_Kernel
drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
2354
81168
/* IEEE 802.11 SoftMAC layer * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it> * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. * * Few lines might be stolen from other part of the ieee80211 * stack. Copyright who own it's copyright * * WPA code stolen from the ipw2200 driver. * Copyright who own it's copyright. * * released under the GPL */ #include "ieee80211.h" #include <linux/random.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <asm/uaccess.h> #include <linux/etherdevice.h> #include "dot11d.h" u8 rsn_authen_cipher_suite[16][4] = { {0x00,0x0F,0xAC,0x00}, //Use group key, //Reserved {0x00,0x0F,0xAC,0x01}, //WEP-40 //RSNA default {0x00,0x0F,0xAC,0x02}, //TKIP //NONE //{used just as default} {0x00,0x0F,0xAC,0x03}, //WRAP-historical {0x00,0x0F,0xAC,0x04}, //CCMP {0x00,0x0F,0xAC,0x05}, //WEP-104 }; short ieee80211_is_54g(const struct ieee80211_network *net) { return (net->rates_ex_len > 0) || (net->rates_len > 4); } short ieee80211_is_shortslot(const struct ieee80211_network *net) { return net->capability & WLAN_CAPABILITY_SHORT_SLOT; } /* returns the total length needed for placing the RATE MFIE * tag and the EXTENDED RATE MFIE tag if needed. * It encludes two bytes per tag for the tag itself and its len */ unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee) { unsigned int rate_len = 0; if (ieee->modulation & IEEE80211_CCK_MODULATION) rate_len = IEEE80211_CCK_RATE_LEN + 2; if (ieee->modulation & IEEE80211_OFDM_MODULATION) rate_len += IEEE80211_OFDM_RATE_LEN + 2; return rate_len; } /* place the MFIE rate, tag to the memory (double) poised. * Then it updates the pointer so that * it points after the new MFIE tag added. 
*/ void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p) { u8 *tag = *tag_p; if (ieee->modulation & IEEE80211_CCK_MODULATION){ *tag++ = MFIE_TYPE_RATES; *tag++ = 4; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB; } /* We may add an option for custom rates that specific HW might support */ *tag_p = tag; } void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p) { u8 *tag = *tag_p; if (ieee->modulation & IEEE80211_OFDM_MODULATION){ *tag++ = MFIE_TYPE_RATES_EX; *tag++ = 8; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB; *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB; } /* We may add an option for custom rates that specific HW might support */ *tag_p = tag; } void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p) { u8 *tag = *tag_p; *tag++ = MFIE_TYPE_GENERIC; //0 *tag++ = 7; *tag++ = 0x00; *tag++ = 0x50; *tag++ = 0xf2; *tag++ = 0x02;//5 *tag++ = 0x00; *tag++ = 0x01; #ifdef SUPPORT_USPD if(ieee->current_network.wmm_info & 0x80) { *tag++ = 0x0f|MAX_SP_Len; } else { *tag++ = MAX_SP_Len; } #else *tag++ = MAX_SP_Len; #endif *tag_p = tag; } void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p) { u8 *tag = *tag_p; *tag++ = MFIE_TYPE_GENERIC; //0 *tag++ = 7; *tag++ = 0x00; *tag++ = 0xe0; *tag++ = 0x4c; *tag++ = 0x01;//5 *tag++ = 0x02; *tag++ = 0x11; *tag++ = 0x00; *tag_p = tag; printk(KERN_ALERT "This is enable turbo mode IE 
process\n"); } void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb) { int nh; nh = (ieee->mgmt_queue_head +1) % MGMT_QUEUE_NUM; /* * if the queue is full but we have newer frames then * just overwrites the oldest. * * if (nh == ieee->mgmt_queue_tail) * return -1; */ ieee->mgmt_queue_head = nh; ieee->mgmt_queue_ring[nh] = skb; //return 0; } struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee) { struct sk_buff *ret; if(ieee->mgmt_queue_tail == ieee->mgmt_queue_head) return NULL; ret = ieee->mgmt_queue_ring[ieee->mgmt_queue_tail]; ieee->mgmt_queue_tail = (ieee->mgmt_queue_tail+1) % MGMT_QUEUE_NUM; return ret; } void init_mgmt_queue(struct ieee80211_device *ieee) { ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0; } void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl); inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee) { unsigned long flags; short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE; struct ieee80211_hdr_3addr *header= (struct ieee80211_hdr_3addr *) skb->data; spin_lock_irqsave(&ieee->lock, flags); /* called with 2nd param 0, no mgmt lock required */ ieee80211_sta_wakeup(ieee,0); if(single){ if(ieee->queue_stop){ enqueue_mgmt(ieee,skb); }else{ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0]<<4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; /* avoid watchdog triggers */ ieee->dev->trans_start = jiffies; ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate); } spin_unlock_irqrestore(&ieee->lock, flags); }else{ spin_unlock_irqrestore(&ieee->lock, flags); spin_lock_irqsave(&ieee->mgmt_tx_lock, flags); header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; /* avoid watchdog triggers */ ieee->dev->trans_start = jiffies; ieee->softmac_hard_start_xmit(skb,ieee->dev); spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags); } } inline void 
softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee) { short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE; struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data; if(single){ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; /* avoid watchdog triggers */ ieee->dev->trans_start = jiffies; ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate); }else{ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; /* avoid watchdog triggers */ ieee->dev->trans_start = jiffies; ieee->softmac_hard_start_xmit(skb,ieee->dev); } // dev_kfree_skb_any(skb);//edit by thomas } //by amy for power save inline struct sk_buff *ieee80211_disassociate_skb( struct ieee80211_network *beacon, struct ieee80211_device *ieee, u8 asRsn) { struct sk_buff *skb; struct ieee80211_disassoc_frame *disass; skb = dev_alloc_skb(sizeof(struct ieee80211_disassoc_frame)); if (!skb) return NULL; disass = (struct ieee80211_disassoc_frame *) skb_put(skb,sizeof(struct ieee80211_disassoc_frame)); disass->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DISASSOC); disass->header.duration_id = 0; memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN); memcpy(disass->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memcpy(disass->header.addr3, beacon->bssid, ETH_ALEN); disass->reasoncode = asRsn; return skb; } void SendDisassociation( struct ieee80211_device *ieee, u8* asSta, u8 asRsn ) { struct ieee80211_network *beacon = &ieee->current_network; struct sk_buff *skb; skb = ieee80211_disassociate_skb(beacon,ieee,asRsn); if (skb){ softmac_mgmt_xmit(skb, ieee); //dev_kfree_skb_any(skb);//edit by thomas } } //by amy for power save inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee) { unsigned int len,rate_len; u8 *tag; struct sk_buff *skb; struct 
ieee80211_probe_request *req; len = ieee->current_network.ssid_len; rate_len = ieee80211_MFIE_rate_len(ieee); skb = dev_alloc_skb(sizeof(struct ieee80211_probe_request) + 2 + len + rate_len); if (!skb) return NULL; req = (struct ieee80211_probe_request *) skb_put(skb,sizeof(struct ieee80211_probe_request)); req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); req->header.duration_id = 0; //FIXME: is this OK ? memset(req->header.addr1, 0xff, ETH_ALEN); memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memset(req->header.addr3, 0xff, ETH_ALEN); tag = (u8 *) skb_put(skb,len+2+rate_len); *tag++ = MFIE_TYPE_SSID; *tag++ = len; memcpy(tag, ieee->current_network.ssid, len); tag += len; ieee80211_MFIE_Brate(ieee,&tag); ieee80211_MFIE_Grate(ieee,&tag); return skb; } struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee); void ext_ieee80211_send_beacon_wq(struct ieee80211_device *ieee) { struct sk_buff *skb; //unsigned long flags; skb = ieee80211_get_beacon_(ieee); if (skb){ softmac_mgmt_xmit(skb, ieee); ieee->softmac_stats.tx_beacons++; dev_kfree_skb_any(skb);//edit by thomas } //printk(KERN_WARNING "[1] beacon sending!\n"); ieee->beacon_timer.expires = jiffies + (MSECS( ieee->current_network.beacon_interval -5)); //spin_lock_irqsave(&ieee->beacon_lock,flags); if(ieee->beacon_txing) add_timer(&ieee->beacon_timer); //spin_unlock_irqrestore(&ieee->beacon_lock,flags); } void ieee80211_send_beacon(struct ieee80211_device *ieee) { struct sk_buff *skb; //unsigned long flags; skb = ieee80211_get_beacon_(ieee); if (skb){ softmac_mgmt_xmit(skb, ieee); ieee->softmac_stats.tx_beacons++; dev_kfree_skb_any(skb);//edit by thomas } //printk(KERN_WARNING "[1] beacon sending!\n"); ieee->beacon_timer.expires = jiffies + (MSECS( ieee->current_network.beacon_interval -5)); //spin_lock_irqsave(&ieee->beacon_lock,flags); if(ieee->beacon_txing) add_timer(&ieee->beacon_timer); //spin_unlock_irqrestore(&ieee->beacon_lock,flags); } void 
ieee80211_send_beacon_cb(unsigned long _ieee) { struct ieee80211_device *ieee = (struct ieee80211_device *) _ieee; unsigned long flags; spin_lock_irqsave(&ieee->beacon_lock, flags); ieee80211_send_beacon(ieee); spin_unlock_irqrestore(&ieee->beacon_lock, flags); } void ieee80211_send_probe(struct ieee80211_device *ieee) { struct sk_buff *skb; skb = ieee80211_probe_req(ieee); if (skb){ softmac_mgmt_xmit(skb, ieee); ieee->softmac_stats.tx_probe_rq++; //dev_kfree_skb_any(skb);//edit by thomas } } void ieee80211_send_probe_requests(struct ieee80211_device *ieee) { if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)){ ieee80211_send_probe(ieee); ieee80211_send_probe(ieee); } } /* this performs syncro scan blocking the caller until all channels * in the allowed channel map has been checked. */ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee) { short ch = 0; u8 channel_map[MAX_CHANNEL_NUMBER+1]; memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1); down(&ieee->scan_sem); // printk("==================> Sync scan\n"); while(1) { do{ ch++; if (ch > MAX_CHANNEL_NUMBER) goto out; /* scan completed */ }while(!channel_map[ch]); /* this function can be called in two situations * 1- We have switched to ad-hoc mode and we are * performing a complete syncro scan before conclude * there are no interesting cell and to create a * new one. In this case the link state is * IEEE80211_NOLINK until we found an interesting cell. * If so the ieee8021_new_net, called by the RX path * will set the state to IEEE80211_LINKED, so we stop * scanning * 2- We are linked and the root uses run iwlist scan. * So we switch to IEEE80211_LINKED_SCANNING to remember * that we are still logically linked (not interested in * new network events, despite for updating the net list, * but we are temporarily 'unlinked' as the driver shall * not filter RX frames and the channel is changing. 
* So the only situation in witch are interested is to check * if the state become LINKED because of the #1 situation */ if (ieee->state == IEEE80211_LINKED) goto out; ieee->set_chan(ieee->dev, ch); // printk("=====>channel=%d ",ch); if(channel_map[ch] == 1) { // printk("====send probe request\n"); ieee80211_send_probe_requests(ieee); } /* this prevent excessive time wait when we * need to wait for a syncro scan to end.. */ if (ieee->sync_scan_hurryup) goto out; msleep_interruptible_rtl(IEEE80211_SOFTMAC_SCAN_TIME); } out: ieee->sync_scan_hurryup = 0; up(&ieee->scan_sem); if(IS_DOT11D_ENABLE(ieee)) DOT11D_ScanComplete(ieee); } void ieee80211_softmac_ips_scan_syncro(struct ieee80211_device *ieee) { int ch; unsigned int watch_dog = 0; u8 channel_map[MAX_CHANNEL_NUMBER+1]; memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1); down(&ieee->scan_sem); ch = ieee->current_network.channel; // if(ieee->sync_scan_hurryup) // { // printk("stop scan sync\n"); // goto out; // } // printk("=======hh===============>ips scan\n"); while(1) { /* this function can be called in two situations * 1- We have switched to ad-hoc mode and we are * performing a complete syncro scan before conclude * there are no interesting cell and to create a * new one. In this case the link state is * IEEE80211_NOLINK until we found an interesting cell. * If so the ieee8021_new_net, called by the RX path * will set the state to IEEE80211_LINKED, so we stop * scanning * 2- We are linked and the root uses run iwlist scan. * So we switch to IEEE80211_LINKED_SCANNING to remember * that we are still logically linked (not interested in * new network events, despite for updating the net list, * but we are temporarily 'unlinked' as the driver shall * not filter RX frames and the channel is changing. 
* So the only situation in witch are interested is to check * if the state become LINKED because of the #1 situation */ if (ieee->state == IEEE80211_LINKED) { goto out; } if(channel_map[ieee->current_network.channel] > 0) { ieee->set_chan(ieee->dev, ieee->current_network.channel); // printk("======>channel=%d ",ieee->current_network.channel); } if(channel_map[ieee->current_network.channel] == 1) { // printk("====send probe request\n"); ieee80211_send_probe_requests(ieee); } /* this prevent excessive time wait when we * need to wait for a syncro scan to end.. */ // if (ieee->sync_scan_hurryup) // goto out; msleep_interruptible_rtl(IEEE80211_SOFTMAC_SCAN_TIME); do{ if (watch_dog++ >= MAX_CHANNEL_NUMBER) // if (++watch_dog >= 15);//MAX_CHANNEL_NUMBER) //YJ,modified,080630 goto out; /* scan completed */ ieee->current_network.channel = (ieee->current_network.channel + 1)%MAX_CHANNEL_NUMBER; }while(!channel_map[ieee->current_network.channel]); } out: //ieee->sync_scan_hurryup = 0; //ieee->set_chan(ieee->dev, ch); //ieee->current_network.channel = ch; ieee->actscanning = false; up(&ieee->scan_sem); if(IS_DOT11D_ENABLE(ieee)) DOT11D_ScanComplete(ieee); } void ieee80211_softmac_scan_wq(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq); static short watchdog = 0; u8 channel_map[MAX_CHANNEL_NUMBER+1]; memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1); // printk("ieee80211_softmac_scan_wq ENABLE_IPS\n"); // printk("in %s\n",__func__); down(&ieee->scan_sem); do{ ieee->current_network.channel = (ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER; if (watchdog++ > MAX_CHANNEL_NUMBER) goto out; /* no good chans */ }while(!channel_map[ieee->current_network.channel]); //printk("current_network.channel:%d\n", ieee->current_network.channel); if (ieee->scanning == 0 ) { printk("error out, scanning = 0\n"); goto out; } 
ieee->set_chan(ieee->dev, ieee->current_network.channel); if(channel_map[ieee->current_network.channel] == 1) ieee80211_send_probe_requests(ieee); queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME); up(&ieee->scan_sem); return; out: ieee->actscanning = false; watchdog = 0; ieee->scanning = 0; up(&ieee->scan_sem); if(IS_DOT11D_ENABLE(ieee)) DOT11D_ScanComplete(ieee); return; } void ieee80211_beacons_start(struct ieee80211_device *ieee) { unsigned long flags; spin_lock_irqsave(&ieee->beacon_lock,flags); ieee->beacon_txing = 1; ieee80211_send_beacon(ieee); spin_unlock_irqrestore(&ieee->beacon_lock,flags); } void ieee80211_beacons_stop(struct ieee80211_device *ieee) { unsigned long flags; spin_lock_irqsave(&ieee->beacon_lock,flags); ieee->beacon_txing = 0; del_timer_sync(&ieee->beacon_timer); spin_unlock_irqrestore(&ieee->beacon_lock,flags); } void ieee80211_stop_send_beacons(struct ieee80211_device *ieee) { if(ieee->stop_send_beacons) ieee->stop_send_beacons(ieee->dev); if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS) ieee80211_beacons_stop(ieee); } void ieee80211_start_send_beacons(struct ieee80211_device *ieee) { if(ieee->start_send_beacons) ieee->start_send_beacons(ieee->dev); if(ieee->softmac_features & IEEE_SOFTMAC_BEACONS) ieee80211_beacons_start(ieee); } void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee) { // unsigned long flags; //ieee->sync_scan_hurryup = 1; down(&ieee->scan_sem); // spin_lock_irqsave(&ieee->lock, flags); if (ieee->scanning == 1){ ieee->scanning = 0; //del_timer_sync(&ieee->scan_timer); cancel_delayed_work(&ieee->softmac_scan_wq); } // spin_unlock_irqrestore(&ieee->lock, flags); up(&ieee->scan_sem); } void ieee80211_stop_scan(struct ieee80211_device *ieee) { if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) ieee80211_softmac_stop_scan(ieee); else ieee->stop_scan(ieee->dev); } /* called with ieee->lock held */ void ieee80211_rtl_start_scan(struct ieee80211_device *ieee) { if(IS_DOT11D_ENABLE(ieee) ) 
{ if(IS_COUNTRY_IE_VALID(ieee)) { RESET_CIE_WATCHDOG(ieee); } } if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){ if (ieee->scanning == 0) { ieee->scanning = 1; //ieee80211_softmac_scan(ieee); // queue_work(ieee->wq, &ieee->softmac_scan_wq); //care this,1203,2007,by lawrence #if 1 queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq,0); #endif } }else ieee->start_scan(ieee->dev); } /* called with wx_sem held */ void ieee80211_start_scan_syncro(struct ieee80211_device *ieee) { if(IS_DOT11D_ENABLE(ieee) ) { if(IS_COUNTRY_IE_VALID(ieee)) { RESET_CIE_WATCHDOG(ieee); } } ieee->sync_scan_hurryup = 0; if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) ieee80211_softmac_scan_syncro(ieee); else ieee->scan_syncro(ieee->dev); } inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *beacon, struct ieee80211_device *ieee, int challengelen) { struct sk_buff *skb; struct ieee80211_authentication *auth; skb = dev_alloc_skb(sizeof(struct ieee80211_authentication) + challengelen); if (!skb) return NULL; auth = (struct ieee80211_authentication *) skb_put(skb, sizeof(struct ieee80211_authentication)); auth->header.frame_ctl = IEEE80211_STYPE_AUTH; if (challengelen) auth->header.frame_ctl |= IEEE80211_FCTL_WEP; auth->header.duration_id = 0x013a; //FIXME memcpy(auth->header.addr1, beacon->bssid, ETH_ALEN); memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memcpy(auth->header.addr3, beacon->bssid, ETH_ALEN); auth->algorithm = ieee->open_wep ? 
WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY; auth->transaction = cpu_to_le16(ieee->associate_seq); ieee->associate_seq++; auth->status = cpu_to_le16(WLAN_STATUS_SUCCESS); return skb; } static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *dest) { u8 *tag; int beacon_size; struct ieee80211_probe_response *beacon_buf; struct sk_buff *skb; int encrypt; int atim_len,erp_len; struct ieee80211_crypt_data* crypt; char *ssid = ieee->current_network.ssid; int ssid_len = ieee->current_network.ssid_len; int rate_len = ieee->current_network.rates_len+2; int rate_ex_len = ieee->current_network.rates_ex_len; int wpa_ie_len = ieee->wpa_ie_len; if(rate_ex_len > 0) rate_ex_len+=2; if(ieee->current_network.capability & WLAN_CAPABILITY_IBSS) atim_len = 4; else atim_len = 0; if(ieee80211_is_54g(&ieee->current_network)) erp_len = 3; else erp_len = 0; beacon_size = sizeof(struct ieee80211_probe_response)+ ssid_len +3 //channel +rate_len +rate_ex_len +atim_len +wpa_ie_len +erp_len; skb = dev_alloc_skb(beacon_size); if (!skb) return NULL; beacon_buf = (struct ieee80211_probe_response*) skb_put(skb, beacon_size); memcpy (beacon_buf->header.addr1, dest,ETH_ALEN); memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN); beacon_buf->header.duration_id = 0; //FIXME beacon_buf->beacon_interval = cpu_to_le16(ieee->current_network.beacon_interval); beacon_buf->capability = cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS); if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT)) beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT); crypt = ieee->crypt[ieee->tx_keyidx]; encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name, "WEP")) || wpa_ie_len); if (encrypt) beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY); beacon_buf->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP); 
beacon_buf->info_element.id = MFIE_TYPE_SSID; beacon_buf->info_element.len = ssid_len; tag = (u8*) beacon_buf->info_element.data; memcpy(tag, ssid, ssid_len); tag += ssid_len; *(tag++) = MFIE_TYPE_RATES; *(tag++) = rate_len-2; memcpy(tag,ieee->current_network.rates,rate_len-2); tag+=rate_len-2; *(tag++) = MFIE_TYPE_DS_SET; *(tag++) = 1; *(tag++) = ieee->current_network.channel; if(atim_len){ *(tag++) = MFIE_TYPE_IBSS_SET; *(tag++) = 2; *((u16*)(tag)) = cpu_to_le16(ieee->current_network.atim_window); tag+=2; } if(erp_len){ *(tag++) = MFIE_TYPE_ERP; *(tag++) = 1; *(tag++) = 0; } if(rate_ex_len){ *(tag++) = MFIE_TYPE_RATES_EX; *(tag++) = rate_ex_len-2; memcpy(tag,ieee->current_network.rates_ex,rate_ex_len-2); tag+=rate_ex_len-2; } if (wpa_ie_len) { if (ieee->iw_mode == IW_MODE_ADHOC) {//as Windows will set pairwise key same as the group key which is not allowed in Linux, so set this for IOT issue. WB 2008.07.07 memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4); } memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len); } skb->dev = ieee->dev; return skb; } struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest) { struct sk_buff *skb; u8* tag; struct ieee80211_crypt_data* crypt; struct ieee80211_assoc_response_frame *assoc; short encrypt; unsigned int rate_len = ieee80211_MFIE_rate_len(ieee); int len = sizeof(struct ieee80211_assoc_response_frame) + rate_len; skb = dev_alloc_skb(len); if (!skb) return NULL; assoc = (struct ieee80211_assoc_response_frame *) skb_put(skb,sizeof(struct ieee80211_assoc_response_frame)); assoc->header.frame_control = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP); memcpy(assoc->header.addr1, dest,ETH_ALEN); memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN); memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN); assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ? 
		WLAN_CAPABILITY_BSS : WLAN_CAPABILITY_IBSS);

	if(ieee->short_slot)
		assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);

	if (ieee->host_encrypt)
		crypt = ieee->crypt[ieee->tx_keyidx];
	else crypt = NULL;

	encrypt = ( crypt && crypt->ops);

	if (encrypt)
		assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);

	assoc->status = 0;
	/* hand out the next AID; 0x2007 appears to be the wrap point used
	 * by this driver (NOTE(review): magic constant, origin unclear) */
	assoc->aid = cpu_to_le16(ieee->assoc_id);
	if (ieee->assoc_id == 0x2007) ieee->assoc_id=0;
	else ieee->assoc_id++;

	tag = (u8*) skb_put(skb, rate_len);

	ieee80211_MFIE_Brate(ieee, &tag);
	ieee80211_MFIE_Grate(ieee, &tag);

	return skb;
}

/* Build an open-system authentication response (transaction #2) carrying
 * @status, addressed to @dest.  Returns the skb or NULL on alloc failure. */
struct sk_buff* ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8 *dest)
{
	struct sk_buff *skb;
	struct ieee80211_authentication *auth;

	skb = dev_alloc_skb(sizeof(struct ieee80211_authentication)+1);

	if (!skb)
		return NULL;

	skb->len = sizeof(struct ieee80211_authentication);

	auth = (struct ieee80211_authentication *)skb->data;

	auth->status = cpu_to_le16(status);
	auth->transaction = cpu_to_le16(2);
	auth->algorithm = cpu_to_le16(WLAN_AUTH_OPEN);

	memcpy(auth->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
	memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
	memcpy(auth->header.addr1, dest, ETH_ALEN);
	auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH);
	return skb;
}

/* Build a NULL-function data frame towards the current BSS; @pwr selects
 * whether the power-management bit is set (entering power save). */
struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
{
	struct sk_buff *skb;
	struct ieee80211_hdr_3addr* hdr;

	skb = dev_alloc_skb(sizeof(struct ieee80211_hdr_3addr));

	if (!skb)
		return NULL;

	hdr = (struct ieee80211_hdr_3addr*)skb_put(skb,sizeof(struct ieee80211_hdr_3addr));

	memcpy(hdr->addr1, ieee->current_network.bssid, ETH_ALEN);
	memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
	memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);

	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
		IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
		(pwr ?
		IEEE80211_FCTL_PM:0));

	return skb;
}

/* Build and transmit an association response to @dest; the skb is freed
 * here after softmac_mgmt_xmit() has queued/copied it. */
void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8* dest)
{
	struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);

	if (buf){
		softmac_mgmt_xmit(buf, ieee);
		dev_kfree_skb_any(buf);//edit by thomas
	}
}

/* Build and transmit an authentication response with status @s. */
void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8* dest)
{
	struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);

	if (buf){
		softmac_mgmt_xmit(buf, ieee);
		dev_kfree_skb_any(buf);//edit by thomas
	}
}

/* Build and transmit a probe response to @dest. */
void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
{
	struct sk_buff *buf = ieee80211_probe_resp(ieee, dest);
	if (buf) {
		softmac_mgmt_xmit(buf, ieee);
		dev_kfree_skb_any(buf);//edit by thomas
	}
}

/* Build an association-request frame for @beacon (the target network).
 * The frame carries SSID, rates and, depending on the security config,
 * WPA/RSN, WMM and Turbo IEs.  Returns the skb or NULL on alloc failure. */
inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beacon,struct ieee80211_device *ieee)
{
	struct sk_buff *skb;
	//unsigned long flags;

	struct ieee80211_assoc_request_frame *hdr;
	u8 *tag;
	//short info_addr = 0;
	//int i;
	//u16 suite_count = 0;
	//u8 suit_select = 0;
	unsigned int wpa_len = beacon->wpa_ie_len;
	//struct net_device *dev = ieee->dev;
	//union iwreq_data wrqu;
	//u8 *buff;
	//u8 *p;
#if 1
	// for testing purpose
	unsigned int rsn_len = beacon->rsn_ie_len;
#else
	unsigned int rsn_len = beacon->rsn_ie_len - 4;
#endif
	unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
	unsigned int wmm_info_len = beacon->QoS_Enable?9:0;
	unsigned int turbo_info_len = beacon->Turbo_Enable?9:0;

	u8  encry_proto = ieee->wpax_type_notify & 0xff;
	//u8  pairwise_type = (ieee->wpax_type_notify >> 8) & 0xff;
	//u8  authen_type = (ieee->wpax_type_notify >> 16) & 0xff;

	int len = 0;

	//[0] Notify type of encryption: WPA/WPA2
	//[1] pair wise type
	//[2] authen type
	/* when the user forced a WPA/RSN choice, drop the IE of the other
	 * protocol from the length budget */
	if(ieee->wpax_type_set) {
		if (IEEE_PROTO_WPA == encry_proto) {
			rsn_len = 0;
		} else if (IEEE_PROTO_RSN == encry_proto) {
			wpa_len = 0;
		}
	}
	len = sizeof(struct ieee80211_assoc_request_frame)+
		+ beacon->ssid_len//essid tagged val
		+ rate_len//rates tagged val
		+ wpa_len
		+ rsn_len
		+ wmm_info_len
		+ turbo_info_len;

	skb = dev_alloc_skb(len);

	if (!skb)
		return NULL;

	hdr = (struct ieee80211_assoc_request_frame *)
		skb_put(skb, sizeof(struct ieee80211_assoc_request_frame));

	hdr->header.frame_control = IEEE80211_STYPE_ASSOC_REQ;
	hdr->header.duration_id= 37; //FIXME
	memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
	memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
	memcpy(hdr->header.addr3, beacon->bssid, ETH_ALEN);

	memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN);//for HW security, John

	/* mirror the AP's advertised capabilities that we support */
	hdr->capability = cpu_to_le16(WLAN_CAPABILITY_BSS);
	if (beacon->capability & WLAN_CAPABILITY_PRIVACY )
		hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
	if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);

	if(ieee->short_slot)
		hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);

	hdr->listen_interval = 0xa; //FIXME

	hdr->info_element.id = MFIE_TYPE_SSID;

	hdr->info_element.len = beacon->ssid_len;

	tag = skb_put(skb, beacon->ssid_len);

	memcpy(tag, beacon->ssid, beacon->ssid_len);

	tag = skb_put(skb, rate_len);

	ieee80211_MFIE_Brate(ieee, &tag);
	ieee80211_MFIE_Grate(ieee, &tag);

	//add rsn==0 condition for ap's mix security mode(wpa+wpa2), john2007.8.9
	//choose AES encryption as default algorithm while using mixed mode

	tag = skb_put(skb,ieee->wpa_ie_len);
	memcpy(tag,ieee->wpa_ie,ieee->wpa_ie_len);

	tag = skb_put(skb,wmm_info_len);
	if(wmm_info_len) {
		ieee80211_WMM_Info(ieee, &tag);
	}
	tag = skb_put(skb,turbo_info_len);
	if(turbo_info_len) {
		ieee80211_TURBO_Info(ieee, &tag);
	}
	return skb;
}

/* Abort the current authentication/association attempt: account the
 * failure, move to the RETRY state and schedule the retry work queue. */
void ieee80211_associate_abort(struct ieee80211_device *ieee)
{
	unsigned long flags;
	spin_lock_irqsave(&ieee->lock, flags);

	ieee->associate_seq++;

	/* don't scan, and avoid to have the RX path possibly
	 * try again to associate. Even do not react to AUTH or
	 * ASSOC response. Just wait for the retry wq to be scheduled.
	 * Here we will check if there are good nets to associate
	 * with, so we retry or just get back to NO_LINK and scanning
	 */
	if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING){
		IEEE80211_DEBUG_MGMT("Authentication failed\n");
		ieee->softmac_stats.no_auth_rs++;
	}else{
		IEEE80211_DEBUG_MGMT("Association failed\n");
		ieee->softmac_stats.no_ass_rs++;
	}

	ieee->state = IEEE80211_ASSOCIATING_RETRY;

	queue_delayed_work(ieee->wq, &ieee->associate_retry_wq,IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);

	spin_unlock_irqrestore(&ieee->lock, flags);
}

/* Timer callback shim: @dev actually carries the ieee80211_device pointer. */
void ieee80211_associate_abort_cb(unsigned long dev)
{
	ieee80211_associate_abort((struct ieee80211_device *) dev);
}

/* Step 1 of association: send the initial authentication request and arm
 * the (HZ/2) association timeout timer. */
void ieee80211_associate_step1(struct ieee80211_device *ieee)
{
	struct ieee80211_network *beacon = &ieee->current_network;
	struct sk_buff *skb;

	IEEE80211_DEBUG_MGMT("Stopping scan\n");
	ieee->softmac_stats.tx_auth_rq++;
	skb=ieee80211_authentication_req(beacon, ieee, 0);

	if (!skb){
		ieee80211_associate_abort(ieee);
	}
	else{
		ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING ;
		IEEE80211_DEBUG_MGMT("Sending authentication request\n");
		//printk("---Sending authentication request\n");
		softmac_mgmt_xmit(skb, ieee);
		//BUGON when you try to add_timer twice, using mod_timer may be better, john0709
		if(!timer_pending(&ieee->associate_timer)){
			ieee->associate_timer.expires = jiffies + (HZ / 2);
			add_timer(&ieee->associate_timer);
		}
		//If call dev_kfree_skb_any,a warning will ocur....
		//KERNEL: assertion (!atomic_read(&skb->users)) failed at net/core/dev.c (1708)
		//So ... 1204 by lawrence.
		//printk("\nIn %s,line %d call kfree skb.",__func__,__LINE__);
		//dev_kfree_skb_any(skb);//edit by thomas
	}
}

/* Respond to a shared-key authentication challenge: resend the auth
 * request with the challenge IE appended and encrypted.  Takes ownership
 * of @challenge (kfree'd here on every path). */
void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
{
	u8 *c;
	struct sk_buff *skb;
	struct ieee80211_network *beacon = &ieee->current_network;
//	int hlen = sizeof(struct ieee80211_authentication);
	del_timer_sync(&ieee->associate_timer);
	ieee->associate_seq++;
	ieee->softmac_stats.tx_auth_rq++;

	skb = ieee80211_authentication_req(beacon, ieee, chlen+2);

	if (!skb)
		ieee80211_associate_abort(ieee);
	else{
		c = skb_put(skb, chlen+2);
		*(c++) = MFIE_TYPE_CHALLENGE;
		*(c++) = chlen;
		memcpy(c, challenge, chlen);

		IEEE80211_DEBUG_MGMT("Sending authentication challenge response\n");

		/* shared-key step 3 must be WEP-encrypted */
		ieee80211_encrypt_fragment(ieee, skb, sizeof(struct ieee80211_hdr_3addr  ));

		softmac_mgmt_xmit(skb, ieee);
		if (!timer_pending(&ieee->associate_timer)){
			//printk("=========>add timer again, to crash\n");
			ieee->associate_timer.expires = jiffies + (HZ / 2);
			add_timer(&ieee->associate_timer);
		}
		dev_kfree_skb_any(skb);//edit by thomas
	}
	kfree(challenge);
}

/* Step 2 of association: authentication succeeded, send the association
 * request and re-arm the timeout timer. */
void ieee80211_associate_step2(struct ieee80211_device *ieee)
{
	struct sk_buff* skb;
	struct ieee80211_network *beacon = &ieee->current_network;

	del_timer_sync(&ieee->associate_timer);

	IEEE80211_DEBUG_MGMT("Sending association request\n");

	ieee->softmac_stats.tx_ass_rq++;
	skb=ieee80211_association_req(beacon, ieee);
	if (!skb)
		ieee80211_associate_abort(ieee);
	else{
		softmac_mgmt_xmit(skb, ieee);
		if (!timer_pending(&ieee->associate_timer)){
			ieee->associate_timer.expires = jiffies + (HZ / 2);
			add_timer(&ieee->associate_timer);
		}
		//dev_kfree_skb_any(skb);//edit by thomas
	}
}

/* Work-queue handler run after a successful association: pick the data
 * rate, notify the driver/userspace and start the carrier. */
void ieee80211_associate_complete_wq(struct work_struct *work)
{
	struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);

	printk(KERN_INFO "Associated successfully\n");

	if(ieee80211_is_54g(&ieee->current_network) &&
		(ieee->modulation & IEEE80211_OFDM_MODULATION)){

		ieee->rate = 540;
		printk(KERN_INFO"Using G rates\n");
	}else{
		ieee->rate = 110;
		printk(KERN_INFO"Using B rates\n");
	}
	ieee->link_change(ieee->dev);
	notify_wx_assoc_event(ieee);
	if (ieee->data_hard_resume)
		ieee->data_hard_resume(ieee->dev);
	netif_carrier_on(ieee->dev);
}

/* Association response accepted: enter LINKED state and defer the rest
 * of the completion to the work queue (may sleep there, not here). */
void ieee80211_associate_complete(struct ieee80211_device *ieee)
{
	int i;
	del_timer_sync(&ieee->associate_timer);

	for(i = 0; i < 6; i++) {
	  //ieee->seq_ctrl[i] = 0;
	}
	ieee->state = IEEE80211_LINKED;
	IEEE80211_DEBUG_MGMT("Successfully associated\n");

	queue_work(ieee->wq, &ieee->associate_complete_wq);
}

/* Work-queue handler that kicks off the association procedure: stop
 * scanning, tune to the target channel and start step 1. */
void ieee80211_associate_procedure_wq(struct work_struct *work)
{
	struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);

	ieee->sync_scan_hurryup = 1;
	down(&ieee->wx_sem);

	if (ieee->data_hard_stop)
		ieee->data_hard_stop(ieee->dev);

	ieee80211_stop_scan(ieee);
	ieee->set_chan(ieee->dev, ieee->current_network.channel);

	ieee->associate_seq = 1;
	ieee80211_associate_step1(ieee);

	up(&ieee->wx_sem);
}

/* Evaluate a freshly scanned network @net and, when it matches the
 * user-configured BSSID/ESSID policy, adopt it as current network and
 * start associating (infra) or link directly (ad-hoc). */
inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net)
{
	u8 tmp_ssid[IW_ESSID_MAX_SIZE+1];
	int tmp_ssid_len = 0;

	short apset,ssidset,ssidbroad,apmatch,ssidmatch;

	/* we are interested in new new only if we are not associated
	 * and we are not associating / authenticating
	 */
	if (ieee->state != IEEE80211_NOLINK) return;

	if ((ieee->iw_mode == IW_MODE_INFRA) && !(net->capability & WLAN_CAPABILITY_BSS))
		return;

	if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability & WLAN_CAPABILITY_IBSS))
		return;

	if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC){
		/* if the user specified the AP MAC, we need also the essid
		 * This could be obtained by beacons or, if the network does not
		 * broadcast it, it can be put manually.
		 */
		apset = ieee->wap_set;//(memcmp(ieee->current_network.bssid, zero,ETH_ALEN)!=0 );
		ssidset = ieee->ssid_set;//ieee->current_network.ssid[0] != '\0';
		ssidbroad =  !(net->ssid_len == 0 || net->ssid[0]== '\0');
		apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN)==0);
		/* SSIDs only match when both length and content agree */
		if(ieee->current_network.ssid_len != net->ssid_len)
			ssidmatch = 0;
		else
			ssidmatch = (0==strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len));

		//printk("cur: %s, %d, net:%s, %d\n", ieee->current_network.ssid, ieee->current_network.ssid_len, net->ssid, net->ssid_len);
		//printk("apset=%d apmatch=%d ssidset=%d ssidbroad=%d ssidmatch=%d\n",apset,apmatch,ssidset,ssidbroad,ssidmatch);

		if (	/* if the user set the AP check if match.
			 * if the network does not broadcast essid we check the user supplied ANY essid
			 * if the network does broadcast and the user does not set essid it is OK
			 * if the network does broadcast and the user did set essid chech if essid match
			 */
			( apset && apmatch &&
				((ssidset && ssidbroad && ssidmatch) || (ssidbroad && !ssidset) || (!ssidbroad && ssidset)) ) ||
			/* if the ap is not set, check that the user set the bssid
			 * and the network does broadcast and that those two bssid matches
			 */
			(!apset && ssidset && ssidbroad && ssidmatch)
			){
				/* if the essid is hidden replace it with the
				 * essid provided by the user.
*/ if (!ssidbroad){ strncpy(tmp_ssid, ieee->current_network.ssid, IW_ESSID_MAX_SIZE); tmp_ssid_len = ieee->current_network.ssid_len; } memcpy(&ieee->current_network, net, sizeof(struct ieee80211_network)); if (!ssidbroad){ strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE); ieee->current_network.ssid_len = tmp_ssid_len; } printk(KERN_INFO"Linking with %s: channel is %d\n",ieee->current_network.ssid,ieee->current_network.channel); if (ieee->iw_mode == IW_MODE_INFRA){ ieee->state = IEEE80211_ASSOCIATING; ieee->beinretry = false; queue_work(ieee->wq, &ieee->associate_procedure_wq); }else{ if(ieee80211_is_54g(&ieee->current_network) && (ieee->modulation & IEEE80211_OFDM_MODULATION)){ ieee->rate = 540; printk(KERN_INFO"Using G rates\n"); }else{ ieee->rate = 110; printk(KERN_INFO"Using B rates\n"); } ieee->state = IEEE80211_LINKED; ieee->beinretry = false; } } } } void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee) { unsigned long flags; struct ieee80211_network *target; spin_lock_irqsave(&ieee->lock, flags); list_for_each_entry(target, &ieee->network_list, list) { /* if the state become different that NOLINK means * we had found what we are searching for */ if (ieee->state != IEEE80211_NOLINK) break; if (ieee->scan_age == 0 || time_after(target->last_scanned + ieee->scan_age, jiffies)) ieee80211_softmac_new_net(ieee, target); } spin_unlock_irqrestore(&ieee->lock, flags); } static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen) { struct ieee80211_authentication *a; u8 *t; if (skb->len < (sizeof(struct ieee80211_authentication)-sizeof(struct ieee80211_info_element))){ IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n",skb->len); return 0xcafe; } *challenge = NULL; a = (struct ieee80211_authentication*) skb->data; if(skb->len > (sizeof(struct ieee80211_authentication) +3)){ t = skb->data + sizeof(struct ieee80211_authentication); if(*(t++) == MFIE_TYPE_CHALLENGE){ *chlen = *(t++); *challenge = kmemdup(t, *chlen, 
GFP_ATOMIC); if (!*challenge) return -ENOMEM; } } return cpu_to_le16(a->status); } int auth_rq_parse(struct sk_buff *skb,u8* dest) { struct ieee80211_authentication *a; if (skb->len < (sizeof(struct ieee80211_authentication)-sizeof(struct ieee80211_info_element))){ IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n",skb->len); return -1; } a = (struct ieee80211_authentication*) skb->data; memcpy(dest,a->header.addr2, ETH_ALEN); if (le16_to_cpu(a->algorithm) != WLAN_AUTH_OPEN) return WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG; return WLAN_STATUS_SUCCESS; } static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *src) { u8 *tag; u8 *skbend; u8 *ssid=NULL; u8 ssidlen = 0; struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data; if (skb->len < sizeof (struct ieee80211_hdr_3addr )) return -1; /* corrupted */ memcpy(src,header->addr2, ETH_ALEN); skbend = (u8*)skb->data + skb->len; tag = skb->data + sizeof (struct ieee80211_hdr_3addr ); while (tag+1 < skbend){ if (*tag == 0){ ssid = tag+2; ssidlen = *(tag+1); break; } tag++; /* point to the len field */ tag = tag + *(tag); /* point to the last data byte of the tag */ tag++; /* point to the next tag */ } //IEEE80211DMESG("Card MAC address is "MACSTR, MAC2STR(src)); if (ssidlen == 0) return 1; if (!ssid) return 1; /* ssid not found in tagged param */ return (!strncmp(ssid, ieee->current_network.ssid, ssidlen)); } int assoc_rq_parse(struct sk_buff *skb,u8* dest) { struct ieee80211_assoc_request_frame *a; if (skb->len < (sizeof(struct ieee80211_assoc_request_frame) - sizeof(struct ieee80211_info_element))) { IEEE80211_DEBUG_MGMT("invalid len in auth request:%d \n", skb->len); return -1; } a = (struct ieee80211_assoc_request_frame*) skb->data; memcpy(dest,a->header.addr2,ETH_ALEN); return 0; } static inline u16 assoc_parse(struct sk_buff *skb, int *aid) { struct ieee80211_assoc_response_frame *a; if (skb->len < sizeof(struct ieee80211_assoc_response_frame)){ 
		IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
		return 0xcafe;
	}

	a = (struct ieee80211_assoc_response_frame*) skb->data;
	/* AID field: low 14 bits carry the association id */
	*aid = le16_to_cpu(a->aid) & 0x3fff;
	return le16_to_cpu(a->status);
}

/* Handle an incoming probe request: respond when the probed SSID is a
 * wildcard or matches the current network. */
static inline void ieee80211_rx_probe_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
{
	u8 dest[ETH_ALEN];

	//IEEE80211DMESG("Rx probe");
	ieee->softmac_stats.rx_probe_rq++;
	//DMESG("Dest is "MACSTR, MAC2STR(dest));
	if (probe_rq_parse(ieee, skb, dest)){
		//IEEE80211DMESG("Was for me!");
		ieee->softmac_stats.tx_probe_rs++;
		ieee80211_resp_to_probe(ieee, dest);
	}
}

/* Handle an incoming authentication request (master mode): respond with
 * the status computed by auth_rq_parse() unless the frame is malformed. */
inline void ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
{
	u8 dest[ETH_ALEN];
	int status;
	//IEEE80211DMESG("Rx probe");
	ieee->softmac_stats.rx_auth_rq++;

	status = auth_rq_parse(skb, dest);
	if (status != -1) {
		ieee80211_resp_to_auth(ieee, status, dest);
	}
	//DMESG("Dest is "MACSTR, MAC2STR(dest));
}

/* Handle an incoming association request (master mode). */
inline void ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
{
	u8 dest[ETH_ALEN];
	//unsigned long flags;

	ieee->softmac_stats.rx_ass_rq++;
	if (assoc_rq_parse(skb,dest) != -1){
		ieee80211_resp_to_assoc_rq(ieee, dest);
	}

	printk(KERN_INFO"New client associated: %pM\n", dest);
}

/* Transmit a NULL-function frame with the PS bit per @pwr. */
void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
{
	struct sk_buff *buf = ieee80211_null_func(ieee, pwr);

	if (buf)
		softmac_ps_mgmt_xmit(buf, ieee);
}

/* Decide the power-save action for the station.
 * Returns 0 = do nothing, 1 = go to sleep (time_h/time_l filled with the
 * next-DTIM wake time), 2 = wake up (traffic pending per DTIM bits). */
short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, u32 *time_l)
{
	int timeout = 0;

	u8 dtim;
	/*if(ieee->ps == IEEE80211_PS_DISABLED ||
		ieee->iw_mode != IW_MODE_INFRA ||
		ieee->state != IEEE80211_LINKED)

		return 0;
	*/
	dtim = ieee->current_network.dtim_data;
	//printk("DTIM\n");
	if(!(dtim & IEEE80211_DTIM_VALID))
		return 0;
	else
		timeout = ieee->current_network.beacon_interval;
	//printk("VALID\n");
	/* consume the DTIM info: only act once per received beacon */
	ieee->current_network.dtim_data = IEEE80211_DTIM_INVALID;

	if(dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST)& ieee->ps))
		return 2;

	if(!time_after(jiffies, ieee->dev->trans_start +
		MSECS(timeout)))
		return 0;

	/* stay awake while we transmitted or received recently */
	if(!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout)))
		return 0;

	if((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE ) &&
		(ieee->mgmt_queue_tail != ieee->mgmt_queue_head))
		return 0;

	if(time_l){
		*time_l = ieee->current_network.last_dtim_sta_time[0]
			+ MSECS((ieee->current_network.beacon_interval));
			//* ieee->current_network.dtim_period));
		//printk("beacon_interval:%x, dtim_period:%x, totol to Msecs:%x, HZ:%x\n", ieee->current_network.beacon_interval, ieee->current_network.dtim_period, MSECS(((ieee->current_network.beacon_interval * ieee->current_network.dtim_period))), HZ);
	}

	if(time_h){
		*time_h = ieee->current_network.last_dtim_sta_time[1];
		/* carry into the high word when the low word wrapped */
		if(time_l && *time_l < ieee->current_network.last_dtim_sta_time[0])
			*time_h += 1;
	}

	return 1;
}

/* Power-save state machine tick, normally run from the ps tasklet.
 * sta_sleep: 0 = awake, 1 = asleep, 2 = null-frame sent, waiting for the
 * TX ack before actually entering sleep. */
inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
{
	u32 th,tl;
	short sleep;

	unsigned long flags,flags2;

	spin_lock_irqsave(&ieee->lock, flags);

	if((ieee->ps == IEEE80211_PS_DISABLED ||
		ieee->iw_mode != IW_MODE_INFRA ||
		ieee->state != IEEE80211_LINKED)){

	//#warning CHECK_LOCK_HERE
		spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);

		ieee80211_sta_wakeup(ieee, 1);

		spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
	}

	sleep = ieee80211_sta_ps_sleep(ieee,&th, &tl);
//	printk("===>%s,%d[2 wake, 1 sleep, 0 do nothing], ieee->sta_sleep = %d\n",__func__, sleep,ieee->sta_sleep);
	/* 2 wake, 1 sleep, 0 do nothing */
	if(sleep == 0)
		goto out;

	if(sleep == 1){

		if(ieee->sta_sleep == 1)
			/* already asleep: just refresh the hardware wake time */
			ieee->enter_sleep_state(ieee->dev,th,tl);

		else if(ieee->sta_sleep == 0){
		//	printk("send null 1\n");
			spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);

			if(ieee->ps_is_queue_empty(ieee->dev)){

				ieee->sta_sleep = 2;

				ieee->ps_request_tx_ack(ieee->dev);

				/* announce power save to the AP; actual sleep is
				 * entered in ieee80211_ps_tx_ack() on TX success */
				ieee80211_sta_ps_send_null_frame(ieee,1);

				ieee->ps_th = th;
				ieee->ps_tl = tl;
			}
			spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);

		}
	}else if(sleep == 2){
//#warning CHECK_LOCK_HERE
		spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);

	//	printk("send wakeup packet\n");
		ieee80211_sta_wakeup(ieee,1);

		spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
	}

out:
	spin_unlock_irqrestore(&ieee->lock, flags);
}

/* Bring the station out of power save.  When @nl is set, also tell the
 * AP we are awake by sending a NULL frame with the PS bit cleared. */
void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
{
	if(ieee->sta_sleep == 0){
		if(nl){
			//printk("Warning: driver is probably failing to report TX ps error\n");
			ieee->ps_request_tx_ack(ieee->dev);
			ieee80211_sta_ps_send_null_frame(ieee, 0);
		}
		return;
	}

	if(ieee->sta_sleep == 1)
		ieee->sta_wake_up(ieee->dev);

	ieee->sta_sleep = 0;

	if(nl){
		ieee->ps_request_tx_ack(ieee->dev);
		ieee80211_sta_ps_send_null_frame(ieee, 0);
	}
}

/* TX-ack callback for the power-save NULL frame: on success while in
 * state 2 (announced sleep), actually enter hardware sleep; on failure
 * while awake, retransmit the NULL frame without the PS bit. */
void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
{
	unsigned long flags,flags2;

	spin_lock_irqsave(&ieee->lock, flags);

	if(ieee->sta_sleep == 2){
		/* Null frame with PS bit set */
		if(success){

			// printk("==================> %s::enter sleep state\n",__func__);
			ieee->sta_sleep = 1;
			ieee->enter_sleep_state(ieee->dev,ieee->ps_th,ieee->ps_tl);
		}
		/* if the card report not success we can't be sure the AP
		 * has not RXed so we can't assume the AP believe us awake
		 */
	}
	/* 21112005 - tx again null without PS bit if lost */
	else {

		if((ieee->sta_sleep == 0) && !success){
			spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
			ieee80211_sta_ps_send_null_frame(ieee, 0);
			spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
}

/* Softmac dispatcher for received management frames.  Returns 0 when the
 * frame was handled (or ignored), -1 for unknown subtypes, 1 on a
 * malformed association response. */
inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
			struct ieee80211_rx_stats *rx_stats, u16 type, u16 stype)
{
	struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data;
	u16 errcode;
	u8* challenge=NULL;
	int chlen=0;
	int aid=0;
	struct ieee80211_assoc_response_frame *assoc_resp;
	struct ieee80211_info_element *info_element;

	if(!ieee->proto_started)
		return 0;

	/* kick the power-save tasklet whenever PS is relevant */
	if(ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
		ieee->iw_mode == IW_MODE_INFRA &&
		ieee->state == IEEE80211_LINKED))

		tasklet_schedule(&ieee->ps_task);

	if (WLAN_FC_GET_STYPE(header->frame_control) !=
		IEEE80211_STYPE_PROBE_RESP &&
		WLAN_FC_GET_STYPE(header->frame_control) !=
		IEEE80211_STYPE_BEACON)
		/* any non-beacon/probe-resp RX counts as PS activity */
		ieee->last_rx_ps_time = jiffies;

	switch (WLAN_FC_GET_STYPE(header->frame_control)) {

		case IEEE80211_STYPE_ASSOC_RESP:
		case IEEE80211_STYPE_REASSOC_RESP:

			IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
					WLAN_FC_GET_STYPE(header->frame_ctl));
			if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
				ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
				ieee->iw_mode == IW_MODE_INFRA){
				if (0 == (errcode=assoc_parse(skb, &aid))){
					u16 left;

					ieee->state=IEEE80211_LINKED;
					ieee->assoc_id = aid;
					ieee->softmac_stats.rx_ass_ok++;

					//printk(KERN_WARNING "nic_type = %s", (rx_stats->nic_type == 1)?"rtl8187":"rtl8187B");
					if(1 == rx_stats->nic_type) //card type is 8187
					{
						/* 8187 has no WMM hardware setup: skip IE parsing */
						goto associate_complete;
					}
					assoc_resp = (struct ieee80211_assoc_response_frame*)skb->data;
					info_element = 	&assoc_resp->info_element;
					left = skb->len - ((void*)info_element - (void*)assoc_resp);

					/* walk the response IEs, bounds-checked against 'left' */
					while (left >= sizeof(struct ieee80211_info_element_hdr)) {
						if (sizeof(struct ieee80211_info_element_hdr) + info_element->len > left) {
							printk(KERN_WARNING "[re]associate response error!");
							return 1;
						}
						switch (info_element->id) {
						  case MFIE_TYPE_GENERIC:
						        IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len);
							/* match the WMM OUI 00:50:f2, type 2, subtype 1 */
							if (info_element->len >= 8  &&
							    info_element->data[0] == 0x00 &&
							    info_element->data[1] == 0x50 &&
							    info_element->data[2] == 0xf2 &&
							    info_element->data[3] == 0x02 &&
							    info_element->data[4] == 0x01) {
								// Not care about version at present.
//WMM Parameter Element memcpy(ieee->current_network.wmm_param,(u8*)(info_element->data\ + 8),(info_element->len - 8)); if (((ieee->current_network.wmm_info^info_element->data[6])& \ 0x0f)||(!ieee->init_wmmparam_flag)) { // refresh parameter element for current network // update the register parameter for hardware ieee->init_wmmparam_flag = 1; queue_work(ieee->wq, &ieee->wmm_param_update_wq); } //update info_element for current network ieee->current_network.wmm_info = info_element->data[6]; } break; default: //nothing to do at present!!! break; } left -= sizeof(struct ieee80211_info_element_hdr) + info_element->len; info_element = (struct ieee80211_info_element *) &info_element->data[info_element->len]; } if(!ieee->init_wmmparam_flag) //legacy AP, reset the AC_xx_param register { queue_work(ieee->wq,&ieee->wmm_param_update_wq); ieee->init_wmmparam_flag = 1;//indicate AC_xx_param upated since last associate } associate_complete: ieee80211_associate_complete(ieee); }else{ ieee->softmac_stats.rx_ass_err++; IEEE80211_DEBUG_MGMT( "Association response status code 0x%x\n", errcode); ieee80211_associate_abort(ieee); } } break; case IEEE80211_STYPE_ASSOC_REQ: case IEEE80211_STYPE_REASSOC_REQ: if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) && ieee->iw_mode == IW_MODE_MASTER) ieee80211_rx_assoc_rq(ieee, skb); break; case IEEE80211_STYPE_AUTH: if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE){ if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING && ieee->iw_mode == IW_MODE_INFRA){ IEEE80211_DEBUG_MGMT("Received authentication response"); if (0 == (errcode=auth_parse(skb, &challenge, &chlen))){ if(ieee->open_wep || !challenge){ ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED; ieee->softmac_stats.rx_auth_rs_ok++; ieee80211_associate_step2(ieee); }else{ ieee80211_rtl_auth_challenge(ieee, challenge, chlen); } }else{ ieee->softmac_stats.rx_auth_rs_err++; IEEE80211_DEBUG_MGMT("Authentication response status code 0x%x",errcode); ieee80211_associate_abort(ieee); } 
}else if (ieee->iw_mode == IW_MODE_MASTER){ ieee80211_rx_auth_rq(ieee, skb); } } break; case IEEE80211_STYPE_PROBE_REQ: if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) && ((ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) && ieee->state == IEEE80211_LINKED)) ieee80211_rx_probe_rq(ieee, skb); break; case IEEE80211_STYPE_DISASSOC: case IEEE80211_STYPE_DEAUTH: /* FIXME for now repeat all the association procedure * both for disassociation and deauthentication */ if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) && (ieee->state == IEEE80211_LINKED) && (ieee->iw_mode == IW_MODE_INFRA) && (!memcmp(header->addr2,ieee->current_network.bssid,ETH_ALEN))){ ieee->state = IEEE80211_ASSOCIATING; ieee->softmac_stats.reassoc++; //notify_wx_assoc_event(ieee); //YJ,del,080828, do not notify os here queue_work(ieee->wq, &ieee->associate_procedure_wq); } break; default: return -1; break; } //dev_kfree_skb_any(skb); return 0; } /* following are for a simpler TX queue management. * Instead of using netif_[stop/wake]_queue the driver * will uses these two function (plus a reset one), that * will internally uses the kernel netif_* and takes * care of the ieee802.11 fragmentation. * So the driver receives a fragment per time and might * call the stop function when it want without take care * to have enough room to TX an entire packet. * This might be useful if each fragment need it's own * descriptor, thus just keep a total free memory > than * the max fragmentation threshold is not enough.. If the * ieee802.11 stack passed a TXB struct then you needed * to keep N free descriptors where * N = MAX_PACKET_SIZE / MIN_FRAG_TRESHOLD * In this way you need just one and the 802.11 stack * will take care of buffering fragments and pass them to * to the driver later, when it wakes the queue. 
*/ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee) { unsigned long flags; int i; spin_lock_irqsave(&ieee->lock,flags); /* called with 2nd parm 0, no tx mgmt lock required */ ieee80211_sta_wakeup(ieee,0); for(i = 0; i < txb->nr_frags; i++) { if (ieee->queue_stop){ ieee->tx_pending.txb = txb; ieee->tx_pending.frag = i; goto exit; }else{ ieee->softmac_data_hard_start_xmit( txb->fragments[i], ieee->dev,ieee->rate); //(i+1)<txb->nr_frags); ieee->stats.tx_packets++; ieee->stats.tx_bytes += txb->fragments[i]->len; ieee->dev->trans_start = jiffies; } } ieee80211_txb_free(txb); exit: spin_unlock_irqrestore(&ieee->lock,flags); } /* called with ieee->lock acquired */ void ieee80211_resume_tx(struct ieee80211_device *ieee) { int i; for(i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) { if (ieee->queue_stop){ ieee->tx_pending.frag = i; return; }else{ ieee->softmac_data_hard_start_xmit( ieee->tx_pending.txb->fragments[i], ieee->dev,ieee->rate); //(i+1)<ieee->tx_pending.txb->nr_frags); ieee->stats.tx_packets++; ieee->dev->trans_start = jiffies; } } ieee80211_txb_free(ieee->tx_pending.txb); ieee->tx_pending.txb = NULL; } void ieee80211_reset_queue(struct ieee80211_device *ieee) { unsigned long flags; spin_lock_irqsave(&ieee->lock,flags); init_mgmt_queue(ieee); if (ieee->tx_pending.txb){ ieee80211_txb_free(ieee->tx_pending.txb); ieee->tx_pending.txb = NULL; } ieee->queue_stop = 0; spin_unlock_irqrestore(&ieee->lock,flags); } void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee) { unsigned long flags; struct sk_buff *skb; struct ieee80211_hdr_3addr *header; spin_lock_irqsave(&ieee->lock,flags); if (! 
ieee->queue_stop) goto exit; ieee->queue_stop = 0; if(ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE){ while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))){ header = (struct ieee80211_hdr_3addr *) skb->data; header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; //printk(KERN_ALERT "ieee80211_wake_queue \n"); ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate); dev_kfree_skb_any(skb);//edit by thomas } } if (!ieee->queue_stop && ieee->tx_pending.txb) ieee80211_resume_tx(ieee); if (!ieee->queue_stop && netif_queue_stopped(ieee->dev)){ ieee->softmac_stats.swtxawake++; netif_wake_queue(ieee->dev); } exit : spin_unlock_irqrestore(&ieee->lock,flags); } void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee) { //unsigned long flags; //spin_lock_irqsave(&ieee->lock,flags); if (! netif_queue_stopped(ieee->dev)){ netif_stop_queue(ieee->dev); ieee->softmac_stats.swtxstop++; } ieee->queue_stop = 1; //spin_unlock_irqrestore(&ieee->lock,flags); } inline void ieee80211_randomize_cell(struct ieee80211_device *ieee) { random_ether_addr(ieee->current_network.bssid); } /* called in user context only */ void ieee80211_start_master_bss(struct ieee80211_device *ieee) { ieee->assoc_id = 1; if (ieee->current_network.ssid_len == 0){ strncpy(ieee->current_network.ssid, IEEE80211_DEFAULT_TX_ESSID, IW_ESSID_MAX_SIZE); ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID); ieee->ssid_set = 1; } memcpy(ieee->current_network.bssid, ieee->dev->dev_addr, ETH_ALEN); ieee->set_chan(ieee->dev, ieee->current_network.channel); ieee->state = IEEE80211_LINKED; ieee->link_change(ieee->dev); notify_wx_assoc_event(ieee); if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); netif_carrier_on(ieee->dev); } void ieee80211_start_monitor_mode(struct ieee80211_device *ieee) { if(ieee->raw_tx){ if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); 
netif_carrier_on(ieee->dev); } } void ieee80211_start_ibss_wq(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq); /* iwconfig mode ad-hoc will schedule this and return * on the other hand this will block further iwconfig SET * operations because of the wx_sem hold. * Anyway some most set operations set a flag to speed-up * (abort) this wq (when syncro scanning) before sleeping * on the semaphore */ down(&ieee->wx_sem); if (ieee->current_network.ssid_len == 0){ strcpy(ieee->current_network.ssid,IEEE80211_DEFAULT_TX_ESSID); ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID); ieee->ssid_set = 1; } /* check if we have this cell in our network list */ ieee80211_softmac_check_all_nets(ieee); if(ieee->state == IEEE80211_NOLINK) ieee->current_network.channel = 10; /* if not then the state is not linked. Maybe the user switched to * ad-hoc mode just after being in monitor mode, or just after * being very few time in managed mode (so the card have had no * time to scan all the chans..) or we have just run up the iface * after setting ad-hoc mode. So we have to give another try.. * Here, in ibss mode, should be safe to do this without extra care * (in bss mode we had to make sure no-one tried to associate when * we had just checked the ieee->state and we was going to start the * scan) because in ibss mode the ieee80211_new_net function, when * finds a good net, just set the ieee->state to IEEE80211_LINKED, * so, at worst, we waste a bit of time to initiate an unneeded syncro * scan, that will stop at the first round because it sees the state * associated. */ if (ieee->state == IEEE80211_NOLINK) ieee80211_start_scan_syncro(ieee); /* the network definitively is not here.. 
create a new cell */ if (ieee->state == IEEE80211_NOLINK){ printk("creating new IBSS cell\n"); if(!ieee->wap_set) ieee80211_randomize_cell(ieee); if(ieee->modulation & IEEE80211_CCK_MODULATION){ ieee->current_network.rates_len = 4; ieee->current_network.rates[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB; ieee->current_network.rates[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; ieee->current_network.rates[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB; ieee->current_network.rates[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB; }else ieee->current_network.rates_len = 0; if(ieee->modulation & IEEE80211_OFDM_MODULATION){ ieee->current_network.rates_ex_len = 8; ieee->current_network.rates_ex[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB; ieee->current_network.rates_ex[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB; ieee->current_network.rates_ex[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB; ieee->current_network.rates_ex[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB; ieee->current_network.rates_ex[4] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB; ieee->current_network.rates_ex[5] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB; ieee->current_network.rates_ex[6] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB; ieee->current_network.rates_ex[7] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB; ieee->rate = 540; }else{ ieee->current_network.rates_ex_len = 0; ieee->rate = 110; } // By default, WMM function will be disabled in IBSS mode ieee->current_network.QoS_Enable = 0; ieee->current_network.atim_window = 0; ieee->current_network.capability = WLAN_CAPABILITY_IBSS; if(ieee->short_slot) ieee->current_network.capability |= WLAN_CAPABILITY_SHORT_SLOT; } ieee->state = IEEE80211_LINKED; ieee->set_chan(ieee->dev, ieee->current_network.channel); ieee->link_change(ieee->dev); notify_wx_assoc_event(ieee); ieee80211_start_send_beacons(ieee); printk(KERN_WARNING "after 
sending beacon packet!\n"); if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); netif_carrier_on(ieee->dev); up(&ieee->wx_sem); } inline void ieee80211_start_ibss(struct ieee80211_device *ieee) { queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 100); } /* this is called only in user context, with wx_sem held */ void ieee80211_start_bss(struct ieee80211_device *ieee) { unsigned long flags; // // Ref: 802.11d 11.1.3.3 // STA shall not start a BSS unless properly formed Beacon frame including a Country IE. // if(IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) { if(! ieee->bGlobalDomain) { return; } } /* check if we have already found the net we * are interested in (if any). * if not (we are disassociated and we are not * in associating / authenticating phase) start the background scanning. */ ieee80211_softmac_check_all_nets(ieee); /* ensure no-one start an associating process (thus setting * the ieee->state to ieee80211_ASSOCIATING) while we * have just cheked it and we are going to enable scan. 
* The ieee80211_new_net function is always called with * lock held (from both ieee80211_softmac_check_all_nets and * the rx path), so we cannot be in the middle of such function */ spin_lock_irqsave(&ieee->lock, flags); //#ifdef ENABLE_IPS // printk("start bss ENABLE_IPS\n"); //#else if (ieee->state == IEEE80211_NOLINK){ ieee->actscanning = true; ieee80211_rtl_start_scan(ieee); } //#endif spin_unlock_irqrestore(&ieee->lock, flags); } /* called only in userspace context */ void ieee80211_disassociate(struct ieee80211_device *ieee) { netif_carrier_off(ieee->dev); if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) ieee80211_reset_queue(ieee); if (ieee->data_hard_stop) ieee->data_hard_stop(ieee->dev); if(IS_DOT11D_ENABLE(ieee)) Dot11d_Reset(ieee); ieee->link_change(ieee->dev); if (ieee->state == IEEE80211_LINKED) notify_wx_assoc_event(ieee); ieee->state = IEEE80211_NOLINK; } void ieee80211_associate_retry_wq(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq); unsigned long flags; down(&ieee->wx_sem); if(!ieee->proto_started) goto exit; if(ieee->state != IEEE80211_ASSOCIATING_RETRY) goto exit; /* until we do not set the state to IEEE80211_NOLINK * there are no possibility to have someone else trying * to start an association procedure (we get here with * ieee->state = IEEE80211_ASSOCIATING). * When we set the state to IEEE80211_NOLINK it is possible * that the RX path run an attempt to associate, but * both ieee80211_softmac_check_all_nets and the * RX path works with ieee->lock held so there are no * problems. If we are still disassociated then start a scan. * the lock here is necessary to ensure no one try to start * an association procedure when we have just checked the * state and we are going to start the scan. 
*/ ieee->state = IEEE80211_NOLINK; ieee->beinretry = true; ieee80211_softmac_check_all_nets(ieee); spin_lock_irqsave(&ieee->lock, flags); if(ieee->state == IEEE80211_NOLINK){ ieee->beinretry = false; ieee->actscanning = true; ieee80211_rtl_start_scan(ieee); } //YJ,add,080828, notify os here if(ieee->state == IEEE80211_NOLINK) { notify_wx_assoc_event(ieee); } //YJ,add,080828,end spin_unlock_irqrestore(&ieee->lock, flags); exit: up(&ieee->wx_sem); } struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee) { u8 broadcast_addr[] = {0xff,0xff,0xff,0xff,0xff,0xff}; struct sk_buff *skb = NULL; struct ieee80211_probe_response *b; skb = ieee80211_probe_resp(ieee, broadcast_addr); if (!skb) return NULL; b = (struct ieee80211_probe_response *) skb->data; b->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_BEACON); return skb; } struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee) { struct sk_buff *skb; struct ieee80211_probe_response *b; skb = ieee80211_get_beacon_(ieee); if(!skb) return NULL; b = (struct ieee80211_probe_response *) skb->data; b->header.seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4); if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; return skb; } void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee) { ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); ieee80211_stop_protocol(ieee); up(&ieee->wx_sem); } void ieee80211_stop_protocol(struct ieee80211_device *ieee) { if (!ieee->proto_started) return; ieee->proto_started = 0; ieee80211_stop_send_beacons(ieee); if((ieee->iw_mode == IW_MODE_INFRA)&&(ieee->state == IEEE80211_LINKED)) { SendDisassociation(ieee,NULL,WLAN_REASON_DISASSOC_STA_HAS_LEFT); } del_timer_sync(&ieee->associate_timer); cancel_delayed_work(&ieee->associate_retry_wq); cancel_delayed_work(&ieee->start_ibss_wq); ieee80211_stop_scan(ieee); ieee80211_disassociate(ieee); } void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee) { ieee->sync_scan_hurryup = 0; 
down(&ieee->wx_sem); ieee80211_start_protocol(ieee); up(&ieee->wx_sem); } void ieee80211_start_protocol(struct ieee80211_device *ieee) { short ch = 0; int i = 0; if (ieee->proto_started) return; ieee->proto_started = 1; if (ieee->current_network.channel == 0){ do{ ch++; if (ch > MAX_CHANNEL_NUMBER) return; /* no channel found */ }while(!GET_DOT11D_INFO(ieee)->channel_map[ch]); ieee->current_network.channel = ch; } if (ieee->current_network.beacon_interval == 0) ieee->current_network.beacon_interval = 100; ieee->set_chan(ieee->dev,ieee->current_network.channel); for(i = 0; i < 17; i++) { ieee->last_rxseq_num[i] = -1; ieee->last_rxfrag_num[i] = -1; ieee->last_packet_time[i] = 0; } ieee->init_wmmparam_flag = 0;//reinitialize AC_xx_PARAM registers. /* if the user set the MAC of the ad-hoc cell and then * switch to managed mode, shall we make sure that association * attempts does not fail just because the user provide the essid * and the nic is still checking for the AP MAC ?? */ switch (ieee->iw_mode) { case IW_MODE_AUTO: ieee->iw_mode = IW_MODE_INFRA; //not set break here intentionly case IW_MODE_INFRA: ieee80211_start_bss(ieee); break; case IW_MODE_ADHOC: ieee80211_start_ibss(ieee); break; case IW_MODE_MASTER: ieee80211_start_master_bss(ieee); break; case IW_MODE_MONITOR: ieee80211_start_monitor_mode(ieee); break; default: ieee->iw_mode = IW_MODE_INFRA; ieee80211_start_bss(ieee); break; } } #define DRV_NAME "Ieee80211" void ieee80211_softmac_init(struct ieee80211_device *ieee) { int i; memset(&ieee->current_network, 0, sizeof(struct ieee80211_network)); ieee->state = IEEE80211_NOLINK; ieee->sync_scan_hurryup = 0; for(i = 0; i < 5; i++) { ieee->seq_ctrl[i] = 0; } ieee->assoc_id = 0; ieee->queue_stop = 0; ieee->scanning = 0; ieee->softmac_features = 0; //so IEEE2100-like driver are happy ieee->wap_set = 0; ieee->ssid_set = 0; ieee->proto_started = 0; ieee->basic_rate = IEEE80211_DEFAULT_BASIC_RATE; ieee->rate = 3; //#ifdef ENABLE_LPS ieee->ps = 
IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST; //#else // ieee->ps = IEEE80211_PS_DISABLED; //#endif ieee->sta_sleep = 0; //by amy ieee->bInactivePs = false; ieee->actscanning = false; ieee->ListenInterval = 2; ieee->NumRxDataInPeriod = 0; //YJ,add,080828 ieee->NumRxBcnInPeriod = 0; //YJ,add,080828 ieee->NumRxOkTotal = 0;//+by amy 080312 ieee->NumRxUnicast = 0;//YJ,add,080828,for keep alive ieee->beinretry = false; ieee->bHwRadioOff = false; //by amy init_mgmt_queue(ieee); ieee->tx_pending.txb = NULL; init_timer(&ieee->associate_timer); ieee->associate_timer.data = (unsigned long)ieee; ieee->associate_timer.function = ieee80211_associate_abort_cb; init_timer(&ieee->beacon_timer); ieee->beacon_timer.data = (unsigned long) ieee; ieee->beacon_timer.function = ieee80211_send_beacon_cb; ieee->wq = create_workqueue(DRV_NAME); INIT_DELAYED_WORK(&ieee->start_ibss_wq,(void*) ieee80211_start_ibss_wq); INIT_WORK(&ieee->associate_complete_wq,(void*) ieee80211_associate_complete_wq); INIT_WORK(&ieee->associate_procedure_wq,(void*) ieee80211_associate_procedure_wq); INIT_DELAYED_WORK(&ieee->softmac_scan_wq,(void*) ieee80211_softmac_scan_wq); INIT_DELAYED_WORK(&ieee->associate_retry_wq,(void*) ieee80211_associate_retry_wq); INIT_WORK(&ieee->wx_sync_scan_wq,(void*) ieee80211_wx_sync_scan_wq); // INIT_WORK(&ieee->watch_dog_wq,(void*) ieee80211_watch_dog_wq); sema_init(&ieee->wx_sem, 1); sema_init(&ieee->scan_sem, 1); spin_lock_init(&ieee->mgmt_tx_lock); spin_lock_init(&ieee->beacon_lock); tasklet_init(&ieee->ps_task, (void(*)(unsigned long)) ieee80211_sta_ps, (unsigned long)ieee); ieee->pDot11dInfo = kmalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC); } void ieee80211_softmac_free(struct ieee80211_device *ieee) { down(&ieee->wx_sem); del_timer_sync(&ieee->associate_timer); cancel_delayed_work(&ieee->associate_retry_wq); //add for RF power on power of by lizhaoming 080512 cancel_delayed_work(&ieee->GPIOChangeRFWorkItem); destroy_workqueue(ieee->wq); kfree(ieee->pDot11dInfo); up(&ieee->wx_sem); 
} /******************************************************** * Start of WPA code. * * this is stolen from the ipw2200 driver * ********************************************************/ static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value) { /* This is called when wpa_supplicant loads and closes the driver * interface. */ printk("%s WPA\n",value ? "enabling" : "disabling"); ieee->wpa_enabled = value; return 0; } void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie, int wpa_ie_len) { /* make sure WPA is enabled */ ieee80211_wpa_enable(ieee, 1); ieee80211_disassociate(ieee); } static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command, int reason) { int ret = 0; switch (command) { case IEEE_MLME_STA_DEAUTH: // silently ignore break; case IEEE_MLME_STA_DISASSOC: ieee80211_disassociate(ieee); break; default: printk("Unknown MLME request: %d\n", command); ret = -EOPNOTSUPP; } return ret; } static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee, struct ieee_param *param, int plen) { u8 *buf; if (param->u.wpa_ie.len > MAX_WPA_IE_LEN || (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL)) return -EINVAL; if (param->u.wpa_ie.len) { buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len, GFP_KERNEL); if (buf == NULL) return -ENOMEM; kfree(ieee->wpa_ie); ieee->wpa_ie = buf; ieee->wpa_ie_len = param->u.wpa_ie.len; } else { kfree(ieee->wpa_ie); ieee->wpa_ie = NULL; ieee->wpa_ie_len = 0; } ieee80211_wpa_assoc_frame(ieee, ieee->wpa_ie, ieee->wpa_ie_len); return 0; } #define AUTH_ALG_OPEN_SYSTEM 0x1 #define AUTH_ALG_SHARED_KEY 0x2 static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value) { struct ieee80211_security sec = { .flags = SEC_AUTH_MODE, }; int ret = 0; if (value & AUTH_ALG_SHARED_KEY) { sec.auth_mode = WLAN_AUTH_SHARED_KEY; ieee->open_wep = 0; } else { sec.auth_mode = WLAN_AUTH_OPEN; ieee->open_wep = 1; } if (ieee->set_security) ieee->set_security(ieee->dev, &sec); else ret = 
-EOPNOTSUPP; return ret; } static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name, u32 value) { int ret=0; unsigned long flags; switch (name) { case IEEE_PARAM_WPA_ENABLED: ret = ieee80211_wpa_enable(ieee, value); break; case IEEE_PARAM_TKIP_COUNTERMEASURES: ieee->tkip_countermeasures=value; break; case IEEE_PARAM_DROP_UNENCRYPTED: { /* HACK: * * wpa_supplicant calls set_wpa_enabled when the driver * is loaded and unloaded, regardless of if WPA is being * used. No other calls are made which can be used to * determine if encryption will be used or not prior to * association being expected. If encryption is not being * used, drop_unencrypted is set to false, else true -- we * can use this to determine if the CAP_PRIVACY_ON bit should * be set. */ struct ieee80211_security sec = { .flags = SEC_ENABLED, .enabled = value, }; ieee->drop_unencrypted = value; /* We only change SEC_LEVEL for open mode. Others * are set by ipw_wpa_set_encryption. */ if (!value) { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_0; } else { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_1; } if (ieee->set_security) ieee->set_security(ieee->dev, &sec); break; } case IEEE_PARAM_PRIVACY_INVOKED: ieee->privacy_invoked=value; break; case IEEE_PARAM_AUTH_ALGS: ret = ieee80211_wpa_set_auth_algs(ieee, value); break; case IEEE_PARAM_IEEE_802_1X: ieee->ieee802_1x=value; break; case IEEE_PARAM_WPAX_SELECT: // added for WPA2 mixed mode //printk(KERN_WARNING "------------------------>wpax value = %x\n", value); spin_lock_irqsave(&ieee->wpax_suitlist_lock,flags); ieee->wpax_type_set = 1; ieee->wpax_type_notify = value; spin_unlock_irqrestore(&ieee->wpax_suitlist_lock,flags); break; default: printk("Unknown WPA param: %d\n",name); ret = -EOPNOTSUPP; } return ret; } /* implementation borrowed from hostap driver */ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee, struct ieee_param *param, int param_len) { int ret = 0; struct ieee80211_crypto_ops *ops; struct 
ieee80211_crypt_data **crypt; struct ieee80211_security sec = { .flags = 0, }; param->u.crypt.err = 0; param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0'; if (param_len != (int) ((char *) param->u.crypt.key - (char *) param) + param->u.crypt.key_len) { printk("Len mismatch %d, %d\n", param_len, param->u.crypt.key_len); return -EINVAL; } if (is_broadcast_ether_addr(param->sta_addr)) { if (param->u.crypt.idx >= WEP_KEYS) return -EINVAL; crypt = &ieee->crypt[param->u.crypt.idx]; } else { return -EINVAL; } if (strcmp(param->u.crypt.alg, "none") == 0) { if (crypt) { sec.enabled = 0; // FIXME FIXME //sec.encrypt = 0; sec.level = SEC_LEVEL_0; sec.flags |= SEC_ENABLED | SEC_LEVEL; ieee80211_crypt_delayed_deinit(ieee, crypt); } goto done; } sec.enabled = 1; // FIXME FIXME // sec.encrypt = 1; sec.flags |= SEC_ENABLED; /* IPW HW cannot build TKIP MIC, host decryption still needed. */ if (!(ieee->host_encrypt || ieee->host_decrypt) && strcmp(param->u.crypt.alg, "TKIP")) goto skip_host_crypt; ops = ieee80211_get_crypto_ops(param->u.crypt.alg); if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); if (ops == NULL) { printk("unknown crypto alg '%s'\n", param->u.crypt.alg); param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG; ret = -EINVAL; goto done; } if (*crypt == NULL || (*crypt)->ops != ops) { struct ieee80211_crypt_data *new_crypt; ieee80211_crypt_delayed_deinit(ieee, crypt); new_crypt = kmalloc(sizeof(*new_crypt), GFP_KERNEL); if (new_crypt == NULL) { ret = -ENOMEM; goto done; } memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ops; if (new_crypt->ops) new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx); if (new_crypt->priv == NULL) { kfree(new_crypt); 
param->u.crypt.err = IEEE_CRYPT_ERR_CRYPT_INIT_FAILED; ret = -EINVAL; goto done; } *crypt = new_crypt; } if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key && (*crypt)->ops->set_key(param->u.crypt.key, param->u.crypt.key_len, param->u.crypt.seq, (*crypt)->priv) < 0) { printk("key setting failed\n"); param->u.crypt.err = IEEE_CRYPT_ERR_KEY_SET_FAILED; ret = -EINVAL; goto done; } skip_host_crypt: if (param->u.crypt.set_tx) { ieee->tx_keyidx = param->u.crypt.idx; sec.active_key = param->u.crypt.idx; sec.flags |= SEC_ACTIVE_KEY; } else sec.flags &= ~SEC_ACTIVE_KEY; if (param->u.crypt.alg != NULL) { memcpy(sec.keys[param->u.crypt.idx], param->u.crypt.key, param->u.crypt.key_len); sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len; sec.flags |= (1 << param->u.crypt.idx); if (strcmp(param->u.crypt.alg, "WEP") == 0) { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_1; } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_2; } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_3; } } done: if (ieee->set_security) ieee->set_security(ieee->dev, &sec); /* Do not reset port if card is in Managed mode since resetting will * generate new IEEE 802.11 authentication which may end up in looping * with IEEE 802.1X. If your hardware requires a reset after WEP * configuration (for example... Prism2), implement the reset_port in * the callbacks structures used to initialize the 802.11 stack. 
*/ if (ieee->reset_on_keychange && ieee->iw_mode != IW_MODE_INFRA && ieee->reset_port && ieee->reset_port(ieee->dev)) { printk("reset_port failed\n"); param->u.crypt.err = IEEE_CRYPT_ERR_CARD_CONF_FAILED; return -EINVAL; } return ret; } int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p) { struct ieee_param *param; int ret=0; down(&ieee->wx_sem); //IEEE_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length); if (p->length < sizeof(struct ieee_param) || !p->pointer){ ret = -EINVAL; goto out; } param = kmalloc(p->length, GFP_KERNEL); if (param == NULL){ ret = -ENOMEM; goto out; } if (copy_from_user(param, p->pointer, p->length)) { kfree(param); ret = -EFAULT; goto out; } switch (param->cmd) { case IEEE_CMD_SET_WPA_PARAM: ret = ieee80211_wpa_set_param(ieee, param->u.wpa_param.name, param->u.wpa_param.value); break; case IEEE_CMD_SET_WPA_IE: ret = ieee80211_wpa_set_wpa_ie(ieee, param, p->length); break; case IEEE_CMD_SET_ENCRYPTION: ret = ieee80211_wpa_set_encryption(ieee, param, p->length); break; case IEEE_CMD_MLME: ret = ieee80211_wpa_mlme(ieee, param->u.mlme.command, param->u.mlme.reason_code); break; default: printk("Unknown WPA supplicant request: %d\n",param->cmd); ret = -EOPNOTSUPP; break; } if (ret == 0 && copy_to_user(p->pointer, param, p->length)) ret = -EFAULT; kfree(param); out: up(&ieee->wx_sem); return ret; } void notify_wx_assoc_event(struct ieee80211_device *ieee) { union iwreq_data wrqu; wrqu.ap_addr.sa_family = ARPHRD_ETHER; if (ieee->state == IEEE80211_LINKED) memcpy(wrqu.ap_addr.sa_data, ieee->current_network.bssid, ETH_ALEN); else memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL); }
gpl-2.0
koxda/android_kernel_samsung_msm8660-common
arch/s390/kvm/intercept.c
2610
5825
/*
 * intercept.c - in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>

#include "kvm-s390.h"
#include "gaccess.h"

/*
 * Emulate the LCTLG instruction (load control registers, 64-bit):
 * loads guest control registers reg1..reg3 (wrapping modulo 16) from
 * the doubleword-aligned guest address base2+disp2.
 *
 * Returns 0 on completion (including after injecting an addressing
 * exception into the guest) or -EOPNOTSUPP for an unrecognized opcode
 * extension byte.
 */
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	/* decode register and address operands from the SIE block's ipa/ipb */
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	/* RSY-format 20-bit signed displacement pieces: DL + (DH << 12) */
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
		((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;
	/* only the 0x2f opcode extension (LCTLG) is handled here */
	if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
		return -EOPNOTSUPP;

	useraddr = disp2;
	if (base2)
		useraddr += vcpu->arch.guest_gprs[base2];

	/* operand must be doubleword aligned */
	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
		   disp2);

	/* load each 64-bit control register from guest memory in turn */
	do {
		rc = get_guest_u64(vcpu, useraddr,
				   &vcpu->arch.sie_block->gcr[reg]);
		if (rc == -EFAULT) {
			/* bad guest address: reflect as addressing exception */
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			break;
		}
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;	/* register numbers wrap around 15->0 */
	} while (1);
	return 0;
}

/*
 * Emulate the LCTL instruction (load control registers, 32-bit):
 * loads the low 32 bits of guest control registers reg1..reg3 from the
 * word-aligned guest address base2+disp2, preserving the high halves.
 *
 * Always returns 0; faults are injected into the guest.
 */
static int handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	/* RS-format: 12-bit unsigned displacement only */
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	useraddr = disp2;
	if (base2)
		useraddr += vcpu->arch.guest_gprs[base2];

	/* operand must be word aligned */
	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
		   disp2);

	reg = reg1;
	do {
		rc = get_guest_u32(vcpu, useraddr, &val);
		if (rc == -EFAULT) {
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			break;
		}
		/* only replace the low word of the control register */
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

/* dispatch table indexed by the instruction's first opcode byte (ipa >> 8) */
static intercept_handler_t instruction_handlers[256] = {
	[0x83] = kvm_s390_handle_diag,
	[0xae] = kvm_s390_handle_sigp,
	[0xb2] = kvm_s390_handle_b2,
	[0xb7] = handle_lctl,
	[0xeb] = handle_lctlg,
};

/*
 * Intercepts that need no emulation work: just account them in the
 * per-vcpu statistics and resume the guest.
 */
static int handle_noop(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case 0x0:
		vcpu->stat.exit_null++;
		break;
	case 0x10:
		vcpu->stat.exit_external_request++;
		break;
	case 0x14:
		vcpu->stat.exit_external_interrupt++;
		break;
	default:
		break; /* nothing */
	}
	return 0;
}

/*
 * Handle a SIE stop intercept: clear the running flag and process any
 * pending stop actions (store status, reload, stop) under the local
 * interrupt lock.
 *
 * Returns 0 to continue, SIE_INTERCEPT_RERUNVCPU to rerun the vcpu, or
 * -EOPNOTSUPP to drop to userspace.
 */
static int handle_stop(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	vcpu->stat.exit_stop_request++;
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
		rc = kvm_s390_vcpu_store_status(vcpu,
						  KVM_S390_STORE_STATUS_NOADDR);
		/* a successful store still exits to userspace */
		if (rc >= 0)
			rc = -EOPNOTSUPP;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
		rc = SIE_INTERCEPT_RERUNVCPU;
		vcpu->run->exit_reason = KVM_EXIT_INTR;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
		rc = -EOPNOTSUPP;
	}

	spin_unlock_bh(&vcpu->arch.local_int.lock);
	return rc;
}

/*
 * Handle a validity intercept. Only reason code 0x37 with an in-bounds
 * prefix is recoverable: fault the two prefix pages back in (they were
 * presumably swapped out -- NOTE(review): confirm against SIE docs) and
 * retry; everything else exits to userspace with -EOPNOTSUPP.
 */
static int handle_validity(struct kvm_vcpu *vcpu)
{
	int viwhy = vcpu->arch.sie_block->ipb >> 16;
	int rc;

	vcpu->stat.exit_validity++;
	if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
		<= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) {
		rc = fault_in_pages_writeable((char __user *)
			vcpu->arch.sie_block->gmsor +
			vcpu->arch.sie_block->prefix,
			2*PAGE_SIZE);
		if (rc)
			/* user will receive sigsegv, exit to user */
			rc = -EOPNOTSUPP;
	} else
		rc = -EOPNOTSUPP;

	if (rc)
		VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
			   viwhy);
	return rc;
}

/*
 * Dispatch an instruction intercept to the handler registered for its
 * first opcode byte, or -EOPNOTSUPP if none is registered.
 */
static int handle_instruction(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	vcpu->stat.exit_instruction++;
	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

/* Reflect a program interruption intercept back into the guest. */
static int handle_prog(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_program_interruption++;
	return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
}

/*
 * Combined instruction-and-program intercept: emulate the instruction,
 * then deliver the program interruption. If the instruction itself was
 * unsupported, downgrade the icptcode to a plain instruction intercept
 * (0x04) before reporting the failure.
 */
static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
	int rc, rc2;

	vcpu->stat.exit_instr_and_program++;
	rc = handle_instruction(vcpu);
	rc2 = handle_prog(vcpu);

	if (rc == -EOPNOTSUPP)
		vcpu->arch.sie_block->icptcode = 0x04;
	if (rc)
		return rc;
	return rc2;
}

/* top-level dispatch table, indexed by SIE intercept code / 4 */
static const intercept_handler_t intercept_funcs[] = {
	[0x00 >> 2] = handle_noop,
	[0x04 >> 2] = handle_instruction,
	[0x08 >> 2] = handle_prog,
	[0x0C >> 2] = handle_instruction_and_prog,
	[0x10 >> 2] = handle_noop,
	[0x14 >> 2] = handle_noop,
	[0x1C >> 2] = kvm_s390_handle_wait,
	[0x20 >> 2] = handle_validity,
	[0x28 >> 2] = handle_stop,
};

/*
 * Entry point for SIE intercept handling: validate the intercept code
 * (must be a multiple of 4 within table bounds) and dispatch it.
 * Returns the handler's result, or -EOPNOTSUPP for unknown codes.
 */
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	intercept_handler_t func;
	u8 code = vcpu->arch.sie_block->icptcode;

	if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
		return -EOPNOTSUPP;
	func = intercept_funcs[code >> 2];
	if (func)
		return func(vcpu);
	return -EOPNOTSUPP;
}
gpl-2.0
leitick/linux
drivers/net/wireless/ti/wl18xx/tx.c
2610
4989
/*
 * This file is part of wl18xx
 *
 * Copyright (C) 2011 Texas Instruments Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include "../wlcore/wlcore.h"
#include "../wlcore/cmd.h"
#include "../wlcore/debug.h"
#include "../wlcore/acx.h"
#include "../wlcore/tx.h"
#include "wl18xx.h"
#include "tx.h"

/*
 * Translate the firmware's last-used Tx rate index into a mac80211
 * ieee80211_tx_rate (legacy index or MCS index plus SGI/40MHz flags).
 *
 * On an out-of-range firmware rate the result is zeroed and an error
 * is logged.
 */
static void wl18xx_get_last_tx_rate(struct wl1271 *wl,
				    struct ieee80211_vif *vif,
				    struct ieee80211_tx_rate *rate)
{
	u8 fw_rate = wl->fw_status_2->counters.tx_last_rate;

	if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
		wl1271_error("last Tx rate invalid: %d", fw_rate);
		rate->idx = 0;
		rate->flags = 0;
		return;
	}

	if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
		/* legacy (non-HT) rate: index maps directly */
		rate->idx = fw_rate;
		rate->flags = 0;
	} else {
		rate->flags = IEEE80211_TX_RC_MCS;
		rate->idx = fw_rate - CONF_HW_RATE_INDEX_MCS0;

		/* SGI modifier is counted as a separate rate */
		if (fw_rate >= CONF_HW_RATE_INDEX_MCS7_SGI)
			(rate->idx)--;
		if (fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI)
			(rate->idx)--;

		/* this also covers the 40Mhz SGI case (= MCS15) */
		if (fw_rate == CONF_HW_RATE_INDEX_MCS7_SGI ||
		    fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI)
			rate->flags |= IEEE80211_TX_RC_SHORT_GI;

		/* two-stream rates on an HT40 channel: report as 40MHz MCS */
		if (fw_rate > CONF_HW_RATE_INDEX_MCS7_SGI && vif) {
			struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
			if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
			    wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
				/* adjustment needed for range 0-7 */
				rate->idx -= 8;
				rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
			}
		}
	}
}

/*
 * Complete a single transmitted frame described by one firmware Tx
 * status byte (descriptor id in the low bits, success bit above it):
 * fill in the mac80211 status info, strip the driver's private headers,
 * hand the skb back to the stack via the deferred queue, and release
 * the Tx descriptor id.
 */
static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int id = tx_stat_byte & WL18XX_TX_STATUS_DESC_ID_MASK;
	bool tx_success;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("illegal id in tx completion: %d", id);
		return;
	}

	/* a zero bit indicates Tx success */
	tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX));

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* dummy packets never reach the stack; just reclaim the id */
	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* update the TX status info */
	if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	/*
	 * first pass info->control.vif while it's valid, and then fill out
	 * the info->status structures
	 */
	wl18xx_get_last_tx_rate(wl, info->control.vif, &info->status.rates[0]);

	info->status.rates[0].count = 1; /* no data about retries */
	info->status.ack_signal = -1;

	if (!tx_success)
		wl->stats.retry_count++;

	/*
	 * TODO: update sequence number for encryption? seems to be
	 * unsupported for now. needed for recovery with encryption.
	 */

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p success %d",
		     id, skb, tx_success);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, id);
}

/*
 * Process all Tx completions the firmware has posted since the last
 * call: walk the circular released_tx_desc array from the last handled
 * index up to the firmware's current release index, completing each
 * packet, then remember the new index.
 */
void wl18xx_tx_immediate_complete(struct wl1271 *wl)
{
	struct wl18xx_fw_status_priv *status_priv =
		(struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
	struct wl18xx_priv *priv = wl->priv;
	u8 i;

	/* nothing to do here */
	if (priv->last_fw_rls_idx == status_priv->fw_release_idx)
		return;

	/* freed Tx descriptors */
	wl1271_debug(DEBUG_TX, "last released desc = %d, current idx = %d",
		     priv->last_fw_rls_idx, status_priv->fw_release_idx);

	if (status_priv->fw_release_idx >= WL18XX_FW_MAX_TX_STATUS_DESC) {
		wl1271_error("invalid desc release index %d",
			     status_priv->fw_release_idx);
		WARN_ON(1);
		return;
	}

	for (i = priv->last_fw_rls_idx;
	     i != status_priv->fw_release_idx;
	     i = (i + 1) % WL18XX_FW_MAX_TX_STATUS_DESC) {
		wl18xx_tx_complete_packet(wl,
					  status_priv->released_tx_desc[i]);

		wl->tx_results_count++;
	}

	priv->last_fw_rls_idx = status_priv->fw_release_idx;
}
gpl-2.0
chaosmaster/android_kernel_amazon_ford
drivers/infiniband/hw/mthca/mthca_qp.c
2610
62454
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/string.h> #include <linux/slab.h> #include <linux/sched.h> #include <asm/io.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> #include <rdma/ib_pack.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_memfree.h" #include "mthca_wqe.h" enum { MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, MTHCA_ACK_REQ_FREQ = 10, MTHCA_FLIGHT_LIMIT = 9, MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */ MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */ MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */ }; enum { MTHCA_QP_STATE_RST = 0, MTHCA_QP_STATE_INIT = 1, MTHCA_QP_STATE_RTR = 2, MTHCA_QP_STATE_RTS = 3, MTHCA_QP_STATE_SQE = 4, MTHCA_QP_STATE_SQD = 5, MTHCA_QP_STATE_ERR = 6, MTHCA_QP_STATE_DRAINING = 7 }; enum { MTHCA_QP_ST_RC = 0x0, MTHCA_QP_ST_UC = 0x1, MTHCA_QP_ST_RD = 0x2, MTHCA_QP_ST_UD = 0x3, MTHCA_QP_ST_MLX = 0x7 }; enum { MTHCA_QP_PM_MIGRATED = 0x3, MTHCA_QP_PM_ARMED = 0x0, MTHCA_QP_PM_REARM = 0x1 }; enum { /* qp_context flags */ MTHCA_QP_BIT_DE = 1 << 8, /* params1 */ MTHCA_QP_BIT_SRE = 1 << 15, MTHCA_QP_BIT_SWE = 1 << 14, MTHCA_QP_BIT_SAE = 1 << 13, MTHCA_QP_BIT_SIC = 1 << 4, MTHCA_QP_BIT_SSC = 1 << 3, /* params2 */ MTHCA_QP_BIT_RRE = 1 << 15, MTHCA_QP_BIT_RWE = 1 << 14, MTHCA_QP_BIT_RAE = 1 << 13, MTHCA_QP_BIT_RIC = 1 << 4, MTHCA_QP_BIT_RSC = 1 << 3 }; enum { MTHCA_SEND_DOORBELL_FENCE = 1 << 5 }; struct mthca_qp_path { __be32 port_pkey; u8 rnr_retry; u8 g_mylmc; __be16 rlid; u8 ackto; u8 mgid_index; u8 static_rate; u8 hop_limit; __be32 sl_tclass_flowlabel; u8 rgid[16]; } __attribute__((packed)); struct mthca_qp_context { __be32 flags; __be32 tavor_sched_queue; /* Reserved on Arbel */ u8 mtu_msgmax; u8 rq_size_stride; /* Reserved on Tavor */ u8 sq_size_stride; /* Reserved on Tavor */ u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ __be32 usr_page; __be32 local_qpn; __be32 remote_qpn; u32 reserved1[2]; struct mthca_qp_path pri_path; struct mthca_qp_path alt_path; __be32 rdd; __be32 pd; __be32 
wqe_base; __be32 wqe_lkey; __be32 params1; __be32 reserved2; __be32 next_send_psn; __be32 cqn_snd; __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ __be32 snd_db_index; /* (debugging only entries) */ __be32 last_acked_psn; __be32 ssn; __be32 params2; __be32 rnr_nextrecvpsn; __be32 ra_buff_indx; __be32 cqn_rcv; __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ __be32 rcv_db_index; /* (debugging only entries) */ __be32 qkey; __be32 srqn; __be32 rmsn; __be16 rq_wqe_counter; /* reserved on Tavor */ __be16 sq_wqe_counter; /* reserved on Tavor */ u32 reserved3[18]; } __attribute__((packed)); struct mthca_qp_param { __be32 opt_param_mask; u32 reserved1; struct mthca_qp_context context; u32 reserved2[62]; } __attribute__((packed)); enum { MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, MTHCA_QP_OPTPAR_RRE = 1 << 1, MTHCA_QP_OPTPAR_RAE = 1 << 2, MTHCA_QP_OPTPAR_RWE = 1 << 3, MTHCA_QP_OPTPAR_PKEY_INDEX = 1 << 4, MTHCA_QP_OPTPAR_Q_KEY = 1 << 5, MTHCA_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, MTHCA_QP_OPTPAR_SRA_MAX = 1 << 8, MTHCA_QP_OPTPAR_RRA_MAX = 1 << 9, MTHCA_QP_OPTPAR_PM_STATE = 1 << 10, MTHCA_QP_OPTPAR_PORT_NUM = 1 << 11, MTHCA_QP_OPTPAR_RETRY_COUNT = 1 << 12, MTHCA_QP_OPTPAR_ALT_RNR_RETRY = 1 << 13, MTHCA_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, MTHCA_QP_OPTPAR_RNR_RETRY = 1 << 15, MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 }; static const u8 mthca_opcode[] = { [IB_WR_SEND] = MTHCA_OPCODE_SEND, [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, [IB_WR_RDMA_WRITE] = MTHCA_OPCODE_RDMA_WRITE, [IB_WR_RDMA_WRITE_WITH_IMM] = MTHCA_OPCODE_RDMA_WRITE_IMM, [IB_WR_RDMA_READ] = MTHCA_OPCODE_RDMA_READ, [IB_WR_ATOMIC_CMP_AND_SWP] = MTHCA_OPCODE_ATOMIC_CS, [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA, }; static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) { return qp->qpn >= dev->qp_table.sqp_start && qp->qpn <= dev->qp_table.sqp_start + 3; } static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) { return qp->qpn >= dev->qp_table.sqp_start && 
qp->qpn <= dev->qp_table.sqp_start + 1; } static void *get_recv_wqe(struct mthca_qp *qp, int n) { if (qp->is_direct) return qp->queue.direct.buf + (n << qp->rq.wqe_shift); else return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); } static void *get_send_wqe(struct mthca_qp *qp, int n) { if (qp->is_direct) return qp->queue.direct.buf + qp->send_wqe_offset + (n << qp->sq.wqe_shift); else return qp->queue.page_list[(qp->send_wqe_offset + (n << qp->sq.wqe_shift)) >> PAGE_SHIFT].buf + ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & (PAGE_SIZE - 1)); } static void mthca_wq_reset(struct mthca_wq *wq) { wq->next_ind = 0; wq->last_comp = wq->max - 1; wq->head = 0; wq->tail = 0; } void mthca_qp_event(struct mthca_dev *dev, u32 qpn, enum ib_event_type event_type) { struct mthca_qp *qp; struct ib_event event; spin_lock(&dev->qp_table.lock); qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); if (qp) ++qp->refcount; spin_unlock(&dev->qp_table.lock); if (!qp) { mthca_warn(dev, "Async event %d for bogus QP %08x\n", event_type, qpn); return; } if (event_type == IB_EVENT_PATH_MIG) qp->port = qp->alt_port; event.device = &dev->ib_dev; event.event = event_type; event.element.qp = &qp->ibqp; if (qp->ibqp.event_handler) qp->ibqp.event_handler(&event, qp->ibqp.qp_context); spin_lock(&dev->qp_table.lock); if (!--qp->refcount) wake_up(&qp->wait); spin_unlock(&dev->qp_table.lock); } static int to_mthca_state(enum ib_qp_state ib_state) { switch (ib_state) { case IB_QPS_RESET: return MTHCA_QP_STATE_RST; case IB_QPS_INIT: return MTHCA_QP_STATE_INIT; case IB_QPS_RTR: return MTHCA_QP_STATE_RTR; case IB_QPS_RTS: return MTHCA_QP_STATE_RTS; case IB_QPS_SQD: return MTHCA_QP_STATE_SQD; case IB_QPS_SQE: return MTHCA_QP_STATE_SQE; case IB_QPS_ERR: return MTHCA_QP_STATE_ERR; default: return -1; } } enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS }; static int to_mthca_st(int transport) { switch (transport) { case RC: 
return MTHCA_QP_ST_RC; case UC: return MTHCA_QP_ST_UC; case UD: return MTHCA_QP_ST_UD; case RD: return MTHCA_QP_ST_RD; case MLX: return MTHCA_QP_ST_MLX; default: return -1; } } static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr, int attr_mask) { if (attr_mask & IB_QP_PKEY_INDEX) sqp->pkey_index = attr->pkey_index; if (attr_mask & IB_QP_QKEY) sqp->qkey = attr->qkey; if (attr_mask & IB_QP_SQ_PSN) sqp->send_psn = attr->sq_psn; } static void init_port(struct mthca_dev *dev, int port) { int err; struct mthca_init_ib_param param; memset(&param, 0, sizeof param); param.port_width = dev->limits.port_width_cap; param.vl_cap = dev->limits.vl_cap; param.mtu_cap = dev->limits.mtu_cap; param.gid_cap = dev->limits.gid_table_len; param.pkey_cap = dev->limits.pkey_table_len; err = mthca_INIT_IB(dev, &param, port); if (err) mthca_warn(dev, "INIT_IB failed, return code %d.\n", err); } static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, int attr_mask) { u8 dest_rd_atomic; u32 access_flags; u32 hw_access_flags = 0; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) dest_rd_atomic = attr->max_dest_rd_atomic; else dest_rd_atomic = qp->resp_depth; if (attr_mask & IB_QP_ACCESS_FLAGS) access_flags = attr->qp_access_flags; else access_flags = qp->atomic_rd_en; if (!dest_rd_atomic) access_flags &= IB_ACCESS_REMOTE_WRITE; if (access_flags & IB_ACCESS_REMOTE_READ) hw_access_flags |= MTHCA_QP_BIT_RRE; if (access_flags & IB_ACCESS_REMOTE_ATOMIC) hw_access_flags |= MTHCA_QP_BIT_RAE; if (access_flags & IB_ACCESS_REMOTE_WRITE) hw_access_flags |= MTHCA_QP_BIT_RWE; return cpu_to_be32(hw_access_flags); } static inline enum ib_qp_state to_ib_qp_state(int mthca_state) { switch (mthca_state) { case MTHCA_QP_STATE_RST: return IB_QPS_RESET; case MTHCA_QP_STATE_INIT: return IB_QPS_INIT; case MTHCA_QP_STATE_RTR: return IB_QPS_RTR; case MTHCA_QP_STATE_RTS: return IB_QPS_RTS; case MTHCA_QP_STATE_DRAINING: case MTHCA_QP_STATE_SQD: return IB_QPS_SQD; case 
MTHCA_QP_STATE_SQE: return IB_QPS_SQE; case MTHCA_QP_STATE_ERR: return IB_QPS_ERR; default: return -1; } } static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state) { switch (mthca_mig_state) { case 0: return IB_MIG_ARMED; case 1: return IB_MIG_REARM; case 3: return IB_MIG_MIGRATED; default: return -1; } } static int to_ib_qp_access_flags(int mthca_flags) { int ib_flags = 0; if (mthca_flags & MTHCA_QP_BIT_RRE) ib_flags |= IB_ACCESS_REMOTE_READ; if (mthca_flags & MTHCA_QP_BIT_RWE) ib_flags |= IB_ACCESS_REMOTE_WRITE; if (mthca_flags & MTHCA_QP_BIT_RAE) ib_flags |= IB_ACCESS_REMOTE_ATOMIC; return ib_flags; } static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr, struct mthca_qp_path *path) { memset(ib_ah_attr, 0, sizeof *ib_ah_attr); ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3; if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports) return; ib_ah_attr->dlid = be16_to_cpu(path->rlid); ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28; ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f; ib_ah_attr->static_rate = mthca_rate_to_ib(dev, path->static_rate & 0xf, ib_ah_attr->port_num); ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? 
IB_AH_GRH : 0; if (ib_ah_attr->ah_flags) { ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1); ib_ah_attr->grh.hop_limit = path->hop_limit; ib_ah_attr->grh.traffic_class = (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff; ib_ah_attr->grh.flow_label = be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff; memcpy(ib_ah_attr->grh.dgid.raw, path->rgid, sizeof ib_ah_attr->grh.dgid.raw); } } int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); int err = 0; struct mthca_mailbox *mailbox = NULL; struct mthca_qp_param *qp_param; struct mthca_qp_context *context; int mthca_state; mutex_lock(&qp->mutex); if (qp->state == IB_QPS_RESET) { qp_attr->qp_state = IB_QPS_RESET; goto done; } mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto out; } err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); if (err) { mthca_warn(dev, "QUERY_QP failed (%d)\n", err); goto out_mailbox; } qp_param = mailbox->buf; context = &qp_param->context; mthca_state = be32_to_cpu(context->flags) >> 28; qp->state = to_ib_qp_state(mthca_state); qp_attr->qp_state = qp->state; qp_attr->path_mtu = context->mtu_msgmax >> 5; qp_attr->path_mig_state = to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); qp_attr->qkey = be32_to_cpu(context->qkey); qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; qp_attr->qp_access_flags = to_ib_qp_access_flags(be32_to_cpu(context->params2)); if (qp->transport == RC || qp->transport == UC) { to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f; 
qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; } qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f; qp_attr->port_num = (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3; /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING; qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); qp_attr->max_dest_rd_atomic = 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); qp_attr->min_rnr_timer = (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; qp_attr->timeout = context->pri_path.ackto >> 3; qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; qp_attr->alt_timeout = context->alt_path.ackto >> 3; done: qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->cap.max_send_wr = qp->sq.max; qp_attr->cap.max_recv_wr = qp->rq.max; qp_attr->cap.max_send_sge = qp->sq.max_gs; qp_attr->cap.max_recv_sge = qp->rq.max_gs; qp_attr->cap.max_inline_data = qp->max_inline_data; qp_init_attr->cap = qp_attr->cap; qp_init_attr->sq_sig_type = qp->sq_policy; out_mailbox: mthca_free_mailbox(dev, mailbox); out: mutex_unlock(&qp->mutex); return err; } static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah, struct mthca_qp_path *path, u8 port) { path->g_mylmc = ah->src_path_bits & 0x7f; path->rlid = cpu_to_be16(ah->dlid); path->static_rate = mthca_get_rate(dev, ah->static_rate, port); if (ah->ah_flags & IB_AH_GRH) { if (ah->grh.sgid_index >= dev->limits.gid_table_len) { mthca_dbg(dev, "sgid_index (%u) too large. 
max is %d\n", ah->grh.sgid_index, dev->limits.gid_table_len-1); return -1; } path->g_mylmc |= 1 << 7; path->mgid_index = ah->grh.sgid_index; path->hop_limit = ah->grh.hop_limit; path->sl_tclass_flowlabel = cpu_to_be32((ah->sl << 28) | (ah->grh.traffic_class << 20) | (ah->grh.flow_label)); memcpy(path->rgid, ah->grh.dgid.raw, 16); } else path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28); return 0; } static int __mthca_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); struct mthca_mailbox *mailbox; struct mthca_qp_param *qp_param; struct mthca_qp_context *qp_context; u32 sqd_event = 0; int err = -EINVAL; mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto out; } qp_param = mailbox->buf; qp_context = &qp_param->context; memset(qp_param, 0, sizeof *qp_param); qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) | (to_mthca_st(qp->transport) << 16)); qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE); if (!(attr_mask & IB_QP_PATH_MIG_STATE)) qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); else { qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE); switch (attr->path_mig_state) { case IB_MIG_MIGRATED: qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); break; case IB_MIG_REARM: qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11); break; case IB_MIG_ARMED: qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11); break; } } /* leave tavor_sched_queue as 0 */ if (qp->transport == MLX || qp->transport == UD) qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11; else if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) { mthca_dbg(dev, "path MTU (%u) is invalid\n", attr->path_mtu); goto out_mailbox; } qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; } if 
(mthca_is_memfree(dev)) { if (qp->rq.max) qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; if (qp->sq.max) qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; } /* leave arbel_sched_queue as 0 */ if (qp->ibqp.uobject) qp_context->usr_page = cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index); else qp_context->usr_page = cpu_to_be32(dev->driver_uar.index); qp_context->local_qpn = cpu_to_be32(qp->qpn); if (attr_mask & IB_QP_DEST_QPN) { qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num); } if (qp->transport == MLX) qp_context->pri_path.port_pkey |= cpu_to_be32(qp->port << 24); else { if (attr_mask & IB_QP_PORT) { qp_context->pri_path.port_pkey |= cpu_to_be32(attr->port_num << 24); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM); } } if (attr_mask & IB_QP_PKEY_INDEX) { qp_context->pri_path.port_pkey |= cpu_to_be32(attr->pkey_index); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX); } if (attr_mask & IB_QP_RNR_RETRY) { qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry = attr->rnr_retry << 5; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY | MTHCA_QP_OPTPAR_ALT_RNR_RETRY); } if (attr_mask & IB_QP_AV) { if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path, attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) goto out_mailbox; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); } if (ibqp->qp_type == IB_QPT_RC && cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { u8 sched_queue = ibqp->uobject ? 
0x2 : 0x1; if (mthca_is_memfree(dev)) qp_context->rlkey_arbel_sched_queue |= sched_queue; else qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE); } if (attr_mask & IB_QP_TIMEOUT) { qp_context->pri_path.ackto = attr->timeout << 3; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); } if (attr_mask & IB_QP_ALT_PATH) { if (attr->alt_pkey_index >= dev->limits.pkey_table_len) { mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n", attr->alt_pkey_index, dev->limits.pkey_table_len-1); goto out_mailbox; } if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) { mthca_dbg(dev, "Alternate port number (%u) is invalid\n", attr->alt_port_num); goto out_mailbox; } if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path, attr->alt_ah_attr.port_num)) goto out_mailbox; qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index | attr->alt_port_num << 24); qp_context->alt_path.ackto = attr->alt_timeout << 3; qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH); } /* leave rdd as 0 */ qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num); /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */ qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) | (MTHCA_FLIGHT_LIMIT << 24) | MTHCA_QP_BIT_SWE); if (qp->sq_policy == IB_SIGNAL_ALL_WR) qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC); if (attr_mask & IB_QP_RETRY_CNT) { qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT); } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic) { qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SRE | MTHCA_QP_BIT_SAE); qp_context->params1 |= cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); } qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX); } if (attr_mask & 
IB_QP_SQ_PSN) qp_context->next_send_psn = cpu_to_be32(attr->sq_psn); qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); if (mthca_is_memfree(dev)) { qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { if (attr->max_dest_rd_atomic) qp_context->params2 |= cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX); } if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | MTHCA_QP_OPTPAR_RRE | MTHCA_QP_OPTPAR_RAE); } qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); if (ibqp->srq) qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); if (attr_mask & IB_QP_MIN_RNR_TIMER) { qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); } if (attr_mask & IB_QP_RQ_PSN) qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); qp_context->ra_buff_indx = cpu_to_be32(dev->qp_table.rdb_base + ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << dev->qp_table.rdb_shift)); qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn); if (mthca_is_memfree(dev)) qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); if (attr_mask & IB_QP_QKEY) { qp_context->qkey = cpu_to_be32(attr->qkey); qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); } if (ibqp->srq) qp_context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->srqn); if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) sqd_event = 1 << 31; err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, mailbox, sqd_event); if (err) { mthca_warn(dev, "modify QP %d->%d returned %d.\n", cur_state, new_state, err); goto out_mailbox; } 
qp->state = new_state; if (attr_mask & IB_QP_ACCESS_FLAGS) qp->atomic_rd_en = attr->qp_access_flags; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) qp->resp_depth = attr->max_dest_rd_atomic; if (attr_mask & IB_QP_PORT) qp->port = attr->port_num; if (attr_mask & IB_QP_ALT_PATH) qp->alt_port = attr->alt_port_num; if (is_sqp(dev, qp)) store_attrs(to_msqp(qp), attr, attr_mask); /* * If we moved QP0 to RTR, bring the IB link up; if we moved * QP0 to RESET or ERROR, bring the link back down. */ if (is_qp0(dev, qp)) { if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) init_port(dev, qp->port); if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR)) mthca_CLOSE_IB(dev, qp->port); } /* * If we moved a kernel QP to RESET, clean up all old CQ * entries and reinitialize the QP. */ if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (qp->ibqp.send_cq != qp->ibqp.recv_cq) mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); mthca_wq_reset(&qp->sq); qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); mthca_wq_reset(&qp->rq); qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); if (mthca_is_memfree(dev)) { *qp->sq.db = 0; *qp->rq.db = 0; } } out_mailbox: mthca_free_mailbox(dev, mailbox); out: return err; } int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; int err = -EINVAL; mutex_lock(&qp->mutex); if (attr_mask & IB_QP_CUR_STATE) { cur_state = attr->cur_qp_state; } else { spin_lock_irq(&qp->sq.lock); spin_lock(&qp->rq.lock); cur_state = qp->state; spin_unlock(&qp->rq.lock); spin_unlock_irq(&qp->sq.lock); } new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { mthca_dbg(dev, "Bad QP transition (transport %d) " "%d->%d with attr 0x%08x\n", qp->transport, cur_state, new_state, attr_mask); goto out; } if ((attr_mask & IB_QP_PKEY_INDEX) && attr->pkey_index >= dev->limits.pkey_table_len) { mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n", attr->pkey_index, dev->limits.pkey_table_len-1); goto out; } if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) { mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num); goto out; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > dev->limits.max_qp_init_rdma) { mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n", attr->max_rd_atomic, dev->limits.max_qp_init_rdma); goto out; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) { mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n", attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift); goto out; } if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; goto out; } err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); out: mutex_unlock(&qp->mutex); return err; } static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) { /* * Calculate the maximum size of WQE s/g segments, excluding * the next segment and other non-data segments. 
*/ int max_data_size = desc_sz - sizeof (struct mthca_next_seg); switch (qp->transport) { case MLX: max_data_size -= 2 * sizeof (struct mthca_data_seg); break; case UD: if (mthca_is_memfree(dev)) max_data_size -= sizeof (struct mthca_arbel_ud_seg); else max_data_size -= sizeof (struct mthca_tavor_ud_seg); break; default: max_data_size -= sizeof (struct mthca_raddr_seg); break; } return max_data_size; } static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size) { /* We don't support inline data for kernel QPs (yet). */ return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0; } static void mthca_adjust_qp_caps(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) { int max_data_size = mthca_max_data_size(dev, qp, min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift)); qp->max_inline_data = mthca_max_inline_data(pd, max_data_size); qp->sq.max_gs = min_t(int, dev->limits.max_sg, max_data_size / sizeof (struct mthca_data_seg)); qp->rq.max_gs = min_t(int, dev->limits.max_sg, (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - sizeof (struct mthca_next_seg)) / sizeof (struct mthca_data_seg)); } /* * Allocate and register buffer for WQEs. qp->rq.max, sq.max, * rq.max_gs and sq.max_gs must all be assigned. * mthca_alloc_wqe_buf will calculate rq.wqe_shift and * sq.wqe_shift (as well as send_wqe_offset, is_direct, and * queue) */ static int mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) { int size; int err = -ENOMEM; size = sizeof (struct mthca_next_seg) + qp->rq.max_gs * sizeof (struct mthca_data_seg); if (size > dev->limits.max_desc_sz) return -EINVAL; for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; qp->rq.wqe_shift++) ; /* nothing */ size = qp->sq.max_gs * sizeof (struct mthca_data_seg); switch (qp->transport) { case MLX: size += 2 * sizeof (struct mthca_data_seg); break; case UD: size += mthca_is_memfree(dev) ? 
sizeof (struct mthca_arbel_ud_seg) : sizeof (struct mthca_tavor_ud_seg); break; case UC: size += sizeof (struct mthca_raddr_seg); break; case RC: size += sizeof (struct mthca_raddr_seg); /* * An atomic op will require an atomic segment, a * remote address segment and one scatter entry. */ size = max_t(int, size, sizeof (struct mthca_atomic_seg) + sizeof (struct mthca_raddr_seg) + sizeof (struct mthca_data_seg)); break; default: break; } /* Make sure that we have enough space for a bind request */ size = max_t(int, size, sizeof (struct mthca_bind_seg)); size += sizeof (struct mthca_next_seg); if (size > dev->limits.max_desc_sz) return -EINVAL; for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; qp->sq.wqe_shift++) ; /* nothing */ qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, 1 << qp->sq.wqe_shift); /* * If this is a userspace QP, we don't actually have to * allocate anything. All we need is to calculate the WQE * sizes and the send_wqe_offset, so we're done now. */ if (pd->ibpd.uobject) return 0; size = PAGE_ALIGN(qp->send_wqe_offset + (qp->sq.max << qp->sq.wqe_shift)); qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64), GFP_KERNEL); if (!qp->wrid) goto err_out; err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, &qp->queue, &qp->is_direct, pd, 0, &qp->mr); if (err) goto err_out; return 0; err_out: kfree(qp->wrid); return err; } static void mthca_free_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) { mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + (qp->sq.max << qp->sq.wqe_shift)), &qp->queue, qp->is_direct, &qp->mr); kfree(qp->wrid); } static int mthca_map_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { int ret; if (mthca_is_memfree(dev)) { ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); if (ret) return ret; ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); if (ret) goto err_qpc; ret = mthca_table_get(dev, dev->qp_table.rdb_table, qp->qpn << dev->qp_table.rdb_shift); if (ret) goto err_eqpc; 
} return 0; err_eqpc: mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); err_qpc: mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); return ret; } static void mthca_unmap_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { mthca_table_put(dev, dev->qp_table.rdb_table, qp->qpn << dev->qp_table.rdb_shift); mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); } static int mthca_alloc_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { if (mthca_is_memfree(dev)) { qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, qp->qpn, &qp->rq.db); if (qp->rq.db_index < 0) return -ENOMEM; qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, qp->qpn, &qp->sq.db); if (qp->sq.db_index < 0) { mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); return -ENOMEM; } } return 0; } static void mthca_free_memfree(struct mthca_dev *dev, struct mthca_qp *qp) { if (mthca_is_memfree(dev)) { mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); } } static int mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp) { int ret; int i; struct mthca_next_seg *next; qp->refcount = 1; init_waitqueue_head(&qp->wait); mutex_init(&qp->mutex); qp->state = IB_QPS_RESET; qp->atomic_rd_en = 0; qp->resp_depth = 0; qp->sq_policy = send_policy; mthca_wq_reset(&qp->sq); mthca_wq_reset(&qp->rq); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); ret = mthca_map_memfree(dev, qp); if (ret) return ret; ret = mthca_alloc_wqe_buf(dev, pd, qp); if (ret) { mthca_unmap_memfree(dev, qp); return ret; } mthca_adjust_qp_caps(dev, pd, qp); /* * If this is a userspace QP, we're done now. The doorbells * will be allocated and buffers will be initialized in * userspace. 
*/ if (pd->ibpd.uobject) return 0; ret = mthca_alloc_memfree(dev, qp); if (ret) { mthca_free_wqe_buf(dev, qp); mthca_unmap_memfree(dev, qp); return ret; } if (mthca_is_memfree(dev)) { struct mthca_data_seg *scatter; int size = (sizeof (struct mthca_next_seg) + qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; for (i = 0; i < qp->rq.max; ++i) { next = get_recv_wqe(qp, i); next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << qp->rq.wqe_shift); next->ee_nds = cpu_to_be32(size); for (scatter = (void *) (next + 1); (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift); ++scatter) scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); } for (i = 0; i < qp->sq.max; ++i) { next = get_send_wqe(qp, i); next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << qp->sq.wqe_shift) + qp->send_wqe_offset); } } else { for (i = 0; i < qp->rq.max; ++i) { next = get_recv_wqe(qp, i); next->nda_op = htonl((((i + 1) % qp->rq.max) << qp->rq.wqe_shift) | 1); } } qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); return 0; } static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, struct mthca_pd *pd, struct mthca_qp *qp) { int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz); /* Sanity check QP size before proceeding */ if (cap->max_send_wr > dev->limits.max_wqes || cap->max_recv_wr > dev->limits.max_wqes || cap->max_send_sge > dev->limits.max_sg || cap->max_recv_sge > dev->limits.max_sg || cap->max_inline_data > mthca_max_inline_data(pd, max_data_size)) return -EINVAL; /* * For MLX transport we need 2 extra send gather entries: * one for the header and one for the checksum at the end */ if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg) return -EINVAL; if (mthca_is_memfree(dev)) { qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0; qp->sq.max = cap->max_send_wr ? 
roundup_pow_of_two(cap->max_send_wr) : 0; } else { qp->rq.max = cap->max_recv_wr; qp->sq.max = cap->max_send_wr; } qp->rq.max_gs = cap->max_recv_sge; qp->sq.max_gs = max_t(int, cap->max_send_sge, ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE, MTHCA_INLINE_CHUNK_SIZE) / sizeof (struct mthca_data_seg)); return 0; } int mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp) { int err; switch (type) { case IB_QPT_RC: qp->transport = RC; break; case IB_QPT_UC: qp->transport = UC; break; case IB_QPT_UD: qp->transport = UD; break; default: return -EINVAL; } err = mthca_set_qp_size(dev, cap, pd, qp); if (err) return err; qp->qpn = mthca_alloc(&dev->qp_table.alloc); if (qp->qpn == -1) return -ENOMEM; /* initialize port to zero for error-catching. */ qp->port = 0; err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, send_policy, qp); if (err) { mthca_free(&dev->qp_table.alloc, qp->qpn); return err; } spin_lock_irq(&dev->qp_table.lock); mthca_array_set(&dev->qp_table.qp, qp->qpn & (dev->limits.num_qps - 1), qp); spin_unlock_irq(&dev->qp_table.lock); return 0; } static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) __acquires(&send_cq->lock) __acquires(&recv_cq->lock) { if (send_cq == recv_cq) { spin_lock_irq(&send_cq->lock); __acquire(&recv_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { spin_lock_irq(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { spin_lock_irq(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) __releases(&send_cq->lock) __releases(&recv_cq->lock) { if (send_cq == recv_cq) { __release(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { spin_unlock(&recv_cq->lock); 
spin_unlock_irq(&send_cq->lock); } else { spin_unlock(&send_cq->lock); spin_unlock_irq(&recv_cq->lock); } } int mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct ib_qp_cap *cap, int qpn, int port, struct mthca_sqp *sqp) { u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; int err; sqp->qp.transport = MLX; err = mthca_set_qp_size(dev, cap, pd, &sqp->qp); if (err) return err; sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size, &sqp->header_dma, GFP_KERNEL); if (!sqp->header_buf) return -ENOMEM; spin_lock_irq(&dev->qp_table.lock); if (mthca_array_get(&dev->qp_table.qp, mqpn)) err = -EBUSY; else mthca_array_set(&dev->qp_table.qp, mqpn, sqp); spin_unlock_irq(&dev->qp_table.lock); if (err) goto err_out; sqp->qp.port = port; sqp->qp.qpn = mqpn; sqp->qp.transport = MLX; err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, send_policy, &sqp->qp); if (err) goto err_out_free; atomic_inc(&pd->sqp_count); return 0; err_out_free: /* * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. */ mthca_lock_cqs(send_cq, recv_cq); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, mqpn); spin_unlock(&dev->qp_table.lock); mthca_unlock_cqs(send_cq, recv_cq); err_out: dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, sqp->header_buf, sqp->header_dma); return err; } static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) { int c; spin_lock_irq(&dev->qp_table.lock); c = qp->refcount; spin_unlock_irq(&dev->qp_table.lock); return c; } void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp) { struct mthca_cq *send_cq; struct mthca_cq *recv_cq; send_cq = to_mcq(qp->ibqp.send_cq); recv_cq = to_mcq(qp->ibqp.recv_cq); /* * Lock CQs here, so that CQ polling code can do QP lookup * without taking a lock. 
*/ mthca_lock_cqs(send_cq, recv_cq); spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, qp->qpn & (dev->limits.num_qps - 1)); --qp->refcount; spin_unlock(&dev->qp_table.lock); mthca_unlock_cqs(send_cq, recv_cq); wait_event(qp->wait, !get_qp_refcount(dev, qp)); if (qp->state != IB_QPS_RESET) mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, NULL, 0); /* * If this is a userspace QP, the buffers, MR, CQs and so on * will be cleaned up in userspace, so all we have to do is * unref the mem-free tables and free the QPN in our table. */ if (!qp->ibqp.uobject) { mthca_cq_clean(dev, recv_cq, qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (send_cq != recv_cq) mthca_cq_clean(dev, send_cq, qp->qpn, NULL); mthca_free_memfree(dev, qp); mthca_free_wqe_buf(dev, qp); } mthca_unmap_memfree(dev, qp); if (is_sqp(dev, qp)) { atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); dma_free_coherent(&dev->pdev->dev, to_msqp(qp)->header_buf_size, to_msqp(qp)->header_buf, to_msqp(qp)->header_dma); } else mthca_free(&dev->qp_table.alloc, qp->qpn); } /* Create UD header for an MLX send and build a data segment for it */ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, int ind, struct ib_send_wr *wr, struct mthca_mlx_seg *mlx, struct mthca_data_seg *data) { int header_size; int err; u16 pkey; ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0, mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0, &sqp->ud_header); err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); if (err) return err; mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE ? 
MTHCA_MLX_SLR : 0) | (sqp->ud_header.lrh.service_level << 8)); mlx->rlid = sqp->ud_header.lrh.destination_lid; mlx->vcrc = 0; switch (wr->opcode) { case IB_WR_SEND: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; sqp->ud_header.immediate_present = 0; break; case IB_WR_SEND_WITH_IMM: sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; sqp->ud_header.immediate_present = 1; sqp->ud_header.immediate_data = wr->ex.imm_data; break; default: return -EINVAL; } sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); if (!sqp->qp.ibqp.qp_num) ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); else ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); sqp->ud_header.bth.pkey = cpu_to_be16(pkey); sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 
sqp->qkey : wr->wr.ud.remote_qkey); sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf + ind * MTHCA_UD_HEADER_SIZE); data->byte_count = cpu_to_be32(header_size); data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); data->addr = cpu_to_be64(sqp->header_dma + ind * MTHCA_UD_HEADER_SIZE); return 0; } static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, struct ib_cq *ib_cq) { unsigned cur; struct mthca_cq *cq; cur = wq->head - wq->tail; if (likely(cur + nreq < wq->max)) return 0; cq = to_mcq(ib_cq); spin_lock(&cq->lock); cur = wq->head - wq->tail; spin_unlock(&cq->lock); return cur + nreq >= wq->max; } static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg, u64 remote_addr, u32 rkey) { rseg->raddr = cpu_to_be64(remote_addr); rseg->rkey = cpu_to_be32(rkey); rseg->reserved = 0; } static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, struct ib_send_wr *wr) { if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); } else { aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); aseg->compare = 0; } } static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, struct ib_send_wr *wr) { useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key); useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); } static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, struct ib_send_wr *wr) { memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); } int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); void *wqe; void 
*prev_wqe; unsigned long flags; int err = 0; int nreq; int i; int size; /* * f0 and size0 are only used if nreq != 0, and they will * always be initialized the first time through the main loop * before nreq is incremented. So nreq cannot become non-zero * without initializing f0 and size0, and they are in fact * never used uninitialized. */ int uninitialized_var(size0); u32 uninitialized_var(f0); int ind; u8 op0 = 0; spin_lock_irqsave(&qp->sq.lock, flags); /* XXX check that state is OK to post send */ ind = qp->sq.next_ind; for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_err(dev, "SQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->sq.head, qp->sq.tail, qp->sq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_send_wqe(qp, ind); prev_wqe = qp->sq.last; qp->sq.last = wqe; ((struct mthca_next_seg *) wqe)->nda_op = 0; ((struct mthca_next_seg *) wqe)->ee_nds = 0; ((struct mthca_next_seg *) wqe)->flags = ((wr->send_flags & IB_SEND_SIGNALED) ? cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | ((wr->send_flags & IB_SEND_SOLICITED) ? 
cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | cpu_to_be32(1); if (wr->opcode == IB_WR_SEND_WITH_IMM || wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; wqe += sizeof (struct mthca_next_seg); size = sizeof (struct mthca_next_seg) / 16; switch (qp->transport) { case RC: switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: set_raddr_seg(wqe, wr->wr.atomic.remote_addr, wr->wr.atomic.rkey); wqe += sizeof (struct mthca_raddr_seg); set_atomic_seg(wqe, wr); wqe += sizeof (struct mthca_atomic_seg); size += (sizeof (struct mthca_raddr_seg) + sizeof (struct mthca_atomic_seg)) / 16; break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: case IB_WR_RDMA_READ: set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UC: switch (wr->opcode) { case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UD: set_tavor_ud_seg(wqe, wr); wqe += sizeof (struct mthca_tavor_ud_seg); size += sizeof (struct mthca_tavor_ud_seg) / 16; break; case MLX: err = build_mlx_header(dev, to_msqp(qp), ind, wr, wqe - sizeof (struct mthca_next_seg), wqe); if (err) { *bad_wr = wr; goto out; } wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; break; } if (wr->num_sge > qp->sq.max_gs) { mthca_err(dev, "too many gathers\n"); err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } /* Add one more inline data segment for ICRC */ if (qp->transport == MLX) { 
((struct mthca_data_seg *) wqe)->byte_count = cpu_to_be32((1 << 31) | 4); ((u32 *) wqe)[1] = 0; wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } qp->wrid[ind + qp->rq.max] = wr->wr_id; if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { mthca_err(dev, "opcode invalid\n"); err = -EINVAL; *bad_wr = wr; goto out; } ((struct mthca_next_seg *) prev_wqe)->nda_op = cpu_to_be32(((ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | mthca_opcode[wr->opcode]); wmb(); ((struct mthca_next_seg *) prev_wqe)->ee_nds = cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | ((wr->send_flags & IB_SEND_FENCE) ? MTHCA_NEXT_FENCE : 0)); if (!nreq) { size0 = size; op0 = mthca_opcode[wr->opcode]; f0 = wr->send_flags & IB_SEND_FENCE ? MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; if (unlikely(ind >= qp->sq.max)) ind -= qp->sq.max; } out: if (likely(nreq)) { wmb(); mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | f0 | op0, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); /* * Make sure doorbells don't leak out of SQ spinlock * and reach the HCA out of order: */ mmiowb(); } qp->sq.next_ind = ind; qp->sq.head += nreq; spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); unsigned long flags; int err = 0; int nreq; int i; int size; /* * size0 is only used if nreq != 0, and it will always be * initialized the first time through the main loop before * nreq is incremented. So nreq cannot become non-zero * without initializing size0, and it is in fact never used * uninitialized. 
*/ int uninitialized_var(size0); int ind; void *wqe; void *prev_wqe; spin_lock_irqsave(&qp->rq.lock, flags); /* XXX check that state is OK to post receive */ ind = qp->rq.next_ind; for (nreq = 0; wr; wr = wr->next) { if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_err(dev, "RQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->rq.head, qp->rq.tail, qp->rq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_recv_wqe(qp, ind); prev_wqe = qp->rq.last; qp->rq.last = wqe; ((struct mthca_next_seg *) wqe)->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD); ((struct mthca_next_seg *) wqe)->flags = 0; wqe += sizeof (struct mthca_next_seg); size = sizeof (struct mthca_next_seg) / 16; if (unlikely(wr->num_sge > qp->rq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } qp->wrid[ind] = wr->wr_id; ((struct mthca_next_seg *) prev_wqe)->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD | size); if (!nreq) size0 = size; ++ind; if (unlikely(ind >= qp->rq.max)) ind -= qp->rq.max; ++nreq; if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { nreq = 0; wmb(); mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); qp->rq.next_ind = ind; qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; } } out: if (likely(nreq)) { wmb(); mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } qp->rq.next_ind = ind; qp->rq.head += nreq; /* * Make sure doorbells don't leak out of RQ spinlock and reach * the HCA out of order: */ mmiowb(); spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct mthca_dev *dev = 
to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); u32 dbhi; void *wqe; void *prev_wqe; unsigned long flags; int err = 0; int nreq; int i; int size; /* * f0 and size0 are only used if nreq != 0, and they will * always be initialized the first time through the main loop * before nreq is incremented. So nreq cannot become non-zero * without initializing f0 and size0, and they are in fact * never used uninitialized. */ int uninitialized_var(size0); u32 uninitialized_var(f0); int ind; u8 op0 = 0; spin_lock_irqsave(&qp->sq.lock, flags); /* XXX check that state is OK to post send */ ind = qp->sq.head & (qp->sq.max - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { nreq = 0; dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); /* * Make sure doorbell record is written before we * write MMIO send doorbell. */ wmb(); mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_err(dev, "SQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->sq.head, qp->sq.tail, qp->sq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_send_wqe(qp, ind); prev_wqe = qp->sq.last; qp->sq.last = wqe; ((struct mthca_next_seg *) wqe)->flags = ((wr->send_flags & IB_SEND_SIGNALED) ? cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | ((wr->send_flags & IB_SEND_SOLICITED) ? cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | ((wr->send_flags & IB_SEND_IP_CSUM) ? 
cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) | cpu_to_be32(1); if (wr->opcode == IB_WR_SEND_WITH_IMM || wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; wqe += sizeof (struct mthca_next_seg); size = sizeof (struct mthca_next_seg) / 16; switch (qp->transport) { case RC: switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: set_raddr_seg(wqe, wr->wr.atomic.remote_addr, wr->wr.atomic.rkey); wqe += sizeof (struct mthca_raddr_seg); set_atomic_seg(wqe, wr); wqe += sizeof (struct mthca_atomic_seg); size += (sizeof (struct mthca_raddr_seg) + sizeof (struct mthca_atomic_seg)) / 16; break; case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UC: switch (wr->opcode) { case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(wqe, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); wqe += sizeof (struct mthca_raddr_seg); size += sizeof (struct mthca_raddr_seg) / 16; break; default: /* No extra segments required for sends */ break; } break; case UD: set_arbel_ud_seg(wqe, wr); wqe += sizeof (struct mthca_arbel_ud_seg); size += sizeof (struct mthca_arbel_ud_seg) / 16; break; case MLX: err = build_mlx_header(dev, to_msqp(qp), ind, wr, wqe - sizeof (struct mthca_next_seg), wqe); if (err) { *bad_wr = wr; goto out; } wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; break; } if (wr->num_sge > qp->sq.max_gs) { mthca_err(dev, "too many gathers\n"); err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } /* Add one more inline data segment for ICRC */ if 
(qp->transport == MLX) { ((struct mthca_data_seg *) wqe)->byte_count = cpu_to_be32((1 << 31) | 4); ((u32 *) wqe)[1] = 0; wqe += sizeof (struct mthca_data_seg); size += sizeof (struct mthca_data_seg) / 16; } qp->wrid[ind + qp->rq.max] = wr->wr_id; if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { mthca_err(dev, "opcode invalid\n"); err = -EINVAL; *bad_wr = wr; goto out; } ((struct mthca_next_seg *) prev_wqe)->nda_op = cpu_to_be32(((ind << qp->sq.wqe_shift) + qp->send_wqe_offset) | mthca_opcode[wr->opcode]); wmb(); ((struct mthca_next_seg *) prev_wqe)->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD | size | ((wr->send_flags & IB_SEND_FENCE) ? MTHCA_NEXT_FENCE : 0)); if (!nreq) { size0 = size; op0 = mthca_opcode[wr->opcode]; f0 = wr->send_flags & IB_SEND_FENCE ? MTHCA_SEND_DOORBELL_FENCE : 0; } ++ind; if (unlikely(ind >= qp->sq.max)) ind -= qp->sq.max; } out: if (likely(nreq)) { dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; qp->sq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); /* * Make sure doorbell record is written before we * write MMIO send doorbell. 
*/ wmb(); mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); } /* * Make sure doorbells don't leak out of SQ spinlock and reach * the HCA out of order: */ mmiowb(); spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); unsigned long flags; int err = 0; int nreq; int ind; int i; void *wqe; spin_lock_irqsave(&qp->rq.lock, flags); /* XXX check that state is OK to post receive */ ind = qp->rq.head & (qp->rq.max - 1); for (nreq = 0; wr; ++nreq, wr = wr->next) { if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_err(dev, "RQ %06x full (%u head, %u tail," " %d max, %d nreq)\n", qp->qpn, qp->rq.head, qp->rq.tail, qp->rq.max, nreq); err = -ENOMEM; *bad_wr = wr; goto out; } wqe = get_recv_wqe(qp, ind); ((struct mthca_next_seg *) wqe)->flags = 0; wqe += sizeof (struct mthca_next_seg); if (unlikely(wr->num_sge > qp->rq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } for (i = 0; i < wr->num_sge; ++i) { mthca_set_data_seg(wqe, wr->sg_list + i); wqe += sizeof (struct mthca_data_seg); } if (i < qp->rq.max_gs) mthca_set_data_seg_inval(wqe); qp->wrid[ind] = wr->wr_id; ++ind; if (unlikely(ind >= qp->rq.max)) ind -= qp->rq.max; } out: if (likely(nreq)) { qp->rq.head += nreq; /* * Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); } spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, int index, int *dbd, __be32 *new_wqe) { struct mthca_next_seg *next; /* * For SRQs, all receive WQEs generate a CQE, so we're always * at the end of the doorbell chain. 
*/ if (qp->ibqp.srq && !is_send) { *new_wqe = 0; return; } if (is_send) next = get_send_wqe(qp, index); else next = get_recv_wqe(qp, index); *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); if (next->ee_nds & cpu_to_be32(0x3f)) *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | (next->ee_nds & cpu_to_be32(0x3f)); else *new_wqe = 0; } int mthca_init_qp_table(struct mthca_dev *dev) { int err; int i; spin_lock_init(&dev->qp_table.lock); /* * We reserve 2 extra QPs per port for the special QPs. The * special QP for port 1 has to be even, so round up. */ dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL; err = mthca_alloc_init(&dev->qp_table.alloc, dev->limits.num_qps, (1 << 24) - 1, dev->qp_table.sqp_start + MTHCA_MAX_PORTS * 2); if (err) return err; err = mthca_array_init(&dev->qp_table.qp, dev->limits.num_qps); if (err) { mthca_alloc_cleanup(&dev->qp_table.alloc); return err; } for (i = 0; i < 2; ++i) { err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, dev->qp_table.sqp_start + i * 2); if (err) { mthca_warn(dev, "CONF_SPECIAL_QP returned " "%d, aborting.\n", err); goto err_out; } } return 0; err_out: for (i = 0; i < 2; ++i) mthca_CONF_SPECIAL_QP(dev, i, 0); mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); mthca_alloc_cleanup(&dev->qp_table.alloc); return err; } void mthca_cleanup_qp_table(struct mthca_dev *dev) { int i; for (i = 0; i < 2; ++i) mthca_CONF_SPECIAL_QP(dev, i, 0); mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); mthca_alloc_cleanup(&dev->qp_table.alloc); }
gpl-2.0
cooldudezach/android_kernel_zte_nex
drivers/ssb/driver_chipcommon_pmu.c
3634
20366
/* * Sonics Silicon Backplane * Broadcom ChipCommon Power Management Unit driver * * Copyright 2009, Michael Buesch <m@bues.ch> * Copyright 2007, Broadcom Corporation * * Licensed under the GNU/GPL. See COPYING for details. */ #include <linux/ssb/ssb.h> #include <linux/ssb/ssb_regs.h> #include <linux/ssb/ssb_driver_chipcommon.h> #include <linux/delay.h> #include <linux/export.h> #ifdef CONFIG_BCM47XX #include <asm/mach-bcm47xx/nvram.h> #endif #include "ssb_private.h" static u32 ssb_chipco_pll_read(struct ssb_chipcommon *cc, u32 offset) { chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset); return chipco_read32(cc, SSB_CHIPCO_PLLCTL_DATA); } static void ssb_chipco_pll_write(struct ssb_chipcommon *cc, u32 offset, u32 value) { chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset); chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, value); } static void ssb_chipco_regctl_maskset(struct ssb_chipcommon *cc, u32 offset, u32 mask, u32 set) { u32 value; chipco_read32(cc, SSB_CHIPCO_REGCTL_ADDR); chipco_write32(cc, SSB_CHIPCO_REGCTL_ADDR, offset); chipco_read32(cc, SSB_CHIPCO_REGCTL_ADDR); value = chipco_read32(cc, SSB_CHIPCO_REGCTL_DATA); value &= mask; value |= set; chipco_write32(cc, SSB_CHIPCO_REGCTL_DATA, value); chipco_read32(cc, SSB_CHIPCO_REGCTL_DATA); } struct pmu0_plltab_entry { u16 freq; /* Crystal frequency in kHz.*/ u8 xf; /* Crystal frequency value for PMU control */ u8 wb_int; u32 wb_frac; }; static const struct pmu0_plltab_entry pmu0_plltab[] = { { .freq = 12000, .xf = 1, .wb_int = 73, .wb_frac = 349525, }, { .freq = 13000, .xf = 2, .wb_int = 67, .wb_frac = 725937, }, { .freq = 14400, .xf = 3, .wb_int = 61, .wb_frac = 116508, }, { .freq = 15360, .xf = 4, .wb_int = 57, .wb_frac = 305834, }, { .freq = 16200, .xf = 5, .wb_int = 54, .wb_frac = 336579, }, { .freq = 16800, .xf = 6, .wb_int = 52, .wb_frac = 399457, }, { .freq = 19200, .xf = 7, .wb_int = 45, .wb_frac = 873813, }, { .freq = 19800, .xf = 8, .wb_int = 44, .wb_frac = 466033, }, { .freq = 20000, .xf = 9, .wb_int = 
44, .wb_frac = 0, }, { .freq = 25000, .xf = 10, .wb_int = 70, .wb_frac = 419430, }, { .freq = 26000, .xf = 11, .wb_int = 67, .wb_frac = 725937, }, { .freq = 30000, .xf = 12, .wb_int = 58, .wb_frac = 699050, }, { .freq = 38400, .xf = 13, .wb_int = 45, .wb_frac = 873813, }, { .freq = 40000, .xf = 14, .wb_int = 45, .wb_frac = 0, }, }; #define SSB_PMU0_DEFAULT_XTALFREQ 20000 static const struct pmu0_plltab_entry * pmu0_plltab_find_entry(u32 crystalfreq) { const struct pmu0_plltab_entry *e; unsigned int i; for (i = 0; i < ARRAY_SIZE(pmu0_plltab); i++) { e = &pmu0_plltab[i]; if (e->freq == crystalfreq) return e; } return NULL; } /* Tune the PLL to the crystal speed. crystalfreq is in kHz. */ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc, u32 crystalfreq) { struct ssb_bus *bus = cc->dev->bus; const struct pmu0_plltab_entry *e = NULL; u32 pmuctl, tmp, pllctl; unsigned int i; if (crystalfreq) e = pmu0_plltab_find_entry(crystalfreq); if (!e) e = pmu0_plltab_find_entry(SSB_PMU0_DEFAULT_XTALFREQ); BUG_ON(!e); crystalfreq = e->freq; cc->pmu.crystalfreq = e->freq; /* Check if the PLL already is programmed to this frequency. */ pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL); if (((pmuctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) == e->xf) { /* We're already there... */ return; } ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n", (crystalfreq / 1000), (crystalfreq % 1000)); /* First turn the PLL off. 
*/ switch (bus->chip_id) { case 0x4328: chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK, ~(1 << SSB_PMURES_4328_BB_PLL_PU)); chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, ~(1 << SSB_PMURES_4328_BB_PLL_PU)); break; case 0x5354: chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK, ~(1 << SSB_PMURES_5354_BB_PLL_PU)); chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, ~(1 << SSB_PMURES_5354_BB_PLL_PU)); break; default: SSB_WARN_ON(1); } for (i = 1500; i; i--) { tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); if (!(tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)) break; udelay(10); } tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n"); /* Set PDIV in PLL control 0. */ pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0); if (crystalfreq >= SSB_PMU0_PLLCTL0_PDIV_FREQ) pllctl |= SSB_PMU0_PLLCTL0_PDIV_MSK; else pllctl &= ~SSB_PMU0_PLLCTL0_PDIV_MSK; ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL0, pllctl); /* Set WILD in PLL control 1. */ pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL1); pllctl &= ~SSB_PMU0_PLLCTL1_STOPMOD; pllctl &= ~(SSB_PMU0_PLLCTL1_WILD_IMSK | SSB_PMU0_PLLCTL1_WILD_FMSK); pllctl |= ((u32)e->wb_int << SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT) & SSB_PMU0_PLLCTL1_WILD_IMSK; pllctl |= ((u32)e->wb_frac << SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT) & SSB_PMU0_PLLCTL1_WILD_FMSK; if (e->wb_frac == 0) pllctl |= SSB_PMU0_PLLCTL1_STOPMOD; ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL1, pllctl); /* Set WILD in PLL control 2. */ pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL2); pllctl &= ~SSB_PMU0_PLLCTL2_WILD_IMSKHI; pllctl |= (((u32)e->wb_int >> 4) << SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT) & SSB_PMU0_PLLCTL2_WILD_IMSKHI; ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL2, pllctl); /* Set the crystalfrequency and the divisor. 
*/ pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL); pmuctl &= ~SSB_CHIPCO_PMU_CTL_ILP_DIV; pmuctl |= (((crystalfreq + 127) / 128 - 1) << SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT) & SSB_CHIPCO_PMU_CTL_ILP_DIV; pmuctl &= ~SSB_CHIPCO_PMU_CTL_XTALFREQ; pmuctl |= ((u32)e->xf << SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) & SSB_CHIPCO_PMU_CTL_XTALFREQ; chipco_write32(cc, SSB_CHIPCO_PMU_CTL, pmuctl); } struct pmu1_plltab_entry { u16 freq; /* Crystal frequency in kHz.*/ u8 xf; /* Crystal frequency value for PMU control */ u8 ndiv_int; u32 ndiv_frac; u8 p1div; u8 p2div; }; static const struct pmu1_plltab_entry pmu1_plltab[] = { { .freq = 12000, .xf = 1, .p1div = 3, .p2div = 22, .ndiv_int = 0x9, .ndiv_frac = 0xFFFFEF, }, { .freq = 13000, .xf = 2, .p1div = 1, .p2div = 6, .ndiv_int = 0xb, .ndiv_frac = 0x483483, }, { .freq = 14400, .xf = 3, .p1div = 1, .p2div = 10, .ndiv_int = 0xa, .ndiv_frac = 0x1C71C7, }, { .freq = 15360, .xf = 4, .p1div = 1, .p2div = 5, .ndiv_int = 0xb, .ndiv_frac = 0x755555, }, { .freq = 16200, .xf = 5, .p1div = 1, .p2div = 10, .ndiv_int = 0x5, .ndiv_frac = 0x6E9E06, }, { .freq = 16800, .xf = 6, .p1div = 1, .p2div = 10, .ndiv_int = 0x5, .ndiv_frac = 0x3CF3CF, }, { .freq = 19200, .xf = 7, .p1div = 1, .p2div = 9, .ndiv_int = 0x5, .ndiv_frac = 0x17B425, }, { .freq = 19800, .xf = 8, .p1div = 1, .p2div = 11, .ndiv_int = 0x4, .ndiv_frac = 0xA57EB, }, { .freq = 20000, .xf = 9, .p1div = 1, .p2div = 11, .ndiv_int = 0x4, .ndiv_frac = 0, }, { .freq = 24000, .xf = 10, .p1div = 3, .p2div = 11, .ndiv_int = 0xa, .ndiv_frac = 0, }, { .freq = 25000, .xf = 11, .p1div = 5, .p2div = 16, .ndiv_int = 0xb, .ndiv_frac = 0, }, { .freq = 26000, .xf = 12, .p1div = 1, .p2div = 2, .ndiv_int = 0x10, .ndiv_frac = 0xEC4EC4, }, { .freq = 30000, .xf = 13, .p1div = 3, .p2div = 8, .ndiv_int = 0xb, .ndiv_frac = 0, }, { .freq = 38400, .xf = 14, .p1div = 1, .p2div = 5, .ndiv_int = 0x4, .ndiv_frac = 0x955555, }, { .freq = 40000, .xf = 15, .p1div = 1, .p2div = 2, .ndiv_int = 0xb, .ndiv_frac = 0, }, }; #define 
SSB_PMU1_DEFAULT_XTALFREQ 15360 static const struct pmu1_plltab_entry * pmu1_plltab_find_entry(u32 crystalfreq) { const struct pmu1_plltab_entry *e; unsigned int i; for (i = 0; i < ARRAY_SIZE(pmu1_plltab); i++) { e = &pmu1_plltab[i]; if (e->freq == crystalfreq) return e; } return NULL; } /* Tune the PLL to the crystal speed. crystalfreq is in kHz. */ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc, u32 crystalfreq) { struct ssb_bus *bus = cc->dev->bus; const struct pmu1_plltab_entry *e = NULL; u32 buffer_strength = 0; u32 tmp, pllctl, pmuctl; unsigned int i; if (bus->chip_id == 0x4312) { /* We do not touch the BCM4312 PLL and assume * the default crystal settings work out-of-the-box. */ cc->pmu.crystalfreq = 20000; return; } if (crystalfreq) e = pmu1_plltab_find_entry(crystalfreq); if (!e) e = pmu1_plltab_find_entry(SSB_PMU1_DEFAULT_XTALFREQ); BUG_ON(!e); crystalfreq = e->freq; cc->pmu.crystalfreq = e->freq; /* Check if the PLL already is programmed to this frequency. */ pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL); if (((pmuctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) == e->xf) { /* We're already there... */ return; } ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n", (crystalfreq / 1000), (crystalfreq % 1000)); /* First turn the PLL off. */ switch (bus->chip_id) { case 0x4325: chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK, ~((1 << SSB_PMURES_4325_BBPLL_PWRSW_PU) | (1 << SSB_PMURES_4325_HT_AVAIL))); chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, ~((1 << SSB_PMURES_4325_BBPLL_PWRSW_PU) | (1 << SSB_PMURES_4325_HT_AVAIL))); /* Adjust the BBPLL to 2 on all channels later. 
*/ buffer_strength = 0x222222; break; default: SSB_WARN_ON(1); } for (i = 1500; i; i--) { tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); if (!(tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)) break; udelay(10); } tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n"); /* Set p1div and p2div. */ pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0); pllctl &= ~(SSB_PMU1_PLLCTL0_P1DIV | SSB_PMU1_PLLCTL0_P2DIV); pllctl |= ((u32)e->p1div << SSB_PMU1_PLLCTL0_P1DIV_SHIFT) & SSB_PMU1_PLLCTL0_P1DIV; pllctl |= ((u32)e->p2div << SSB_PMU1_PLLCTL0_P2DIV_SHIFT) & SSB_PMU1_PLLCTL0_P2DIV; ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, pllctl); /* Set ndiv int and ndiv mode */ pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL2); pllctl &= ~(SSB_PMU1_PLLCTL2_NDIVINT | SSB_PMU1_PLLCTL2_NDIVMODE); pllctl |= ((u32)e->ndiv_int << SSB_PMU1_PLLCTL2_NDIVINT_SHIFT) & SSB_PMU1_PLLCTL2_NDIVINT; pllctl |= (1 << SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT) & SSB_PMU1_PLLCTL2_NDIVMODE; ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, pllctl); /* Set ndiv frac */ pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL3); pllctl &= ~SSB_PMU1_PLLCTL3_NDIVFRAC; pllctl |= ((u32)e->ndiv_frac << SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT) & SSB_PMU1_PLLCTL3_NDIVFRAC; ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL3, pllctl); /* Change the drive strength, if required. */ if (buffer_strength) { pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL5); pllctl &= ~SSB_PMU1_PLLCTL5_CLKDRV; pllctl |= (buffer_strength << SSB_PMU1_PLLCTL5_CLKDRV_SHIFT) & SSB_PMU1_PLLCTL5_CLKDRV; ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, pllctl); } /* Tune the crystalfreq and the divisor. 
*/ pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL); pmuctl &= ~(SSB_CHIPCO_PMU_CTL_ILP_DIV | SSB_CHIPCO_PMU_CTL_XTALFREQ); pmuctl |= ((((u32)e->freq + 127) / 128 - 1) << SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT) & SSB_CHIPCO_PMU_CTL_ILP_DIV; pmuctl |= ((u32)e->xf << SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) & SSB_CHIPCO_PMU_CTL_XTALFREQ; chipco_write32(cc, SSB_CHIPCO_PMU_CTL, pmuctl); } static void ssb_pmu_pll_init(struct ssb_chipcommon *cc) { struct ssb_bus *bus = cc->dev->bus; u32 crystalfreq = 0; /* in kHz. 0 = keep default freq. */ if (bus->bustype == SSB_BUSTYPE_SSB) { #ifdef CONFIG_BCM47XX char buf[20]; if (nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0) crystalfreq = simple_strtoul(buf, NULL, 0); #endif } switch (bus->chip_id) { case 0x4312: case 0x4325: ssb_pmu1_pllinit_r0(cc, crystalfreq); break; case 0x4328: ssb_pmu0_pllinit_r0(cc, crystalfreq); break; case 0x5354: if (crystalfreq == 0) crystalfreq = 25000; ssb_pmu0_pllinit_r0(cc, crystalfreq); break; case 0x4322: if (cc->pmu.rev == 2) { chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, 0x0000000A); chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, 0x380005C0); } break; default: ssb_printk(KERN_ERR PFX "ERROR: PLL init unknown for device %04X\n", bus->chip_id); } } struct pmu_res_updown_tab_entry { u8 resource; /* The resource number */ u16 updown; /* The updown value */ }; enum pmu_res_depend_tab_task { PMU_RES_DEP_SET = 1, PMU_RES_DEP_ADD, PMU_RES_DEP_REMOVE, }; struct pmu_res_depend_tab_entry { u8 resource; /* The resource number */ u8 task; /* SET | ADD | REMOVE */ u32 depend; /* The depend mask */ }; static const struct pmu_res_updown_tab_entry pmu_res_updown_tab_4328a0[] = { { .resource = SSB_PMURES_4328_EXT_SWITCHER_PWM, .updown = 0x0101, }, { .resource = SSB_PMURES_4328_BB_SWITCHER_PWM, .updown = 0x1F01, }, { .resource = SSB_PMURES_4328_BB_SWITCHER_BURST, .updown = 0x010F, }, { .resource = SSB_PMURES_4328_BB_EXT_SWITCHER_BURST, .updown = 0x0101, }, { .resource = SSB_PMURES_4328_ILP_REQUEST, .updown = 0x0202, }, { .resource = 
SSB_PMURES_4328_RADIO_SWITCHER_PWM, .updown = 0x0F01, }, { .resource = SSB_PMURES_4328_RADIO_SWITCHER_BURST, .updown = 0x0F01, }, { .resource = SSB_PMURES_4328_ROM_SWITCH, .updown = 0x0101, }, { .resource = SSB_PMURES_4328_PA_REF_LDO, .updown = 0x0F01, }, { .resource = SSB_PMURES_4328_RADIO_LDO, .updown = 0x0F01, }, { .resource = SSB_PMURES_4328_AFE_LDO, .updown = 0x0F01, }, { .resource = SSB_PMURES_4328_PLL_LDO, .updown = 0x0F01, }, { .resource = SSB_PMURES_4328_BG_FILTBYP, .updown = 0x0101, }, { .resource = SSB_PMURES_4328_TX_FILTBYP, .updown = 0x0101, }, { .resource = SSB_PMURES_4328_RX_FILTBYP, .updown = 0x0101, }, { .resource = SSB_PMURES_4328_XTAL_PU, .updown = 0x0101, }, { .resource = SSB_PMURES_4328_XTAL_EN, .updown = 0xA001, }, { .resource = SSB_PMURES_4328_BB_PLL_FILTBYP, .updown = 0x0101, }, { .resource = SSB_PMURES_4328_RF_PLL_FILTBYP, .updown = 0x0101, }, { .resource = SSB_PMURES_4328_BB_PLL_PU, .updown = 0x0701, }, }; static const struct pmu_res_depend_tab_entry pmu_res_depend_tab_4328a0[] = { { /* Adjust ILP Request to avoid forcing EXT/BB into burst mode. */ .resource = SSB_PMURES_4328_ILP_REQUEST, .task = PMU_RES_DEP_SET, .depend = ((1 << SSB_PMURES_4328_EXT_SWITCHER_PWM) | (1 << SSB_PMURES_4328_BB_SWITCHER_PWM)), }, }; static const struct pmu_res_updown_tab_entry pmu_res_updown_tab_4325a0[] = { { .resource = SSB_PMURES_4325_XTAL_PU, .updown = 0x1501, }, }; static const struct pmu_res_depend_tab_entry pmu_res_depend_tab_4325a0[] = { { /* Adjust HT-Available dependencies. 
*/ .resource = SSB_PMURES_4325_HT_AVAIL, .task = PMU_RES_DEP_ADD, .depend = ((1 << SSB_PMURES_4325_RX_PWRSW_PU) | (1 << SSB_PMURES_4325_TX_PWRSW_PU) | (1 << SSB_PMURES_4325_LOGEN_PWRSW_PU) | (1 << SSB_PMURES_4325_AFE_PWRSW_PU)), }, }; static void ssb_pmu_resources_init(struct ssb_chipcommon *cc) { struct ssb_bus *bus = cc->dev->bus; u32 min_msk = 0, max_msk = 0; unsigned int i; const struct pmu_res_updown_tab_entry *updown_tab = NULL; unsigned int updown_tab_size = 0; const struct pmu_res_depend_tab_entry *depend_tab = NULL; unsigned int depend_tab_size = 0; switch (bus->chip_id) { case 0x4312: min_msk = 0xCBB; break; case 0x4322: /* We keep the default settings: * min_msk = 0xCBB * max_msk = 0x7FFFF */ break; case 0x4325: /* Power OTP down later. */ min_msk = (1 << SSB_PMURES_4325_CBUCK_BURST) | (1 << SSB_PMURES_4325_LNLDO2_PU); if (chipco_read32(cc, SSB_CHIPCO_CHIPSTAT) & SSB_CHIPCO_CHST_4325_PMUTOP_2B) min_msk |= (1 << SSB_PMURES_4325_CLDO_CBUCK_BURST); /* The PLL may turn on, if it decides so. */ max_msk = 0xFFFFF; updown_tab = pmu_res_updown_tab_4325a0; updown_tab_size = ARRAY_SIZE(pmu_res_updown_tab_4325a0); depend_tab = pmu_res_depend_tab_4325a0; depend_tab_size = ARRAY_SIZE(pmu_res_depend_tab_4325a0); break; case 0x4328: min_msk = (1 << SSB_PMURES_4328_EXT_SWITCHER_PWM) | (1 << SSB_PMURES_4328_BB_SWITCHER_PWM) | (1 << SSB_PMURES_4328_XTAL_EN); /* The PLL may turn on, if it decides so. */ max_msk = 0xFFFFF; updown_tab = pmu_res_updown_tab_4328a0; updown_tab_size = ARRAY_SIZE(pmu_res_updown_tab_4328a0); depend_tab = pmu_res_depend_tab_4328a0; depend_tab_size = ARRAY_SIZE(pmu_res_depend_tab_4328a0); break; case 0x5354: /* The PLL may turn on, if it decides so. 
*/ max_msk = 0xFFFFF; break; default: ssb_printk(KERN_ERR PFX "ERROR: PMU resource config unknown for device %04X\n", bus->chip_id); } if (updown_tab) { for (i = 0; i < updown_tab_size; i++) { chipco_write32(cc, SSB_CHIPCO_PMU_RES_TABSEL, updown_tab[i].resource); chipco_write32(cc, SSB_CHIPCO_PMU_RES_UPDNTM, updown_tab[i].updown); } } if (depend_tab) { for (i = 0; i < depend_tab_size; i++) { chipco_write32(cc, SSB_CHIPCO_PMU_RES_TABSEL, depend_tab[i].resource); switch (depend_tab[i].task) { case PMU_RES_DEP_SET: chipco_write32(cc, SSB_CHIPCO_PMU_RES_DEPMSK, depend_tab[i].depend); break; case PMU_RES_DEP_ADD: chipco_set32(cc, SSB_CHIPCO_PMU_RES_DEPMSK, depend_tab[i].depend); break; case PMU_RES_DEP_REMOVE: chipco_mask32(cc, SSB_CHIPCO_PMU_RES_DEPMSK, ~(depend_tab[i].depend)); break; default: SSB_WARN_ON(1); } } } /* Set the resource masks. */ if (min_msk) chipco_write32(cc, SSB_CHIPCO_PMU_MINRES_MSK, min_msk); if (max_msk) chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk); } /* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */ void ssb_pmu_init(struct ssb_chipcommon *cc) { u32 pmucap; if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU)) return; pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP); cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION); ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev, pmucap); if (cc->pmu.rev == 1) chipco_mask32(cc, SSB_CHIPCO_PMU_CTL, ~SSB_CHIPCO_PMU_CTL_NOILPONW); else chipco_set32(cc, SSB_CHIPCO_PMU_CTL, SSB_CHIPCO_PMU_CTL_NOILPONW); ssb_pmu_pll_init(cc); ssb_pmu_resources_init(cc); } void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, enum ssb_pmu_ldo_volt_id id, u32 voltage) { struct ssb_bus *bus = cc->dev->bus; u32 addr, shift, mask; switch (bus->chip_id) { case 0x4328: case 0x5354: switch (id) { case LDO_VOLT1: addr = 2; shift = 25; mask = 0xF; break; case LDO_VOLT2: addr = 3; shift = 1; mask = 0xF; break; case LDO_VOLT3: addr = 3; shift = 9; mask = 0xF; break; case LDO_PAREF: addr = 3; shift = 
17; mask = 0x3F; break; default: SSB_WARN_ON(1); return; } break; case 0x4312: if (SSB_WARN_ON(id != LDO_PAREF)) return; addr = 0; shift = 21; mask = 0x3F; break; default: return; } ssb_chipco_regctl_maskset(cc, addr, ~(mask << shift), (voltage & mask) << shift); } void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on) { struct ssb_bus *bus = cc->dev->bus; int ldo; switch (bus->chip_id) { case 0x4312: ldo = SSB_PMURES_4312_PA_REF_LDO; break; case 0x4328: ldo = SSB_PMURES_4328_PA_REF_LDO; break; case 0x5354: ldo = SSB_PMURES_5354_PA_REF_LDO; break; default: return; } if (on) chipco_set32(cc, SSB_CHIPCO_PMU_MINRES_MSK, 1 << ldo); else chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK, ~(1 << ldo)); chipco_read32(cc, SSB_CHIPCO_PMU_MINRES_MSK); //SPEC FIXME found via mmiotrace - dummy read? } EXPORT_SYMBOL(ssb_pmu_set_ldo_voltage); EXPORT_SYMBOL(ssb_pmu_set_ldo_paref); u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc) { struct ssb_bus *bus = cc->dev->bus; switch (bus->chip_id) { case 0x5354: /* 5354 chip uses a non programmable PLL of frequency 240MHz */ return 240000000; default: ssb_printk(KERN_ERR PFX "ERROR: PMU cpu clock unknown for device %04X\n", bus->chip_id); return 0; } } u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc) { struct ssb_bus *bus = cc->dev->bus; switch (bus->chip_id) { case 0x5354: return 120000000; default: ssb_printk(KERN_ERR PFX "ERROR: PMU controlclock unknown for device %04X\n", bus->chip_id); return 0; } }
gpl-2.0
davidmueller13/android_kernel_samsung_lt03lte-5
drivers/video/msm/mipi_simulator_video.c
3634
2566
/* Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "msm_fb.h" #include "mipi_dsi.h" #include "mipi_simulator.h" static struct msm_panel_info pinfo; static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = { {0x03, 0x01, 0x01, 0x00}, {0xaa, 0x3b, 0x1b, 0x00, 0x52, 0x58, 0x20, 0x3f, 0x2e, 0x03, 0x04}, {0x7f, 0x00, 0x00, 0x00}, {0xee, 0x00, 0x86, 0x00}, {0x40, 0xc7, 0xb0, 0xda, 0x00, 0x50, 0x48, 0x63, 0x30, 0x07, 0x03, 0x05, 0x14, 0x03, 0x0, 0x0, 0x54, 0x06, 0x10, 0x04, 0x0}, }; static int __init mipi_video_simulator_init(void) { int ret; if (msm_fb_detect_client("mipi_video_simulator_vga")) return 0; pinfo.xres = 640; pinfo.yres = 480; pinfo.type = MIPI_VIDEO_PANEL; pinfo.pdest = DISPLAY_1; pinfo.wait_cycle = 0; pinfo.bpp = 24; pinfo.lcdc.h_back_porch = 6; pinfo.lcdc.h_front_porch = 6; pinfo.lcdc.h_pulse_width = 2; pinfo.lcdc.v_back_porch = 6; pinfo.lcdc.v_front_porch = 6; pinfo.lcdc.v_pulse_width = 2; pinfo.lcdc.border_clr = 0; /* blk */ pinfo.lcdc.underflow_clr = 0xff; /* blue */ pinfo.lcdc.hsync_skew = 0; pinfo.bl_max = 15; pinfo.bl_min = 1; pinfo.fb_num = 2; pinfo.mipi.mode = DSI_VIDEO_MODE; pinfo.mipi.pulse_mode_hsa_he = TRUE; pinfo.mipi.hfp_power_stop = TRUE; pinfo.mipi.hbp_power_stop = TRUE; pinfo.mipi.hsa_power_stop = TRUE; pinfo.mipi.eof_bllp_power_stop = TRUE; pinfo.mipi.bllp_power_stop = TRUE; pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE; pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888; pinfo.mipi.vc = 0; pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB; pinfo.mipi.data_lane0 = TRUE; 
pinfo.mipi.data_lane1 = TRUE; pinfo.mipi.t_clk_post = 0x03; pinfo.mipi.t_clk_pre = 0x24; pinfo.mipi.stream = 0; /* dma_p */ pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW; pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW; pinfo.mipi.frame_rate = 60; pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db; ret = mipi_simulator_device_register(&pinfo, MIPI_DSI_PRIM, MIPI_DSI_PANEL_VGA); if (ret) pr_err("%s: failed to register device!\n", __func__); return ret; } module_init(mipi_video_simulator_init);
gpl-2.0
lenovo-a3-dev/kernel_lenovo_a3
drivers/block/aoe/aoeblk.c
3634
7731
/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ /* * aoeblk.c * block device routines */ #include <linux/kernel.h> #include <linux/hdreg.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/fs.h> #include <linux/ioctl.h> #include <linux/slab.h> #include <linux/ratelimit.h> #include <linux/genhd.h> #include <linux/netdevice.h> #include <linux/mutex.h> #include <linux/export.h> #include "aoe.h" static DEFINE_MUTEX(aoeblk_mutex); static struct kmem_cache *buf_pool_cache; static ssize_t aoedisk_show_state(struct device *dev, struct device_attribute *attr, char *page) { struct gendisk *disk = dev_to_disk(dev); struct aoedev *d = disk->private_data; return snprintf(page, PAGE_SIZE, "%s%s\n", (d->flags & DEVFL_UP) ? "up" : "down", (d->flags & DEVFL_KICKME) ? ",kickme" : (d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : ""); /* I'd rather see nopen exported so we can ditch closewait */ } static ssize_t aoedisk_show_mac(struct device *dev, struct device_attribute *attr, char *page) { struct gendisk *disk = dev_to_disk(dev); struct aoedev *d = disk->private_data; struct aoetgt *t = d->targets[0]; if (t == NULL) return snprintf(page, PAGE_SIZE, "none\n"); return snprintf(page, PAGE_SIZE, "%pm\n", t->addr); } static ssize_t aoedisk_show_netif(struct device *dev, struct device_attribute *attr, char *page) { struct gendisk *disk = dev_to_disk(dev); struct aoedev *d = disk->private_data; struct net_device *nds[8], **nd, **nnd, **ne; struct aoetgt **t, **te; struct aoeif *ifp, *e; char *p; memset(nds, 0, sizeof nds); nd = nds; ne = nd + ARRAY_SIZE(nds); t = d->targets; te = t + NTARGETS; for (; t < te && *t; t++) { ifp = (*t)->ifs; e = ifp + NAOEIFS; for (; ifp < e && ifp->nd; ifp++) { for (nnd = nds; nnd < nd; nnd++) if (*nnd == ifp->nd) break; if (nnd == nd && nd != ne) *nd++ = ifp->nd; } } ne = nd; nd = nds; if (*nd == NULL) return snprintf(page, PAGE_SIZE, "none\n"); for (p = page; nd < ne; nd++) p += snprintf(p, PAGE_SIZE - 
(p-page), "%s%s", p == page ? "" : ",", (*nd)->name); p += snprintf(p, PAGE_SIZE - (p-page), "\n"); return p-page; } /* firmware version */ static ssize_t aoedisk_show_fwver(struct device *dev, struct device_attribute *attr, char *page) { struct gendisk *disk = dev_to_disk(dev); struct aoedev *d = disk->private_data; return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver); } static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL); static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL); static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL); static struct device_attribute dev_attr_firmware_version = { .attr = { .name = "firmware-version", .mode = S_IRUGO }, .show = aoedisk_show_fwver, }; static struct attribute *aoe_attrs[] = { &dev_attr_state.attr, &dev_attr_mac.attr, &dev_attr_netif.attr, &dev_attr_firmware_version.attr, NULL, }; static const struct attribute_group attr_group = { .attrs = aoe_attrs, }; static int aoedisk_add_sysfs(struct aoedev *d) { return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group); } void aoedisk_rm_sysfs(struct aoedev *d) { sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group); } static int aoeblk_open(struct block_device *bdev, fmode_t mode) { struct aoedev *d = bdev->bd_disk->private_data; ulong flags; mutex_lock(&aoeblk_mutex); spin_lock_irqsave(&d->lock, flags); if (d->flags & DEVFL_UP) { d->nopen++; spin_unlock_irqrestore(&d->lock, flags); mutex_unlock(&aoeblk_mutex); return 0; } spin_unlock_irqrestore(&d->lock, flags); mutex_unlock(&aoeblk_mutex); return -ENODEV; } static int aoeblk_release(struct gendisk *disk, fmode_t mode) { struct aoedev *d = disk->private_data; ulong flags; spin_lock_irqsave(&d->lock, flags); if (--d->nopen == 0) { spin_unlock_irqrestore(&d->lock, flags); aoecmd_cfg(d->aoemajor, d->aoeminor); return 0; } spin_unlock_irqrestore(&d->lock, flags); return 0; } static void aoeblk_make_request(struct request_queue *q, struct bio *bio) { struct sk_buff_head queue; struct 
aoedev *d; struct buf *buf; ulong flags; blk_queue_bounce(q, &bio); if (bio == NULL) { printk(KERN_ERR "aoe: bio is NULL\n"); BUG(); return; } d = bio->bi_bdev->bd_disk->private_data; if (d == NULL) { printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n"); BUG(); bio_endio(bio, -ENXIO); return; } else if (bio->bi_io_vec == NULL) { printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); BUG(); bio_endio(bio, -ENXIO); return; } buf = mempool_alloc(d->bufpool, GFP_NOIO); if (buf == NULL) { printk(KERN_INFO "aoe: buf allocation failure\n"); bio_endio(bio, -ENOMEM); return; } memset(buf, 0, sizeof(*buf)); INIT_LIST_HEAD(&buf->bufs); buf->stime = jiffies; buf->bio = bio; buf->resid = bio->bi_size; buf->sector = bio->bi_sector; buf->bv = &bio->bi_io_vec[bio->bi_idx]; buf->bv_resid = buf->bv->bv_len; WARN_ON(buf->bv_resid == 0); buf->bv_off = buf->bv->bv_offset; spin_lock_irqsave(&d->lock, flags); if ((d->flags & DEVFL_UP) == 0) { pr_info_ratelimited("aoe: device %ld.%d is not up\n", d->aoemajor, d->aoeminor); spin_unlock_irqrestore(&d->lock, flags); mempool_free(buf, d->bufpool); bio_endio(bio, -ENXIO); return; } list_add_tail(&buf->bufs, &d->bufq); aoecmd_work(d); __skb_queue_head_init(&queue); skb_queue_splice_init(&d->sendq, &queue); spin_unlock_irqrestore(&d->lock, flags); aoenet_xmit(&queue); } static int aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct aoedev *d = bdev->bd_disk->private_data; if ((d->flags & DEVFL_UP) == 0) { printk(KERN_ERR "aoe: disk not up\n"); return -ENODEV; } geo->cylinders = d->geo.cylinders; geo->heads = d->geo.heads; geo->sectors = d->geo.sectors; return 0; } static const struct block_device_operations aoe_bdops = { .open = aoeblk_open, .release = aoeblk_release, .getgeo = aoeblk_getgeo, .owner = THIS_MODULE, }; /* alloc_disk and add_disk can sleep */ void aoeblk_gdalloc(void *vp) { struct aoedev *d = vp; struct gendisk *gd; ulong flags; gd = alloc_disk(AOE_PARTITIONS); if (gd == NULL) { printk(KERN_ERR "aoe: cannot allocate 
disk structure for %ld.%d\n", d->aoemajor, d->aoeminor); goto err; } d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache); if (d->bufpool == NULL) { printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n", d->aoemajor, d->aoeminor); goto err_disk; } d->blkq = blk_alloc_queue(GFP_KERNEL); if (!d->blkq) goto err_mempool; blk_queue_make_request(d->blkq, aoeblk_make_request); d->blkq->backing_dev_info.name = "aoe"; if (bdi_init(&d->blkq->backing_dev_info)) goto err_blkq; spin_lock_irqsave(&d->lock, flags); gd->major = AOE_MAJOR; gd->first_minor = d->sysminor * AOE_PARTITIONS; gd->fops = &aoe_bdops; gd->private_data = d; set_capacity(gd, d->ssize); snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", d->aoemajor, d->aoeminor); gd->queue = d->blkq; d->gd = gd; d->flags &= ~DEVFL_GDALLOC; d->flags |= DEVFL_UP; spin_unlock_irqrestore(&d->lock, flags); add_disk(gd); aoedisk_add_sysfs(d); return; err_blkq: blk_cleanup_queue(d->blkq); d->blkq = NULL; err_mempool: mempool_destroy(d->bufpool); err_disk: put_disk(gd); err: spin_lock_irqsave(&d->lock, flags); d->flags &= ~DEVFL_GDALLOC; spin_unlock_irqrestore(&d->lock, flags); } void aoeblk_exit(void) { kmem_cache_destroy(buf_pool_cache); } int __init aoeblk_init(void) { buf_pool_cache = kmem_cache_create("aoe_bufs", sizeof(struct buf), 0, 0, NULL); if (buf_pool_cache == NULL) return -ENOMEM; return 0; }
gpl-2.0
Shabbypenguin/Jellybean_kernel
drivers/usb/host/ehci-mem.c
5682
6887
/* * Copyright (c) 2001 by David Brownell * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* this file is part of ehci-hcd.c */ /*-------------------------------------------------------------------------*/ /* * There's basically three types of memory: * - data used only by the HCD ... kmalloc is fine * - async and periodic schedules, shared by HC and HCD ... these * need to use dma_pool or dma_alloc_coherent * - driver buffers, read/written by HC ... single shot DMA mapped * * There's also "register" data (e.g. PCI or SOC), which is memory mapped. * No memory seen by this driver is pageable. 
*/ /*-------------------------------------------------------------------------*/ /* Allocate the key transfer structures from the previously allocated pool */ static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t dma) { memset (qtd, 0, sizeof *qtd); qtd->qtd_dma = dma; qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); qtd->hw_next = EHCI_LIST_END(ehci); qtd->hw_alt_next = EHCI_LIST_END(ehci); INIT_LIST_HEAD (&qtd->qtd_list); } static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags) { struct ehci_qtd *qtd; dma_addr_t dma; qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma); if (qtd != NULL) { ehci_qtd_init(ehci, qtd, dma); } return qtd; } static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd) { dma_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma); } static void qh_destroy(struct ehci_qh *qh) { struct ehci_hcd *ehci = qh->ehci; /* clean qtds first, and know this is not linked */ if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) { ehci_dbg (ehci, "unused qh not empty!\n"); BUG (); } if (qh->dummy) ehci_qtd_free (ehci, qh->dummy); dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma); kfree(qh); } static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) { struct ehci_qh *qh; dma_addr_t dma; qh = kzalloc(sizeof *qh, GFP_ATOMIC); if (!qh) goto done; qh->hw = (struct ehci_qh_hw *) dma_pool_alloc(ehci->qh_pool, flags, &dma); if (!qh->hw) goto fail; memset(qh->hw, 0, sizeof *qh->hw); qh->refcount = 1; qh->ehci = ehci; qh->qh_dma = dma; // INIT_LIST_HEAD (&qh->qh_list); INIT_LIST_HEAD (&qh->qtd_list); /* dummy td enables safe urb queuing */ qh->dummy = ehci_qtd_alloc (ehci, flags); if (qh->dummy == NULL) { ehci_dbg (ehci, "no dummy td\n"); goto fail1; } done: return qh; fail1: dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma); fail: kfree(qh); return NULL; } /* to share a qh (cpu threads, or hc) */ static inline struct ehci_qh *qh_get (struct ehci_qh *qh) { WARN_ON(!qh->refcount); 
qh->refcount++; return qh; } static inline void qh_put (struct ehci_qh *qh) { if (!--qh->refcount) qh_destroy(qh); } /*-------------------------------------------------------------------------*/ /* The queue heads and transfer descriptors are managed from pools tied * to each of the "per device" structures. * This is the initialisation and cleanup code. */ static void ehci_mem_cleanup (struct ehci_hcd *ehci) { free_cached_lists(ehci); if (ehci->async) qh_put (ehci->async); ehci->async = NULL; if (ehci->dummy) qh_put(ehci->dummy); ehci->dummy = NULL; /* DMA consistent memory and pools */ if (ehci->qtd_pool) dma_pool_destroy (ehci->qtd_pool); ehci->qtd_pool = NULL; if (ehci->qh_pool) { dma_pool_destroy (ehci->qh_pool); ehci->qh_pool = NULL; } if (ehci->itd_pool) dma_pool_destroy (ehci->itd_pool); ehci->itd_pool = NULL; if (ehci->sitd_pool) dma_pool_destroy (ehci->sitd_pool); ehci->sitd_pool = NULL; if (ehci->periodic) dma_free_coherent (ehci_to_hcd(ehci)->self.controller, ehci->periodic_size * sizeof (u32), ehci->periodic, ehci->periodic_dma); ehci->periodic = NULL; /* shadow periodic table */ kfree(ehci->pshadow); ehci->pshadow = NULL; } /* remember to add cleanup code (above) if you add anything here */ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags) { int i; /* QTDs for control/bulk/intr transfers */ ehci->qtd_pool = dma_pool_create ("ehci_qtd", ehci_to_hcd(ehci)->self.controller, sizeof (struct ehci_qtd), 32 /* byte alignment (for hw parts) */, 4096 /* can't cross 4K */); if (!ehci->qtd_pool) { goto fail; } /* QHs for control/bulk/intr transfers */ ehci->qh_pool = dma_pool_create ("ehci_qh", ehci_to_hcd(ehci)->self.controller, sizeof(struct ehci_qh_hw), 32 /* byte alignment (for hw parts) */, 4096 /* can't cross 4K */); if (!ehci->qh_pool) { goto fail; } ehci->async = ehci_qh_alloc (ehci, flags); if (!ehci->async) { goto fail; } /* ITD for high speed ISO transfers */ ehci->itd_pool = dma_pool_create ("ehci_itd", ehci_to_hcd(ehci)->self.controller, 
sizeof (struct ehci_itd), 32 /* byte alignment (for hw parts) */, 4096 /* can't cross 4K */); if (!ehci->itd_pool) { goto fail; } /* SITD for full/low speed split ISO transfers */ ehci->sitd_pool = dma_pool_create ("ehci_sitd", ehci_to_hcd(ehci)->self.controller, sizeof (struct ehci_sitd), 32 /* byte alignment (for hw parts) */, 4096 /* can't cross 4K */); if (!ehci->sitd_pool) { goto fail; } /* Hardware periodic table */ ehci->periodic = (__le32 *) dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller, ehci->periodic_size * sizeof(__le32), &ehci->periodic_dma, 0); if (ehci->periodic == NULL) { goto fail; } if (ehci->use_dummy_qh) { struct ehci_qh_hw *hw; ehci->dummy = ehci_qh_alloc(ehci, flags); if (!ehci->dummy) goto fail; hw = ehci->dummy->hw; hw->hw_next = EHCI_LIST_END(ehci); hw->hw_qtd_next = EHCI_LIST_END(ehci); hw->hw_alt_next = EHCI_LIST_END(ehci); hw->hw_token &= ~QTD_STS_ACTIVE; ehci->dummy->hw = hw; for (i = 0; i < ehci->periodic_size; i++) ehci->periodic[i] = ehci->dummy->qh_dma; } else { for (i = 0; i < ehci->periodic_size; i++) ehci->periodic[i] = EHCI_LIST_END(ehci); } /* software shadow of hardware table */ ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags); if (ehci->pshadow != NULL) return 0; fail: ehci_dbg (ehci, "couldn't init memory\n"); ehci_mem_cleanup (ehci); return -ENOMEM; }
gpl-2.0
tenfar/pyramid-gb-kernel
fs/cachefiles/xattr.c
9266
6565
/* CacheFiles extended attribute management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/slab.h>
#include "internal.h"

/* Name of the user xattr that carries the cache state on each backing file */
static const char cachefiles_xattr_cache[] =
	XATTR_USER_PREFIX "CacheFiles.cache";

/*
 * check the type label on an object
 * - done using xattrs
 *
 * Atomically installs a two-hex-digit type label ("C3" for index-less
 * cookies) with XATTR_CREATE; if the label already exists, reads it back
 * and verifies it matches.  Returns 0 on match/creation, -EIO on a
 * malformed or mismatched label, or another negative errno from the VFS.
 */
int cachefiles_check_object_type(struct cachefiles_object *object)
{
	struct dentry *dentry = object->dentry;
	char type[3], xtype[3];
	int ret;

	ASSERT(dentry);
	ASSERT(dentry->d_inode);

	if (!object->fscache.cookie)
		strcpy(type, "C3");
	else
		snprintf(type, 3, "%02x", object->fscache.cookie->def->type);

	_enter("%p{%s}", object, type);

	/* attempt to install a type label directly */
	ret = vfs_setxattr(dentry, cachefiles_xattr_cache, type, 2,
			   XATTR_CREATE);
	if (ret == 0) {
		_debug("SET"); /* we succeeded */
		goto error;
	}

	if (ret != -EEXIST) {
		kerror("Can't set xattr on %*.*s [%lu] (err %d)",
		       dentry->d_name.len, dentry->d_name.len,
		       dentry->d_name.name, dentry->d_inode->i_ino,
		       -ret);
		goto error;
	}

	/* read the current type label */
	ret = vfs_getxattr(dentry, cachefiles_xattr_cache, xtype, 3);
	if (ret < 0) {
		if (ret == -ERANGE)
			goto bad_type_length;
		kerror("Can't read xattr on %*.*s [%lu] (err %d)",
		       dentry->d_name.len, dentry->d_name.len,
		       dentry->d_name.name, dentry->d_inode->i_ino,
		       -ret);
		goto error;
	}

	/* check the type is what we're expecting */
	if (ret != 2)
		goto bad_type_length;

	if (xtype[0] != type[0] || xtype[1] != type[1])
		goto bad_type;

	ret = 0;

error:
	_leave(" = %d", ret);
	return ret;

bad_type_length:
	kerror("Cache object %lu type xattr length incorrect",
	       dentry->d_inode->i_ino);
	ret = -EIO;
	goto error;

bad_type:
	/* only reached when exactly 2 bytes were read, so [2] is ours to
	 * terminate for the log message */
	xtype[2] = 0;
	kerror("Cache object %*.*s [%lu] type %s not %s",
	       dentry->d_name.len, dentry->d_name.len,
	       dentry->d_name.name, dentry->d_inode->i_ino,
	       xtype, type);
	ret = -EIO;
	goto error;
}

/*
 * set the state xattr on a cache file
 *
 * Writes auxdata->len bytes starting at auxdata->type with XATTR_CREATE
 * (the attribute must not already exist).  Returns 0 or negative errno;
 * failures other than -ENOMEM are reported as cache I/O errors.
 */
int cachefiles_set_object_xattr(struct cachefiles_object *object,
				struct cachefiles_xattr *auxdata)
{
	struct dentry *dentry = object->dentry;
	int ret;

	ASSERT(object->fscache.cookie);
	ASSERT(dentry);

	_enter("%p,#%d", object, auxdata->len);

	/* attempt to install the cache metadata directly */
	_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);

	ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
			   &auxdata->type, auxdata->len,
			   XATTR_CREATE);
	if (ret < 0 && ret != -ENOMEM)
		cachefiles_io_error_obj(
			object,
			"Failed to set xattr with error %d", ret);

	_leave(" = %d", ret);
	return ret;
}

/*
 * update the state xattr on a cache file
 *
 * Same as cachefiles_set_object_xattr() but uses XATTR_REPLACE, so the
 * attribute must already exist.
 */
int cachefiles_update_object_xattr(struct cachefiles_object *object,
				   struct cachefiles_xattr *auxdata)
{
	struct dentry *dentry = object->dentry;
	int ret;

	ASSERT(object->fscache.cookie);
	ASSERT(dentry);

	_enter("%p,#%d", object, auxdata->len);

	/* attempt to install the cache metadata directly */
	_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);

	ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
			   &auxdata->type, auxdata->len,
			   XATTR_REPLACE);
	if (ret < 0 && ret != -ENOMEM)
		cachefiles_io_error_obj(
			object,
			"Failed to update xattr with error %d", ret);

	_leave(" = %d", ret);
	return ret;
}

/*
 * check the state xattr on a cache file
 * - return -ESTALE if the object should be deleted
 *
 * Reads the stored label into a temporary buffer (type byte + up to 512
 * bytes of auxiliary data), compares the type, then lets the netfs's
 * check_aux() op judge the auxiliary data; a NEEDS_UPDATE verdict causes
 * the label to be rewritten from auxdata.  Returns 0 if the object is
 * usable, -ESTALE if it must be culled, or another negative errno.
 */
int cachefiles_check_object_xattr(struct cachefiles_object *object,
				  struct cachefiles_xattr *auxdata)
{
	struct cachefiles_xattr *auxbuf;
	struct dentry *dentry = object->dentry;
	int ret;

	_enter("%p,#%d", object, auxdata->len);

	ASSERT(dentry);
	ASSERT(dentry->d_inode);

	auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, GFP_KERNEL);
	if (!auxbuf) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	/* read the current type label */
	ret = vfs_getxattr(dentry, cachefiles_xattr_cache,
			   &auxbuf->type, 512 + 1);
	if (ret < 0) {
		if (ret == -ENODATA)
			goto stale; /* no attribute - power went off
				     * mid-cull? */

		if (ret == -ERANGE)
			goto bad_type_length;

		cachefiles_io_error_obj(object,
					"Can't read xattr on %lu (err %d)",
					dentry->d_inode->i_ino, -ret);
		goto error;
	}

	/* check the on-disk object */
	if (ret < 1)
		goto bad_type_length;

	if (auxbuf->type != auxdata->type)
		goto stale;

	auxbuf->len = ret;

	/* consult the netfs */
	if (object->fscache.cookie->def->check_aux) {
		enum fscache_checkaux result;
		unsigned int dlen;

		/* aux data length excludes the leading type byte */
		dlen = auxbuf->len - 1;

		_debug("checkaux %s #%u",
		       object->fscache.cookie->def->name, dlen);

		result = fscache_check_aux(&object->fscache,
					   &auxbuf->data, dlen);

		switch (result) {
			/* entry okay as is */
		case FSCACHE_CHECKAUX_OKAY:
			goto okay;

			/* entry requires update */
		case FSCACHE_CHECKAUX_NEEDS_UPDATE:
			break;

			/* entry requires deletion */
		case FSCACHE_CHECKAUX_OBSOLETE:
			goto stale;

		default:
			BUG();
		}

		/* update the current label */
		ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
				   &auxdata->type, auxdata->len,
				   XATTR_REPLACE);
		if (ret < 0) {
			cachefiles_io_error_obj(object,
						"Can't update xattr on %lu"
						" (error %d)",
						dentry->d_inode->i_ino, -ret);
			goto error;
		}
	}

okay:
	ret = 0;

error:
	kfree(auxbuf);
	_leave(" = %d", ret);
	return ret;

bad_type_length:
	kerror("Cache object %lu xattr length incorrect",
	       dentry->d_inode->i_ino);
	ret = -EIO;
	goto error;

stale:
	ret = -ESTALE;
	goto error;
}

/*
 * remove the object's xattr to mark it stale
 *
 * A missing attribute (-ENOENT/-ENODATA) is treated as success since the
 * goal state — no label — is already reached.
 */
int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
				   struct dentry *dentry)
{
	int ret;

	ret = vfs_removexattr(dentry, cachefiles_xattr_cache);
	if (ret < 0) {
		if (ret == -ENOENT || ret == -ENODATA)
			ret = 0;
		else if (ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Can't remove xattr from %lu"
					    " (error %d)",
					    dentry->d_inode->i_ino, -ret);
	}

	_leave(" = %d", ret);
	return ret;
}
gpl-2.0