repo_name
string
path
string
copies
string
size
string
content
string
license
string
Savaged-Zen/Savaged-Zen-Inc
arch/arm/mach-omap2/omap_hwmod_2420_data.c
32
11831
/*
 * omap_hwmod_2420_data.c - hardware modules present on the OMAP2420 chips
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 * Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * XXX handle crossbar/shared link difference for L3?
 * XXX these should be marked initdata for multi-OMAP kernels
 */
#include <plat/omap_hwmod.h>
#include <mach/irqs.h>
#include <plat/cpu.h>
#include <plat/dma.h>
#include <plat/serial.h>

#include "omap_hwmod_common_data.h"

#include "prm-regbits-24xx.h"
#include "cm-regbits-24xx.h"

/*
 * OMAP2420 hardware module integration data
 *
 * All of the data in this section should be autogeneratable from the
 * TI hardware database or other technical documentation.  Data that
 * is driver-specific or driver-kernel integration-specific belongs
 * elsewhere.
 */

static struct omap_hwmod omap2420_mpu_hwmod;
static struct omap_hwmod omap2420_iva_hwmod;
static struct omap_hwmod omap2420_l3_main_hwmod;
static struct omap_hwmod omap2420_l4_core_hwmod;
static struct omap_hwmod omap2420_wd_timer2_hwmod;

/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap2420_l3_main__l4_core = {
	.master	= &omap2420_l3_main_hwmod,
	.slave	= &omap2420_l4_core_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* MPU -> L3 interface */
static struct omap_hwmod_ocp_if omap2420_mpu__l3_main = {
	.master	= &omap2420_mpu_hwmod,
	.slave	= &omap2420_l3_main_hwmod,
	.user	= OCP_USER_MPU,
};

/* Slave interfaces on the L3 interconnect */
static struct omap_hwmod_ocp_if *omap2420_l3_main_slaves[] = {
	&omap2420_mpu__l3_main,
};

/* Master interfaces on the L3 interconnect */
static struct omap_hwmod_ocp_if *omap2420_l3_main_masters[] = {
	&omap2420_l3_main__l4_core,
};

/* L3 */
static struct omap_hwmod omap2420_l3_main_hwmod = {
	.name		= "l3_main",
	.class		= &l3_hwmod_class,
	.masters	= omap2420_l3_main_masters,
	.masters_cnt	= ARRAY_SIZE(omap2420_l3_main_masters),
	.slaves		= omap2420_l3_main_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap2420_l3_main_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
	.flags		= HWMOD_NO_IDLEST,
};

static struct omap_hwmod omap2420_l4_wkup_hwmod;
static struct omap_hwmod omap2420_uart1_hwmod;
static struct omap_hwmod omap2420_uart2_hwmod;
static struct omap_hwmod omap2420_uart3_hwmod;

/* L4_CORE -> L4_WKUP interface */
static struct omap_hwmod_ocp_if omap2420_l4_core__l4_wkup = {
	.master	= &omap2420_l4_core_hwmod,
	.slave	= &omap2420_l4_wkup_hwmod,
	.user	= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> UART1 interface */
/*
 * NOTE(review): UART1 maps SZ_8K while UART2/UART3 below map SZ_1K;
 * looks intentional but worth confirming against the 2420 memory map.
 */
static struct omap_hwmod_addr_space omap2420_uart1_addr_space[] = {
	{
		.pa_start	= OMAP2_UART1_BASE,
		.pa_end		= OMAP2_UART1_BASE + SZ_8K - 1,
		.flags		= ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
	},
};

static struct omap_hwmod_ocp_if omap2_l4_core__uart1 = {
	.master		= &omap2420_l4_core_hwmod,
	.slave		= &omap2420_uart1_hwmod,
	.clk		= "uart1_ick",
	.addr		= omap2420_uart1_addr_space,
	.addr_cnt	= ARRAY_SIZE(omap2420_uart1_addr_space),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* L4 CORE -> UART2 interface */
static struct omap_hwmod_addr_space omap2420_uart2_addr_space[] = {
	{
		.pa_start	= OMAP2_UART2_BASE,
		.pa_end		= OMAP2_UART2_BASE + SZ_1K - 1,
		.flags		= ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
	},
};

static struct omap_hwmod_ocp_if omap2_l4_core__uart2 = {
	.master		= &omap2420_l4_core_hwmod,
	.slave		= &omap2420_uart2_hwmod,
	.clk		= "uart2_ick",
	.addr		= omap2420_uart2_addr_space,
	.addr_cnt	= ARRAY_SIZE(omap2420_uart2_addr_space),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/*
 * L4 CORE -> UART3 interface
 * (was mislabelled "L4 PER"; the master below is l4_core)
 */
static struct omap_hwmod_addr_space omap2420_uart3_addr_space[] = {
	{
		.pa_start	= OMAP2_UART3_BASE,
		.pa_end		= OMAP2_UART3_BASE + SZ_1K - 1,
		.flags		= ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
	},
};

static struct omap_hwmod_ocp_if omap2_l4_core__uart3 = {
	.master		= &omap2420_l4_core_hwmod,
	.slave		= &omap2420_uart3_hwmod,
	.clk		= "uart3_ick",
	.addr		= omap2420_uart3_addr_space,
	.addr_cnt	= ARRAY_SIZE(omap2420_uart3_addr_space),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* Slave interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap2420_l4_core_slaves[] = {
	&omap2420_l3_main__l4_core,
};

/* Master interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap2420_l4_core_masters[] = {
	&omap2420_l4_core__l4_wkup,
	&omap2_l4_core__uart1,
	&omap2_l4_core__uart2,
	&omap2_l4_core__uart3,
};

/* L4 CORE */
static struct omap_hwmod omap2420_l4_core_hwmod = {
	.name		= "l4_core",
	.class		= &l4_hwmod_class,
	.masters	= omap2420_l4_core_masters,
	.masters_cnt	= ARRAY_SIZE(omap2420_l4_core_masters),
	.slaves		= omap2420_l4_core_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap2420_l4_core_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
	.flags		= HWMOD_NO_IDLEST,
};

/* Slave interfaces on the L4_WKUP interconnect */
static struct omap_hwmod_ocp_if *omap2420_l4_wkup_slaves[] = {
	&omap2420_l4_core__l4_wkup,
};

/* Master interfaces on the L4_WKUP interconnect */
static struct omap_hwmod_ocp_if *omap2420_l4_wkup_masters[] = {
};

/* L4 WKUP */
static struct omap_hwmod omap2420_l4_wkup_hwmod = {
	.name		= "l4_wkup",
	.class		= &l4_hwmod_class,
	.masters	= omap2420_l4_wkup_masters,
	.masters_cnt	= ARRAY_SIZE(omap2420_l4_wkup_masters),
	.slaves		= omap2420_l4_wkup_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap2420_l4_wkup_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
	.flags		= HWMOD_NO_IDLEST,
};

/* Master interfaces on the MPU device */
static struct omap_hwmod_ocp_if *omap2420_mpu_masters[] = {
	&omap2420_mpu__l3_main,
};

/* MPU */
static struct omap_hwmod omap2420_mpu_hwmod = {
	.name		= "mpu",
	.class		= &mpu_hwmod_class,
	.main_clk	= "mpu_ck",
	.masters	= omap2420_mpu_masters,
	.masters_cnt	= ARRAY_SIZE(omap2420_mpu_masters),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};

/*
 * IVA1 interface data
 */

/* IVA <- L3 interface */
static struct omap_hwmod_ocp_if omap2420_l3__iva = {
	.master		= &omap2420_l3_main_hwmod,
	.slave		= &omap2420_iva_hwmod,
	.clk		= "iva1_ifck",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod_ocp_if *omap2420_iva_masters[] = {
	&omap2420_l3__iva,
};

/*
 * IVA1 (IVA1)
 * (comment previously said "IVA2", a copy-paste error: the OMAP2420
 * accelerator is IVA1, as the "iva1_ifck" interface clock above shows)
 */

static struct omap_hwmod omap2420_iva_hwmod = {
	.name		= "iva",
	.class		= &iva_hwmod_class,
	.masters	= omap2420_iva_masters,
	.masters_cnt	= ARRAY_SIZE(omap2420_iva_masters),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};

/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_addr_space omap2420_wd_timer2_addrs[] = {
	{
		.pa_start	= 0x48022000,
		.pa_end		= 0x4802207f,
		.flags		= ADDR_TYPE_RT
	},
};

static struct omap_hwmod_ocp_if omap2420_l4_wkup__wd_timer2 = {
	.master		= &omap2420_l4_wkup_hwmod,
	.slave		= &omap2420_wd_timer2_hwmod,
	.clk		= "mpu_wdt_ick",
	.addr		= omap2420_wd_timer2_addrs,
	.addr_cnt	= ARRAY_SIZE(omap2420_wd_timer2_addrs),
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/*
 * 'wd_timer' class
 * 32-bit watchdog upward counter that generates a pulse on the reset pin on
 * overflow condition
 */

static struct omap_hwmod_class_sysconfig omap2420_wd_timer_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap2420_wd_timer_hwmod_class = {
	.name	= "wd_timer",
	.sysc	= &omap2420_wd_timer_sysc,
};

/* wd_timer2 */
static struct omap_hwmod_ocp_if *omap2420_wd_timer2_slaves[] = {
	&omap2420_l4_wkup__wd_timer2,
};

static struct omap_hwmod omap2420_wd_timer2_hwmod = {
	.name		= "wd_timer2",
	.class		= &omap2420_wd_timer_hwmod_class,
	.main_clk	= "mpu_wdt_fck",
	.prcm		= {
		.omap2 = {
			.prcm_reg_id = 1,
			.module_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
			.module_offs = WKUP_MOD,
			.idlest_reg_id = 1,
			.idlest_idle_bit = OMAP24XX_ST_MPU_WDT_SHIFT,
		},
	},
	.slaves		= omap2420_wd_timer2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap2420_wd_timer2_slaves),
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};

/* UART */

static struct omap_hwmod_class_sysconfig uart_sysc = {
	.rev_offs	= 0x50,
	.sysc_offs	= 0x54,
	.syss_offs	= 0x58,
	.sysc_flags	= (SYSC_HAS_SIDLEMODE |
			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
			   SYSC_HAS_AUTOIDLE),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class uart_class = {
	.name	= "uart",
	.sysc	= &uart_sysc,
};

/* UART1 */

static struct omap_hwmod_irq_info uart1_mpu_irqs[] = {
	{ .irq = INT_24XX_UART1_IRQ, },
};

static struct omap_hwmod_dma_info uart1_sdma_reqs[] = {
	{ .name = "rx",	.dma_req = OMAP24XX_DMA_UART1_RX, },
	{ .name = "tx",	.dma_req = OMAP24XX_DMA_UART1_TX, },
};

static struct omap_hwmod_ocp_if *omap2420_uart1_slaves[] = {
	&omap2_l4_core__uart1,
};

static struct omap_hwmod omap2420_uart1_hwmod = {
	.name		= "uart1",
	.mpu_irqs	= uart1_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(uart1_mpu_irqs),
	.sdma_reqs	= uart1_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(uart1_sdma_reqs),
	.main_clk	= "uart1_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP24XX_EN_UART1_SHIFT,
			.idlest_reg_id = 1,
			/*
			 * idlest must use the idle-status (ST_) bit, not the
			 * enable (EN_) bit; cf. wd_timer2 above, which pairs
			 * EN_MPU_WDT with ST_MPU_WDT the same way.
			 */
			.idlest_idle_bit = OMAP24XX_ST_UART1_SHIFT,
		},
	},
	.slaves		= omap2420_uart1_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap2420_uart1_slaves),
	.class		= &uart_class,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};

/* UART2 */

static struct omap_hwmod_irq_info uart2_mpu_irqs[] = {
	{ .irq = INT_24XX_UART2_IRQ, },
};

static struct omap_hwmod_dma_info uart2_sdma_reqs[] = {
	{ .name = "rx",	.dma_req = OMAP24XX_DMA_UART2_RX, },
	{ .name = "tx",	.dma_req = OMAP24XX_DMA_UART2_TX, },
};

static struct omap_hwmod_ocp_if *omap2420_uart2_slaves[] = {
	&omap2_l4_core__uart2,
};

static struct omap_hwmod omap2420_uart2_hwmod = {
	.name		= "uart2",
	.mpu_irqs	= uart2_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(uart2_mpu_irqs),
	.sdma_reqs	= uart2_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(uart2_sdma_reqs),
	.main_clk	= "uart2_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP24XX_EN_UART2_SHIFT,
			.idlest_reg_id = 1,
			/* ST_ (idle-status) bit, not EN_ — see UART1 */
			.idlest_idle_bit = OMAP24XX_ST_UART2_SHIFT,
		},
	},
	.slaves		= omap2420_uart2_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap2420_uart2_slaves),
	.class		= &uart_class,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};

/* UART3 */

static struct omap_hwmod_irq_info uart3_mpu_irqs[] = {
	{ .irq = INT_24XX_UART3_IRQ, },
};

static struct omap_hwmod_dma_info uart3_sdma_reqs[] = {
	{ .name = "rx",	.dma_req = OMAP24XX_DMA_UART3_RX, },
	{ .name = "tx",	.dma_req = OMAP24XX_DMA_UART3_TX, },
};

static struct omap_hwmod_ocp_if *omap2420_uart3_slaves[] = {
	&omap2_l4_core__uart3,
};

static struct omap_hwmod omap2420_uart3_hwmod = {
	.name		= "uart3",
	.mpu_irqs	= uart3_mpu_irqs,
	.mpu_irqs_cnt	= ARRAY_SIZE(uart3_mpu_irqs),
	.sdma_reqs	= uart3_sdma_reqs,
	.sdma_reqs_cnt	= ARRAY_SIZE(uart3_sdma_reqs),
	.main_clk	= "uart3_fck",
	.prcm		= {
		.omap2 = {
			.module_offs = CORE_MOD,
			.prcm_reg_id = 2,
			.module_bit = OMAP24XX_EN_UART3_SHIFT,
			.idlest_reg_id = 2,
			/* ST_ (idle-status) bit, not EN_ — see UART1 */
			.idlest_idle_bit = OMAP24XX_ST_UART3_SHIFT,
		},
	},
	.slaves		= omap2420_uart3_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap2420_uart3_slaves),
	.class		= &uart_class,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};

static __initdata struct omap_hwmod *omap2420_hwmods[] = {
	&omap2420_l3_main_hwmod,
	&omap2420_l4_core_hwmod,
	&omap2420_l4_wkup_hwmod,
	&omap2420_mpu_hwmod,
	&omap2420_iva_hwmod,
	&omap2420_wd_timer2_hwmod,
	&omap2420_uart1_hwmod,
	&omap2420_uart2_hwmod,
	&omap2420_uart3_hwmod,
	NULL,
};

/* Register all OMAP2420 hwmods with the hwmod core at boot */
int __init omap2420_hwmod_init(void)
{
	return omap_hwmod_init(omap2420_hwmods);
}
gpl-2.0
MTDEV-KERNEL/MOTO-KERNEL
drivers/serial/8250.c
288
83363
/* * linux/drivers/char/8250.c * * Driver for 8250/16550-type serial ports * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Copyright (C) 2001 Russell King. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * A note about mapbase / membase * * mapbase is the physical address of the IO port. * membase is an 'ioremapped' cookie. */ #if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_reg.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/serial_8250.h> #include <linux/nmi.h> #include <linux/mutex.h> #include <asm/io.h> #include <asm/irq.h> #include "8250.h" #ifdef CONFIG_SPARC #include "suncore.h" #endif /* * Configuration: * share_irqs - whether we pass IRQF_SHARED to request_irq(). This option * is unsafe when used on edge-triggered interrupts. */ static unsigned int share_irqs = SERIAL8250_SHARE_IRQS; static unsigned int nr_uarts = CONFIG_SERIAL_8250_RUNTIME_UARTS; static struct uart_driver serial8250_reg; static int serial_index(struct uart_port *port) { return (serial8250_reg.minor - 64) + port->line; } static unsigned int skip_txen_test; /* force skip of txen test at init time */ /* * Debugging. */ #if 0 #define DEBUG_AUTOCONF(fmt...) printk(fmt) #else #define DEBUG_AUTOCONF(fmt...) do { } while (0) #endif #if 0 #define DEBUG_INTR(fmt...) printk(fmt) #else #define DEBUG_INTR(fmt...) 
do { } while (0) #endif #define PASS_LIMIT 256 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) /* * We default to IRQ0 for the "no irq" hack. Some * machine types want others as well - they're free * to redefine this in their header file. */ #define is_real_interrupt(irq) ((irq) != 0) #ifdef CONFIG_SERIAL_8250_DETECT_IRQ #define CONFIG_SERIAL_DETECT_IRQ 1 #endif #ifdef CONFIG_SERIAL_8250_MANY_PORTS #define CONFIG_SERIAL_MANY_PORTS 1 #endif /* * HUB6 is always on. This will be removed once the header * files have been cleaned. */ #define CONFIG_HUB6 1 #include <asm/serial.h> /* * SERIAL_PORT_DFNS tells us about built-in ports that have no * standard enumeration mechanism. Platforms that can find all * serial ports via mechanisms like ACPI or PCI need not supply it. */ #ifndef SERIAL_PORT_DFNS #define SERIAL_PORT_DFNS #endif static const struct old_serial_port old_serial_port[] = { SERIAL_PORT_DFNS /* defined in asm/serial.h */ }; #define UART_NR CONFIG_SERIAL_8250_NR_UARTS #ifdef CONFIG_SERIAL_8250_RSA #define PORT_RSA_MAX 4 static unsigned long probe_rsa[PORT_RSA_MAX]; static unsigned int probe_rsa_count; #endif /* CONFIG_SERIAL_8250_RSA */ struct uart_8250_port { struct uart_port port; struct timer_list timer; /* "no irq" timer */ struct list_head list; /* ports on this IRQ */ unsigned short capabilities; /* port capabilities */ unsigned short bugs; /* port bugs */ unsigned int tx_loadsz; /* transmit fifo load size */ unsigned char acr; unsigned char ier; unsigned char lcr; unsigned char mcr; unsigned char mcr_mask; /* mask of user bits */ unsigned char mcr_force; /* mask of forced bits */ unsigned char cur_iotype; /* Running I/O type */ /* * Some bits in registers are cleared on a read, so they must * be saved whenever the register is read but the bits will not * be immediately processed. 
*/ #define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS unsigned char lsr_saved_flags; #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA unsigned char msr_saved_flags; /* * We provide a per-port pm hook. */ void (*pm)(struct uart_port *port, unsigned int state, unsigned int old); }; struct irq_info { struct hlist_node node; int irq; spinlock_t lock; /* Protects list not the hash */ struct list_head *head; }; #define NR_IRQ_HASH 32 /* Can be adjusted later */ static struct hlist_head irq_lists[NR_IRQ_HASH]; static DEFINE_MUTEX(hash_mutex); /* Used to walk the hash */ /* * Here we define the default xmit fifo size used for each type of UART. */ static const struct serial8250_config uart_config[] = { [PORT_UNKNOWN] = { .name = "unknown", .fifo_size = 1, .tx_loadsz = 1, }, [PORT_8250] = { .name = "8250", .fifo_size = 1, .tx_loadsz = 1, }, [PORT_16450] = { .name = "16450", .fifo_size = 1, .tx_loadsz = 1, }, [PORT_16550] = { .name = "16550", .fifo_size = 1, .tx_loadsz = 1, }, [PORT_16550A] = { .name = "16550A", .fifo_size = 16, .tx_loadsz = 16, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .flags = UART_CAP_FIFO, }, [PORT_CIRRUS] = { .name = "Cirrus", .fifo_size = 1, .tx_loadsz = 1, }, [PORT_16650] = { .name = "ST16650", .fifo_size = 1, .tx_loadsz = 1, .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, }, [PORT_16650V2] = { .name = "ST16650V2", .fifo_size = 32, .tx_loadsz = 16, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_00, .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, }, [PORT_16750] = { .name = "TI16750", .fifo_size = 64, .tx_loadsz = 64, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | UART_FCR7_64BYTE, .flags = UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE, }, [PORT_STARTECH] = { .name = "Startech", .fifo_size = 1, .tx_loadsz = 1, }, [PORT_16C950] = { .name = "16C950/954", .fifo_size = 128, .tx_loadsz = 128, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .flags = UART_CAP_FIFO, }, [PORT_16654] = { .name = "ST16654", .fifo_size = 64, 
.tx_loadsz = 32, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_10, .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, }, [PORT_16850] = { .name = "XR16850", .fifo_size = 128, .tx_loadsz = 128, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, }, [PORT_RSA] = { .name = "RSA", .fifo_size = 2048, .tx_loadsz = 2048, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11, .flags = UART_CAP_FIFO, }, [PORT_NS16550A] = { .name = "NS16550A", .fifo_size = 16, .tx_loadsz = 16, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .flags = UART_CAP_FIFO | UART_NATSEMI, }, [PORT_XSCALE] = { .name = "XScale", .fifo_size = 32, .tx_loadsz = 32, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .flags = UART_CAP_FIFO | UART_CAP_UUE, }, [PORT_RM9000] = { .name = "RM9000", .fifo_size = 16, .tx_loadsz = 16, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .flags = UART_CAP_FIFO, }, [PORT_OCTEON] = { .name = "OCTEON", .fifo_size = 64, .tx_loadsz = 64, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .flags = UART_CAP_FIFO, }, [PORT_AR7] = { .name = "AR7", .fifo_size = 16, .tx_loadsz = 16, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00, .flags = UART_CAP_FIFO | UART_CAP_AFE, }, }; #if defined (CONFIG_SERIAL_8250_AU1X00) /* Au1x00 UART hardware has a weird register layout */ static const u8 au_io_in_map[] = { [UART_RX] = 0, [UART_IER] = 2, [UART_IIR] = 3, [UART_LCR] = 5, [UART_MCR] = 6, [UART_LSR] = 7, [UART_MSR] = 8, }; static const u8 au_io_out_map[] = { [UART_TX] = 1, [UART_IER] = 2, [UART_FCR] = 4, [UART_LCR] = 5, [UART_MCR] = 6, }; /* sane hardware needs no mapping */ static inline int map_8250_in_reg(struct uart_port *p, int offset) { if (p->iotype != UPIO_AU) return offset; return au_io_in_map[offset]; } static inline int map_8250_out_reg(struct uart_port *p, int offset) { if (p->iotype != UPIO_AU) return offset; return au_io_out_map[offset]; } #elif defined(CONFIG_SERIAL_8250_RM9K) static const u8 
regmap_in[8] = { [UART_RX] = 0x00, [UART_IER] = 0x0c, [UART_IIR] = 0x14, [UART_LCR] = 0x1c, [UART_MCR] = 0x20, [UART_LSR] = 0x24, [UART_MSR] = 0x28, [UART_SCR] = 0x2c }, regmap_out[8] = { [UART_TX] = 0x04, [UART_IER] = 0x0c, [UART_FCR] = 0x18, [UART_LCR] = 0x1c, [UART_MCR] = 0x20, [UART_LSR] = 0x24, [UART_MSR] = 0x28, [UART_SCR] = 0x2c }; static inline int map_8250_in_reg(struct uart_port *p, int offset) { if (p->iotype != UPIO_RM9000) return offset; return regmap_in[offset]; } static inline int map_8250_out_reg(struct uart_port *p, int offset) { if (p->iotype != UPIO_RM9000) return offset; return regmap_out[offset]; } #else /* sane hardware needs no mapping */ #define map_8250_in_reg(up, offset) (offset) #define map_8250_out_reg(up, offset) (offset) #endif static unsigned int hub6_serial_in(struct uart_port *p, int offset) { offset = map_8250_in_reg(p, offset) << p->regshift; outb(p->hub6 - 1 + offset, p->iobase); return inb(p->iobase + 1); } static void hub6_serial_out(struct uart_port *p, int offset, int value) { offset = map_8250_out_reg(p, offset) << p->regshift; outb(p->hub6 - 1 + offset, p->iobase); outb(value, p->iobase + 1); } static unsigned int mem_serial_in(struct uart_port *p, int offset) { offset = map_8250_in_reg(p, offset) << p->regshift; return readb(p->membase + offset); } static void mem_serial_out(struct uart_port *p, int offset, int value) { offset = map_8250_out_reg(p, offset) << p->regshift; writeb(value, p->membase + offset); } static void mem32_serial_out(struct uart_port *p, int offset, int value) { offset = map_8250_out_reg(p, offset) << p->regshift; writel(value, p->membase + offset); } static unsigned int mem32_serial_in(struct uart_port *p, int offset) { offset = map_8250_in_reg(p, offset) << p->regshift; return readl(p->membase + offset); } #ifdef CONFIG_SERIAL_8250_AU1X00 static unsigned int au_serial_in(struct uart_port *p, int offset) { offset = map_8250_in_reg(p, offset) << p->regshift; return __raw_readl(p->membase + offset); } 
static void au_serial_out(struct uart_port *p, int offset, int value) { offset = map_8250_out_reg(p, offset) << p->regshift; __raw_writel(value, p->membase + offset); } #endif static unsigned int tsi_serial_in(struct uart_port *p, int offset) { unsigned int tmp; offset = map_8250_in_reg(p, offset) << p->regshift; if (offset == UART_IIR) { tmp = readl(p->membase + (UART_IIR & ~3)); return (tmp >> 16) & 0xff; /* UART_IIR % 4 == 2 */ } else return readb(p->membase + offset); } static void tsi_serial_out(struct uart_port *p, int offset, int value) { offset = map_8250_out_reg(p, offset) << p->regshift; if (!((offset == UART_IER) && (value & UART_IER_UUE))) writeb(value, p->membase + offset); } static void dwapb_serial_out(struct uart_port *p, int offset, int value) { int save_offset = offset; offset = map_8250_out_reg(p, offset) << p->regshift; /* Save the LCR value so it can be re-written when a * Busy Detect interrupt occurs. */ if (save_offset == UART_LCR) { struct uart_8250_port *up = (struct uart_8250_port *)p; up->lcr = value; } writeb(value, p->membase + offset); /* Read the IER to ensure any interrupt is cleared before * returning from ISR. 
*/ if (save_offset == UART_TX || save_offset == UART_IER) value = p->serial_in(p, UART_IER); } static unsigned int io_serial_in(struct uart_port *p, int offset) { offset = map_8250_in_reg(p, offset) << p->regshift; return inb(p->iobase + offset); } static void io_serial_out(struct uart_port *p, int offset, int value) { offset = map_8250_out_reg(p, offset) << p->regshift; outb(value, p->iobase + offset); } static void set_io_from_upio(struct uart_port *p) { struct uart_8250_port *up = (struct uart_8250_port *)p; switch (p->iotype) { case UPIO_HUB6: p->serial_in = hub6_serial_in; p->serial_out = hub6_serial_out; break; case UPIO_MEM: p->serial_in = mem_serial_in; p->serial_out = mem_serial_out; break; case UPIO_RM9000: case UPIO_MEM32: p->serial_in = mem32_serial_in; p->serial_out = mem32_serial_out; break; #ifdef CONFIG_SERIAL_8250_AU1X00 case UPIO_AU: p->serial_in = au_serial_in; p->serial_out = au_serial_out; break; #endif case UPIO_TSI: p->serial_in = tsi_serial_in; p->serial_out = tsi_serial_out; break; case UPIO_DWAPB: p->serial_in = mem_serial_in; p->serial_out = dwapb_serial_out; break; default: p->serial_in = io_serial_in; p->serial_out = io_serial_out; break; } /* Remember loaded iotype */ up->cur_iotype = p->iotype; } static void serial_out_sync(struct uart_8250_port *up, int offset, int value) { struct uart_port *p = &up->port; switch (p->iotype) { case UPIO_MEM: case UPIO_MEM32: #ifdef CONFIG_SERIAL_8250_AU1X00 case UPIO_AU: #endif case UPIO_DWAPB: p->serial_out(p, offset, value); p->serial_in(p, UART_LCR); /* safe, no side-effects */ break; default: p->serial_out(p, offset, value); } } #define serial_in(up, offset) \ (up->port.serial_in(&(up)->port, (offset))) #define serial_out(up, offset, value) \ (up->port.serial_out(&(up)->port, (offset), (value))) /* * We used to support using pause I/O for certain machines. We * haven't supported this for a while, but just in case it's badly * needed for certain old 386 machines, I've left these #define's * in.... 
*/ #define serial_inp(up, offset) serial_in(up, offset) #define serial_outp(up, offset, value) serial_out(up, offset, value) /* Uart divisor latch read */ static inline int _serial_dl_read(struct uart_8250_port *up) { return serial_inp(up, UART_DLL) | serial_inp(up, UART_DLM) << 8; } /* Uart divisor latch write */ static inline void _serial_dl_write(struct uart_8250_port *up, int value) { serial_outp(up, UART_DLL, value & 0xff); serial_outp(up, UART_DLM, value >> 8 & 0xff); } #if defined(CONFIG_SERIAL_8250_AU1X00) /* Au1x00 haven't got a standard divisor latch */ static int serial_dl_read(struct uart_8250_port *up) { if (up->port.iotype == UPIO_AU) return __raw_readl(up->port.membase + 0x28); else return _serial_dl_read(up); } static void serial_dl_write(struct uart_8250_port *up, int value) { if (up->port.iotype == UPIO_AU) __raw_writel(value, up->port.membase + 0x28); else _serial_dl_write(up, value); } #elif defined(CONFIG_SERIAL_8250_RM9K) static int serial_dl_read(struct uart_8250_port *up) { return (up->port.iotype == UPIO_RM9000) ? 
(((__raw_readl(up->port.membase + 0x10) << 8) | (__raw_readl(up->port.membase + 0x08) & 0xff)) & 0xffff) : _serial_dl_read(up); } static void serial_dl_write(struct uart_8250_port *up, int value) { if (up->port.iotype == UPIO_RM9000) { __raw_writel(value, up->port.membase + 0x08); __raw_writel(value >> 8, up->port.membase + 0x10); } else { _serial_dl_write(up, value); } } #else #define serial_dl_read(up) _serial_dl_read(up) #define serial_dl_write(up, value) _serial_dl_write(up, value) #endif /* * For the 16C950 */ static void serial_icr_write(struct uart_8250_port *up, int offset, int value) { serial_out(up, UART_SCR, offset); serial_out(up, UART_ICR, value); } static unsigned int serial_icr_read(struct uart_8250_port *up, int offset) { unsigned int value; serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD); serial_out(up, UART_SCR, offset); value = serial_in(up, UART_ICR); serial_icr_write(up, UART_ACR, up->acr); return value; } /* * FIFO support. */ static void serial8250_clear_fifos(struct uart_8250_port *p) { if (p->capabilities & UART_CAP_FIFO) { serial_outp(p, UART_FCR, UART_FCR_ENABLE_FIFO); serial_outp(p, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_outp(p, UART_FCR, 0); } } /* * IER sleep support. UARTs which have EFRs need the "extended * capability" bit enabled. Note that on XR16C850s, we need to * reset LCR to write to IER. */ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) { if (p->capabilities & UART_CAP_SLEEP) { if (p->capabilities & UART_CAP_EFR) { serial_outp(p, UART_LCR, 0xBF); serial_outp(p, UART_EFR, UART_EFR_ECB); serial_outp(p, UART_LCR, 0); } serial_outp(p, UART_IER, sleep ? UART_IERX_SLEEP : 0); if (p->capabilities & UART_CAP_EFR) { serial_outp(p, UART_LCR, 0xBF); serial_outp(p, UART_EFR, 0); serial_outp(p, UART_LCR, 0); } } } #ifdef CONFIG_SERIAL_8250_RSA /* * Attempts to turn on the RSA FIFO. Returns zero on failure. * We set the port uart clock rate if we succeed. 
*/ static int __enable_rsa(struct uart_8250_port *up) { unsigned char mode; int result; mode = serial_inp(up, UART_RSA_MSR); result = mode & UART_RSA_MSR_FIFO; if (!result) { serial_outp(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO); mode = serial_inp(up, UART_RSA_MSR); result = mode & UART_RSA_MSR_FIFO; } if (result) up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16; return result; } static void enable_rsa(struct uart_8250_port *up) { if (up->port.type == PORT_RSA) { if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) { spin_lock_irq(&up->port.lock); __enable_rsa(up); spin_unlock_irq(&up->port.lock); } if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) serial_outp(up, UART_RSA_FRR, 0); } } /* * Attempts to turn off the RSA FIFO. Returns zero on failure. * It is unknown why interrupts were disabled in here. However, * the caller is expected to preserve this behaviour by grabbing * the spinlock before calling this function. */ static void disable_rsa(struct uart_8250_port *up) { unsigned char mode; int result; if (up->port.type == PORT_RSA && up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) { spin_lock_irq(&up->port.lock); mode = serial_inp(up, UART_RSA_MSR); result = !(mode & UART_RSA_MSR_FIFO); if (!result) { serial_outp(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO); mode = serial_inp(up, UART_RSA_MSR); result = !(mode & UART_RSA_MSR_FIFO); } if (result) up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16; spin_unlock_irq(&up->port.lock); } } #endif /* CONFIG_SERIAL_8250_RSA */ /* * This is a quickie test to see how big the FIFO is. * It doesn't work at all the time, more's the pity. 
*/ static int size_fifo(struct uart_8250_port *up) { unsigned char old_fcr, old_mcr, old_lcr; unsigned short old_dl; int count; old_lcr = serial_inp(up, UART_LCR); serial_outp(up, UART_LCR, 0); old_fcr = serial_inp(up, UART_FCR); old_mcr = serial_inp(up, UART_MCR); serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_outp(up, UART_MCR, UART_MCR_LOOP); serial_outp(up, UART_LCR, UART_LCR_DLAB); old_dl = serial_dl_read(up); serial_dl_write(up, 0x0001); serial_outp(up, UART_LCR, 0x03); for (count = 0; count < 256; count++) serial_outp(up, UART_TX, count); mdelay(20);/* FIXME - schedule_timeout */ for (count = 0; (serial_inp(up, UART_LSR) & UART_LSR_DR) && (count < 256); count++) serial_inp(up, UART_RX); serial_outp(up, UART_FCR, old_fcr); serial_outp(up, UART_MCR, old_mcr); serial_outp(up, UART_LCR, UART_LCR_DLAB); serial_dl_write(up, old_dl); serial_outp(up, UART_LCR, old_lcr); return count; } /* * Read UART ID using the divisor method - set DLL and DLM to zero * and the revision will be in DLL and device type in DLM. We * preserve the device state across this. */ static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p) { unsigned char old_dll, old_dlm, old_lcr; unsigned int id; old_lcr = serial_inp(p, UART_LCR); serial_outp(p, UART_LCR, UART_LCR_DLAB); old_dll = serial_inp(p, UART_DLL); old_dlm = serial_inp(p, UART_DLM); serial_outp(p, UART_DLL, 0); serial_outp(p, UART_DLM, 0); id = serial_inp(p, UART_DLL) | serial_inp(p, UART_DLM) << 8; serial_outp(p, UART_DLL, old_dll); serial_outp(p, UART_DLM, old_dlm); serial_outp(p, UART_LCR, old_lcr); return id; } /* * This is a helper routine to autodetect StarTech/Exar/Oxsemi UART's. * When this function is called we know it is at least a StarTech * 16650 V2, but it might be one of several StarTech UARTs, or one of * its clones. (We treat the broken original StarTech 16650 V1 as a * 16550, and why not? Startech doesn't seem to even acknowledge its * existence.) 
* * What evil have men's minds wrought... */ static void autoconfig_has_efr(struct uart_8250_port *up) { unsigned int id1, id2, id3, rev; /* * Everything with an EFR has SLEEP */ up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP; /* * First we check to see if it's an Oxford Semiconductor UART. * * If we have to do this here because some non-National * Semiconductor clone chips lock up if you try writing to the * LSR register (which serial_icr_read does) */ /* * Check for Oxford Semiconductor 16C950. * * EFR [4] must be set else this test fails. * * This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca) * claims that it's needed for 952 dual UART's (which are not * recommended for new designs). */ up->acr = 0; serial_out(up, UART_LCR, 0xBF); serial_out(up, UART_EFR, UART_EFR_ECB); serial_out(up, UART_LCR, 0x00); id1 = serial_icr_read(up, UART_ID1); id2 = serial_icr_read(up, UART_ID2); id3 = serial_icr_read(up, UART_ID3); rev = serial_icr_read(up, UART_REV); DEBUG_AUTOCONF("950id=%02x:%02x:%02x:%02x ", id1, id2, id3, rev); if (id1 == 0x16 && id2 == 0xC9 && (id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) { up->port.type = PORT_16C950; /* * Enable work around for the Oxford Semiconductor 952 rev B * chip which causes it to seriously miscalculate baud rates * when DLL is 0. */ if (id3 == 0x52 && rev == 0x01) up->bugs |= UART_BUG_QUOT; return; } /* * We check for a XR16C850 by setting DLL and DLM to 0, and then * reading back DLL and DLM. The chip type depends on the DLM * value read back: * 0x10 - XR16C850 and the DLL contains the chip revision. * 0x12 - XR16C2850. * 0x14 - XR16C854. */ id1 = autoconfig_read_divisor_id(up); DEBUG_AUTOCONF("850id=%04x ", id1); id2 = id1 >> 8; if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) { up->port.type = PORT_16850; return; } /* * It wasn't an XR16C850. * * We distinguish between the '654 and the '650 by counting * how many bytes are in the FIFO. 
 I'm using this for now,
	 * since that's the technique that was sent to me in the
	 * serial driver update, but I'm not convinced this works.
	 * I've had problems doing this in the past.  -TYT
	 */
	if (size_fifo(up) == 64)
		up->port.type = PORT_16654;
	else
		up->port.type = PORT_16650V2;
}

/*
 * We detected a chip without a FIFO.  Only two fall into
 * this category - the original 8250 and the 16450.  The
 * 16450 has a scratch register (accessible with LCR=0)
 */
static void autoconfig_8250(struct uart_8250_port *up)
{
	unsigned char scratch, status1, status2;

	up->port.type = PORT_8250;

	/*
	 * Write two distinct patterns to the scratch register; if both
	 * read back intact, the register exists and this is a 16450.
	 * The original scratch value is restored afterwards.
	 */
	scratch = serial_in(up, UART_SCR);
	serial_outp(up, UART_SCR, 0xa5);
	status1 = serial_in(up, UART_SCR);
	serial_outp(up, UART_SCR, 0x5a);
	status2 = serial_in(up, UART_SCR);
	serial_outp(up, UART_SCR, scratch);

	if (status1 == 0xa5 && status2 == 0x5a)
		up->port.type = PORT_16450;
}

/*
 * Returns non-zero for chips that falsely appear to have an EFR.
 */
static int broken_efr(struct uart_8250_port *up)
{
	/*
	 * Exar ST16C2550 "A2" devices incorrectly detect as
	 * having an EFR, and report an ID of 0x0201.  See
	 * http://www.exar.com/info.php?pdf=dan180_oct2004.pdf
	 */
	if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
		return 1;

	return 0;
}

/*
 * We know that the chip has FIFOs.  Does it have an EFR?  The
 * EFR is located in the same register position as the IIR and
 * we know the top two bits of the IIR are currently set.  The
 * EFR should contain zero.  Try to read the EFR.
 */
static void autoconfig_16550a(struct uart_8250_port *up)
{
	unsigned char status1, status2;
	unsigned int iersave;

	up->port.type = PORT_16550A;
	up->capabilities |= UART_CAP_FIFO;

	/*
	 * Check for presence of the EFR when DLAB is set.
	 * Only ST16C650V1 UARTs pass this test.
	 */
	serial_outp(up, UART_LCR, UART_LCR_DLAB);
	if (serial_in(up, UART_EFR) == 0) {
		serial_outp(up, UART_EFR, 0xA8);
		if (serial_in(up, UART_EFR) != 0) {
			DEBUG_AUTOCONF("EFRv1 ");
			up->port.type = PORT_16650;
			up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
		} else {
			/* EFR did not hold the written value: no EFR here. */
			DEBUG_AUTOCONF("Motorola 8xxx DUART ");
		}
		serial_outp(up, UART_EFR, 0);
		return;
	}

	/*
	 * Maybe it requires 0xbf to be written to the LCR.
	 * (other ST16C650V2 UARTs, TI16C752A, etc)
	 */
	serial_outp(up, UART_LCR, 0xBF);
	if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
		DEBUG_AUTOCONF("EFRv2 ");
		autoconfig_has_efr(up);
		return;
	}

	/*
	 * Check for a National Semiconductor SuperIO chip.
	 * Attempt to switch to bank 2, read the value of the LOOP bit
	 * from EXCR1. Switch back to bank 0, change it in MCR. Then
	 * switch back to bank 2, read it from EXCR1 again and check
	 * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
	 */
	serial_outp(up, UART_LCR, 0);
	status1 = serial_in(up, UART_MCR);
	serial_outp(up, UART_LCR, 0xE0);
	status2 = serial_in(up, 0x02); /* EXCR1 */

	if (!((status2 ^ status1) & UART_MCR_LOOP)) {
		serial_outp(up, UART_LCR, 0);
		serial_outp(up, UART_MCR, status1 ^ UART_MCR_LOOP);
		serial_outp(up, UART_LCR, 0xE0);
		status2 = serial_in(up, 0x02); /* EXCR1 */
		serial_outp(up, UART_LCR, 0);
		serial_outp(up, UART_MCR, status1);

		if ((status2 ^ status1) & UART_MCR_LOOP) {
			unsigned short quot;

			serial_outp(up, UART_LCR, 0xE0);

			/* Rescale the existing divisor for the faster clock. */
			quot = serial_dl_read(up);
			quot <<= 3;

			status1 = serial_in(up, 0x04); /* EXCR2 */
			status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
			status1 |= 0x10;  /* 1.625 divisor for baud_base --> 921600 */
			serial_outp(up, 0x04, status1);

			serial_dl_write(up, quot);
			serial_outp(up, UART_LCR, 0);

			up->port.uartclk = 921600*16;
			up->port.type = PORT_NS16550A;
			up->capabilities |= UART_NATSEMI;
			return;
		}
	}

	/*
	 * No EFR.  Try to detect a TI16750, which only sets bit 5 of
	 * the IIR when 64 byte FIFO mode is enabled when DLAB is set.
	 * Try setting it with and without DLAB set.  Cheap clones
	 * set bit 5 without DLAB set.
	 */
	serial_outp(up, UART_LCR, 0);
	serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
	status1 = serial_in(up, UART_IIR) >> 5;
	serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
	serial_outp(up, UART_LCR, UART_LCR_DLAB);
	serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
	status2 = serial_in(up, UART_IIR) >> 5;
	serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
	serial_outp(up, UART_LCR, 0);

	DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);

	/* Genuine 16750: bit 5 set only when DLAB was set (6 -> 7). */
	if (status1 == 6 && status2 == 7) {
		up->port.type = PORT_16750;
		up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
		return;
	}

	/*
	 * Try writing and reading the UART_IER_UUE bit (b6).
	 * If it works, this is probably one of the Xscale platform's
	 * internal UARTs.
	 * We're going to explicitly set the UUE bit to 0 before
	 * trying to write and read a 1 just to make sure it's not
	 * already a 1 and maybe locked there before we even start.
	 */
	iersave = serial_in(up, UART_IER);
	serial_outp(up, UART_IER, iersave & ~UART_IER_UUE);
	if (!(serial_in(up, UART_IER) & UART_IER_UUE)) {
		/*
		 * OK it's in a known zero state, try writing and reading
		 * without disturbing the current state of the other bits.
		 */
		serial_outp(up, UART_IER, iersave | UART_IER_UUE);
		if (serial_in(up, UART_IER) & UART_IER_UUE) {
			/*
			 * It's an Xscale.
			 * We'll leave the UART_IER_UUE bit set to 1 (enabled).
			 */
			DEBUG_AUTOCONF("Xscale ");
			up->port.type = PORT_XSCALE;
			up->capabilities |= UART_CAP_UUE;
			return;
		}
	} else {
		/*
		 * If we got here we couldn't force the IER_UUE bit to 0.
		 * Log it and continue.
		 */
		DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 ");
	}
	serial_outp(up, UART_IER, iersave);
}

/*
 * This routine is called by rs_init() to initialize a specific serial
 * port.  It determines what type of UART chip this serial port is
 * using: 8250, 16450, 16550, 16550A.
 The important question is
 * whether or not this UART is a 16550A or not, since this will
 * determine whether or not we can use its FIFO features or not.
 */
static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
{
	unsigned char status1, scratch, scratch2, scratch3;
	unsigned char save_lcr, save_mcr;
	unsigned long flags;

	/* Nothing to probe if the port has no I/O or memory resource. */
	if (!up->port.iobase && !up->port.mapbase && !up->port.membase)
		return;

	DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
		       serial_index(&up->port), up->port.iobase, up->port.membase);

	/*
	 * We really do need global IRQs disabled here - we're going to
	 * be frobbing the chips IRQ enable register to see if it exists.
	 */
	spin_lock_irqsave(&up->port.lock, flags);

	up->capabilities = 0;
	up->bugs = 0;

	if (!(up->port.flags & UPF_BUGGY_UART)) {
		/*
		 * Do a simple existence test first; if we fail this,
		 * there's no point trying anything else.
		 *
		 * 0x80 is used as a nonsense port to prevent against
		 * false positives due to ISA bus float.  The
		 * assumption is that 0x80 is a non-existent port;
		 * which should be safe since include/asm/io.h also
		 * makes this assumption.
		 *
		 * Note: this is safe as long as MCR bit 4 is clear
		 * and the device is in "PC" mode.
		 */
		scratch = serial_inp(up, UART_IER);
		serial_outp(up, UART_IER, 0);
#ifdef __i386__
		outb(0xff, 0x080);
#endif
		/*
		 * Mask out IER[7:4] bits for test as some UARTs (e.g. TL
		 * 16C754B) allow only to modify them if an EFR bit is set.
		 */
		scratch2 = serial_inp(up, UART_IER) & 0x0f;
		serial_outp(up, UART_IER, 0x0F);
#ifdef __i386__
		outb(0, 0x080);
#endif
		scratch3 = serial_inp(up, UART_IER) & 0x0f;
		serial_outp(up, UART_IER, scratch);
		if (scratch2 != 0 || scratch3 != 0x0F) {
			/*
			 * We failed; there's nothing here
			 */
			DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
				       scratch2, scratch3);
			goto out;
		}
	}

	save_mcr = serial_in(up, UART_MCR);
	save_lcr = serial_in(up, UART_LCR);

	/*
	 * Check to see if a UART is really there.  Certain broken
	 * internal modems based on the Rockwell chipset fail this
	 * test, because they apparently don't implement the loopback
	 * test mode.  So this test is skipped on the COM 1 through
	 * COM 4 ports.  This *should* be safe, since no board
	 * manufacturer would be stupid enough to design a board
	 * that conflicts with COM 1-4 --- we hope!
	 */
	if (!(up->port.flags & UPF_SKIP_TEST)) {
		/* Loopback mode with OUT1/OUT2 raised should reflect
		 * into MSR[7:4] as exactly 0x90. */
		serial_outp(up, UART_MCR, UART_MCR_LOOP | 0x0A);
		status1 = serial_inp(up, UART_MSR) & 0xF0;
		serial_outp(up, UART_MCR, save_mcr);
		if (status1 != 0x90) {
			DEBUG_AUTOCONF("LOOP test failed (%02x) ",
				       status1);
			goto out;
		}
	}

	/*
	 * We're pretty sure there's a port here.  Lets find out what
	 * type of port it is.  The IIR top two bits allows us to find
	 * out if it's 8250 or 16450, 16550, 16550A or later.  This
	 * determines what we test for next.
	 *
	 * We also initialise the EFR (if any) to zero for later.  The
	 * EFR occupies the same register location as the FCR and IIR.
	 */
	serial_outp(up, UART_LCR, 0xBF);
	serial_outp(up, UART_EFR, 0);
	serial_outp(up, UART_LCR, 0);

	serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
	scratch = serial_in(up, UART_IIR) >> 6;

	DEBUG_AUTOCONF("iir=%d ", scratch);

	switch (scratch) {
	case 0:
		autoconfig_8250(up);
		break;
	case 1:
		/* IIR[7:6] == 01 is not a known UART. */
		up->port.type = PORT_UNKNOWN;
		break;
	case 2:
		up->port.type = PORT_16550;
		break;
	case 3:
		autoconfig_16550a(up);
		break;
	}

#ifdef CONFIG_SERIAL_8250_RSA
	/*
	 * Only probe for RSA ports if we got the region.
	 */
	if (up->port.type == PORT_16550A && probeflags & PROBE_RSA) {
		int i;

		for (i = 0 ; i < probe_rsa_count; ++i) {
			if (probe_rsa[i] == up->port.iobase &&
			    __enable_rsa(up)) {
				up->port.type = PORT_RSA;
				break;
			}
		}
	}
#endif

#ifdef CONFIG_SERIAL_8250_AU1X00
	/* if access method is AU, it is a 16550 with a quirk */
	if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
		up->bugs |= UART_BUG_NOMSR;
#endif

	serial_outp(up, UART_LCR, save_lcr);

	/* Warn if the probed capabilities disagree with the table. */
	if (up->capabilities != uart_config[up->port.type].flags) {
		printk(KERN_WARNING
		       "ttyS%d: detected caps %08x should be %08x\n",
		       serial_index(&up->port), up->capabilities,
		       uart_config[up->port.type].flags);
	}

	up->port.fifosize = uart_config[up->port.type].fifo_size;
	up->capabilities = uart_config[up->port.type].flags;
	up->tx_loadsz = uart_config[up->port.type].tx_loadsz;

	if (up->port.type == PORT_UNKNOWN)
		goto out;

	/*
	 * Reset the UART.
	 */
#ifdef CONFIG_SERIAL_8250_RSA
	if (up->port.type == PORT_RSA)
		serial_outp(up, UART_RSA_FRR, 0);
#endif
	serial_outp(up, UART_MCR, save_mcr);
	serial8250_clear_fifos(up);
	serial_in(up, UART_RX);
	if (up->capabilities & UART_CAP_UUE)
		serial_outp(up, UART_IER, UART_IER_UUE);
	else
		serial_outp(up, UART_IER, 0);

out:
	spin_unlock_irqrestore(&up->port.lock, flags);
	DEBUG_AUTOCONF("type=%s\n", uart_config[up->port.type].name);
}

/*
 * Probe which IRQ line this port raises, using the kernel's
 * probe_irq_on()/probe_irq_off() machinery: enable all UART
 * interrupt sources, force a TX interrupt, and see which line fires.
 */
static void autoconfig_irq(struct uart_8250_port *up)
{
	unsigned char save_mcr, save_ier;
	unsigned char save_ICP = 0;
	unsigned int ICP = 0;
	unsigned long irqs;
	int irq;

	if (up->port.flags & UPF_FOURPORT) {
		/* AST FourPort: enable its interrupt control port. */
		ICP = (up->port.iobase & 0xfe0) | 0x1f;
		save_ICP = inb_p(ICP);
		outb_p(0x80, ICP);
		(void) inb_p(ICP);
	}

	/* forget possible initially masked and pending IRQ */
	probe_irq_off(probe_irq_on());
	save_mcr = serial_inp(up, UART_MCR);
	save_ier = serial_inp(up, UART_IER);
	serial_outp(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);

	irqs = probe_irq_on();
	serial_outp(up, UART_MCR, 0);
	udelay(10);
	if (up->port.flags & UPF_FOURPORT) {
		serial_outp(up, UART_MCR,
			    UART_MCR_DTR | UART_MCR_RTS);
	} else {
		serial_outp(up, UART_MCR,
			    UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
	}
	serial_outp(up, UART_IER, 0x0f);	/* enable all intrs */
	/* Drain any pending status so only our TX write interrupts. */
	(void)serial_inp(up, UART_LSR);
	(void)serial_inp(up, UART_RX);
	(void)serial_inp(up, UART_IIR);
	(void)serial_inp(up, UART_MSR);
	serial_outp(up, UART_TX, 0xFF);
	udelay(20);
	irq = probe_irq_off(irqs);

	serial_outp(up, UART_MCR, save_mcr);
	serial_outp(up, UART_IER, save_ier);

	if (up->port.flags & UPF_FOURPORT)
		outb_p(save_ICP, ICP);

	/* irq <= 0 means probing failed; fall back to polling (irq 0). */
	up->port.irq = (irq > 0) ? irq : 0;
}

/*
 * Disable the transmit-holding-register-empty interrupt if enabled.
 */
static inline void __stop_tx(struct uart_8250_port *p)
{
	if (p->ier & UART_IER_THRI) {
		p->ier &= ~UART_IER_THRI;
		serial_out(p, UART_IER, p->ier);
	}
}

/* uart_ops.stop_tx: stop transmitting as soon as possible. */
static void serial8250_stop_tx(struct uart_port *port)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;

	__stop_tx(up);

	/*
	 * We really want to stop the transmitter from sending.
	 */
	if (up->port.type == PORT_16C950) {
		up->acr |= UART_ACR_TXDIS;
		serial_icr_write(up, UART_ACR, up->acr);
	}
}

static void transmit_chars(struct uart_8250_port *up);

/* uart_ops.start_tx: enable the TX interrupt; kick-start chips that
 * fail to raise the first THRE interrupt (UART_BUG_TXEN). */
static void serial8250_start_tx(struct uart_port *port)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;

	if (!(up->ier & UART_IER_THRI)) {
		up->ier |= UART_IER_THRI;
		serial_out(up, UART_IER, up->ier);

		if (up->bugs & UART_BUG_TXEN) {
			unsigned char lsr;
			lsr = serial_in(up, UART_LSR);
			up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
			/* RM9000 only reports THRE reliably; others TEMT. */
			if ((up->port.type == PORT_RM9000) ?
				(lsr & UART_LSR_THRE) :
				(lsr & UART_LSR_TEMT))
				transmit_chars(up);
		}
	}

	/*
	 * Re-enable the transmitter if we disabled it.
	 */
	if (up->port.type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
		up->acr &= ~UART_ACR_TXDIS;
		serial_icr_write(up, UART_ACR, up->acr);
	}
}

/* uart_ops.stop_rx: disable receive line-status interrupts. */
static void serial8250_stop_rx(struct uart_port *port)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;

	up->ier &= ~UART_IER_RLSI;
	up->port.read_status_mask &= ~UART_LSR_DR;
	serial_out(up, UART_IER, up->ier);
}

/* uart_ops.enable_ms: enable modem-status interrupts (unless the
 * port is known to have no working MSR). */
static void serial8250_enable_ms(struct uart_port *port)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;

	/* no MSR capabilities */
	if (up->bugs & UART_BUG_NOMSR)
		return;

	up->ier |= UART_IER_MSI;
	serial_out(up, UART_IER, up->ier);
}

/*
 * Drain the receiver FIFO into the tty layer, accounting for and
 * flagging break/parity/framing/overrun conditions.  Called with the
 * port lock held; *status is updated with the last LSR value read.
 */
static void
receive_chars(struct uart_8250_port *up, unsigned int *status)
{
	struct tty_struct *tty = up->port.state->port.tty;
	unsigned char ch, lsr = *status;
	int max_count = 256;	/* bound the loop in case DR never clears */
	char flag;

	do {
		if (likely(lsr & UART_LSR_DR))
			ch = serial_inp(up, UART_RX);
		else
			/*
			 * Intel 82571 has a Serial Over Lan device that will
			 * set UART_LSR_BI without setting UART_LSR_DR when
			 * it receives a break. To avoid reading from the
			 * receive buffer without UART_LSR_DR bit set, we
			 * just force the read character to be 0
			 */
			ch = 0;

		flag = TTY_NORMAL;
		up->port.icount.rx++;

		lsr |= up->lsr_saved_flags;
		up->lsr_saved_flags = 0;

		if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
			/*
			 * For statistics only
			 */
			if (lsr & UART_LSR_BI) {
				lsr &= ~(UART_LSR_FE | UART_LSR_PE);
				up->port.icount.brk++;
				/*
				 * We do the SysRQ and SAK checking
				 * here because otherwise the break
				 * may get masked by ignore_status_mask
				 * or read_status_mask.
				 */
				if (uart_handle_break(&up->port))
					goto ignore_char;
			} else if (lsr & UART_LSR_PE)
				up->port.icount.parity++;
			else if (lsr & UART_LSR_FE)
				up->port.icount.frame++;
			if (lsr & UART_LSR_OE)
				up->port.icount.overrun++;

			/*
			 * Mask off conditions which should be ignored.
			 */
			lsr &= up->port.read_status_mask;

			if (lsr & UART_LSR_BI) {
				DEBUG_INTR("handling break....");
				flag = TTY_BREAK;
			} else if (lsr & UART_LSR_PE)
				flag = TTY_PARITY;
			else if (lsr & UART_LSR_FE)
				flag = TTY_FRAME;
		}
		if (uart_handle_sysrq_char(&up->port, ch))
			goto ignore_char;

		uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);

ignore_char:
		lsr = serial_inp(up, UART_LSR);
	} while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
	/* Push to the tty layer with the port lock dropped - the flip
	 * path may itself take locks that rank above ours. */
	spin_unlock(&up->port.lock);
	tty_flip_buffer_push(tty);
	spin_lock(&up->port.lock);
	*status = lsr;
}

/*
 * Feed the transmitter: send any pending x_char first, otherwise move
 * up to tx_loadsz bytes from the circular buffer into the TX FIFO.
 * Called with the port lock held.
 */
static void transmit_chars(struct uart_8250_port *up)
{
	struct circ_buf *xmit = &up->port.state->xmit;
	int count;

	if (up->port.x_char) {
		/* High-priority flow-control character (XON/XOFF). */
		serial_outp(up, UART_TX, up->port.x_char);
		up->port.icount.tx++;
		up->port.x_char = 0;
		return;
	}
	if (uart_tx_stopped(&up->port)) {
		serial8250_stop_tx(&up->port);
		return;
	}
	if (uart_circ_empty(xmit)) {
		__stop_tx(up);
		return;
	}

	count = up->tx_loadsz;
	do {
		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		up->port.icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	} while (--count > 0);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&up->port);

	DEBUG_INTR("THRE...");

	if (uart_circ_empty(xmit))
		__stop_tx(up);
}

/*
 * Read the MSR, merge in any saved delta flags, and propagate
 * ring/DSR/DCD/CTS changes to the serial core.  Returns the
 * (merged) MSR value.
 */
static unsigned int check_modem_status(struct uart_8250_port *up)
{
	unsigned int status = serial_in(up, UART_MSR);

	status |= up->msr_saved_flags;
	up->msr_saved_flags = 0;
	if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
	    up->port.state != NULL) {
		if (status & UART_MSR_TERI)
			up->port.icount.rng++;
		if (status & UART_MSR_DDSR)
			up->port.icount.dsr++;
		if (status & UART_MSR_DDCD)
			uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
		if (status & UART_MSR_DCTS)
			uart_handle_cts_change(&up->port, status & UART_MSR_CTS);

		wake_up_interruptible(&up->port.state->port.delta_msr_wait);
	}

	return status;
}

/*
 * This handles the interrupt from one port.
 */
static void serial8250_handle_port(struct uart_8250_port *up)
{
	unsigned int status;
	unsigned long flags;

	spin_lock_irqsave(&up->port.lock, flags);

	status = serial_inp(up, UART_LSR);

	DEBUG_INTR("status = %x...", status);

	if (status & (UART_LSR_DR | UART_LSR_BI))
		receive_chars(up, &status);
	check_modem_status(up);
	if (status & UART_LSR_THRE)
		transmit_chars(up);

	spin_unlock_irqrestore(&up->port.lock, flags);
}

/*
 * This is the serial driver's interrupt routine.
 *
 * Arjan thinks the old way was overly complex, so it got simplified.
 * Alan disagrees, saying that need the complexity to handle the weird
 * nature of ISA shared interrupts.  (This is a special exception.)
 *
 * In order to handle ISA shared interrupts properly, we need to check
 * that all ports have been serviced, and therefore the ISA interrupt
 * line has been de-asserted.
 *
 * This means we need to loop through all ports. checking that they
 * don't have an interrupt pending.
 */
static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
{
	struct irq_info *i = dev_id;
	struct list_head *l, *end = NULL;
	int pass_counter = 0, handled = 0;

	DEBUG_INTR("serial8250_interrupt(%d)...", irq);

	spin_lock(&i->lock);

	l = i->head;
	do {
		struct uart_8250_port *up;
		unsigned int iir;

		up = list_entry(l, struct uart_8250_port, list);

		iir = serial_in(up, UART_IIR);
		if (!(iir & UART_IIR_NO_INT)) {
			serial8250_handle_port(up);

			handled = 1;

			/* A port was serviced: restart the idle scan. */
			end = NULL;
		} else if (up->port.iotype == UPIO_DWAPB &&
			  (iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
			/* The DesignWare APB UART has an Busy Detect (0x07)
			 * interrupt meaning an LCR write attempt occurred while the
			 * UART was busy.  The interrupt must be cleared by reading
			 * the UART status register (USR) and the LCR re-written. */
			unsigned int status;
			/* The read itself clears the interrupt; the value
			 * is deliberately discarded. */
			status = *(volatile u32 *)up->port.private_data;
			serial_out(up, UART_LCR, up->lcr);

			handled = 1;

			end = NULL;
		} else if (end == NULL)
			/* First idle port seen: the scan ends here once
			 * every port in between is also idle. */
			end = l;

		l = l->next;

		if (l == i->head && pass_counter++ > PASS_LIMIT) {
			/* If we hit this, we're dead. */
			printk(KERN_ERR "serial8250: too much work for "
				"irq%d\n", irq);
			break;
		}
	} while (l != end);

	spin_unlock(&i->lock);

	DEBUG_INTR("end.\n");

	return IRQ_RETVAL(handled);
}

/*
 * To support ISA shared interrupts, we need to have one interrupt
 * handler that ensures that the IRQ line has been deasserted
 * before returning.  Failing to do this will result in the IRQ
 * line being stuck active, and, since ISA irqs are edge triggered,
 * no more IRQs will be seen.
 */
static void serial_do_unlink(struct irq_info *i, struct uart_8250_port *up)
{
	spin_lock_irq(&i->lock);

	if (!list_empty(i->head)) {
		if (i->head == &up->list)
			i->head = i->head->next;
		list_del(&up->list);
	} else {
		BUG_ON(i->head != &up->list);
		i->head = NULL;
	}
	spin_unlock_irq(&i->lock);
	/* List empty so throw away the hash node */
	if (i->head == NULL) {
		hlist_del(&i->node);
		kfree(i);
	}
}

/*
 * Add this port to the per-IRQ chain, allocating the irq_info hash
 * node and requesting the IRQ when this is the first port on the line.
 * Returns 0 on success or a negative errno.
 */
static int serial_link_irq_chain(struct uart_8250_port *up)
{
	struct hlist_head *h;
	struct hlist_node *n;
	struct irq_info *i;
	int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;

	mutex_lock(&hash_mutex);

	h = &irq_lists[up->port.irq % NR_IRQ_HASH];

	hlist_for_each(n, h) {
		i = hlist_entry(n, struct irq_info, node);
		if (i->irq == up->port.irq)
			break;
	}

	if (n == NULL) {
		/* First user of this IRQ: create the hash node. */
		i = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
		if (i == NULL) {
			mutex_unlock(&hash_mutex);
			return -ENOMEM;
		}
		spin_lock_init(&i->lock);
		i->irq = up->port.irq;
		hlist_add_head(&i->node, h);
	}
	mutex_unlock(&hash_mutex);

	spin_lock_irq(&i->lock);

	if (i->head) {
		/* IRQ already requested; just join the chain. */
		list_add(&up->list, i->head);
		spin_unlock_irq(&i->lock);

		ret = 0;
	} else {
		INIT_LIST_HEAD(&up->list);
		i->head = &up->list;
		spin_unlock_irq(&i->lock);
		irq_flags |= up->port.irqflags;
		ret = request_irq(up->port.irq, serial8250_interrupt,
				  irq_flags, "serial", i);
		if (ret < 0)
			serial_do_unlink(i, up);
	}

	return ret;
}

/*
 * Remove this port from the per-IRQ chain, freeing the IRQ when the
 * last port on the line goes away.
 */
static void serial_unlink_irq_chain(struct uart_8250_port *up)
{
	struct irq_info *i;
	struct hlist_node *n;
	struct hlist_head *h;

	mutex_lock(&hash_mutex);

	h = &irq_lists[up->port.irq % NR_IRQ_HASH];

	hlist_for_each(n, h) {
		i = hlist_entry(n, struct irq_info, node);
		if (i->irq == up->port.irq)
			break;
	}

	BUG_ON(n == NULL);
	BUG_ON(i->head == NULL);

	if (list_empty(i->head))
		free_irq(up->port.irq, i);

	serial_do_unlink(i, up);
	mutex_unlock(&hash_mutex);
}

/* Base timer interval for polling */
static inline int poll_timeout(int timeout)
{
	return timeout > 6 ? (timeout / 2 - 2) : 1;
}

/*
 * This function is used to handle ports that do not have an
 * interrupt.  This doesn't work very well for 16450's, but gives
 * barely passable results for a 16550A.  (Although at the expense
 * of much CPU overhead).
 */
static void serial8250_timeout(unsigned long data)
{
	struct uart_8250_port *up = (struct uart_8250_port *)data;
	unsigned int iir;

	iir = serial_in(up, UART_IIR);
	if (!(iir & UART_IIR_NO_INT))
		serial8250_handle_port(up);
	mod_timer(&up->timer, jiffies + poll_timeout(up->port.timeout));
}

/*
 * Backup timer for ports with UART_BUG_THRE: kicks the port when the
 * UART fails to reassert the THRE interrupt on its own.
 */
static void serial8250_backup_timeout(unsigned long data)
{
	struct uart_8250_port *up = (struct uart_8250_port *)data;
	unsigned int iir, ier = 0, lsr;
	unsigned long flags;

	/*
	 * Must disable interrupts or else we risk racing with the interrupt
	 * based handler.
	 */
	if (is_real_interrupt(up->port.irq)) {
		ier = serial_in(up, UART_IER);
		serial_out(up, UART_IER, 0);
	}

	iir = serial_in(up, UART_IIR);

	/*
	 * This should be a safe test for anyone who doesn't trust the
	 * IIR bits on their UART, but it's specifically designed for
	 * the "Diva" UART used on the management processor on many HP
	 * ia64 and parisc boxes.
	 */
	spin_lock_irqsave(&up->port.lock, flags);
	lsr = serial_in(up, UART_LSR);
	up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
	spin_unlock_irqrestore(&up->port.lock, flags);
	/* TX interrupt wanted, data pending, holding register empty but
	 * no interrupt flagged: synthesize a THRE interrupt. */
	if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
	    (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
	    (lsr & UART_LSR_THRE)) {
		iir &= ~(UART_IIR_ID | UART_IIR_NO_INT);
		iir |= UART_IIR_THRI;
	}

	if (!(iir & UART_IIR_NO_INT))
		serial8250_handle_port(up);

	if (is_real_interrupt(up->port.irq))
		serial_out(up, UART_IER, ier);

	/* Standard timer interval plus 0.2s to keep the port running */
	mod_timer(&up->timer,
		jiffies + poll_timeout(up->port.timeout) + HZ / 5);
}

/* uart_ops.tx_empty: report whether the transmitter is fully idle. */
static unsigned int serial8250_tx_empty(struct uart_port *port)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;
	unsigned long flags;
	unsigned int lsr;

	spin_lock_irqsave(&up->port.lock, flags);
	lsr = serial_in(up, UART_LSR);
	up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
	spin_unlock_irqrestore(&up->port.lock, flags);

	return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
}

/* uart_ops.get_mctrl: translate MSR bits to TIOCM_* flags. */
static unsigned int serial8250_get_mctrl(struct uart_port *port)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;
	unsigned int status;
	unsigned int ret;

	status = check_modem_status(up);

	ret = 0;
	if (status & UART_MSR_DCD)
		ret |= TIOCM_CAR;
	if (status & UART_MSR_RI)
		ret |= TIOCM_RNG;
	if (status & UART_MSR_DSR)
		ret |= TIOCM_DSR;
	if (status & UART_MSR_CTS)
		ret |= TIOCM_CTS;
	return ret;
}

/* uart_ops.set_mctrl: translate TIOCM_* flags to MCR bits. */
static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;
	unsigned char mcr = 0;

	if (mctrl & TIOCM_RTS)
		mcr |= UART_MCR_RTS;
	if (mctrl & TIOCM_DTR)
		mcr |= UART_MCR_DTR;
	if (mctrl & TIOCM_OUT1)
		mcr |= UART_MCR_OUT1;
	if (mctrl & TIOCM_OUT2)
		mcr |= UART_MCR_OUT2;
	if (mctrl & TIOCM_LOOP)
		mcr |= UART_MCR_LOOP;

	/* Apply per-port quirk mask/force bits and the driver's own MCR. */
	mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;

	serial_out(up, UART_MCR, mcr);
}

/* uart_ops.break_ctl: assert (-1) or clear break on the line. */
static void serial8250_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;
	unsigned long flags;

	spin_lock_irqsave(&up->port.lock, flags);
	if (break_state == -1)
		up->lcr |= UART_LCR_SBC;
	else
		up->lcr &= ~UART_LCR_SBC;
	serial_out(up, UART_LCR, up->lcr);
	spin_unlock_irqrestore(&up->port.lock, flags);
}

/*
 *	Wait for transmitter & holding register to empty
 */
static void wait_for_xmitr(struct uart_8250_port *up, int bits)
{
	unsigned int status, tmout = 10000;

	/* Wait up to 10ms for the character(s) to be sent.
	 */
	do {
		status = serial_in(up, UART_LSR);

		up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;

		if (--tmout == 0)
			break;
		udelay(1);
	} while ((status & bits) != bits);

	/* Wait up to 1s for flow control if necessary */
	if (up->port.flags & UPF_CONS_FLOW) {
		unsigned int tmout;
		for (tmout = 1000000; tmout; tmout--) {
			unsigned int msr = serial_in(up, UART_MSR);
			up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
			if (msr & UART_MSR_CTS)
				break;
			udelay(1);
			/* This can spin for up to 1s; keep the NMI
			 * watchdog from firing. */
			touch_nmi_watchdog();
		}
	}
}

#ifdef CONFIG_CONSOLE_POLL
/*
 * Console polling routines for writing and reading from the uart while
 * in an interrupt or debug context.
 */

/* Busy-wait for a received character and return it. */
static int serial8250_get_poll_char(struct uart_port *port)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;
	unsigned char lsr = serial_inp(up, UART_LSR);

	while (!(lsr & UART_LSR_DR))
		lsr = serial_inp(up, UART_LSR);

	return serial_inp(up, UART_RX);
}

/* Transmit one character with interrupts masked at the UART. */
static void serial8250_put_poll_char(struct uart_port *port,
			 unsigned char c)
{
	unsigned int ier;
	struct uart_8250_port *up = (struct uart_8250_port *)port;

	/*
	 *	First save the IER then disable the interrupts
	 */
	ier = serial_in(up, UART_IER);
	if (up->capabilities & UART_CAP_UUE)
		serial_out(up, UART_IER, UART_IER_UUE);
	else
		serial_out(up, UART_IER, 0);

	wait_for_xmitr(up, BOTH_EMPTY);
	/*
	 *	Send the character out.
	 *	If a LF, also do CR...
	 */
	serial_out(up, UART_TX, c);
	if (c == 10) {
		wait_for_xmitr(up, BOTH_EMPTY);
		serial_out(up, UART_TX, 13);
	}

	/*
	 *	Finally, wait for transmitter to become empty
	 *	and restore the IER
	 */
	wait_for_xmitr(up, BOTH_EMPTY);
	serial_out(up, UART_IER, ier);
}
#endif /* CONFIG_CONSOLE_POLL */

/*
 * uart_ops.startup: bring the port up - reset the hardware, probe for
 * interrupt quirks, install the IRQ handler (or a polling timer), and
 * enable receive interrupts.  Returns 0 or a negative errno.
 */
static int serial8250_startup(struct uart_port *port)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;
	unsigned long flags;
	unsigned char lsr, iir;
	int retval;

	up->capabilities = uart_config[up->port.type].flags;
	up->mcr = 0;

	if (up->port.iotype != up->cur_iotype)
		set_io_from_upio(port);

	if (up->port.type == PORT_16C950) {
		/* Wake up and initialize UART */
		up->acr = 0;
		serial_outp(up, UART_LCR, 0xBF);
		serial_outp(up, UART_EFR, UART_EFR_ECB);
		serial_outp(up, UART_IER, 0);
		serial_outp(up, UART_LCR, 0);
		serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
		serial_outp(up, UART_LCR, 0xBF);
		serial_outp(up, UART_EFR, UART_EFR_ECB);
		serial_outp(up, UART_LCR, 0);
	}

#ifdef CONFIG_SERIAL_8250_RSA
	/*
	 * If this is an RSA port, see if we can kick it up to the
	 * higher speed clock.
	 */
	enable_rsa(up);
#endif

	/*
	 * Clear the FIFO buffers and disable them.
	 * (they will be reenabled in set_termios())
	 */
	serial8250_clear_fifos(up);

	/*
	 * Clear the interrupt registers.
	 */
	(void) serial_inp(up, UART_LSR);
	(void) serial_inp(up, UART_RX);
	(void) serial_inp(up, UART_IIR);
	(void) serial_inp(up, UART_MSR);

	/*
	 * At this point, there's no way the LSR could still be 0xff;
	 * if it is, then bail out, because there's likely no UART
	 * here.
	 */
	if (!(up->port.flags & UPF_BUGGY_UART) &&
	    (serial_inp(up, UART_LSR) == 0xff)) {
		printk(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
		       serial_index(&up->port));
		return -ENODEV;
	}

	/*
	 * For a XR16C850, we need to set the trigger levels
	 */
	if (up->port.type == PORT_16850) {
		unsigned char fctr;

		serial_outp(up, UART_LCR, 0xbf);

		fctr = serial_inp(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
		serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD | UART_FCTR_RX);
		serial_outp(up, UART_TRG, UART_TRG_96);
		serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD | UART_FCTR_TX);
		serial_outp(up, UART_TRG, UART_TRG_96);

		serial_outp(up, UART_LCR, 0);
	}

	if (is_real_interrupt(up->port.irq)) {
		unsigned char iir1;
		/*
		 * Test for UARTs that do not reassert THRE when the
		 * transmitter is idle and the interrupt has already
		 * been cleared.  Real 16550s should always reassert
		 * this interrupt whenever the transmitter is idle and
		 * the interrupt is enabled.  Delays are necessary to
		 * allow register changes to become visible.
		 */
		spin_lock_irqsave(&up->port.lock, flags);
		if (up->port.irqflags & IRQF_SHARED)
			disable_irq_nosync(up->port.irq);

		wait_for_xmitr(up, UART_LSR_THRE);
		serial_out_sync(up, UART_IER, UART_IER_THRI);
		udelay(1); /* allow THRE to set */
		iir1 = serial_in(up, UART_IIR);
		serial_out(up, UART_IER, 0);
		serial_out_sync(up, UART_IER, UART_IER_THRI);
		udelay(1); /* allow a working UART time to re-assert THRE */
		iir = serial_in(up, UART_IIR);
		serial_out(up, UART_IER, 0);

		if (up->port.irqflags & IRQF_SHARED)
			enable_irq(up->port.irq);
		spin_unlock_irqrestore(&up->port.lock, flags);

		/*
		 * If the interrupt is not reasserted, setup a timer to
		 * kick the UART on a regular basis.
		 */
		if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
			up->bugs |= UART_BUG_THRE;
			pr_debug("ttyS%d - using backup timer\n",
				 serial_index(port));
		}
	}

	/*
	 * The above check will only give an accurate result the first time
	 * the port is opened so this value needs to be preserved.
	 */
	if (up->bugs & UART_BUG_THRE) {
		up->timer.function = serial8250_backup_timeout;
		up->timer.data = (unsigned long)up;
		mod_timer(&up->timer, jiffies +
			poll_timeout(up->port.timeout) + HZ / 5);
	}

	/*
	 * If the "interrupt" for this port doesn't correspond with any
	 * hardware interrupt, we use a timer-based system.  The original
	 * driver used to do this with IRQ0.
	 */
	if (!is_real_interrupt(up->port.irq)) {
		up->timer.data = (unsigned long)up;
		mod_timer(&up->timer, jiffies + poll_timeout(up->port.timeout));
	} else {
		retval = serial_link_irq_chain(up);
		if (retval)
			return retval;
	}

	/*
	 * Now, initialize the UART
	 */
	serial_outp(up, UART_LCR, UART_LCR_WLEN8);

	spin_lock_irqsave(&up->port.lock, flags);
	if (up->port.flags & UPF_FOURPORT) {
		if (!is_real_interrupt(up->port.irq))
			up->port.mctrl |= TIOCM_OUT1;
	} else
		/*
		 * Most PC uarts need OUT2 raised to enable interrupts.
		 */
		if (is_real_interrupt(up->port.irq))
			up->port.mctrl |= TIOCM_OUT2;

	serial8250_set_mctrl(&up->port, up->port.mctrl);

	/* Serial over Lan (SoL) hack:
	   Intel 8257x Gigabit ethernet chips have a
	   16550 emulation, to be used for Serial Over Lan.
	   Those chips take a longer time than a normal
	   serial device to signalize that a transmission
	   data was queued. Due to that, the above test generally
	   fails. One solution would be to delay the reading of
	   iir. However, this is not reliable, since the timeout
	   is variable. So, let's just don't test if we receive
	   TX irq. This way, we'll never enable UART_BUG_TXEN.
	 */
	if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
		goto dont_test_tx_en;

	/*
	 * Do a quick test to see if we receive an
	 * interrupt when we enable the TX irq.
	 */
	serial_outp(up, UART_IER, UART_IER_THRI);
	lsr = serial_in(up, UART_LSR);
	iir = serial_in(up, UART_IIR);
	serial_outp(up, UART_IER, 0);

	if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
		if (!(up->bugs & UART_BUG_TXEN)) {
			up->bugs |= UART_BUG_TXEN;
			pr_debug("ttyS%d - enabling bad tx status workarounds\n",
				 serial_index(port));
		}
	} else {
		up->bugs &= ~UART_BUG_TXEN;
	}

dont_test_tx_en:
	spin_unlock_irqrestore(&up->port.lock, flags);

	/*
	 * Clear the interrupt registers again for luck, and clear the
	 * saved flags to avoid getting false values from polling
	 * routines or the previous session.
	 */
	serial_inp(up, UART_LSR);
	serial_inp(up, UART_RX);
	serial_inp(up, UART_IIR);
	serial_inp(up, UART_MSR);
	up->lsr_saved_flags = 0;
	up->msr_saved_flags = 0;

	/*
	 * Finally, enable interrupts.  Note: Modem status interrupts
	 * are set via set_termios(), which will be occurring imminently
	 * anyway, so we don't enable them here.
	 */
	up->ier = UART_IER_RLSI | UART_IER_RDI;
	serial_outp(up, UART_IER, up->ier);

	if (up->port.flags & UPF_FOURPORT) {
		unsigned int icp;
		/*
		 * Enable interrupts on the AST Fourport board
		 */
		icp = (up->port.iobase & 0xfe0) | 0x01f;
		outb_p(0x80, icp);
		(void) inb_p(icp);
	}

	return 0;
}

/*
 * uart_ops.shutdown: quiesce the port - disable interrupts, drop the
 * modem control lines, clear break/FIFOs, and unhook the IRQ or timer.
 */
static void serial8250_shutdown(struct uart_port *port)
{
	struct uart_8250_port *up = (struct uart_8250_port *)port;
	unsigned long flags;

	/*
	 * Disable interrupts from this port
	 */
	up->ier = 0;
	serial_outp(up, UART_IER, 0);

	spin_lock_irqsave(&up->port.lock, flags);
	if (up->port.flags & UPF_FOURPORT) {
		/* reset interrupts on the AST Fourport board */
		inb((up->port.iobase & 0xfe0) | 0x1f);
		up->port.mctrl |= TIOCM_OUT1;
	} else
		up->port.mctrl &= ~TIOCM_OUT2;

	serial8250_set_mctrl(&up->port, up->port.mctrl);
	spin_unlock_irqrestore(&up->port.lock, flags);

	/*
	 * Disable break condition and FIFOs
	 */
	serial_out(up, UART_LCR, serial_inp(up, UART_LCR) & ~UART_LCR_SBC);
	serial8250_clear_fifos(up);

#ifdef CONFIG_SERIAL_8250_RSA
	/*
	 * Reset the RSA board back to 115kbps compat mode.
*/ disable_rsa(up); #endif /* * Read data port to reset things, and then unlink from * the IRQ chain. */ (void) serial_in(up, UART_RX); del_timer_sync(&up->timer); up->timer.function = serial8250_timeout; if (is_real_interrupt(up->port.irq)) serial_unlink_irq_chain(up); } static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int baud) { unsigned int quot; /* * Handle magic divisors for baud rates above baud_base on * SMSC SuperIO chips. */ if ((port->flags & UPF_MAGIC_MULTIPLIER) && baud == (port->uartclk/4)) quot = 0x8001; else if ((port->flags & UPF_MAGIC_MULTIPLIER) && baud == (port->uartclk/8)) quot = 0x8002; else quot = uart_get_divisor(port, baud); return quot; } static void serial8250_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct uart_8250_port *up = (struct uart_8250_port *)port; unsigned char cval, fcr = 0; unsigned long flags; unsigned int baud, quot; switch (termios->c_cflag & CSIZE) { case CS5: cval = UART_LCR_WLEN5; break; case CS6: cval = UART_LCR_WLEN6; break; case CS7: cval = UART_LCR_WLEN7; break; default: case CS8: cval = UART_LCR_WLEN8; break; } if (termios->c_cflag & CSTOPB) cval |= UART_LCR_STOP; if (termios->c_cflag & PARENB) cval |= UART_LCR_PARITY; if (!(termios->c_cflag & PARODD)) cval |= UART_LCR_EPAR; #ifdef CMSPAR if (termios->c_cflag & CMSPAR) cval |= UART_LCR_SPAR; #endif /* * Ask the core to calculate the divisor for us. */ baud = uart_get_baud_rate(port, termios, old, port->uartclk / 16 / 0xffff, port->uartclk / 16); quot = serial8250_get_divisor(port, baud); /* * Oxford Semi 952 rev B workaround */ if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0) quot++; if (up->capabilities & UART_CAP_FIFO && up->port.fifosize > 1) { if (baud < 2400) fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1; else fcr = uart_config[up->port.type].fcr; } /* * MCR-based auto flow control. 
When AFE is enabled, RTS will be * deasserted when the receive FIFO contains more characters than * the trigger, or the MCR RTS bit is cleared. In the case where * the remote UART is not using CTS auto flow control, we must * have sufficient FIFO entries for the latency of the remote * UART to respond. IOW, at least 32 bytes of FIFO. */ if (up->capabilities & UART_CAP_AFE && up->port.fifosize >= 32) { up->mcr &= ~UART_MCR_AFE; if (termios->c_cflag & CRTSCTS) up->mcr |= UART_MCR_AFE; } /* * Ok, we're now changing the port state. Do it with * interrupts disabled. */ spin_lock_irqsave(&up->port.lock, flags); /* * Update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; if (termios->c_iflag & INPCK) up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; if (termios->c_iflag & (BRKINT | PARMRK)) up->port.read_status_mask |= UART_LSR_BI; /* * Characteres to ignore */ up->port.ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; if (termios->c_iflag & IGNBRK) { up->port.ignore_status_mask |= UART_LSR_BI; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) up->port.ignore_status_mask |= UART_LSR_OE; } /* * ignore all characters if CREAD is not set */ if ((termios->c_cflag & CREAD) == 0) up->port.ignore_status_mask |= UART_LSR_DR; /* * CTS flow control flag and modem status interrupts */ up->ier &= ~UART_IER_MSI; if (!(up->bugs & UART_BUG_NOMSR) && UART_ENABLE_MS(&up->port, termios->c_cflag)) up->ier |= UART_IER_MSI; if (up->capabilities & UART_CAP_UUE) up->ier |= UART_IER_UUE | UART_IER_RTOIE; serial_out(up, UART_IER, up->ier); if (up->capabilities & UART_CAP_EFR) { unsigned char efr = 0; /* * TI16C752/Startech hardware flow control. FIXME: * - TI16C752 requires control thresholds to be set. 
* - UART_MCR_RTS is ineffective if auto-RTS mode is enabled. */ if (termios->c_cflag & CRTSCTS) efr |= UART_EFR_CTS; serial_outp(up, UART_LCR, 0xBF); serial_outp(up, UART_EFR, efr); } #ifdef CONFIG_ARCH_OMAP /* Workaround to enable 115200 baud on OMAP1510 internal ports */ if (cpu_is_omap1510() && is_omap_port(up)) { if (baud == 115200) { quot = 1; serial_out(up, UART_OMAP_OSC_12M_SEL, 1); } else serial_out(up, UART_OMAP_OSC_12M_SEL, 0); } #endif if (up->capabilities & UART_NATSEMI) { /* Switch to bank 2 not bank 1, to avoid resetting EXCR2 */ serial_outp(up, UART_LCR, 0xe0); } else { serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */ } serial_dl_write(up, quot); /* * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR * is written without DLAB set, this mode will be disabled. */ if (up->port.type == PORT_16750) serial_outp(up, UART_FCR, fcr); serial_outp(up, UART_LCR, cval); /* reset DLAB */ up->lcr = cval; /* Save LCR */ if (up->port.type != PORT_16750) { if (fcr & UART_FCR_ENABLE_FIFO) { /* emulated UARTs (Lucent Venus 167x) need two steps */ serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO); } serial_outp(up, UART_FCR, fcr); /* set fcr */ } serial8250_set_mctrl(&up->port, up->port.mctrl); spin_unlock_irqrestore(&up->port.lock, flags); /* Don't rewrite B0 */ if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, baud, baud); } static void serial8250_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) { struct uart_8250_port *p = (struct uart_8250_port *)port; serial8250_set_sleep(p, state != 0); if (p->pm) p->pm(port, state, oldstate); } static unsigned int serial8250_port_size(struct uart_8250_port *pt) { if (pt->port.iotype == UPIO_AU) return 0x100000; #ifdef CONFIG_ARCH_OMAP if (is_omap_port(pt)) return 0x16 << pt->port.regshift; #endif return 8 << pt->port.regshift; } /* * Resource handling. 
*/ static int serial8250_request_std_resource(struct uart_8250_port *up) { unsigned int size = serial8250_port_size(up); int ret = 0; switch (up->port.iotype) { case UPIO_AU: case UPIO_TSI: case UPIO_MEM32: case UPIO_MEM: case UPIO_DWAPB: if (!up->port.mapbase) break; if (!request_mem_region(up->port.mapbase, size, "serial")) { ret = -EBUSY; break; } if (up->port.flags & UPF_IOREMAP) { up->port.membase = ioremap_nocache(up->port.mapbase, size); if (!up->port.membase) { release_mem_region(up->port.mapbase, size); ret = -ENOMEM; } } break; case UPIO_HUB6: case UPIO_PORT: if (!request_region(up->port.iobase, size, "serial")) ret = -EBUSY; break; } return ret; } static void serial8250_release_std_resource(struct uart_8250_port *up) { unsigned int size = serial8250_port_size(up); switch (up->port.iotype) { case UPIO_AU: case UPIO_TSI: case UPIO_MEM32: case UPIO_MEM: case UPIO_DWAPB: if (!up->port.mapbase) break; if (up->port.flags & UPF_IOREMAP) { iounmap(up->port.membase); up->port.membase = NULL; } release_mem_region(up->port.mapbase, size); break; case UPIO_HUB6: case UPIO_PORT: release_region(up->port.iobase, size); break; } } static int serial8250_request_rsa_resource(struct uart_8250_port *up) { unsigned long start = UART_RSA_BASE << up->port.regshift; unsigned int size = 8 << up->port.regshift; int ret = -EINVAL; switch (up->port.iotype) { case UPIO_HUB6: case UPIO_PORT: start += up->port.iobase; if (request_region(start, size, "serial-rsa")) ret = 0; else ret = -EBUSY; break; } return ret; } static void serial8250_release_rsa_resource(struct uart_8250_port *up) { unsigned long offset = UART_RSA_BASE << up->port.regshift; unsigned int size = 8 << up->port.regshift; switch (up->port.iotype) { case UPIO_HUB6: case UPIO_PORT: release_region(up->port.iobase + offset, size); break; } } static void serial8250_release_port(struct uart_port *port) { struct uart_8250_port *up = (struct uart_8250_port *)port; serial8250_release_std_resource(up); if (up->port.type == 
PORT_RSA) serial8250_release_rsa_resource(up); } static int serial8250_request_port(struct uart_port *port) { struct uart_8250_port *up = (struct uart_8250_port *)port; int ret = 0; ret = serial8250_request_std_resource(up); if (ret == 0 && up->port.type == PORT_RSA) { ret = serial8250_request_rsa_resource(up); if (ret < 0) serial8250_release_std_resource(up); } return ret; } static void serial8250_config_port(struct uart_port *port, int flags) { struct uart_8250_port *up = (struct uart_8250_port *)port; int probeflags = PROBE_ANY; int ret; /* * Find the region that we can probe for. This in turn * tells us whether we can probe for the type of port. */ ret = serial8250_request_std_resource(up); if (ret < 0) return; ret = serial8250_request_rsa_resource(up); if (ret < 0) probeflags &= ~PROBE_RSA; if (up->port.iotype != up->cur_iotype) set_io_from_upio(port); if (flags & UART_CONFIG_TYPE) autoconfig(up, probeflags); if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ) autoconfig_irq(up); if (up->port.type != PORT_RSA && probeflags & PROBE_RSA) serial8250_release_rsa_resource(up); if (up->port.type == PORT_UNKNOWN) serial8250_release_std_resource(up); } static int serial8250_verify_port(struct uart_port *port, struct serial_struct *ser) { if (ser->irq >= nr_irqs || ser->irq < 0 || ser->baud_base < 9600 || ser->type < PORT_UNKNOWN || ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS || ser->type == PORT_STARTECH) return -EINVAL; return 0; } static const char * serial8250_type(struct uart_port *port) { int type = port->type; if (type >= ARRAY_SIZE(uart_config)) type = 0; return uart_config[type].name; } static struct uart_ops serial8250_pops = { .tx_empty = serial8250_tx_empty, .set_mctrl = serial8250_set_mctrl, .get_mctrl = serial8250_get_mctrl, .stop_tx = serial8250_stop_tx, .start_tx = serial8250_start_tx, .stop_rx = serial8250_stop_rx, .enable_ms = serial8250_enable_ms, .break_ctl = serial8250_break_ctl, .startup = serial8250_startup, 
.shutdown = serial8250_shutdown, .set_termios = serial8250_set_termios, .pm = serial8250_pm, .type = serial8250_type, .release_port = serial8250_release_port, .request_port = serial8250_request_port, .config_port = serial8250_config_port, .verify_port = serial8250_verify_port, #ifdef CONFIG_CONSOLE_POLL .poll_get_char = serial8250_get_poll_char, .poll_put_char = serial8250_put_poll_char, #endif }; static struct uart_8250_port serial8250_ports[UART_NR]; static void __init serial8250_isa_init_ports(void) { struct uart_8250_port *up; static int first = 1; int i; if (!first) return; first = 0; for (i = 0; i < nr_uarts; i++) { struct uart_8250_port *up = &serial8250_ports[i]; up->port.line = i; spin_lock_init(&up->port.lock); init_timer(&up->timer); up->timer.function = serial8250_timeout; /* * ALPHA_KLUDGE_MCR needs to be killed. */ up->mcr_mask = ~ALPHA_KLUDGE_MCR; up->mcr_force = ALPHA_KLUDGE_MCR; up->port.ops = &serial8250_pops; } for (i = 0, up = serial8250_ports; i < ARRAY_SIZE(old_serial_port) && i < nr_uarts; i++, up++) { up->port.iobase = old_serial_port[i].port; up->port.irq = irq_canonicalize(old_serial_port[i].irq); up->port.irqflags = old_serial_port[i].irqflags; up->port.uartclk = old_serial_port[i].baud_base * 16; up->port.flags = old_serial_port[i].flags; up->port.hub6 = old_serial_port[i].hub6; up->port.membase = old_serial_port[i].iomem_base; up->port.iotype = old_serial_port[i].io_type; up->port.regshift = old_serial_port[i].iomem_reg_shift; set_io_from_upio(&up->port); if (share_irqs) up->port.irqflags |= IRQF_SHARED; } } static void __init serial8250_register_ports(struct uart_driver *drv, struct device *dev) { int i; for (i = 0; i < nr_uarts; i++) { struct uart_8250_port *up = &serial8250_ports[i]; up->cur_iotype = 0xFF; } serial8250_isa_init_ports(); for (i = 0; i < nr_uarts; i++) { struct uart_8250_port *up = &serial8250_ports[i]; up->port.dev = dev; uart_add_one_port(drv, &up->port); } } #ifdef CONFIG_SERIAL_8250_CONSOLE static void 
serial8250_console_putchar(struct uart_port *port, int ch) { struct uart_8250_port *up = (struct uart_8250_port *)port; wait_for_xmitr(up, UART_LSR_THRE); serial_out(up, UART_TX, ch); } /* * Print a string to the serial port trying not to disturb * any possible real use of the port... * * The console_lock must be held when we get here. */ static void serial8250_console_write(struct console *co, const char *s, unsigned int count) { struct uart_8250_port *up = &serial8250_ports[co->index]; unsigned long flags; unsigned int ier; int locked = 1; touch_nmi_watchdog(); local_irq_save(flags); if (up->port.sysrq) { /* serial8250_handle_port() already took the lock */ locked = 0; } else if (oops_in_progress) { locked = spin_trylock(&up->port.lock); } else spin_lock(&up->port.lock); /* * First save the IER then disable the interrupts */ ier = serial_in(up, UART_IER); if (up->capabilities & UART_CAP_UUE) serial_out(up, UART_IER, UART_IER_UUE); else serial_out(up, UART_IER, 0); uart_console_write(&up->port, s, count, serial8250_console_putchar); /* * Finally, wait for transmitter to become empty * and restore the IER */ wait_for_xmitr(up, BOTH_EMPTY); serial_out(up, UART_IER, ier); /* * The receive handling will happen properly because the * receive ready bit will still be set; it is not cleared * on read. However, modem control will not, we must * call it if we have saved something in the saved flags * while processing with interrupts off. */ if (up->msr_saved_flags) check_modem_status(up); if (locked) spin_unlock(&up->port.lock); local_irq_restore(flags); } static int __init serial8250_console_setup(struct console *co, char *options) { struct uart_port *port; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. 
*/ if (co->index >= nr_uarts) co->index = 0; port = &serial8250_ports[co->index].port; if (!port->iobase && !port->membase) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(port, co, baud, parity, bits, flow); } static int serial8250_console_early_setup(void) { return serial8250_find_port_for_earlycon(); } static struct console serial8250_console = { .name = "ttyS", .write = serial8250_console_write, .device = uart_console_device, .setup = serial8250_console_setup, .early_setup = serial8250_console_early_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &serial8250_reg, }; static int __init serial8250_console_init(void) { if (nr_uarts > UART_NR) nr_uarts = UART_NR; serial8250_isa_init_ports(); register_console(&serial8250_console); return 0; } console_initcall(serial8250_console_init); int serial8250_find_port(struct uart_port *p) { int line; struct uart_port *port; for (line = 0; line < nr_uarts; line++) { port = &serial8250_ports[line].port; if (uart_match_port(p, port)) return line; } return -ENODEV; } #define SERIAL8250_CONSOLE &serial8250_console #else #define SERIAL8250_CONSOLE NULL #endif static struct uart_driver serial8250_reg = { .owner = THIS_MODULE, .driver_name = "serial", .dev_name = "ttyS", .major = TTY_MAJOR, .minor = 64, .cons = SERIAL8250_CONSOLE, }; /* * early_serial_setup - early registration for 8250 ports * * Setup an 8250 port structure prior to console initialisation. Use * after console initialisation will cause undefined behaviour. 
 */
int __init early_serial_setup(struct uart_port *port)
{
	struct uart_port *p;

	/* The static port table is fixed-size; reject out-of-range lines. */
	if (port->line >= ARRAY_SIZE(serial8250_ports))
		return -ENODEV;

	serial8250_isa_init_ports();
	p = &serial8250_ports[port->line].port;
	/* Copy the caller's hardware description into the static slot. */
	p->iobase       = port->iobase;
	p->membase      = port->membase;
	p->irq          = port->irq;
	p->irqflags     = port->irqflags;
	p->uartclk      = port->uartclk;
	p->fifosize     = port->fifosize;
	p->regshift     = port->regshift;
	p->iotype       = port->iotype;
	p->flags        = port->flags;
	p->mapbase      = port->mapbase;
	p->private_data = port->private_data;
	p->type		= port->type;
	p->line		= port->line;

	/* Install default register accessors for the chosen iotype... */
	set_io_from_upio(p);
	/* ...then honour any platform-specific accessor overrides. */
	if (port->serial_in)
		p->serial_in = port->serial_in;
	if (port->serial_out)
		p->serial_out = port->serial_out;

	return 0;
}

/**
 *	serial8250_suspend_port - suspend one serial port
 *	@line:  serial line number
 *
 *	Suspend one serial port.
 */
void serial8250_suspend_port(int line)
{
	uart_suspend_port(&serial8250_reg, &serial8250_ports[line].port);
}

/**
 *	serial8250_resume_port - resume one serial port
 *	@line:  serial line number
 *
 *	Resume one serial port.
 */
void serial8250_resume_port(int line)
{
	struct uart_8250_port *up = &serial8250_ports[line];

	if (up->capabilities & UART_NATSEMI) {
		unsigned char tmp;

		/* Ensure it's still in high speed mode */
		serial_outp(up, UART_LCR, 0xE0);

		tmp = serial_in(up, 0x04); /* EXCR2 */
		tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
		tmp |= 0x10;  /* 1.625 divisor for baud_base --> 921600 */
		serial_outp(up, 0x04, tmp);

		serial_outp(up, UART_LCR, 0);
	}
	uart_resume_port(&serial8250_reg, &up->port);
}

/*
 * Register a set of serial devices attached to a platform device.  The
 * list is terminated with a zero flags entry, which means we expect
 * all entries to have at least UPF_BOOT_AUTOCONF set.
*/ static int __devinit serial8250_probe(struct platform_device *dev) { struct plat_serial8250_port *p = dev->dev.platform_data; struct uart_port port; int ret, i; memset(&port, 0, sizeof(struct uart_port)); for (i = 0; p && p->flags != 0; p++, i++) { port.iobase = p->iobase; port.membase = p->membase; port.irq = p->irq; port.irqflags = p->irqflags; port.uartclk = p->uartclk; port.regshift = p->regshift; port.iotype = p->iotype; port.flags = p->flags; port.mapbase = p->mapbase; port.hub6 = p->hub6; port.private_data = p->private_data; port.type = p->type; port.serial_in = p->serial_in; port.serial_out = p->serial_out; port.dev = &dev->dev; if (share_irqs) port.irqflags |= IRQF_SHARED; ret = serial8250_register_port(&port); if (ret < 0) { dev_err(&dev->dev, "unable to register port at index %d " "(IO%lx MEM%llx IRQ%d): %d\n", i, p->iobase, (unsigned long long)p->mapbase, p->irq, ret); } } return 0; } /* * Remove serial ports registered against a platform device. */ static int __devexit serial8250_remove(struct platform_device *dev) { int i; for (i = 0; i < nr_uarts; i++) { struct uart_8250_port *up = &serial8250_ports[i]; if (up->port.dev == &dev->dev) serial8250_unregister_port(i); } return 0; } static int serial8250_suspend(struct platform_device *dev, pm_message_t state) { int i; for (i = 0; i < UART_NR; i++) { struct uart_8250_port *up = &serial8250_ports[i]; if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) uart_suspend_port(&serial8250_reg, &up->port); } return 0; } static int serial8250_resume(struct platform_device *dev) { int i; for (i = 0; i < UART_NR; i++) { struct uart_8250_port *up = &serial8250_ports[i]; if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev) serial8250_resume_port(i); } return 0; } static struct platform_driver serial8250_isa_driver = { .probe = serial8250_probe, .remove = __devexit_p(serial8250_remove), .suspend = serial8250_suspend, .resume = serial8250_resume, .driver = { .name = "serial8250", .owner = 
THIS_MODULE, }, }; /* * This "device" covers _all_ ISA 8250-compatible serial devices listed * in the table in include/asm/serial.h */ static struct platform_device *serial8250_isa_devs; /* * serial8250_register_port and serial8250_unregister_port allows for * 16x50 serial ports to be configured at run-time, to support PCMCIA * modems and PCI multiport cards. */ static DEFINE_MUTEX(serial_mutex); static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *port) { int i; /* * First, find a port entry which matches. */ for (i = 0; i < nr_uarts; i++) if (uart_match_port(&serial8250_ports[i].port, port)) return &serial8250_ports[i]; /* * We didn't find a matching entry, so look for the first * free entry. We look for one which hasn't been previously * used (indicated by zero iobase). */ for (i = 0; i < nr_uarts; i++) if (serial8250_ports[i].port.type == PORT_UNKNOWN && serial8250_ports[i].port.iobase == 0) return &serial8250_ports[i]; /* * That also failed. Last resort is to find any entry which * doesn't have a real port associated with it. */ for (i = 0; i < nr_uarts; i++) if (serial8250_ports[i].port.type == PORT_UNKNOWN) return &serial8250_ports[i]; return NULL; } /** * serial8250_register_port - register a serial port * @port: serial port template * * Configure the serial port specified by the request. If the * port exists and is in use, it is hung up and unregistered * first. * * The port is then probed and if necessary the IRQ is autodetected * If this fails an error is returned. * * On success the port is ready to use and the line number is returned. 
 */
int serial8250_register_port(struct uart_port *port)
{
	struct uart_8250_port *uart;
	int ret = -ENOSPC;

	/* A zero clock would make divisor math meaningless. */
	if (port->uartclk == 0)
		return -EINVAL;

	mutex_lock(&serial_mutex);

	uart = serial8250_find_match_or_unused(port);
	if (uart) {
		/*
		 * Detach any existing user of the slot before overwriting
		 * its hardware description with the caller's template.
		 */
		uart_remove_one_port(&serial8250_reg, &uart->port);

		uart->port.iobase       = port->iobase;
		uart->port.membase      = port->membase;
		uart->port.irq          = port->irq;
		uart->port.irqflags     = port->irqflags;
		uart->port.uartclk      = port->uartclk;
		uart->port.fifosize     = port->fifosize;
		uart->port.regshift     = port->regshift;
		uart->port.iotype       = port->iotype;
		uart->port.flags        = port->flags | UPF_BOOT_AUTOCONF;
		uart->port.mapbase      = port->mapbase;
		uart->port.private_data = port->private_data;
		if (port->dev)
			uart->port.dev = port->dev;

		if (port->flags & UPF_FIXED_TYPE) {
			/*
			 * Caller fixed the UART type: take fifosize and
			 * capabilities straight from the config table
			 * instead of autodetecting.
			 */
			uart->port.type = port->type;
			uart->port.fifosize = uart_config[port->type].fifo_size;
			uart->capabilities = uart_config[port->type].flags;
			uart->tx_loadsz = uart_config[port->type].tx_loadsz;
		}

		set_io_from_upio(&uart->port);
		/* Possibly override default I/O functions.  */
		if (port->serial_in)
			uart->port.serial_in = port->serial_in;
		if (port->serial_out)
			uart->port.serial_out = port->serial_out;

		ret = uart_add_one_port(&serial8250_reg, &uart->port);
		/* On success the return value is the allocated line number. */
		if (ret == 0)
			ret = uart->port.line;
	}
	mutex_unlock(&serial_mutex);

	return ret;
}
EXPORT_SYMBOL(serial8250_register_port);

/**
 *	serial8250_unregister_port - remove a 16x50 serial port at runtime
 *	@line: serial line number
 *
 *	Remove one serial port.  This may not be called from interrupt
 *	context.  We hand the port back to the our control.
 */
void serial8250_unregister_port(int line)
{
	struct uart_8250_port *uart = &serial8250_ports[line];

	mutex_lock(&serial_mutex);
	uart_remove_one_port(&serial8250_reg, &uart->port);
	if (serial8250_isa_devs) {
		/*
		 * Hand the slot back to the legacy ISA "device" so it can
		 * be re-probed/re-used; serial8250_exit() clears
		 * serial8250_isa_devs to suppress this re-registration.
		 */
		uart->port.flags &= ~UPF_BOOT_AUTOCONF;
		uart->port.type = PORT_UNKNOWN;
		uart->port.dev = &serial8250_isa_devs->dev;
		uart_add_one_port(&serial8250_reg, &uart->port);
	} else {
		uart->port.dev = NULL;
	}
	mutex_unlock(&serial_mutex);
}
EXPORT_SYMBOL(serial8250_unregister_port);

/*
 * Module init: register the uart driver, create the legacy ISA platform
 * device, add all configured ports, then register the platform driver.
 * Unwinds in reverse order on failure (goto-based cleanup).
 */
static int __init serial8250_init(void)
{
	int ret;

	if (nr_uarts > UART_NR)
		nr_uarts = UART_NR;

	printk(KERN_INFO "Serial: 8250/16550 driver, "
		"%d ports, IRQ sharing %sabled\n", nr_uarts,
		share_irqs ? "en" : "dis");

#ifdef CONFIG_SPARC
	/* SPARC historically carves its minors out differently. */
	ret = sunserial_register_minors(&serial8250_reg, UART_NR);
#else
	serial8250_reg.nr = UART_NR;
	ret = uart_register_driver(&serial8250_reg);
#endif
	if (ret)
		goto out;

	serial8250_isa_devs = platform_device_alloc("serial8250",
						    PLAT8250_DEV_LEGACY);
	if (!serial8250_isa_devs) {
		ret = -ENOMEM;
		goto unreg_uart_drv;
	}

	ret = platform_device_add(serial8250_isa_devs);
	if (ret)
		goto put_dev;

	serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);

	ret = platform_driver_register(&serial8250_isa_driver);
	if (ret == 0)
		goto out;

	platform_device_del(serial8250_isa_devs);
put_dev:
	platform_device_put(serial8250_isa_devs);
unreg_uart_drv:
#ifdef CONFIG_SPARC
	sunserial_unregister_minors(&serial8250_reg, UART_NR);
#else
	uart_unregister_driver(&serial8250_reg);
#endif
out:
	return ret;
}

static void __exit serial8250_exit(void)
{
	struct platform_device *isa_dev = serial8250_isa_devs;

	/*
	 * This tells serial8250_unregister_port() not to re-register
	 * the ports (thereby making serial8250_isa_driver permanently
	 * in use.)
	 */
	serial8250_isa_devs = NULL;

	platform_driver_unregister(&serial8250_isa_driver);
	platform_device_unregister(isa_dev);

#ifdef CONFIG_SPARC
	sunserial_unregister_minors(&serial8250_reg, UART_NR);
#else
	uart_unregister_driver(&serial8250_reg);
#endif
}

module_init(serial8250_init);
module_exit(serial8250_exit);

EXPORT_SYMBOL(serial8250_suspend_port);
EXPORT_SYMBOL(serial8250_resume_port);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic 8250/16x50 serial driver");

module_param(share_irqs, uint, 0644);
MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
	" (unsafe)");

module_param(nr_uarts, uint, 0644);
MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");

module_param(skip_txen_test, uint, 0644);
MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");

#ifdef CONFIG_SERIAL_8250_RSA
module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
#endif
MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
gpl-2.0
jayk/linux
drivers/nfc/microread/i2c.c
544
6997
/*
 * HCI based Driver for Inside Secure microread NFC Chip - i2c layer
 *
 * Copyright (C) 2013  Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/nfc.h>
#include <net/nfc/hci.h>
#include <net/nfc/llc.h>

#include "microread.h"

#define MICROREAD_I2C_DRIVER_NAME "microread"

/* One leading length byte and one trailing XOR-CRC byte per i2c frame. */
#define MICROREAD_I2C_FRAME_HEADROOM 1
#define MICROREAD_I2C_FRAME_TAILROOM 1

/* framing in HCI mode */
#define MICROREAD_I2C_LLC_LEN		1
#define MICROREAD_I2C_LLC_CRC		1
#define MICROREAD_I2C_LLC_LEN_CRC	(MICROREAD_I2C_LLC_LEN + \
					MICROREAD_I2C_LLC_CRC)
#define MICROREAD_I2C_LLC_MIN_SIZE	(1 + MICROREAD_I2C_LLC_LEN_CRC)
#define MICROREAD_I2C_LLC_MAX_PAYLOAD	29
#define MICROREAD_I2C_LLC_MAX_SIZE	(MICROREAD_I2C_LLC_LEN_CRC + 1 + \
					MICROREAD_I2C_LLC_MAX_PAYLOAD)

/* Per-client physical-layer state handed to the HCI core as phy_id. */
struct microread_i2c_phy {
	struct i2c_client *i2c_dev;
	struct nfc_hci_dev *hdev;

	int irq;	/* NOTE(review): never assigned in this file; client->irq
			 * is used directly everywhere - confirm before use. */

	int hard_fault;		/*
				 * < 0 if hardware error occurred (e.g. i2c err)
				 * and prevents normal operation.
				 */
};

#define I2C_DUMP_SKB(info, skb)					\
do {								\
	pr_debug("%s:\n", info);				\
	print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET,	\
		       16, 1, (skb)->data, (skb)->len, 0);	\
} while (0)

/*
 * Prepend the frame length and append an XOR checksum over length+payload,
 * in place, using the skb's reserved head/tail room.
 */
static void microread_i2c_add_len_crc(struct sk_buff *skb)
{
	int i;
	u8 crc = 0;
	int len;

	len = skb->len;
	*skb_push(skb, 1) = len;

	/* CRC covers the pushed length byte as well as the payload. */
	for (i = 0; i < skb->len; i++)
		crc = crc ^ skb->data[i];

	*skb_put(skb, 1) = crc;
}

/* Undo microread_i2c_add_len_crc() so the caller gets its skb back intact. */
static void microread_i2c_remove_len_crc(struct sk_buff *skb)
{
	skb_pull(skb, MICROREAD_I2C_FRAME_HEADROOM);
	skb_trim(skb, MICROREAD_I2C_FRAME_TAILROOM);
}

/*
 * Verify the trailing XOR checksum of a received frame.
 * Returns 0 on match, -EPERM on mismatch.
 * NOTE(review): logs the same failure twice (pr_err + pr_info).
 */
static int check_crc(struct sk_buff *skb)
{
	int i;
	u8 crc = 0;

	for (i = 0; i < skb->len - 1; i++)
		crc = crc ^ skb->data[i];

	if (crc != skb->data[skb->len-1]) {
		pr_err("CRC error 0x%x != 0x%x\n", crc, skb->data[skb->len-1]);
		pr_info("%s: BAD CRC\n", __func__);
		return -EPERM;
	}

	return 0;
}

/* No power/enable handling needed at the i2c layer. */
static int microread_i2c_enable(void *phy_id)
{
	return 0;
}

static void microread_i2c_disable(void *phy_id)
{
	return;
}

/*
 * Frame and transmit one skb over i2c.
 * Retries once after a delay on -EREMOTEIO (chip may be in standby).
 * Returns 0 on success, a negative errno otherwise; restores the skb
 * to its unframed state before returning.
 */
static int microread_i2c_write(void *phy_id, struct sk_buff *skb)
{
	int r;
	struct microread_i2c_phy *phy = phy_id;
	struct i2c_client *client = phy->i2c_dev;

	/* Once a hard fault is latched, refuse all further traffic. */
	if (phy->hard_fault != 0)
		return phy->hard_fault;

	usleep_range(3000, 6000);

	microread_i2c_add_len_crc(skb);

	I2C_DUMP_SKB("i2c frame written", skb);

	r = i2c_master_send(client, skb->data, skb->len);

	if (r == -EREMOTEIO) {	/* Retry, chip was in standby */
		usleep_range(6000, 10000);
		r = i2c_master_send(client, skb->data, skb->len);
	}

	if (r >= 0) {
		/* A short write is still a failure. */
		if (r != skb->len)
			r = -EREMOTEIO;
		else
			r = 0;
	}

	microread_i2c_remove_len_crc(skb);

	return r;
}

/*
 * Read one framed message: a length byte first, then len bytes of
 * payload+CRC.  On framing/CRC errors the chip's buffer is drained
 * ("flush") so the next read starts clean.
 * NOTE(review): a short payload read frees the skb and returns
 * -EREMOTEIO without flushing - intentional? confirm.
 */
static int microread_i2c_read(struct microread_i2c_phy *phy,
			      struct sk_buff **skb)
{
	int r;
	u8 len;
	u8 tmp[MICROREAD_I2C_LLC_MAX_SIZE - 1];
	struct i2c_client *client = phy->i2c_dev;

	r = i2c_master_recv(client, &len, 1);
	if (r != 1) {
		nfc_err(&client->dev, "cannot read len byte\n");
		return -EREMOTEIO;
	}

	if ((len < MICROREAD_I2C_LLC_MIN_SIZE) ||
	    (len > MICROREAD_I2C_LLC_MAX_SIZE)) {
		nfc_err(&client->dev, "invalid len byte\n");
		r = -EBADMSG;
		goto flush;
	}

	*skb = alloc_skb(1 + len, GFP_KERNEL);
	if (*skb == NULL) {
		r = -ENOMEM;
		goto flush;
	}

	/* Keep the length byte in the skb so check_crc() can cover it. */
	*skb_put(*skb, 1) = len;

	r = i2c_master_recv(client, skb_put(*skb, len), len);
	if (r != len) {
		kfree_skb(*skb);
		return -EREMOTEIO;
	}

	I2C_DUMP_SKB("cc frame read", *skb);

	r = check_crc(*skb);
	if (r != 0) {
		kfree_skb(*skb);
		r = -EBADMSG;
		goto flush;
	}

	/* Strip framing (length byte and trailing CRC) before handing up. */
	skb_pull(*skb, 1);
	skb_trim(*skb, (*skb)->len - MICROREAD_I2C_FRAME_TAILROOM);

	usleep_range(3000, 6000);

	return 0;

flush:
	if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
		r = -EREMOTEIO;

	usleep_range(3000, 6000);

	return r;
}

/*
 * Threaded IRQ handler: read one frame and forward it to the HCI core.
 * A bus-level failure (-EREMOTEIO) latches hard_fault and reports a NULL
 * frame upward; alloc/framing errors are silently dropped.
 */
static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
{
	struct microread_i2c_phy *phy = phy_id;
	struct sk_buff *skb = NULL;
	int r;

	if (!phy || irq != phy->i2c_dev->irq) {
		WARN_ON_ONCE(1);
		return IRQ_NONE;
	}

	if (phy->hard_fault != 0)
		return IRQ_HANDLED;

	r = microread_i2c_read(phy, &skb);
	if (r == -EREMOTEIO) {	/* Perhaps time to retry */
		phy->hard_fault = r;
		nfc_hci_recv_frame(phy->hdev, NULL);

		return IRQ_HANDLED;
	} else if ((r == -ENOMEM) || (r == -EBADMSG)) {
		return IRQ_HANDLED;
	}

	nfc_hci_recv_frame(phy->hdev, skb);

	return IRQ_HANDLED;
}

static struct nfc_phy_ops i2c_phy_ops = {
	.write = microread_i2c_write,
	.enable = microread_i2c_enable,
	.disable = microread_i2c_disable,
};

/*
 * Bind to an i2c client: allocate phy state (devm), hook the threaded
 * IRQ, then register with the microread HCI core.  The IRQ is freed by
 * hand on core-registration failure and in remove().
 */
static int microread_i2c_probe(struct i2c_client *client,
			       const struct i2c_device_id *id)
{
	struct microread_i2c_phy *phy;
	struct microread_nfc_platform_data *pdata =
		dev_get_platdata(&client->dev);
	int r;

	dev_dbg(&client->dev, "client %p\n", client);

	if (!pdata) {
		nfc_err(&client->dev, "client %p: missing platform data\n",
			client);
		return -EINVAL;
	}

	phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
			   GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	i2c_set_clientdata(client, phy);
	phy->i2c_dev = client;

	r = request_threaded_irq(client->irq, NULL, microread_i2c_irq_thread_fn,
				 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				 MICROREAD_I2C_DRIVER_NAME, phy);
	if (r) {
		nfc_err(&client->dev, "Unable to register IRQ handler\n");
		return r;
	}

	r = microread_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
			    MICROREAD_I2C_FRAME_HEADROOM,
			    MICROREAD_I2C_FRAME_TAILROOM,
			    MICROREAD_I2C_LLC_MAX_PAYLOAD, &phy->hdev);
	if (r < 0)
		goto err_irq;

	nfc_info(&client->dev, "Probed\n");

	return 0;

err_irq:
	free_irq(client->irq, phy);

	return r;
}

/* Tear down in reverse probe order; phy itself is devm-managed. */
static int microread_i2c_remove(struct i2c_client *client)
{
	struct microread_i2c_phy *phy = i2c_get_clientdata(client);

	microread_remove(phy->hdev);

	free_irq(client->irq, phy);

	return 0;
}

static struct i2c_device_id microread_i2c_id[] = {
	{ MICROREAD_I2C_DRIVER_NAME, 0},
	{ }
};
MODULE_DEVICE_TABLE(i2c, microread_i2c_id);

static struct i2c_driver microread_i2c_driver = {
	.driver = {
		.name = MICROREAD_I2C_DRIVER_NAME,
	},
	.probe		= microread_i2c_probe,
	.remove		= microread_i2c_remove,
	.id_table	= microread_i2c_id,
};

module_i2c_driver(microread_i2c_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
gpl-2.0
sirmordred/samsung-kernel-msm7x30-2
drivers/input/touchscreen/synaptics_i2c_rmi4.c
544
73128
/* * Synaptics RMI4 touchscreen driver * * Copyright (C) 2012 Synaptics Incorporated * * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com> * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com> * Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/input/synaptics_dsx.h> #include <linux/of_gpio.h> #include "synaptics_i2c_rmi4.h" #include <linux/input/mt.h> #define DRIVER_NAME "synaptics_rmi4_i2c" #define INPUT_PHYS_NAME "synaptics_rmi4_i2c/input0" #define RESET_DELAY 100 #define TYPE_B_PROTOCOL #define NO_0D_WHILE_2D /* #define REPORT_2D_Z */ #define REPORT_2D_W #define RPT_TYPE (1 << 0) #define RPT_X_LSB (1 << 1) #define RPT_X_MSB (1 << 2) #define RPT_Y_LSB (1 << 3) #define RPT_Y_MSB (1 << 4) #define RPT_Z (1 << 5) #define RPT_WX (1 << 6) #define RPT_WY (1 << 7) #define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB) #define EXP_FN_DET_INTERVAL 1000 /* ms */ #define POLLING_PERIOD 1 /* ms */ #define SYN_I2C_RETRY_TIMES 10 #define MAX_ABS_MT_TOUCH_MAJOR 15 #define F01_STD_QUERY_LEN 21 #define F01_BUID_ID_OFFSET 18 #define F11_STD_QUERY_LEN 9 #define F11_STD_CTRL_LEN 10 #define F11_STD_DATA_LEN 12 #define NORMAL_OPERATION (0 << 0) #define SENSOR_SLEEP (1 << 0) #define 
NO_SLEEP_OFF (0 << 2) #define NO_SLEEP_ON (1 << 2) enum device_status { STATUS_NO_ERROR = 0x00, STATUS_RESET_OCCURED = 0x01, STATUS_INVALID_CONFIG = 0x02, STATUS_DEVICE_FAILURE = 0x03, STATUS_CONFIG_CRC_FAILURE = 0x04, STATUS_FIRMWARE_CRC_FAILURE = 0x05, STATUS_CRC_IN_PROGRESS = 0x06 }; #define RMI4_VTG_MIN_UV 2700000 #define RMI4_VTG_MAX_UV 3300000 #define RMI4_ACTIVE_LOAD_UA 15000 #define RMI4_LPM_LOAD_UA 10 #define RMI4_I2C_VTG_MIN_UV 1800000 #define RMI4_I2C_VTG_MAX_UV 1800000 #define RMI4_I2C_LOAD_UA 10000 #define RMI4_I2C_LPM_LOAD_UA 10 #define RMI4_GPIO_SLEEP_LOW_US 10000 static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length); static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length); static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data); #ifdef CONFIG_PM static int synaptics_rmi4_suspend(struct device *dev); static int synaptics_rmi4_resume(struct device *dev); static ssize_t synaptics_rmi4_full_pm_cycle_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); #if defined(CONFIG_FB) static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data); #elif defined(CONFIG_HAS_EARLYSUSPEND) static void synaptics_rmi4_early_suspend(struct early_suspend *h); static void synaptics_rmi4_late_resume(struct early_suspend *h); #endif #endif static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t 
synaptics_rmi4_f01_flashprog_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static ssize_t synaptics_rmi4_flipx_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_flipx_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static ssize_t synaptics_rmi4_flipy_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_flipy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); struct synaptics_rmi4_f01_device_status { union { struct { unsigned char status_code:4; unsigned char reserved:2; unsigned char flash_prog:1; unsigned char unconfigured:1; } __packed; unsigned char data[1]; }; }; struct synaptics_rmi4_f1a_query { union { struct { unsigned char max_button_count:3; unsigned char reserved:5; unsigned char has_general_control:1; unsigned char has_interrupt_enable:1; unsigned char has_multibutton_select:1; unsigned char has_tx_rx_map:1; unsigned char has_perbutton_threshold:1; unsigned char has_release_threshold:1; unsigned char has_strongestbtn_hysteresis:1; unsigned char has_filter_strength:1; } __packed; unsigned char data[2]; }; }; struct synaptics_rmi4_f1a_control_0 { union { struct { unsigned char multibutton_report:2; unsigned char filter_mode:2; unsigned char reserved:4; } __packed; unsigned char data[1]; }; }; struct synaptics_rmi4_f1a_control_3_4 { unsigned char transmitterbutton; unsigned char receiverbutton; }; struct synaptics_rmi4_f1a_control { struct synaptics_rmi4_f1a_control_0 general_control; unsigned char *button_int_enable; unsigned char *multi_button; struct synaptics_rmi4_f1a_control_3_4 *electrode_map; unsigned char *button_threshold; 
unsigned char button_release_threshold; unsigned char strongest_button_hysteresis; unsigned char filter_strength; }; struct synaptics_rmi4_f1a_handle { int button_bitmask_size; unsigned char button_count; unsigned char valid_button_count; unsigned char *button_data_buffer; unsigned char *button_map; struct synaptics_rmi4_f1a_query button_query; struct synaptics_rmi4_f1a_control button_control; }; struct synaptics_rmi4_exp_fn { enum exp_fn fn_type; bool inserted; int (*func_init)(struct synaptics_rmi4_data *rmi4_data); void (*func_remove)(struct synaptics_rmi4_data *rmi4_data); void (*func_attn)(struct synaptics_rmi4_data *rmi4_data, unsigned char intr_mask); struct list_head link; }; static struct device_attribute attrs[] = { #ifdef CONFIG_PM __ATTR(full_pm_cycle, S_IRUGO | S_IWUSR | S_IWGRP, synaptics_rmi4_full_pm_cycle_show, synaptics_rmi4_full_pm_cycle_store), #endif __ATTR(reset, S_IWUSR | S_IWGRP, NULL, synaptics_rmi4_f01_reset_store), __ATTR(productinfo, S_IRUGO, synaptics_rmi4_f01_productinfo_show, synaptics_rmi4_store_error), __ATTR(buildid, S_IRUGO, synaptics_rmi4_f01_buildid_show, synaptics_rmi4_store_error), __ATTR(flashprog, S_IRUGO, synaptics_rmi4_f01_flashprog_show, synaptics_rmi4_store_error), __ATTR(0dbutton, S_IRUGO | S_IWUSR | S_IWGRP, synaptics_rmi4_0dbutton_show, synaptics_rmi4_0dbutton_store), __ATTR(flipx, (S_IRUGO | S_IWUSR | S_IWGRP), synaptics_rmi4_flipx_show, synaptics_rmi4_flipx_store), __ATTR(flipy, (S_IRUGO | S_IWUSR | S_IWGRP), synaptics_rmi4_flipy_show, synaptics_rmi4_flipy_store), }; static bool exp_fn_inited; static struct mutex exp_fn_list_mutex; static struct list_head exp_fn_list; #ifdef CONFIG_PM static ssize_t synaptics_rmi4_full_pm_cycle_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%u\n", rmi4_data->full_pm_cycle); } static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev, struct device_attribute 
*attr, const char *buf, size_t count) { unsigned int input; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); if (sscanf(buf, "%u", &input) != 1) return -EINVAL; rmi4_data->full_pm_cycle = input > 0 ? 1 : 0; return count; } #ifdef CONFIG_FB static void configure_sleep(struct synaptics_rmi4_data *rmi4_data) { int retval = 0; rmi4_data->fb_notif.notifier_call = fb_notifier_callback; retval = fb_register_client(&rmi4_data->fb_notif); if (retval) dev_err(&rmi4_data->i2c_client->dev, "Unable to register fb_notifier: %d\n", retval); return; } #elif defined CONFIG_HAS_EARLYSUSPEND static void configure_sleep(struct synaptics_rmi4_data *rmi4_data) { rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend; rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume; register_early_suspend(&rmi4_data->early_suspend); return; } #else static void configure_sleep(struct synaptics_rmi4_data *rmi4_data) { return; } #endif #endif static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int retval; unsigned int reset; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); if (sscanf(buf, "%u", &reset) != 1) return -EINVAL; if (reset != 1) return -EINVAL; retval = synaptics_rmi4_reset_device(rmi4_data); if (retval < 0) { dev_err(dev, "%s: Failed to issue reset command, error = %d\n", __func__, retval); return retval; } return count; } static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n", (rmi4_data->rmi4_mod_info.product_info[0]), (rmi4_data->rmi4_mod_info.product_info[1])); } static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int build_id; struct synaptics_rmi4_data 
*rmi4_data = dev_get_drvdata(dev); struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); build_id = (unsigned int)rmi->build_id[0] + (unsigned int)rmi->build_id[1] * 0x100 + (unsigned int)rmi->build_id[2] * 0x10000; return snprintf(buf, PAGE_SIZE, "%u\n", build_id); } static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev, struct device_attribute *attr, char *buf) { int retval; struct synaptics_rmi4_f01_device_status device_status; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_data_base_addr, device_status.data, sizeof(device_status.data)); if (retval < 0) { dev_err(dev, "%s: Failed to read device status, error = %d\n", __func__, retval); return retval; } return snprintf(buf, PAGE_SIZE, "%u\n", device_status.flash_prog); } static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%u\n", rmi4_data->button_0d_enabled); } static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int retval; unsigned int input; unsigned char ii; unsigned char intr_enable; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); if (sscanf(buf, "%u", &input) != 1) return -EINVAL; input = input > 0 ? 
1 : 0; if (rmi4_data->button_0d_enabled == input) return count; if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) { ii = fhandler->intr_reg_num; retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_ctrl_base_addr + 1 + ii, &intr_enable, sizeof(intr_enable)); if (retval < 0) return retval; if (input == 1) intr_enable |= fhandler->intr_mask; else intr_enable &= ~fhandler->intr_mask; retval = synaptics_rmi4_i2c_write(rmi4_data, rmi4_data->f01_ctrl_base_addr + 1 + ii, &intr_enable, sizeof(intr_enable)); if (retval < 0) return retval; } } } rmi4_data->button_0d_enabled = input; return count; } static ssize_t synaptics_rmi4_flipx_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%u\n", rmi4_data->flip_x); } static ssize_t synaptics_rmi4_flipx_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int input; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); if (sscanf(buf, "%u", &input) != 1) return -EINVAL; rmi4_data->flip_x = input > 0 ? 1 : 0; return count; } static ssize_t synaptics_rmi4_flipy_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%u\n", rmi4_data->flip_y); } static ssize_t synaptics_rmi4_flipy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int input; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); if (sscanf(buf, "%u", &input) != 1) return -EINVAL; rmi4_data->flip_y = input > 0 ? 1 : 0; return count; } /** * synaptics_rmi4_set_page() * * Called by synaptics_rmi4_i2c_read() and synaptics_rmi4_i2c_write(). * * This function writes to the page select register to switch to the * assigned page. 
*/ static int synaptics_rmi4_set_page(struct synaptics_rmi4_data *rmi4_data, unsigned int address) { int retval = 0; unsigned char retry; unsigned char buf[PAGE_SELECT_LEN]; unsigned char page; struct i2c_client *i2c = rmi4_data->i2c_client; page = ((address >> 8) & MASK_8BIT); if (page != rmi4_data->current_page) { buf[0] = MASK_8BIT; buf[1] = page; for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) { retval = i2c_master_send(i2c, buf, PAGE_SELECT_LEN); if (retval != PAGE_SELECT_LEN) { dev_err(&i2c->dev, "%s: I2C retry %d\n", __func__, retry + 1); msleep(20); } else { rmi4_data->current_page = page; break; } } } else return PAGE_SELECT_LEN; return (retval == PAGE_SELECT_LEN) ? retval : -EIO; } /** * synaptics_rmi4_i2c_read() * * Called by various functions in this driver, and also exported to * other expansion Function modules such as rmi_dev. * * This function reads data of an arbitrary length from the sensor, * starting from an assigned register address of the sensor, via I2C * with a retry mechanism. 
*/ static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length) { int retval; unsigned char retry; unsigned char buf; struct i2c_msg msg[] = { { .addr = rmi4_data->i2c_client->addr, .flags = 0, .len = 1, .buf = &buf, }, { .addr = rmi4_data->i2c_client->addr, .flags = I2C_M_RD, .len = length, .buf = data, }, }; buf = addr & MASK_8BIT; mutex_lock(&(rmi4_data->rmi4_io_ctrl_mutex)); retval = synaptics_rmi4_set_page(rmi4_data, addr); if (retval != PAGE_SELECT_LEN) goto exit; for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) { if (i2c_transfer(rmi4_data->i2c_client->adapter, msg, 2) == 2) { retval = length; break; } dev_err(&rmi4_data->i2c_client->dev, "%s: I2C retry %d\n", __func__, retry + 1); msleep(20); } if (retry == SYN_I2C_RETRY_TIMES) { dev_err(&rmi4_data->i2c_client->dev, "%s: I2C read over retry limit\n", __func__); retval = -EIO; } exit: mutex_unlock(&(rmi4_data->rmi4_io_ctrl_mutex)); return retval; } /** * synaptics_rmi4_i2c_write() * * Called by various functions in this driver, and also exported to * other expansion Function modules such as rmi_dev. * * This function writes data of an arbitrary length to the sensor, * starting from an assigned register address of the sensor, via I2C with * a retry mechanism. 
*/ static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length) { int retval; unsigned char retry; unsigned char buf[length + 1]; struct i2c_msg msg[] = { { .addr = rmi4_data->i2c_client->addr, .flags = 0, .len = length + 1, .buf = buf, } }; mutex_lock(&(rmi4_data->rmi4_io_ctrl_mutex)); retval = synaptics_rmi4_set_page(rmi4_data, addr); if (retval != PAGE_SELECT_LEN) goto exit; buf[0] = addr & MASK_8BIT; memcpy(&buf[1], &data[0], length); for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) { if (i2c_transfer(rmi4_data->i2c_client->adapter, msg, 1) == 1) { retval = length; break; } dev_err(&rmi4_data->i2c_client->dev, "%s: I2C retry %d\n", __func__, retry + 1); msleep(20); } if (retry == SYN_I2C_RETRY_TIMES) { dev_err(&rmi4_data->i2c_client->dev, "%s: I2C write over retry limit\n", __func__); retval = -EIO; } exit: mutex_unlock(&(rmi4_data->rmi4_io_ctrl_mutex)); return retval; } /** * synaptics_rmi4_f11_abs_report() * * Called by synaptics_rmi4_report_touch() when valid Function $11 * finger data has been detected. * * This function reads the Function $11 data registers, determines the * status of each finger supported by the Function, processes any * necessary coordinate manipulation, reports the finger data to * the input subsystem, and returns the number of fingers detected. 
*/ static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler) { int retval; unsigned char touch_count = 0; /* number of touch points */ unsigned char reg_index; unsigned char finger; unsigned char fingers_supported; unsigned char num_of_finger_status_regs; unsigned char finger_shift; unsigned char finger_status; unsigned char data_reg_blk_size; unsigned char finger_status_reg[3]; unsigned char data[F11_STD_DATA_LEN]; unsigned short data_addr; unsigned short data_offset; int x; int y; int wx; int wy; int z; /* * The number of finger status registers is determined by the * maximum number of fingers supported - 2 bits per finger. So * the number of finger status registers to read is: * register_count = ceil(max_num_of_fingers / 4) */ fingers_supported = fhandler->num_of_data_points; num_of_finger_status_regs = (fingers_supported + 3) / 4; data_addr = fhandler->full_addr.data_base; data_reg_blk_size = fhandler->size_of_data_register_block; retval = synaptics_rmi4_i2c_read(rmi4_data, data_addr, finger_status_reg, num_of_finger_status_regs); if (retval < 0) return 0; for (finger = 0; finger < fingers_supported; finger++) { reg_index = finger / 4; finger_shift = (finger % 4) * 2; finger_status = (finger_status_reg[reg_index] >> finger_shift) & MASK_2BIT; /* * Each 2-bit finger status field represents the following: * 00 = finger not present * 01 = finger present and data accurate * 10 = finger present but data may be inaccurate * 11 = reserved */ #ifdef TYPE_B_PROTOCOL input_mt_slot(rmi4_data->input_dev, finger); input_mt_report_slot_state(rmi4_data->input_dev, MT_TOOL_FINGER, finger_status != 0); #endif if (finger_status) { data_offset = data_addr + num_of_finger_status_regs + (finger * data_reg_blk_size); retval = synaptics_rmi4_i2c_read(rmi4_data, data_offset, data, data_reg_blk_size); if (retval < 0) return 0; x = (data[0] << 4) | (data[2] & MASK_4BIT); y = (data[1] << 4) | ((data[2] >> 4) & MASK_4BIT); wx = 
(data[3] & MASK_4BIT); wy = (data[3] >> 4) & MASK_4BIT; z = data[4]; if (rmi4_data->flip_x) x = rmi4_data->sensor_max_x - x; if (rmi4_data->flip_y) y = rmi4_data->sensor_max_y - y; dev_dbg(&rmi4_data->i2c_client->dev, "%s: Finger %d:\n" "status = 0x%02x\n" "x = %d\n" "y = %d\n" "wx = %d\n" "wy = %d\n", __func__, finger, finger_status, x, y, wx, wy); input_report_abs(rmi4_data->input_dev, ABS_MT_POSITION_X, x); input_report_abs(rmi4_data->input_dev, ABS_MT_POSITION_Y, y); input_report_abs(rmi4_data->input_dev, ABS_MT_PRESSURE, z); #ifdef REPORT_2D_W input_report_abs(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, max(wx, wy)); input_report_abs(rmi4_data->input_dev, ABS_MT_TOUCH_MINOR, min(wx, wy)); #endif #ifndef TYPE_B_PROTOCOL input_mt_sync(rmi4_data->input_dev); #endif touch_count++; } } input_report_key(rmi4_data->input_dev, BTN_TOUCH, touch_count > 0); input_report_key(rmi4_data->input_dev, BTN_TOOL_FINGER, touch_count > 0); #ifndef TYPE_B_PROTOCOL if (!touch_count) input_mt_sync(rmi4_data->input_dev); #else /* sync after groups of events */ #ifdef KERNEL_ABOVE_3_7 input_mt_sync_frame(rmi4_data->input_dev); #endif #endif input_sync(rmi4_data->input_dev); return touch_count; } static void synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler) { int retval; unsigned char button; unsigned char index; unsigned char shift; unsigned char status; unsigned char *data; unsigned short data_addr = fhandler->full_addr.data_base; struct synaptics_rmi4_f1a_handle *f1a = fhandler->data; static unsigned char do_once = 1; static bool current_status[MAX_NUMBER_OF_BUTTONS]; #ifdef NO_0D_WHILE_2D static bool before_2d_status[MAX_NUMBER_OF_BUTTONS]; static bool while_2d_status[MAX_NUMBER_OF_BUTTONS]; #endif if (do_once) { memset(current_status, 0, sizeof(current_status)); #ifdef NO_0D_WHILE_2D memset(before_2d_status, 0, sizeof(before_2d_status)); memset(while_2d_status, 0, sizeof(while_2d_status)); #endif do_once = 0; } retval = 
synaptics_rmi4_i2c_read(rmi4_data, data_addr, f1a->button_data_buffer, f1a->button_bitmask_size); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to read button data registers\n", __func__); return; } data = f1a->button_data_buffer; for (button = 0; button < f1a->valid_button_count; button++) { index = button / 8; shift = button % 8; status = ((data[index] >> shift) & MASK_1BIT); if (current_status[button] == status) continue; else current_status[button] = status; dev_dbg(&rmi4_data->i2c_client->dev, "%s: Button %d (code %d) ->%d\n", __func__, button, f1a->button_map[button], status); #ifdef NO_0D_WHILE_2D if (rmi4_data->fingers_on_2d == false) { if (status == 1) { before_2d_status[button] = 1; } else { if (while_2d_status[button] == 1) { while_2d_status[button] = 0; continue; } else { before_2d_status[button] = 0; } } input_report_key(rmi4_data->input_dev, f1a->button_map[button], status); } else { if (before_2d_status[button] == 1) { before_2d_status[button] = 0; input_report_key(rmi4_data->input_dev, f1a->button_map[button], status); } else { if (status == 1) while_2d_status[button] = 1; else while_2d_status[button] = 0; } } #else input_report_key(rmi4_data->input_dev, f1a->button_map[button], status); #endif } input_sync(rmi4_data->input_dev); return; } /** * synaptics_rmi4_report_touch() * * Called by synaptics_rmi4_sensor_report(). * * This function calls the appropriate finger data reporting function * based on the function handler it receives and returns the number of * fingers detected. 
*/ static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler, unsigned char *touch_count) { unsigned char touch_count_2d; dev_dbg(&rmi4_data->i2c_client->dev, "%s: Function %02x reporting\n", __func__, fhandler->fn_number); switch (fhandler->fn_number) { case SYNAPTICS_RMI4_F11: touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data, fhandler); *touch_count += touch_count_2d; if (touch_count_2d) rmi4_data->fingers_on_2d = true; else rmi4_data->fingers_on_2d = false; break; case SYNAPTICS_RMI4_F1A: synaptics_rmi4_f1a_report(rmi4_data, fhandler); break; default: break; } return; } /** * synaptics_rmi4_sensor_report() * * Called by synaptics_rmi4_irq(). * * This function determines the interrupt source(s) from the sensor * and calls synaptics_rmi4_report_touch() with the appropriate * function handler for each function with valid data inputs. */ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data) { int retval; unsigned char touch_count = 0; unsigned char intr[MAX_INTR_REGISTERS]; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_exp_fn *exp_fhandler; struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); /* * Get interrupt status information from F01 Data1 register to * determine the source(s) that are flagging the interrupt. */ retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_data_base_addr + 1, intr, rmi4_data->num_of_intr_regs); if (retval < 0) return retval; /* * Traverse the function handler list and service the source(s) * of the interrupt accordingly. 
*/ if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) { if (fhandler->num_of_data_sources) { if (fhandler->intr_mask & intr[fhandler->intr_reg_num]) { synaptics_rmi4_report_touch(rmi4_data, fhandler, &touch_count); } } } } mutex_lock(&exp_fn_list_mutex); if (!list_empty(&exp_fn_list)) { list_for_each_entry(exp_fhandler, &exp_fn_list, link) { if (exp_fhandler->inserted && (exp_fhandler->func_attn != NULL)) exp_fhandler->func_attn(rmi4_data, intr[0]); } } mutex_unlock(&exp_fn_list_mutex); return touch_count; } /** * synaptics_rmi4_irq() * * Called by the kernel when an interrupt occurs (when the sensor * asserts the attention irq). * * This function is the ISR thread and handles the acquisition * and the reporting of finger data when the presence of fingers * is detected. */ static irqreturn_t synaptics_rmi4_irq(int irq, void *data) { struct synaptics_rmi4_data *rmi4_data = data; synaptics_rmi4_sensor_report(rmi4_data); return IRQ_HANDLED; } static int synaptics_rmi4_parse_dt(struct device *dev, struct synaptics_rmi4_platform_data *rmi4_pdata) { struct device_node *np = dev->of_node; struct property *prop; u32 temp_val, num_buttons; u32 button_map[MAX_NUMBER_OF_BUTTONS]; int rc, i; rmi4_pdata->i2c_pull_up = of_property_read_bool(np, "synaptics,i2c-pull-up"); rmi4_pdata->regulator_en = of_property_read_bool(np, "synaptics,reg-en"); rmi4_pdata->x_flip = of_property_read_bool(np, "synaptics,x-flip"); rmi4_pdata->y_flip = of_property_read_bool(np, "synaptics,y-flip"); rc = of_property_read_u32(np, "synaptics,panel-x", &temp_val); if (rc && (rc != -EINVAL)) { dev_err(dev, "Unable to read panel X dimension\n"); return rc; } else { rmi4_pdata->panel_x = temp_val; } rc = of_property_read_u32(np, "synaptics,panel-y", &temp_val); if (rc && (rc != -EINVAL)) { dev_err(dev, "Unable to read panel Y dimension\n"); return rc; } else { rmi4_pdata->panel_y = temp_val; } rc = of_property_read_string(np, "synaptics,fw-image-name", 
&rmi4_pdata->fw_image_name); if (rc && (rc != -EINVAL)) { dev_err(dev, "Unable to read fw image name\n"); return rc; } /* reset, irq gpio info */ rmi4_pdata->reset_gpio = of_get_named_gpio_flags(np, "synaptics,reset-gpio", 0, &rmi4_pdata->reset_flags); rmi4_pdata->irq_gpio = of_get_named_gpio_flags(np, "synaptics,irq-gpio", 0, &rmi4_pdata->irq_flags); prop = of_find_property(np, "synaptics,button-map", NULL); if (prop) { num_buttons = prop->length / sizeof(temp_val); rmi4_pdata->capacitance_button_map = devm_kzalloc(dev, sizeof(*rmi4_pdata->capacitance_button_map), GFP_KERNEL); if (!rmi4_pdata->capacitance_button_map) return -ENOMEM; rmi4_pdata->capacitance_button_map->map = devm_kzalloc(dev, sizeof(*rmi4_pdata->capacitance_button_map->map) * MAX_NUMBER_OF_BUTTONS, GFP_KERNEL); if (!rmi4_pdata->capacitance_button_map->map) return -ENOMEM; if (num_buttons <= MAX_NUMBER_OF_BUTTONS) { rc = of_property_read_u32_array(np, "synaptics,button-map", button_map, num_buttons); if (rc) { dev_err(dev, "Unable to read key codes\n"); return rc; } for (i = 0; i < num_buttons; i++) rmi4_pdata->capacitance_button_map->map[i] = button_map[i]; rmi4_pdata->capacitance_button_map->nbuttons = num_buttons; } else { return -EINVAL; } } return 0; } /** * synaptics_rmi4_irq_enable() * * Called by synaptics_rmi4_probe() and the power management functions * in this driver and also exported to other expansion Function modules * such as rmi_dev. * * This function handles the enabling and disabling of the attention * irq including the setting up of the ISR thread. 
*/ static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data, bool enable) { int retval = 0; unsigned char *intr_status; if (enable) { if (rmi4_data->irq_enabled) return retval; intr_status = kzalloc(rmi4_data->num_of_intr_regs, GFP_KERNEL); if (!intr_status) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to alloc memory\n", __func__); return -ENOMEM; } /* Clear interrupts first */ retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_data_base_addr + 1, intr_status, rmi4_data->num_of_intr_regs); kfree(intr_status); if (retval < 0) return retval; enable_irq(rmi4_data->irq); rmi4_data->irq_enabled = true; } else { if (rmi4_data->irq_enabled) { disable_irq(rmi4_data->irq); rmi4_data->irq_enabled = false; } } return retval; } /** * synaptics_rmi4_f11_init() * * Called by synaptics_rmi4_query_device(). * * This funtion parses information from the Function 11 registers * and determines the number of fingers supported, x and y data ranges, * offset to the associated interrupt status register, interrupt bit * mask, and gathers finger data acquisition capabilities from the query * registers. 
*/ static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler, struct synaptics_rmi4_fn_desc *fd, unsigned int intr_count) { int retval; unsigned char ii; unsigned char intr_offset; unsigned char abs_data_size; unsigned char abs_data_blk_size; unsigned char query[F11_STD_QUERY_LEN]; unsigned char control[F11_STD_CTRL_LEN]; fhandler->fn_number = fd->fn_number; fhandler->num_of_data_sources = fd->intr_src_count; retval = synaptics_rmi4_i2c_read(rmi4_data, fhandler->full_addr.query_base, query, sizeof(query)); if (retval < 0) return retval; /* Maximum number of fingers supported */ if ((query[1] & MASK_3BIT) <= 4) fhandler->num_of_data_points = (query[1] & MASK_3BIT) + 1; else if ((query[1] & MASK_3BIT) == 5) fhandler->num_of_data_points = 10; rmi4_data->num_of_fingers = fhandler->num_of_data_points; retval = synaptics_rmi4_i2c_read(rmi4_data, fhandler->full_addr.ctrl_base, control, sizeof(control)); if (retval < 0) return retval; /* Maximum x and y */ rmi4_data->sensor_max_x = ((control[6] & MASK_8BIT) << 0) | ((control[7] & MASK_4BIT) << 8); rmi4_data->sensor_max_y = ((control[8] & MASK_8BIT) << 0) | ((control[9] & MASK_4BIT) << 8); dev_dbg(&rmi4_data->i2c_client->dev, "%s: Function %02x max x = %d max y = %d\n", __func__, fhandler->fn_number, rmi4_data->sensor_max_x, rmi4_data->sensor_max_y); fhandler->intr_reg_num = (intr_count + 7) / 8; if (fhandler->intr_reg_num != 0) fhandler->intr_reg_num -= 1; /* Set an enable bit for each data source */ intr_offset = intr_count % 8; fhandler->intr_mask = 0; for (ii = intr_offset; ii < ((fd->intr_src_count & MASK_3BIT) + intr_offset); ii++) fhandler->intr_mask |= 1 << ii; abs_data_size = query[5] & MASK_2BIT; abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 
1 : 0)); fhandler->size_of_data_register_block = abs_data_blk_size; return retval; } static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler) { int retval; struct synaptics_rmi4_f1a_handle *f1a; f1a = kzalloc(sizeof(*f1a), GFP_KERNEL); if (!f1a) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to alloc mem for function handle\n", __func__); return -ENOMEM; } fhandler->data = (void *)f1a; retval = synaptics_rmi4_i2c_read(rmi4_data, fhandler->full_addr.query_base, f1a->button_query.data, sizeof(f1a->button_query.data)); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to read query registers\n", __func__); return retval; } f1a->button_count = f1a->button_query.max_button_count + 1; f1a->button_bitmask_size = (f1a->button_count + 7) / 8; f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size, sizeof(*(f1a->button_data_buffer)), GFP_KERNEL); if (!f1a->button_data_buffer) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to alloc mem for data buffer\n", __func__); return -ENOMEM; } f1a->button_map = kcalloc(f1a->button_count, sizeof(*(f1a->button_map)), GFP_KERNEL); if (!f1a->button_map) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to alloc mem for button map\n", __func__); return -ENOMEM; } return 0; } static int synaptics_rmi4_capacitance_button_map( struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler) { unsigned char ii; struct synaptics_rmi4_f1a_handle *f1a = fhandler->data; const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board; if (!pdata->capacitance_button_map) { dev_err(&rmi4_data->i2c_client->dev, "%s: capacitance_button_map is" \ "NULL in board file\n", __func__); return -ENODEV; } else if (!pdata->capacitance_button_map->map) { dev_err(&rmi4_data->i2c_client->dev, "%s: Button map is missing in board file\n", __func__); return -ENODEV; } else { if (pdata->capacitance_button_map->nbuttons != f1a->button_count) { f1a->valid_button_count = 
min(f1a->button_count, pdata->capacitance_button_map->nbuttons); } else { f1a->valid_button_count = f1a->button_count; } for (ii = 0; ii < f1a->valid_button_count; ii++) f1a->button_map[ii] = pdata->capacitance_button_map->map[ii]; } return 0; } static void synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler) { struct synaptics_rmi4_f1a_handle *f1a = fhandler->data; if (f1a) { kfree(f1a->button_data_buffer); kfree(f1a->button_map); kfree(f1a); fhandler->data = NULL; } return; } static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler, struct synaptics_rmi4_fn_desc *fd, unsigned int intr_count) { int retval; unsigned char ii; unsigned short intr_offset; fhandler->fn_number = fd->fn_number; fhandler->num_of_data_sources = fd->intr_src_count; fhandler->intr_reg_num = (intr_count + 7) / 8; if (fhandler->intr_reg_num != 0) fhandler->intr_reg_num -= 1; /* Set an enable bit for each data source */ intr_offset = intr_count % 8; fhandler->intr_mask = 0; for (ii = intr_offset; ii < ((fd->intr_src_count & MASK_3BIT) + intr_offset); ii++) fhandler->intr_mask |= 1 << ii; retval = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler); if (retval < 0) goto error_exit; retval = synaptics_rmi4_capacitance_button_map(rmi4_data, fhandler); if (retval < 0) goto error_exit; rmi4_data->button_0d_enabled = 1; return 0; error_exit: synaptics_rmi4_f1a_kfree(fhandler); return retval; } static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler, struct synaptics_rmi4_fn_desc *rmi_fd, int page_number) { *fhandler = kzalloc(sizeof(**fhandler), GFP_KERNEL); if (!(*fhandler)) return -ENOMEM; (*fhandler)->full_addr.data_base = (rmi_fd->data_base_addr | (page_number << 8)); (*fhandler)->full_addr.ctrl_base = (rmi_fd->ctrl_base_addr | (page_number << 8)); (*fhandler)->full_addr.cmd_base = (rmi_fd->cmd_base_addr | (page_number << 8)); (*fhandler)->full_addr.query_base = (rmi_fd->query_base_addr | (page_number << 8)); 
(*fhandler)->fn_number = rmi_fd->fn_number; return 0; } /** * synaptics_rmi4_query_device_info() * * Called by synaptics_rmi4_query_device(). * */ static int synaptics_rmi4_query_device_info( struct synaptics_rmi4_data *rmi4_data) { int retval; unsigned char f01_query[F01_STD_QUERY_LEN]; struct synaptics_rmi4_device_info *rmi = &(rmi4_data->rmi4_mod_info); retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_query_base_addr, f01_query, sizeof(f01_query)); if (retval < 0) return retval; /* RMI Version 4.0 currently supported */ rmi->version_major = 4; rmi->version_minor = 0; rmi->manufacturer_id = f01_query[0]; rmi->product_props = f01_query[1]; rmi->product_info[0] = f01_query[2] & MASK_7BIT; rmi->product_info[1] = f01_query[3] & MASK_7BIT; rmi->date_code[0] = f01_query[4] & MASK_5BIT; rmi->date_code[1] = f01_query[5] & MASK_4BIT; rmi->date_code[2] = f01_query[6] & MASK_5BIT; rmi->tester_id = ((f01_query[7] & MASK_7BIT) << 8) | (f01_query[8] & MASK_7BIT); rmi->serial_number = ((f01_query[9] & MASK_7BIT) << 8) | (f01_query[10] & MASK_7BIT); memcpy(rmi->product_id_string, &f01_query[11], 10); if (rmi->manufacturer_id != 1) { dev_err(&rmi4_data->i2c_client->dev, "%s: Non-Synaptics device found, manufacturer ID = %d\n", __func__, rmi->manufacturer_id); } retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET, rmi->build_id, sizeof(rmi->build_id)); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to read firmware build id (code %d)\n", __func__, retval); return retval; } return 0; } /** * synaptics_rmi4_query_device() * * Called by synaptics_rmi4_probe(). 
* * This funtion scans the page description table, records the offsets * to the register types of Function $01, sets up the function handlers * for Function $11 and Function $12, determines the number of interrupt * sources from the sensor, adds valid Functions with data inputs to the * Function linked list, parses information from the query registers of * Function $01, and enables the interrupt sources from the valid Functions * with data inputs. */ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data) { int retval; unsigned char ii; unsigned char page_number; unsigned char intr_count = 0; unsigned char data_sources = 0; unsigned short pdt_entry_addr; unsigned short intr_addr; struct synaptics_rmi4_f01_device_status status; struct synaptics_rmi4_fn_desc rmi_fd; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); INIT_LIST_HEAD(&rmi->support_fn_list); /* Scan the page description tables of the pages to service */ for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) { for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END; pdt_entry_addr -= PDT_ENTRY_SIZE) { pdt_entry_addr |= (page_number << 8); retval = synaptics_rmi4_i2c_read(rmi4_data, pdt_entry_addr, (unsigned char *)&rmi_fd, sizeof(rmi_fd)); if (retval < 0) return retval; fhandler = NULL; if (rmi_fd.fn_number == 0) { dev_dbg(&rmi4_data->i2c_client->dev, "%s: Reached end of PDT\n", __func__); break; } dev_dbg(&rmi4_data->i2c_client->dev, "%s: F%02x found (page %d)\n", __func__, rmi_fd.fn_number, page_number); switch (rmi_fd.fn_number) { case SYNAPTICS_RMI4_F01: rmi4_data->f01_query_base_addr = rmi_fd.query_base_addr; rmi4_data->f01_ctrl_base_addr = rmi_fd.ctrl_base_addr; rmi4_data->f01_data_base_addr = rmi_fd.data_base_addr; rmi4_data->f01_cmd_base_addr = rmi_fd.cmd_base_addr; retval = synaptics_rmi4_query_device_info(rmi4_data); if (retval < 0) return retval; retval = synaptics_rmi4_i2c_read(rmi4_data, 
rmi4_data->f01_data_base_addr, status.data, sizeof(status.data)); if (retval < 0) return retval; while (status.status_code == STATUS_CRC_IN_PROGRESS) { msleep(1); retval = synaptics_rmi4_i2c_read( rmi4_data, rmi4_data->f01_data_base_addr, status.data, sizeof(status.data)); if (retval < 0) return retval; } if (status.flash_prog == 1) { pr_notice("%s: In flash prog mode," \ "status = 0x%02x\n", __func__, status.status_code); goto flash_prog_mode; } break; case SYNAPTICS_RMI4_F34: if (rmi_fd.intr_src_count == 0) break; retval = synaptics_rmi4_alloc_fh(&fhandler, &rmi_fd, page_number); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to alloc for F%d\n", __func__, rmi_fd.fn_number); return retval; } retval = synaptics_rmi4_i2c_read(rmi4_data, rmi_fd.ctrl_base_addr, rmi->config_id, sizeof(rmi->config_id)); if (retval < 0) return retval; break; case SYNAPTICS_RMI4_F11: if (rmi_fd.intr_src_count == 0) break; retval = synaptics_rmi4_alloc_fh(&fhandler, &rmi_fd, page_number); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to alloc for F%d\n", __func__, rmi_fd.fn_number); return retval; } retval = synaptics_rmi4_f11_init(rmi4_data, fhandler, &rmi_fd, intr_count); if (retval < 0) return retval; break; case SYNAPTICS_RMI4_F1A: if (rmi_fd.intr_src_count == 0) break; retval = synaptics_rmi4_alloc_fh(&fhandler, &rmi_fd, page_number); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to alloc for F%d\n", __func__, rmi_fd.fn_number); return retval; } retval = synaptics_rmi4_f1a_init(rmi4_data, fhandler, &rmi_fd, intr_count); if (retval < 0) return retval; break; } /* Accumulate the interrupt count */ intr_count += (rmi_fd.intr_src_count & MASK_3BIT); if (fhandler && rmi_fd.intr_src_count) { list_add_tail(&fhandler->link, &rmi->support_fn_list); } } } flash_prog_mode: rmi4_data->num_of_intr_regs = (intr_count + 7) / 8; dev_dbg(&rmi4_data->i2c_client->dev, "%s: Number of interrupt registers = %d\n", __func__, 
rmi4_data->num_of_intr_regs); memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask)); /* * Map out the interrupt bit masks for the interrupt sources * from the registered function handlers. */ if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) data_sources += fhandler->num_of_data_sources; } if (data_sources) { if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) { if (fhandler->num_of_data_sources) { rmi4_data->intr_mask[fhandler->intr_reg_num] |= fhandler->intr_mask; } } } } /* Enable the interrupt sources */ for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) { if (rmi4_data->intr_mask[ii] != 0x00) { dev_dbg(&rmi4_data->i2c_client->dev, "%s: Interrupt enable mask %d = 0x%02x\n", __func__, ii, rmi4_data->intr_mask[ii]); intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + ii; retval = synaptics_rmi4_i2c_write(rmi4_data, intr_addr, &(rmi4_data->intr_mask[ii]), sizeof(rmi4_data->intr_mask[ii])); if (retval < 0) return retval; } } return 0; } static int synaptics_rmi4_reset_command(struct synaptics_rmi4_data *rmi4_data) { int retval; int page_number; unsigned char command = 0x01; unsigned short pdt_entry_addr; struct synaptics_rmi4_fn_desc rmi_fd; bool done = false; /* Scan the page description tables of the pages to service */ for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) { for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END; pdt_entry_addr -= PDT_ENTRY_SIZE) { retval = synaptics_rmi4_i2c_read(rmi4_data, pdt_entry_addr, (unsigned char *)&rmi_fd, sizeof(rmi_fd)); if (retval < 0) return retval; if (rmi_fd.fn_number == 0) break; switch (rmi_fd.fn_number) { case SYNAPTICS_RMI4_F01: rmi4_data->f01_cmd_base_addr = rmi_fd.cmd_base_addr; done = true; break; } } if (done) { dev_info(&rmi4_data->i2c_client->dev, "%s: Find F01 in page description table 0x%x\n", __func__, rmi4_data->f01_cmd_base_addr); break; } } if (!done) { 
dev_err(&rmi4_data->i2c_client->dev, "%s: Cannot find F01 in page description table\n", __func__); return -EINVAL; } retval = synaptics_rmi4_i2c_write(rmi4_data, rmi4_data->f01_cmd_base_addr, &command, sizeof(command)); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to issue reset command, error = %d\n", __func__, retval); return retval; } msleep(RESET_DELAY); return retval; }; static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data) { int retval; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); retval = synaptics_rmi4_reset_command(rmi4_data); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to send command reset\n", __func__); return retval; } if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) synaptics_rmi4_f1a_kfree(fhandler); else kfree(fhandler->data); kfree(fhandler); } } retval = synaptics_rmi4_query_device(rmi4_data); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to query device\n", __func__); return retval; } return 0; } /** * synaptics_rmi4_detection_work() * * Called by the kernel at the scheduled time. * * This function is a self-rearming work thread that checks for the * insertion and removal of other expansion Function modules such as * rmi_dev and calls their initialization and removal callback functions * accordingly. 
*/ static void synaptics_rmi4_detection_work(struct work_struct *work) { struct synaptics_rmi4_exp_fn *exp_fhandler, *next_list_entry; struct synaptics_rmi4_data *rmi4_data = container_of(work, struct synaptics_rmi4_data, det_work.work); mutex_lock(&exp_fn_list_mutex); if (!list_empty(&exp_fn_list)) { list_for_each_entry_safe(exp_fhandler, next_list_entry, &exp_fn_list, link) { if ((exp_fhandler->func_init != NULL) && (exp_fhandler->inserted == false)) { exp_fhandler->func_init(rmi4_data); exp_fhandler->inserted = true; } else if ((exp_fhandler->func_init == NULL) && (exp_fhandler->inserted == true)) { exp_fhandler->func_remove(rmi4_data); list_del(&exp_fhandler->link); kfree(exp_fhandler); } } } mutex_unlock(&exp_fn_list_mutex); return; } /** * synaptics_rmi4_new_function() * * Called by other expansion Function modules in their module init and * module exit functions. * * This function is used by other expansion Function modules such as * rmi_dev to register themselves with the driver by providing their * initialization and removal callback function pointers so that they * can be inserted or removed dynamically at module init and exit times, * respectively. 
*/ void synaptics_rmi4_new_function(enum exp_fn fn_type, bool insert, int (*func_init)(struct synaptics_rmi4_data *rmi4_data), void (*func_remove)(struct synaptics_rmi4_data *rmi4_data), void (*func_attn)(struct synaptics_rmi4_data *rmi4_data, unsigned char intr_mask)) { struct synaptics_rmi4_exp_fn *exp_fhandler; if (!exp_fn_inited) { mutex_init(&exp_fn_list_mutex); INIT_LIST_HEAD(&exp_fn_list); exp_fn_inited = 1; } mutex_lock(&exp_fn_list_mutex); if (insert) { exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL); if (!exp_fhandler) { pr_err("%s: Failed to alloc mem for expansion function\n", __func__); goto exit; } exp_fhandler->fn_type = fn_type; exp_fhandler->func_init = func_init; exp_fhandler->func_attn = func_attn; exp_fhandler->func_remove = func_remove; exp_fhandler->inserted = false; list_add_tail(&exp_fhandler->link, &exp_fn_list); } else { if (!list_empty(&exp_fn_list)) { list_for_each_entry(exp_fhandler, &exp_fn_list, link) { if (exp_fhandler->func_init == func_init) { exp_fhandler->inserted = false; exp_fhandler->func_init = NULL; exp_fhandler->func_attn = NULL; goto exit; } } } } exit: mutex_unlock(&exp_fn_list_mutex); return; } EXPORT_SYMBOL(synaptics_rmi4_new_function); static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA) { return (regulator_count_voltages(reg) > 0) ? 
regulator_set_optimum_mode(reg, load_uA) : 0; } static int synaptics_rmi4_regulator_configure(struct synaptics_rmi4_data *rmi4_data, bool on) { int retval; if (on == false) goto hw_shutdown; if (rmi4_data->board->regulator_en) { rmi4_data->vdd = regulator_get(&rmi4_data->i2c_client->dev, "vdd"); if (IS_ERR(rmi4_data->vdd)) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to get vdd regulator\n", __func__); return PTR_ERR(rmi4_data->vdd); } if (regulator_count_voltages(rmi4_data->vdd) > 0) { retval = regulator_set_voltage(rmi4_data->vdd, RMI4_VTG_MIN_UV, RMI4_VTG_MAX_UV); if (retval) { dev_err(&rmi4_data->i2c_client->dev, "regulator set_vtg failed retval =%d\n", retval); goto err_set_vtg_vdd; } } } if (rmi4_data->board->i2c_pull_up) { rmi4_data->vcc_i2c = regulator_get(&rmi4_data->i2c_client->dev, "vcc_i2c"); if (IS_ERR(rmi4_data->vcc_i2c)) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to get i2c regulator\n", __func__); retval = PTR_ERR(rmi4_data->vcc_i2c); goto err_get_vtg_i2c; } if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0) { retval = regulator_set_voltage(rmi4_data->vcc_i2c, RMI4_I2C_VTG_MIN_UV, RMI4_I2C_VTG_MAX_UV); if (retval) { dev_err(&rmi4_data->i2c_client->dev, "reg set i2c vtg failed retval =%d\n", retval); goto err_set_vtg_i2c; } } } return 0; err_set_vtg_i2c: if (rmi4_data->board->i2c_pull_up) regulator_put(rmi4_data->vcc_i2c); err_get_vtg_i2c: if (rmi4_data->board->regulator_en) if (regulator_count_voltages(rmi4_data->vdd) > 0) regulator_set_voltage(rmi4_data->vdd, 0, RMI4_VTG_MAX_UV); err_set_vtg_vdd: if (rmi4_data->board->regulator_en) regulator_put(rmi4_data->vdd); return retval; hw_shutdown: if (rmi4_data->board->regulator_en) { if (regulator_count_voltages(rmi4_data->vdd) > 0) regulator_set_voltage(rmi4_data->vdd, 0, RMI4_VTG_MAX_UV); regulator_put(rmi4_data->vdd); } if (rmi4_data->board->i2c_pull_up) { if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0) regulator_set_voltage(rmi4_data->vcc_i2c, 0, RMI4_I2C_VTG_MAX_UV); 
regulator_put(rmi4_data->vcc_i2c); } return 0; }; static int synaptics_rmi4_power_on(struct synaptics_rmi4_data *rmi4_data, bool on) { int retval; if (on == false) goto power_off; if (rmi4_data->board->regulator_en) { retval = reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_ACTIVE_LOAD_UA); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vdd set_opt failed rc=%d\n", retval); return retval; } retval = regulator_enable(rmi4_data->vdd); if (retval) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vdd enable failed rc=%d\n", retval); goto error_reg_en_vdd; } } if (rmi4_data->board->i2c_pull_up) { retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c, RMI4_I2C_LOAD_UA); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_i2c set_opt failed rc=%d\n", retval); goto error_reg_opt_i2c; } retval = regulator_enable(rmi4_data->vcc_i2c); if (retval) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_i2c enable failed rc=%d\n", retval); goto error_reg_en_vcc_i2c; } } return 0; error_reg_en_vcc_i2c: if (rmi4_data->board->i2c_pull_up) reg_set_optimum_mode_check(rmi4_data->vdd, 0); error_reg_opt_i2c: if (rmi4_data->board->regulator_en) regulator_disable(rmi4_data->vdd); error_reg_en_vdd: if (rmi4_data->board->regulator_en) reg_set_optimum_mode_check(rmi4_data->vdd, 0); return retval; power_off: if (rmi4_data->board->regulator_en) { reg_set_optimum_mode_check(rmi4_data->vdd, 0); regulator_disable(rmi4_data->vdd); } if (rmi4_data->board->i2c_pull_up) { reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0); regulator_disable(rmi4_data->vcc_i2c); } return 0; } /** * synaptics_rmi4_probe() * * Called by the kernel when an association with an I2C device of the * same name is made (after doing i2c_add_driver). 
* * This funtion allocates and initializes the resources for the driver * as an input driver, turns on the power to the sensor, queries the * sensor for its supported Functions and characteristics, registers * the driver to the input subsystem, sets up the interrupt, handles * the registration of the early_suspend and late_resume functions, * and creates a work queue for detection of other expansion Function * modules. */ static int __devinit synaptics_rmi4_probe(struct i2c_client *client, const struct i2c_device_id *dev_id) { int retval = 0; unsigned char ii; unsigned char attr_count; struct synaptics_rmi4_f1a_handle *f1a; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_data *rmi4_data; struct synaptics_rmi4_device_info *rmi; struct synaptics_rmi4_platform_data *platform_data = client->dev.platform_data; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "%s: SMBus byte data not supported\n", __func__); return -EIO; } if (client->dev.of_node) { platform_data = devm_kzalloc(&client->dev, sizeof(*platform_data), GFP_KERNEL); if (!platform_data) { dev_err(&client->dev, "Failed to allocate memory\n"); return -ENOMEM; } retval = synaptics_rmi4_parse_dt(&client->dev, platform_data); if (retval) return retval; } else { platform_data = client->dev.platform_data; } if (!platform_data) { dev_err(&client->dev, "%s: No platform data found\n", __func__); return -EINVAL; } rmi4_data = kzalloc(sizeof(*rmi4_data) * 2, GFP_KERNEL); if (!rmi4_data) { dev_err(&client->dev, "%s: Failed to alloc mem for rmi4_data\n", __func__); return -ENOMEM; } rmi = &(rmi4_data->rmi4_mod_info); rmi4_data->input_dev = input_allocate_device(); if (rmi4_data->input_dev == NULL) { dev_err(&client->dev, "%s: Failed to allocate input device\n", __func__); retval = -ENOMEM; goto err_input_device; } rmi4_data->i2c_client = client; rmi4_data->current_page = MASK_8BIT; rmi4_data->board = platform_data; rmi4_data->touch_stopped = false; 
rmi4_data->sensor_sleep = false; rmi4_data->irq_enabled = false; rmi4_data->i2c_read = synaptics_rmi4_i2c_read; rmi4_data->i2c_write = synaptics_rmi4_i2c_write; rmi4_data->irq_enable = synaptics_rmi4_irq_enable; rmi4_data->reset_device = synaptics_rmi4_reset_device; rmi4_data->flip_x = rmi4_data->board->x_flip; rmi4_data->flip_y = rmi4_data->board->y_flip; rmi4_data->fw_image_name = rmi4_data->board->fw_image_name; rmi4_data->input_dev->name = DRIVER_NAME; rmi4_data->input_dev->phys = INPUT_PHYS_NAME; rmi4_data->input_dev->id.bustype = BUS_I2C; rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT; rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION; rmi4_data->input_dev->dev.parent = &client->dev; input_set_drvdata(rmi4_data->input_dev, rmi4_data); set_bit(EV_SYN, rmi4_data->input_dev->evbit); set_bit(EV_KEY, rmi4_data->input_dev->evbit); set_bit(EV_ABS, rmi4_data->input_dev->evbit); set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit); set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit); #ifdef INPUT_PROP_DIRECT set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit); #endif retval = synaptics_rmi4_regulator_configure(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "Failed to configure regulators\n"); goto err_reg_configure; } retval = synaptics_rmi4_power_on(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "Failed to power on\n"); goto err_power_device; } if (gpio_is_valid(platform_data->irq_gpio)) { /* configure touchscreen irq gpio */ retval = gpio_request(platform_data->irq_gpio, "rmi4_irq_gpio"); if (retval) { dev_err(&client->dev, "unable to request gpio [%d]\n", platform_data->irq_gpio); goto err_irq_gpio_req; } retval = gpio_direction_input(platform_data->irq_gpio); if (retval) { dev_err(&client->dev, "unable to set direction for gpio [%d]\n", platform_data->irq_gpio); goto err_irq_gpio_dir; } } else { dev_err(&client->dev, "irq gpio not provided\n"); goto err_irq_gpio_req; } if 
(gpio_is_valid(platform_data->reset_gpio)) { /* configure touchscreen reset out gpio */ retval = gpio_request(platform_data->reset_gpio, "rmi4_reset_gpio"); if (retval) { dev_err(&client->dev, "unable to request gpio [%d]\n", platform_data->reset_gpio); goto err_irq_gpio_dir; } retval = gpio_direction_output(platform_data->reset_gpio, 1); if (retval) { dev_err(&client->dev, "unable to set direction for gpio [%d]\n", platform_data->reset_gpio); goto err_reset_gpio_dir; } gpio_set_value(platform_data->reset_gpio, 0); usleep(RMI4_GPIO_SLEEP_LOW_US); gpio_set_value(platform_data->reset_gpio, 1); msleep(RESET_DELAY); } else synaptics_rmi4_reset_command(rmi4_data); init_waitqueue_head(&rmi4_data->wait); mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex)); retval = synaptics_rmi4_query_device(rmi4_data); if (retval < 0) { dev_err(&client->dev, "%s: Failed to query device\n", __func__); goto err_reset_gpio_dir; } input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_X, 0, rmi4_data->sensor_max_x, 0, 0); input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, 0, rmi4_data->sensor_max_y, 0, 0); input_set_abs_params(rmi4_data->input_dev, ABS_PRESSURE, 0, 255, 0, 0); #ifdef REPORT_2D_W input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0, MAX_ABS_MT_TOUCH_MAJOR, 0, 0); #endif #ifdef TYPE_B_PROTOCOL input_mt_init_slots(rmi4_data->input_dev, rmi4_data->num_of_fingers); #endif i2c_set_clientdata(client, rmi4_data); f1a = NULL; if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) f1a = fhandler->data; } } if (f1a) { for (ii = 0; ii < f1a->valid_button_count; ii++) { set_bit(f1a->button_map[ii], rmi4_data->input_dev->keybit); input_set_capability(rmi4_data->input_dev, EV_KEY, f1a->button_map[ii]); } } retval = input_register_device(rmi4_data->input_dev); if (retval) { dev_err(&client->dev, "%s: Failed to register input device\n", __func__); goto err_register_input; } 
configure_sleep(rmi4_data); if (!exp_fn_inited) { mutex_init(&exp_fn_list_mutex); INIT_LIST_HEAD(&exp_fn_list); exp_fn_inited = 1; } rmi4_data->det_workqueue = create_singlethread_workqueue("rmi_det_workqueue"); INIT_DELAYED_WORK(&rmi4_data->det_work, synaptics_rmi4_detection_work); queue_delayed_work(rmi4_data->det_workqueue, &rmi4_data->det_work, msecs_to_jiffies(EXP_FN_DET_INTERVAL)); rmi4_data->irq = gpio_to_irq(platform_data->irq_gpio); retval = request_threaded_irq(rmi4_data->irq, NULL, synaptics_rmi4_irq, platform_data->irq_flags, DRIVER_NAME, rmi4_data); rmi4_data->irq_enabled = true; if (retval < 0) { dev_err(&client->dev, "%s: Failed to create irq thread\n", __func__); goto err_enable_irq; } for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) { retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj, &attrs[attr_count].attr); if (retval < 0) { dev_err(&client->dev, "%s: Failed to create sysfs attributes\n", __func__); goto err_sysfs; } } retval = synaptics_rmi4_irq_enable(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "%s: Failed to enable attention interrupt\n", __func__); goto err_sysfs; } return retval; err_sysfs: for (attr_count--; attr_count >= 0; attr_count--) { sysfs_remove_file(&rmi4_data->input_dev->dev.kobj, &attrs[attr_count].attr); } err_enable_irq: cancel_delayed_work_sync(&rmi4_data->det_work); flush_workqueue(rmi4_data->det_workqueue); destroy_workqueue(rmi4_data->det_workqueue); input_unregister_device(rmi4_data->input_dev); err_register_input: if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) synaptics_rmi4_f1a_kfree(fhandler); else kfree(fhandler->data); kfree(fhandler); } } err_reset_gpio_dir: if (gpio_is_valid(platform_data->reset_gpio)) gpio_free(platform_data->reset_gpio); err_irq_gpio_dir: if (gpio_is_valid(platform_data->irq_gpio)) gpio_free(platform_data->irq_gpio); err_irq_gpio_req: 
synaptics_rmi4_power_on(rmi4_data, false); err_power_device: synaptics_rmi4_regulator_configure(rmi4_data, false); err_reg_configure: input_free_device(rmi4_data->input_dev); rmi4_data->input_dev = NULL; err_input_device: kfree(rmi4_data); return retval; } /** * synaptics_rmi4_remove() * * Called by the kernel when the association with an I2C device of the * same name is broken (when the driver is unloaded). * * This funtion terminates the work queue, stops sensor data acquisition, * frees the interrupt, unregisters the driver from the input subsystem, * turns off the power to the sensor, and frees other allocated resources. */ static int __devexit synaptics_rmi4_remove(struct i2c_client *client) { unsigned char attr_count; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client); struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); cancel_delayed_work_sync(&rmi4_data->det_work); flush_workqueue(rmi4_data->det_workqueue); destroy_workqueue(rmi4_data->det_workqueue); rmi4_data->touch_stopped = true; wake_up(&rmi4_data->wait); free_irq(rmi4_data->irq, rmi4_data); for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) { sysfs_remove_file(&rmi4_data->input_dev->dev.kobj, &attrs[attr_count].attr); } input_unregister_device(rmi4_data->input_dev); if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) synaptics_rmi4_f1a_kfree(fhandler); else kfree(fhandler->data); kfree(fhandler); } } if (gpio_is_valid(rmi4_data->board->reset_gpio)) gpio_free(rmi4_data->board->reset_gpio); if (gpio_is_valid(rmi4_data->board->irq_gpio)) gpio_free(rmi4_data->board->irq_gpio); synaptics_rmi4_power_on(rmi4_data, false); synaptics_rmi4_regulator_configure(rmi4_data, false); kfree(rmi4_data); return 0; } #ifdef CONFIG_PM /** * synaptics_rmi4_sensor_sleep() * * Called by synaptics_rmi4_early_suspend() and 
synaptics_rmi4_suspend(). * * This function stops finger data acquisition and puts the sensor to sleep. */ static void synaptics_rmi4_sensor_sleep(struct synaptics_rmi4_data *rmi4_data) { int retval; unsigned char device_ctrl; retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_ctrl_base_addr, &device_ctrl, sizeof(device_ctrl)); if (retval < 0) { dev_err(&(rmi4_data->input_dev->dev), "%s: Failed to enter sleep mode\n", __func__); rmi4_data->sensor_sleep = false; return; } device_ctrl = (device_ctrl & ~MASK_3BIT); device_ctrl = (device_ctrl | NO_SLEEP_OFF | SENSOR_SLEEP); retval = synaptics_rmi4_i2c_write(rmi4_data, rmi4_data->f01_ctrl_base_addr, &device_ctrl, sizeof(device_ctrl)); if (retval < 0) { dev_err(&(rmi4_data->input_dev->dev), "%s: Failed to enter sleep mode\n", __func__); rmi4_data->sensor_sleep = false; return; } else { rmi4_data->sensor_sleep = true; } return; } /** * synaptics_rmi4_sensor_wake() * * Called by synaptics_rmi4_resume() and synaptics_rmi4_late_resume(). * * This function wakes the sensor from sleep. 
*/ static void synaptics_rmi4_sensor_wake(struct synaptics_rmi4_data *rmi4_data) { int retval; unsigned char device_ctrl; retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_ctrl_base_addr, &device_ctrl, sizeof(device_ctrl)); if (retval < 0) { dev_err(&(rmi4_data->input_dev->dev), "%s: Failed to wake from sleep mode\n", __func__); rmi4_data->sensor_sleep = true; return; } device_ctrl = (device_ctrl & ~MASK_3BIT); device_ctrl = (device_ctrl | NO_SLEEP_OFF | NORMAL_OPERATION); retval = synaptics_rmi4_i2c_write(rmi4_data, rmi4_data->f01_ctrl_base_addr, &device_ctrl, sizeof(device_ctrl)); if (retval < 0) { dev_err(&(rmi4_data->input_dev->dev), "%s: Failed to wake from sleep mode\n", __func__); rmi4_data->sensor_sleep = true; return; } else { rmi4_data->sensor_sleep = false; } return; } #if defined(CONFIG_FB) static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data) { struct fb_event *evdata = data; int *blank; struct synaptics_rmi4_data *rmi4_data = container_of(self, struct synaptics_rmi4_data, fb_notif); if (evdata && evdata->data && event == FB_EVENT_BLANK && rmi4_data && rmi4_data->i2c_client) { blank = evdata->data; if (*blank == FB_BLANK_UNBLANK) synaptics_rmi4_resume(&(rmi4_data->input_dev->dev)); else if (*blank == FB_BLANK_POWERDOWN) synaptics_rmi4_suspend(&(rmi4_data->input_dev->dev)); } return 0; } #elif defined(CONFIG_HAS_EARLYSUSPEND) /** * synaptics_rmi4_early_suspend() * * Called by the kernel during the early suspend phase when the system * enters suspend. * * This function calls synaptics_rmi4_sensor_sleep() to stop finger * data acquisition and put the sensor to sleep. 
*/ static void synaptics_rmi4_early_suspend(struct early_suspend *h) { struct synaptics_rmi4_data *rmi4_data = container_of(h, struct synaptics_rmi4_data, early_suspend); rmi4_data->touch_stopped = true; wake_up(&rmi4_data->wait); synaptics_rmi4_irq_enable(rmi4_data, false); synaptics_rmi4_sensor_sleep(rmi4_data); if (rmi4_data->full_pm_cycle) synaptics_rmi4_suspend(&(rmi4_data->input_dev->dev)); return; } /** * synaptics_rmi4_late_resume() * * Called by the kernel during the late resume phase when the system * wakes up from suspend. * * This function goes through the sensor wake process if the system wakes * up from early suspend (without going into suspend). */ static void synaptics_rmi4_late_resume(struct early_suspend *h) { struct synaptics_rmi4_data *rmi4_data = container_of(h, struct synaptics_rmi4_data, early_suspend); if (rmi4_data->full_pm_cycle) synaptics_rmi4_resume(&(rmi4_data->input_dev->dev)); if (rmi4_data->sensor_sleep == true) { synaptics_rmi4_sensor_wake(rmi4_data); rmi4_data->touch_stopped = false; synaptics_rmi4_irq_enable(rmi4_data, true); } return; } #endif static int synaptics_rmi4_regulator_lpm(struct synaptics_rmi4_data *rmi4_data, bool on) { int retval; if (on == false) goto regulator_hpm; retval = reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_LPM_LOAD_UA); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_ana set_opt failed rc=%d\n", retval); goto fail_regulator_lpm; } if (rmi4_data->board->i2c_pull_up) { retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c, RMI4_I2C_LOAD_UA); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_i2c set_opt failed rc=%d\n", retval); goto fail_regulator_lpm; } } return 0; regulator_hpm: retval = reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_ACTIVE_LOAD_UA); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_ana set_opt failed rc=%d\n", retval); goto fail_regulator_hpm; } if (rmi4_data->board->i2c_pull_up) { retval = 
reg_set_optimum_mode_check(rmi4_data->vcc_i2c, RMI4_I2C_LPM_LOAD_UA); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_i2c set_opt failed rc=%d\n", retval); goto fail_regulator_hpm; } } return 0; fail_regulator_lpm: reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_ACTIVE_LOAD_UA); if (rmi4_data->board->i2c_pull_up) reg_set_optimum_mode_check(rmi4_data->vcc_i2c, RMI4_I2C_LOAD_UA); return retval; fail_regulator_hpm: reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_LPM_LOAD_UA); if (rmi4_data->board->i2c_pull_up) reg_set_optimum_mode_check(rmi4_data->vcc_i2c, RMI4_I2C_LPM_LOAD_UA); return retval; } /** * synaptics_rmi4_suspend() * * Called by the kernel during the suspend phase when the system * enters suspend. * * This function stops finger data acquisition and puts the sensor to * sleep (if not already done so during the early suspend phase), * disables the interrupt, and turns off the power to the sensor. */ static int synaptics_rmi4_suspend(struct device *dev) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); int retval; if (!rmi4_data->sensor_sleep) { rmi4_data->touch_stopped = true; wake_up(&rmi4_data->wait); synaptics_rmi4_irq_enable(rmi4_data, false); synaptics_rmi4_sensor_sleep(rmi4_data); } retval = synaptics_rmi4_regulator_lpm(rmi4_data, true); if (retval < 0) { dev_err(dev, "failed to enter low power mode\n"); return retval; } return 0; } /** * synaptics_rmi4_resume() * * Called by the kernel during the resume phase when the system * wakes up from suspend. * * This function turns on the power to the sensor, wakes the sensor * from sleep, enables the interrupt, and starts finger data * acquisition. 
*/ static int synaptics_rmi4_resume(struct device *dev) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); int retval; retval = synaptics_rmi4_regulator_lpm(rmi4_data, false); if (retval < 0) { dev_err(dev, "failed to enter active power mode\n"); return retval; } synaptics_rmi4_sensor_wake(rmi4_data); rmi4_data->touch_stopped = false; synaptics_rmi4_irq_enable(rmi4_data, true); return 0; } #if (!defined(CONFIG_FB) && !defined(CONFIG_HAS_EARLYSUSPEND)) static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = { .suspend = synaptics_rmi4_suspend, .resume = synaptics_rmi4_resume, }; #else static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = { }; #endif #endif static const struct i2c_device_id synaptics_rmi4_id_table[] = { {DRIVER_NAME, 0}, {}, }; MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table); #ifdef CONFIG_OF static struct of_device_id rmi4_match_table[] = { { .compatible = "synaptics,rmi4",}, { }, }; #else #define rmi4_match_table NULL #endif static struct i2c_driver synaptics_rmi4_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = rmi4_match_table, #ifdef CONFIG_PM .pm = &synaptics_rmi4_dev_pm_ops, #endif }, .probe = synaptics_rmi4_probe, .remove = __devexit_p(synaptics_rmi4_remove), .id_table = synaptics_rmi4_id_table, }; /** * synaptics_rmi4_init() * * Called by the kernel during do_initcalls (if built-in) * or when the driver is loaded (if a module). * * This function registers the driver to the I2C subsystem. * */ static int __init synaptics_rmi4_init(void) { return i2c_add_driver(&synaptics_rmi4_driver); } /** * synaptics_rmi4_exit() * * Called by the kernel when the driver is unloaded. * * This funtion unregisters the driver from the I2C subsystem. 
* */
static void __exit synaptics_rmi4_exit(void)
{
	/* Unregister the driver from the I2C subsystem. */
	i2c_del_driver(&synaptics_rmi4_driver);
}

module_init(synaptics_rmi4_init);
module_exit(synaptics_rmi4_exit);

MODULE_AUTHOR("Synaptics, Inc.");
MODULE_DESCRIPTION("Synaptics RMI4 I2C Touch Driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
drhonk/Bali_t959
arch/powerpc/sysdev/cpm_common.c
544
8715
/* * Common CPM code * * Author: Scott Wood <scottwood@freescale.com> * * Copyright 2007 Freescale Semiconductor, Inc. * * Some parts derived from commproc.c/cpm2_common.c, which is: * Copyright (c) 1997 Dan error_act (dmalek@jlc.net) * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com> * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com) * 2006 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/of_device.h> #include <linux/spinlock.h> #include <linux/of.h> #include <asm/udbg.h> #include <asm/io.h> #include <asm/system.h> #include <asm/rheap.h> #include <asm/cpm.h> #include <mm/mmu_decl.h> #if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO) #include <linux/of_gpio.h> #endif #ifdef CONFIG_PPC_EARLY_DEBUG_CPM static u32 __iomem *cpm_udbg_txdesc = (u32 __iomem __force *)CONFIG_PPC_EARLY_DEBUG_CPM_ADDR; static void udbg_putc_cpm(char c) { u8 __iomem *txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]); if (c == '\n') udbg_putc_cpm('\r'); while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000) ; out_8(txbuf, c); out_be32(&cpm_udbg_txdesc[0], 0xa0000001); } void __init udbg_init_cpm(void) { if (cpm_udbg_txdesc) { #ifdef CONFIG_CPM2 setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG); #endif udbg_putc = udbg_putc_cpm; } } #endif static spinlock_t cpm_muram_lock; static rh_block_t cpm_boot_muram_rh_block[16]; static rh_info_t cpm_muram_info; static u8 __iomem *muram_vbase; static phys_addr_t muram_pbase; /* Max address size we deal with */ #define OF_MAX_ADDR_CELLS 4 int __init cpm_muram_init(void) { struct device_node *np; struct resource r; u32 zero[OF_MAX_ADDR_CELLS] = {}; resource_size_t max = 0; int i = 0; int ret = 0; spin_lock_init(&cpm_muram_lock); /* initialize the info header */ 
rh_init(&cpm_muram_info, 1, sizeof(cpm_boot_muram_rh_block) / sizeof(cpm_boot_muram_rh_block[0]), cpm_boot_muram_rh_block); np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data"); if (!np) { /* try legacy bindings */ np = of_find_node_by_name(NULL, "data-only"); if (!np) { printk(KERN_ERR "Cannot find CPM muram data node"); ret = -ENODEV; goto out; } } muram_pbase = of_translate_address(np, zero); if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) { printk(KERN_ERR "Cannot translate zero through CPM muram node"); ret = -ENODEV; goto out; } while (of_address_to_resource(np, i++, &r) == 0) { if (r.end > max) max = r.end; rh_attach_region(&cpm_muram_info, r.start - muram_pbase, r.end - r.start + 1); } muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1); if (!muram_vbase) { printk(KERN_ERR "Cannot map CPM muram"); ret = -ENOMEM; } out: of_node_put(np); return ret; } /** * cpm_muram_alloc - allocate the requested size worth of multi-user ram * @size: number of bytes to allocate * @align: requested alignment, in bytes * * This function returns an offset into the muram area. * Use cpm_dpram_addr() to get the virtual address of the area. * Use cpm_muram_free() to free the allocation. */ unsigned long cpm_muram_alloc(unsigned long size, unsigned long align) { unsigned long start; unsigned long flags; spin_lock_irqsave(&cpm_muram_lock, flags); cpm_muram_info.alignment = align; start = rh_alloc(&cpm_muram_info, size, "commproc"); spin_unlock_irqrestore(&cpm_muram_lock, flags); return start; } EXPORT_SYMBOL(cpm_muram_alloc); /** * cpm_muram_free - free a chunk of multi-user ram * @offset: The beginning of the chunk as returned by cpm_muram_alloc(). 
*/
int cpm_muram_free(unsigned long offset)
{
	int ret;
	unsigned long flags;

	/* The remote-heap allocator is not itself locked; serialize here. */
	spin_lock_irqsave(&cpm_muram_lock, flags);
	ret = rh_free(&cpm_muram_info, offset);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);

	return ret;
}
EXPORT_SYMBOL(cpm_muram_free);

/**
 * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
 * @offset: the offset into the muram area to reserve
 * @size: the number of bytes to reserve
 *
 * This function returns "start" on success, -ENOMEM on failure.
 * Use cpm_dpram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	/* alignment 1 = take the region exactly where requested */
	cpm_muram_info.alignment = 1;
	start = rh_alloc_fixed(&cpm_muram_info, offset, size, "commproc");
	spin_unlock_irqrestore(&cpm_muram_lock, flags);

	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);

/**
 * cpm_muram_addr - turn a muram offset into a virtual address
 * @offset: muram offset to convert
 */
void __iomem *cpm_muram_addr(unsigned long offset)
{
	return muram_vbase + offset;
}
EXPORT_SYMBOL(cpm_muram_addr);

/*
 * cpm_muram_offset - inverse of cpm_muram_addr(): turn a muram virtual
 * address back into an offset within the muram area.
 */
unsigned long cpm_muram_offset(void __iomem *addr)
{
	return addr - (void __iomem *)muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);

/**
 * cpm_muram_dma - turn a muram virtual address into a DMA address
 * @offset: virtual address from cpm_muram_addr() to convert
 */
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
	return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);

#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)

/* Register layout of one 32-bit CPM2 I/O port bank. */
struct cpm2_ioports {
	u32 dir, par, sor, odr, dat;
	u32 res[3];
};

struct cpm2_gpio32_chip {
	struct of_mm_gpio_chip mm_gc;
	spinlock_t lock;

	/* shadowed data register to clear/set bits safely */
	u32 cpdata;
};

static inline struct cpm2_gpio32_chip *
to_cpm2_gpio32_chip(struct of_mm_gpio_chip *mm_gc)
{
	return container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
}
static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
{
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;

	/* Seed the shadow copy from the live data register. */
	cpm2_gc->cpdata = in_be32(&iop->dat);
}

static int cpm2_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;
	u32 pin_mask;

	/* GPIO 0 is the most-significant bit of the 32-bit port. */
	pin_mask = 1 << (31 - gpio);

	return !!(in_be32(&iop->dat) & pin_mask);
}

/*
 * Update one pin in the shadowed data register and write it back.
 * Caller must hold cpm2_gc->lock.
 */
static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, int value)
{
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;

	if (value)
		cpm2_gc->cpdata |= pin_mask;
	else
		cpm2_gc->cpdata &= ~pin_mask;

	out_be32(&iop->dat, cpm2_gc->cpdata);
}

static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm2_gc->lock, flags);

	__cpm2_gpio32_set(mm_gc, pin_mask, value);

	spin_unlock_irqrestore(&cpm2_gc->lock, flags);
}

static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm2_gc->lock, flags);

	/* Set the level before switching the pin to output. */
	setbits32(&iop->dir, pin_mask);
	__cpm2_gpio32_set(mm_gc, pin_mask, val);

	spin_unlock_irqrestore(&cpm2_gc->lock, flags);

	return 0;
}

static int cpm2_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm2_gc->lock, flags);
clrbits32(&iop->dir, pin_mask); spin_unlock_irqrestore(&cpm2_gc->lock, flags); return 0; } int cpm2_gpiochip_add32(struct device_node *np) { struct cpm2_gpio32_chip *cpm2_gc; struct of_mm_gpio_chip *mm_gc; struct of_gpio_chip *of_gc; struct gpio_chip *gc; cpm2_gc = kzalloc(sizeof(*cpm2_gc), GFP_KERNEL); if (!cpm2_gc) return -ENOMEM; spin_lock_init(&cpm2_gc->lock); mm_gc = &cpm2_gc->mm_gc; of_gc = &mm_gc->of_gc; gc = &of_gc->gc; mm_gc->save_regs = cpm2_gpio32_save_regs; of_gc->gpio_cells = 2; gc->ngpio = 32; gc->direction_input = cpm2_gpio32_dir_in; gc->direction_output = cpm2_gpio32_dir_out; gc->get = cpm2_gpio32_get; gc->set = cpm2_gpio32_set; return of_mm_gpiochip_add(np, mm_gc); } #endif /* CONFIG_CPM2 || CONFIG_8xx_GPIO */
gpl-2.0
hephaex/a10c
arch/ia64/mm/fault.c
1568
7839
/* * MMU fault handling support. * * Copyright (C) 1998-2002 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/prefetch.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/uaccess.h> extern int die(char *, struct pt_regs *, long); #ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs, int trap) { int ret = 0; if (!user_mode(regs)) { /* kprobe_running() needs smp_processor_id() */ preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, trap)) ret = 1; preempt_enable(); } return ret; } #else static inline int notify_page_fault(struct pt_regs *regs, int trap) { return 0; } #endif /* * Return TRUE if ADDRESS points at a page in the kernel's mapped segment * (inside region 5, on ia64) and that page is present. */ static int mapped_kernel_page_is_present (unsigned long address) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *ptep, pte; pgd = pgd_offset_k(address); if (pgd_none(*pgd) || pgd_bad(*pgd)) return 0; pud = pud_offset(pgd, address); if (pud_none(*pud) || pud_bad(*pud)) return 0; pmd = pmd_offset(pud, address); if (pmd_none(*pmd) || pmd_bad(*pmd)) return 0; ptep = pte_offset_kernel(pmd, address); if (!ptep) return 0; pte = *ptep; return pte_present(pte); } # define VM_READ_BIT 0 # define VM_WRITE_BIT 1 # define VM_EXEC_BIT 2 void __kprobes ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) { int signal = SIGSEGV, code = SEGV_MAPERR; struct vm_area_struct *vma, *prev_vma; struct mm_struct *mm = current->mm; struct siginfo si; unsigned long mask; int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)); /* mmap_sem is performance critical.... 
*/ prefetchw(&mm->mmap_sem); /* * If we're in an interrupt or have no user context, we must not take the fault.. */ if (in_atomic() || !mm) goto no_context; #ifdef CONFIG_VIRTUAL_MEM_MAP /* * If fault is in region 5 and we are in the kernel, we may already * have the mmap_sem (pfn_valid macro is called during mmap). There * is no vma for region 5 addr's anyway, so skip getting the semaphore * and go directly to the exception handling code. */ if ((REGION_NUMBER(address) == 5) && !user_mode(regs)) goto bad_area_no_up; #endif /* * This is to handle the kprobes on user space access instructions */ if (notify_page_fault(regs, TRAP_BRKPT)) return; if (user_mode(regs)) flags |= FAULT_FLAG_USER; if (mask & VM_WRITE) flags |= FAULT_FLAG_WRITE; retry: down_read(&mm->mmap_sem); vma = find_vma_prev(mm, address, &prev_vma); if (!vma && !prev_vma ) goto bad_area; /* * find_vma_prev() returns vma such that address < vma->vm_end or NULL * * May find no vma, but could be that the last vm area is the * register backing store that needs to expand upwards, in * this case vma will be null, but prev_vma will ne non-null */ if (( !vma && prev_vma ) || (address < vma->vm_start) ) goto check_expansion; good_area: code = SEGV_ACCERR; /* OK, we've got a good vm_area for this memory area. Check the access permissions: */ # if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \ || (1 << VM_EXEC_BIT) != VM_EXEC) # error File is out of sync with <linux/mm.h>. Please update. # endif if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) goto bad_area; if ((vma->vm_flags & mask) != mask) goto bad_area; /* * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the * fault. 
*/ fault = handle_mm_fault(mm, vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; if (unlikely(fault & VM_FAULT_ERROR)) { /* * We ran out of memory, or some other thing happened * to us that made us unable to handle the page fault * gracefully. */ if (fault & VM_FAULT_OOM) { goto out_of_memory; } else if (fault & VM_FAULT_SIGBUS) { signal = SIGBUS; goto bad_area; } BUG(); } if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) current->maj_flt++; else current->min_flt++; if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; flags |= FAULT_FLAG_TRIED; /* No need to up_read(&mm->mmap_sem) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. */ goto retry; } } up_read(&mm->mmap_sem); return; check_expansion: if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) { if (!vma) goto bad_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) || REGION_OFFSET(address) >= RGN_MAP_LIMIT) goto bad_area; if (expand_stack(vma, address)) goto bad_area; } else { vma = prev_vma; if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) || REGION_OFFSET(address) >= RGN_MAP_LIMIT) goto bad_area; /* * Since the register backing store is accessed sequentially, * we disallow growing it by more than a page at a time. */ if (address > vma->vm_end + PAGE_SIZE - sizeof(long)) goto bad_area; if (expand_upwards(vma, address)) goto bad_area; } goto good_area; bad_area: up_read(&mm->mmap_sem); #ifdef CONFIG_VIRTUAL_MEM_MAP bad_area_no_up: #endif if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { /* * This fault was due to a speculative load or lfetch.fault, set the "ed" * bit in the psr to ensure forward progress. (Target register will get a * NaT for ld.s, lfetch will be canceled.) 
*/ ia64_psr(regs)->ed = 1; return; } if (user_mode(regs)) { si.si_signo = signal; si.si_errno = 0; si.si_code = code; si.si_addr = (void __user *) address; si.si_isr = isr; si.si_flags = __ISR_VALID; force_sig_info(signal, &si, current); return; } no_context: if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { /* * This fault was due to a speculative load or lfetch.fault, set the "ed" * bit in the psr to ensure forward progress. (Target register will get a * NaT for ld.s, lfetch will be canceled.) */ ia64_psr(regs)->ed = 1; return; } /* * Since we have no vma's for region 5, we might get here even if the address is * valid, due to the VHPT walker inserting a non present translation that becomes * stale. If that happens, the non present fault handler already purged the stale * translation, which fixed the problem. So, we check to see if the translation is * valid, and return if it is. */ if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address)) return; if (ia64_done_with_exception(regs)) return; /* * Oops. The kernel tried to access some bad page. We'll have to terminate things * with extreme prejudice. */ bust_spinlocks(1); if (address < PAGE_SIZE) printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address); else printk(KERN_ALERT "Unable to handle kernel paging request at " "virtual address %016lx\n", address); if (die("Oops", regs, isr)) regs = NULL; bust_spinlocks(0); if (regs) do_exit(SIGKILL); return; out_of_memory: up_read(&mm->mmap_sem); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); }
gpl-2.0
vlw/android_kernel_samsung_msm8916_A3
drivers/acpi/acpica/hwacpi.c
2080
5654
/****************************************************************************** * * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_HARDWARE ACPI_MODULE_NAME("hwacpi") #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ /****************************************************************************** * * FUNCTION: acpi_hw_set_mode * * PARAMETERS: mode - SYS_MODE_ACPI or SYS_MODE_LEGACY * * RETURN: Status * * DESCRIPTION: Transitions the system into the requested mode. * ******************************************************************************/ acpi_status acpi_hw_set_mode(u32 mode) { acpi_status status; ACPI_FUNCTION_TRACE(hw_set_mode); /* If the Hardware Reduced flag is set, machine is always in acpi mode */ if (acpi_gbl_reduced_hardware) { return_ACPI_STATUS(AE_OK); } /* * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, * system does not support mode transition. */ if (!acpi_gbl_FADT.smi_command) { ACPI_ERROR((AE_INFO, "No SMI_CMD in FADT, mode transition failed")); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); } /* * ACPI 2.0 clarified the meaning of ACPI_ENABLE and ACPI_DISABLE * in FADT: If it is zero, enabling or disabling is not supported. * As old systems may have used zero for mode transition, * we make sure both the numbers are zero to determine these * transitions are not supported. 
*/ if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) { ACPI_ERROR((AE_INFO, "No ACPI mode transition supported in this system " "(enable/disable both zero)")); return_ACPI_STATUS(AE_OK); } switch (mode) { case ACPI_SYS_MODE_ACPI: /* BIOS should have disabled ALL fixed and GP events */ status = acpi_hw_write_port(acpi_gbl_FADT.smi_command, (u32) acpi_gbl_FADT.acpi_enable, 8); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Attempting to enable ACPI mode\n")); break; case ACPI_SYS_MODE_LEGACY: /* * BIOS should clear all fixed status bits and restore fixed event * enable bits to default */ status = acpi_hw_write_port(acpi_gbl_FADT.smi_command, (u32)acpi_gbl_FADT.acpi_disable, 8); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Attempting to enable Legacy (non-ACPI) mode\n")); break; default: return_ACPI_STATUS(AE_BAD_PARAMETER); } if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not write ACPI mode change")); return_ACPI_STATUS(status); } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_hw_get_mode * * PARAMETERS: none * * RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY * * DESCRIPTION: Return current operating state of system. Determined by * querying the SCI_EN bit. * ******************************************************************************/ u32 acpi_hw_get_mode(void) { acpi_status status; u32 value; ACPI_FUNCTION_TRACE(hw_get_mode); /* If the Hardware Reduced flag is set, machine is always in acpi mode */ if (acpi_gbl_reduced_hardware) { return_UINT32(ACPI_SYS_MODE_ACPI); } /* * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, * system does not support mode transition. 
*/
	if (!acpi_gbl_FADT.smi_command) {
		return_UINT32(ACPI_SYS_MODE_ACPI);
	}

	/* SCI_EN set means the platform is in ACPI mode */
	status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value);
	if (ACPI_FAILURE(status)) {
		/* Can't read the register; report legacy mode */
		return_UINT32(ACPI_SYS_MODE_LEGACY);
	}

	if (value) {
		return_UINT32(ACPI_SYS_MODE_ACPI);
	} else {
		return_UINT32(ACPI_SYS_MODE_LEGACY);
	}
}

#endif				/* !ACPI_REDUCED_HARDWARE */
gpl-2.0
Eliminater74/cm_android_kernel_g3_patched
sound/soc/msm/msm-lowlatency-pcm-q6.c
2080
22148
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <asm/dma.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/control.h> #include "msm-pcm-q6.h" #include "msm-pcm-routing.h" static struct audio_locks the_locks; struct snd_msm { struct snd_card *card; struct snd_pcm *pcm; }; struct snd_msm_volume { struct msm_audio *prtd; unsigned volume; }; #define PLAYBACK_NUM_PERIODS 4 #define PLAYBACK_MAX_PERIOD_SIZE 1024 #define PLAYBACK_MIN_PERIOD_SIZE 512 #define CAPTURE_NUM_PERIODS 4 #define CAPTURE_MIN_PERIOD_SIZE 128 #define CAPTURE_MAX_PERIOD_SIZE 1024 static struct snd_pcm_hardware msm_pcm_hardware_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = CAPTURE_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE, .period_bytes_min = CAPTURE_MIN_PERIOD_SIZE, .period_bytes_max = CAPTURE_MAX_PERIOD_SIZE, .periods_min = CAPTURE_NUM_PERIODS, .periods_max = CAPTURE_NUM_PERIODS, .fifo_size = 0, }; static struct 
snd_pcm_hardware msm_pcm_hardware_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 6, .buffer_bytes_max = PLAYBACK_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE, .period_bytes_min = PLAYBACK_MIN_PERIOD_SIZE, .period_bytes_max = PLAYBACK_MAX_PERIOD_SIZE, .periods_min = PLAYBACK_NUM_PERIODS, .periods_max = PLAYBACK_NUM_PERIODS, .fifo_size = 0, }; /* Conventional and unconventional sample rate supported */ static unsigned int supported_sample_rates[] = { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 }; static uint32_t in_frame_info[CAPTURE_NUM_PERIODS][2]; static struct snd_pcm_hw_constraint_list constraints_sample_rates = { .count = ARRAY_SIZE(supported_sample_rates), .list = supported_sample_rates, .mask = 0, }; static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, void *priv) { struct msm_audio *prtd = priv; struct snd_pcm_substream *substream = prtd->substream; uint32_t *ptrmem = (uint32_t *)payload; int i = 0; uint32_t idx = 0; uint32_t size = 0; pr_debug("%s\n", __func__); switch (opcode) { case ASM_DATA_EVENT_WRITE_DONE: { pr_debug("ASM_DATA_EVENT_WRITE_DONE\n"); pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem); prtd->pcm_irq_pos += prtd->pcm_count; if (atomic_read(&prtd->start)) snd_pcm_period_elapsed(substream); atomic_inc(&prtd->out_count); wake_up(&the_locks.write_wait); if (!atomic_read(&prtd->start)) break; if (!prtd->mmap_flag) break; if (q6asm_is_cpu_buf_avail_nolock(IN, prtd->audio_client, &size, &idx)) { pr_debug("%s:writing %d bytes of buffer to dsp 2\n", __func__, prtd->pcm_count); q6asm_write_nolock(prtd->audio_client, prtd->pcm_count, 0, 0, NO_TIMESTAMP); } break; } case ASM_DATA_CMDRSP_EOS: pr_debug("ASM_DATA_CMDRSP_EOS\n"); 
prtd->cmd_ack = 1; wake_up(&the_locks.eos_wait); break; case ASM_DATA_EVENT_READ_DONE: { pr_debug("ASM_DATA_EVENT_READ_DONE\n"); pr_debug("token = 0x%08x\n", token); for (i = 0; i < 8; i++, ++ptrmem) pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem); in_frame_info[token][0] = payload[2]; in_frame_info[token][1] = payload[3]; prtd->pcm_irq_pos += in_frame_info[token][0]; pr_debug("pcm_irq_pos=%d\n", prtd->pcm_irq_pos); if (atomic_read(&prtd->start)) snd_pcm_period_elapsed(substream); if (atomic_read(&prtd->in_count) <= prtd->periods) atomic_inc(&prtd->in_count); wake_up(&the_locks.read_wait); if (prtd->mmap_flag && q6asm_is_cpu_buf_avail_nolock(OUT, prtd->audio_client, &size, &idx)) q6asm_read_nolock(prtd->audio_client); break; } case APR_BASIC_RSP_RESULT: { switch (payload[0]) { case ASM_SESSION_CMD_RUN: if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) { atomic_set(&prtd->start, 1); break; } if (prtd->mmap_flag) { pr_debug("%s:writing %d bytes buffer to dsp\n", __func__, prtd->pcm_count); q6asm_write_nolock(prtd->audio_client, prtd->pcm_count, 0, 0, NO_TIMESTAMP); } else { while (atomic_read(&prtd->out_needed)) { pr_debug("%s:writing %d bytesto dsp\n", __func__, prtd->pcm_count); q6asm_write_nolock(prtd->audio_client, prtd->pcm_count, 0, 0, NO_TIMESTAMP); atomic_dec(&prtd->out_needed); wake_up(&the_locks.write_wait); }; } atomic_set(&prtd->start, 1); break; default: break; } } break; default: pr_debug("Not Supported Event opcode[0x%x]\n", opcode); break; } } static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; int ret; pr_debug("%s\n", __func__); prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; /* rate and channels are sent to audio driver */ prtd->samp_rate = runtime->rate; prtd->channel_mode = runtime->channels; if (prtd->enabled) return 0; if 
(!prtd->set_channel_map) { memset(prtd->channel_map, 0, PCM_FORMAT_MAX_NUM_CHANNEL); if (prtd->channel_mode == 1) { prtd->channel_map[0] = PCM_CHANNEL_FL; } else if (prtd->channel_mode == 2) { prtd->channel_map[0] = PCM_CHANNEL_FL; prtd->channel_map[1] = PCM_CHANNEL_FR; } else if (prtd->channel_mode == 6) { prtd->channel_map[0] = PCM_CHANNEL_FC; prtd->channel_map[1] = PCM_CHANNEL_FL; prtd->channel_map[2] = PCM_CHANNEL_FR; prtd->channel_map[3] = PCM_CHANNEL_LB; prtd->channel_map[4] = PCM_CHANNEL_RB; prtd->channel_map[5] = PCM_CHANNEL_LFE; } else { pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__, prtd->channel_mode); } } ret = q6asm_media_format_block_multi_ch_pcm(prtd->audio_client, runtime->rate, runtime->channels, prtd->channel_map); if (ret < 0) pr_info("%s: CMD Format block failed\n", __func__); atomic_set(&prtd->out_count, runtime->periods); prtd->enabled = 1; prtd->cmd_ack = 0; return 0; } static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; int ret = 0; int i = 0; pr_debug("%s\n", __func__); prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; /* rate and channels are sent to audio driver */ prtd->samp_rate = runtime->rate; prtd->channel_mode = runtime->channels; if (prtd->enabled) return 0; pr_debug("Samp_rate = %d\n", prtd->samp_rate); pr_debug("Channel = %d\n", prtd->channel_mode); ret = q6asm_enc_cfg_blk_pcm(prtd->audio_client, prtd->samp_rate, prtd->channel_mode); if (ret < 0) pr_debug("%s: cmd cfg pcm was block failed", __func__); for (i = 0; i < runtime->periods; i++) q6asm_read(prtd->audio_client); prtd->periods = runtime->periods; prtd->enabled = 1; return ret; } static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { int ret = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = 
runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: pr_debug("%s: Trigger start\n", __func__); q6asm_run_nowait(prtd->audio_client, 0, 0, 0); break; case SNDRV_PCM_TRIGGER_STOP: pr_debug("SNDRV_PCM_TRIGGER_STOP\n"); atomic_set(&prtd->start, 0); if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) break; prtd->cmd_ack = 0; q6asm_cmd_nowait(prtd->audio_client, CMD_EOS); break; case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n"); q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE); atomic_set(&prtd->start, 0); break; default: ret = -EINVAL; break; } return ret; } static int msm_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd; int ret = 0; pr_debug("%s lowlatency\n", __func__); prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL); if (prtd == NULL) { pr_err("Failed to allocate memory for msm_audio\n"); return -ENOMEM; } prtd->substream = substream; prtd->audio_client = q6asm_audio_client_alloc( (app_cb)event_handler, prtd); if (!prtd->audio_client) { pr_err("%s: Could not allocate memory\n", __func__); kfree(prtd); return -ENOMEM; } prtd->audio_client->perf_mode = true; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { runtime->hw = msm_pcm_hardware_playback; ret = q6asm_open_write(prtd->audio_client, FORMAT_MULTI_CHANNEL_LINEAR_PCM); if (ret < 0) { pr_err("%s: pcm out open failed\n", __func__); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return -ENOMEM; } } /* Capture path */ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { runtime->hw = msm_pcm_hardware_capture; ret = q6asm_open_read_v2_1(prtd->audio_client, FORMAT_LINEAR_PCM); if (ret < 0) { pr_err("%s: pcm in open failed\n", __func__); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return -ENOMEM; } } 
pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session); prtd->session_id = prtd->audio_client->session; msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id, prtd->audio_client->perf_mode, prtd->session_id, substream->stream); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) prtd->cmd_ack = 1; ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_sample_rates); if (ret < 0) pr_err("snd_pcm_hw_constraint_list failed\n"); /* Ensure that buffer size is a multiple of period size */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) pr_err("snd_pcm_hw_constraint_integer failed\n"); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, PLAYBACK_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE, PLAYBACK_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE); if (ret < 0) { pr_err("constraint for buffer bytes min max ret = %d\n", ret); } } if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, CAPTURE_NUM_PERIODS * CAPTURE_MIN_PERIOD_SIZE, CAPTURE_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE); if (ret < 0) { pr_err("constraint for buffer bytes min max ret = %d\n", ret); } } prtd->dsp_cnt = 0; prtd->set_channel_map = false; runtime->private_data = prtd; pr_debug("substream->pcm->device = %d\n", substream->pcm->device); pr_debug("soc_prtd->dai_link->be_id = %d\n", soc_prtd->dai_link->be_id); return 0; } static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; int fbytes = 0; int xfer = 0; char *bufptr = NULL; void *data = NULL; uint32_t idx = 0; uint32_t size = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; fbytes = frames_to_bytes(runtime, frames); pr_debug("%s: prtd->out_count = %d\n", __func__, 
atomic_read(&prtd->out_count)); ret = wait_event_timeout(the_locks.write_wait, (atomic_read(&prtd->out_count)), 5 * HZ); if (!ret) { pr_err("%s: wait_event_timeout failed\n", __func__); goto fail; } if (!atomic_read(&prtd->out_count)) { pr_err("%s: pcm stopped out_count 0\n", __func__); return 0; } data = q6asm_is_cpu_buf_avail(IN, prtd->audio_client, &size, &idx); bufptr = data; if (bufptr) { pr_debug("%s:fbytes =%d: xfer=%d size=%d\n", __func__, fbytes, xfer, size); xfer = fbytes; if (copy_from_user(bufptr, buf, xfer)) { ret = -EFAULT; goto fail; } buf += xfer; fbytes -= xfer; pr_debug("%s:fbytes = %d: xfer=%d\n", __func__, fbytes, xfer); if (atomic_read(&prtd->start)) { pr_debug("%s:writing %d bytes of buffer to dsp\n", __func__, xfer); ret = q6asm_write(prtd->audio_client, xfer, 0, 0, NO_TIMESTAMP); if (ret < 0) { ret = -EFAULT; goto fail; } } else atomic_inc(&prtd->out_needed); atomic_dec(&prtd->out_count); } fail: return ret; } static int msm_pcm_playback_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd = runtime->private_data; int dir = 0; int ret = 0; pr_debug("%s\n", __func__); dir = IN; ret = wait_event_timeout(the_locks.eos_wait, prtd->cmd_ack, 5 * HZ); if (!ret) pr_err("%s: CMD_EOS failed\n", __func__); q6asm_cmd(prtd->audio_client, CMD_CLOSE); q6asm_audio_client_buf_free_contiguous(dir, prtd->audio_client); msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id, SNDRV_PCM_STREAM_PLAYBACK); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return 0; } static int msm_pcm_capture_copy(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; int fbytes = 0; int xfer; char *bufptr; void *data = NULL; static uint32_t idx; static uint32_t size; uint32_t offset = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio 
*prtd = substream->runtime->private_data; pr_debug("%s\n", __func__); fbytes = frames_to_bytes(runtime, frames); pr_debug("appl_ptr %d\n", (int)runtime->control->appl_ptr); pr_debug("hw_ptr %d\n", (int)runtime->status->hw_ptr); pr_debug("avail_min %d\n", (int)runtime->control->avail_min); ret = wait_event_timeout(the_locks.read_wait, (atomic_read(&prtd->in_count)), 5 * HZ); if (!ret) { pr_debug("%s: wait_event_timeout failed\n", __func__); goto fail; } if (!atomic_read(&prtd->in_count)) { pr_debug("%s: pcm stopped in_count 0\n", __func__); return 0; } pr_debug("Checking if valid buffer is available...%08x\n", (unsigned int) data); data = q6asm_is_cpu_buf_avail(OUT, prtd->audio_client, &size, &idx); bufptr = data; pr_debug("Size = %d\n", size); pr_debug("fbytes = %d\n", fbytes); pr_debug("idx = %d\n", idx); if (bufptr) { xfer = fbytes; if (xfer > size) xfer = size; offset = in_frame_info[idx][1]; pr_debug("Offset value = %d\n", offset); if (copy_to_user(buf, bufptr+offset, xfer)) { pr_err("Failed to copy buf to user\n"); ret = -EFAULT; goto fail; } fbytes -= xfer; size -= xfer; in_frame_info[idx][1] += xfer; pr_debug("%s:fbytes = %d: size=%d: xfer=%d\n", __func__, fbytes, size, xfer); pr_debug(" Sending next buffer to dsp\n"); memset(&in_frame_info[idx], 0, sizeof(uint32_t) * 2); atomic_dec(&prtd->in_count); ret = q6asm_read(prtd->audio_client); if (ret < 0) { pr_err("q6asm read failed\n"); ret = -EFAULT; goto fail; } } else pr_err("No valid buffer\n"); pr_debug("Returning from capture_copy... 
%d\n", ret); fail: return ret; } static int msm_pcm_capture_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd = runtime->private_data; int dir = OUT; pr_debug("%s\n", __func__); q6asm_cmd(prtd->audio_client, CMD_CLOSE); q6asm_audio_client_buf_free_contiguous(dir, prtd->audio_client); msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id, SNDRV_PCM_STREAM_CAPTURE); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return 0; } static int msm_pcm_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames); return ret; } static int msm_pcm_close(struct snd_pcm_substream *substream) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_close(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_close(substream); return ret; } static int msm_pcm_prepare(struct snd_pcm_substream *substream) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_prepare(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_prepare(substream); return ret; } static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; if (prtd->pcm_irq_pos >= prtd->pcm_size) prtd->pcm_irq_pos = 0; pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos); return bytes_to_frames(runtime, (prtd->pcm_irq_pos)); } static int msm_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { int result = 
0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; pr_debug("%s\n", __func__); prtd->mmap_flag = 1; if (runtime->dma_addr && runtime->dma_bytes) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); result = remap_pfn_range(vma, vma->vm_start, runtime->dma_addr >> PAGE_SHIFT, runtime->dma_bytes, vma->vm_page_prot); } else { pr_err("Physical address or size of buf is NULL"); return -EINVAL; } return result; } static int msm_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; struct snd_dma_buffer *dma_buf = &substream->dma_buffer; struct audio_buffer *buf; int dir, ret; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) dir = IN; else dir = OUT; /* *TODO : Need to Add Async IO changes. All period * size might not be supported. */ ret = q6asm_audio_client_buf_alloc_contiguous(dir, prtd->audio_client, (params_buffer_bytes(params) / params_periods(params)), params_periods(params)); if (ret < 0) { pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret); return -ENOMEM; } buf = prtd->audio_client->port[dir].buf; pr_debug("%s:buf = %p\n", __func__, buf); dma_buf->dev.type = SNDRV_DMA_TYPE_DEV; dma_buf->dev.dev = substream->pcm->card->dev; dma_buf->private_data = NULL; dma_buf->area = buf[0].data; dma_buf->addr = buf[0].phys; dma_buf->bytes = params_buffer_bytes(params); if (!dma_buf->area) return -ENOMEM; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; } static struct snd_pcm_ops msm_pcm_ops = { .open = msm_pcm_open, .copy = msm_pcm_copy, .hw_params = msm_pcm_hw_params, .close = msm_pcm_close, .ioctl = snd_pcm_lib_ioctl, .prepare = msm_pcm_prepare, .trigger = msm_pcm_trigger, .pointer = msm_pcm_pointer, .mmap = msm_pcm_mmap, }; static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card = rtd->card->snd_card; int ret = 0; 
if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); return ret; } static struct snd_soc_platform_driver msm_soc_platform = { .ops = &msm_pcm_ops, .pcm_new = msm_asoc_pcm_new, }; static __devinit int msm_pcm_probe(struct platform_device *pdev) { pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev)); return snd_soc_register_platform(&pdev->dev, &msm_soc_platform); } static int msm_pcm_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver msm_pcm_driver = { .driver = { .name = "msm-lowlatency-pcm-dsp", .owner = THIS_MODULE, }, .probe = msm_pcm_probe, .remove = __devexit_p(msm_pcm_remove), }; static int __init msm_soc_platform_init(void) { init_waitqueue_head(&the_locks.enable_wait); init_waitqueue_head(&the_locks.eos_wait); init_waitqueue_head(&the_locks.write_wait); init_waitqueue_head(&the_locks.read_wait); return platform_driver_register(&msm_pcm_driver); } module_init(msm_soc_platform_init); static void __exit msm_soc_platform_exit(void) { platform_driver_unregister(&msm_pcm_driver); } module_exit(msm_soc_platform_exit); MODULE_DESCRIPTION("Multi channel PCM module platform driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
zf2-laser-dev/android_kernel_asus_msm8939
drivers/gpu/host1x/hw/cdma_hw.c
2080
9119
/* * Tegra host1x Command DMA * * Copyright (c) 2010-2013, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/slab.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include "cdma.h" #include "channel.h" #include "dev.h" #include "debug.h" /* * Put the restart at the end of pushbuffer memor */ static void push_buffer_init(struct push_buffer *pb) { *(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0); } /* * Increment timedout buffer's syncpt via CPU. 
*/ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr, u32 syncpt_incrs, u32 syncval, u32 nr_slots) { struct host1x *host1x = cdma_to_host1x(cdma); struct push_buffer *pb = &cdma->push_buffer; u32 i; for (i = 0; i < syncpt_incrs; i++) host1x_syncpt_cpu_incr(cdma->timeout.syncpt); /* after CPU incr, ensure shadow is up to date */ host1x_syncpt_load(cdma->timeout.syncpt); /* NOP all the PB slots */ while (nr_slots--) { u32 *p = (u32 *)((u32)pb->mapped + getptr); *(p++) = HOST1X_OPCODE_NOP; *(p++) = HOST1X_OPCODE_NOP; dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__, pb->phys + getptr); getptr = (getptr + 8) & (pb->size_bytes - 1); } wmb(); } /* * Start channel DMA */ static void cdma_start(struct host1x_cdma *cdma) { struct host1x_channel *ch = cdma_to_channel(cdma); if (cdma->running) return; cdma->last_pos = cdma->push_buffer.pos; host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, HOST1X_CHANNEL_DMACTRL); /* set base, put and end pointer */ host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART); host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT); host1x_ch_writel(ch, cdma->push_buffer.phys + cdma->push_buffer.size_bytes + 4, HOST1X_CHANNEL_DMAEND); /* reset GET */ host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP | HOST1X_CHANNEL_DMACTRL_DMAGETRST | HOST1X_CHANNEL_DMACTRL_DMAINITGET, HOST1X_CHANNEL_DMACTRL); /* start the command DMA */ host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL); cdma->running = true; } /* * Similar to cdma_start(), but rather than starting from an idle * state (where DMA GET is set to DMA PUT), on a timeout we restore * DMA GET from an explicit value (so DMA may again be pending). 
*/ static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr) { struct host1x *host1x = cdma_to_host1x(cdma); struct host1x_channel *ch = cdma_to_channel(cdma); if (cdma->running) return; cdma->last_pos = cdma->push_buffer.pos; host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, HOST1X_CHANNEL_DMACTRL); /* set base, end pointer (all of memory) */ host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART); host1x_ch_writel(ch, cdma->push_buffer.phys + cdma->push_buffer.size_bytes, HOST1X_CHANNEL_DMAEND); /* set GET, by loading the value in PUT (then reset GET) */ host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT); host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP | HOST1X_CHANNEL_DMACTRL_DMAGETRST | HOST1X_CHANNEL_DMACTRL_DMAINITGET, HOST1X_CHANNEL_DMACTRL); dev_dbg(host1x->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET), host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT), cdma->last_pos); /* deassert GET reset and set PUT */ host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, HOST1X_CHANNEL_DMACTRL); host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT); /* start the command DMA */ host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL); cdma->running = true; } /* * Kick channel DMA into action by writing its PUT offset (if it has changed) */ static void cdma_flush(struct host1x_cdma *cdma) { struct host1x_channel *ch = cdma_to_channel(cdma); if (cdma->push_buffer.pos != cdma->last_pos) { host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT); cdma->last_pos = cdma->push_buffer.pos; } } static void cdma_stop(struct host1x_cdma *cdma) { struct host1x_channel *ch = cdma_to_channel(cdma); mutex_lock(&cdma->lock); if (cdma->running) { host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY); host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, HOST1X_CHANNEL_DMACTRL); cdma->running = false; } mutex_unlock(&cdma->lock); } /* * Stops both channel's command processor and CDMA 
immediately. * Also, tears down the channel and resets corresponding module. */ static void cdma_freeze(struct host1x_cdma *cdma) { struct host1x *host = cdma_to_host1x(cdma); struct host1x_channel *ch = cdma_to_channel(cdma); u32 cmdproc_stop; if (cdma->torndown && !cdma->running) { dev_warn(host->dev, "Already torn down\n"); return; } dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id); cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP); cmdproc_stop |= BIT(ch->id); host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP); dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET), host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT), cdma->last_pos); host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, HOST1X_CHANNEL_DMACTRL); host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN); cdma->running = false; cdma->torndown = true; } static void cdma_resume(struct host1x_cdma *cdma, u32 getptr) { struct host1x *host1x = cdma_to_host1x(cdma); struct host1x_channel *ch = cdma_to_channel(cdma); u32 cmdproc_stop; dev_dbg(host1x->dev, "resuming channel (id %d, DMAGET restart = 0x%x)\n", ch->id, getptr); cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP); cmdproc_stop &= ~(BIT(ch->id)); host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP); cdma->torndown = false; cdma_timeout_restart(cdma, getptr); } /* * If this timeout fires, it indicates the current sync_queue entry has * exceeded its TTL and the userctx should be timed out and remaining * submits already issued cleaned up (future submits return an error). 
*/ static void cdma_timeout_handler(struct work_struct *work) { struct host1x_cdma *cdma; struct host1x *host1x; struct host1x_channel *ch; u32 syncpt_val; u32 prev_cmdproc, cmdproc_stop; cdma = container_of(to_delayed_work(work), struct host1x_cdma, timeout.wq); host1x = cdma_to_host1x(cdma); ch = cdma_to_channel(cdma); host1x_debug_dump(cdma_to_host1x(cdma)); mutex_lock(&cdma->lock); if (!cdma->timeout.client) { dev_dbg(host1x->dev, "cdma_timeout: expired, but has no clientid\n"); mutex_unlock(&cdma->lock); return; } /* stop processing to get a clean snapshot */ prev_cmdproc = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP); cmdproc_stop = prev_cmdproc | BIT(ch->id); host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP); dev_dbg(host1x->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n", prev_cmdproc, cmdproc_stop); syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt); /* has buffer actually completed? */ if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) { dev_dbg(host1x->dev, "cdma_timeout: expired, but buffer had completed\n"); /* restore */ cmdproc_stop = prev_cmdproc & ~(BIT(ch->id)); host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP); mutex_unlock(&cdma->lock); return; } dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n", __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name, syncpt_val, cdma->timeout.syncpt_val); /* stop HW, resetting channel/module */ host1x_hw_cdma_freeze(host1x, cdma); host1x_cdma_update_sync_queue(cdma, ch->dev); mutex_unlock(&cdma->lock); } /* * Init timeout resources */ static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id) { INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); cdma->timeout.initialized = true; return 0; } /* * Clean up timeout resources */ static void cdma_timeout_destroy(struct host1x_cdma *cdma) { if (cdma->timeout.initialized) cancel_delayed_work(&cdma->timeout.wq); cdma->timeout.initialized = false; } static const struct 
host1x_cdma_ops host1x_cdma_ops = { .start = cdma_start, .stop = cdma_stop, .flush = cdma_flush, .timeout_init = cdma_timeout_init, .timeout_destroy = cdma_timeout_destroy, .freeze = cdma_freeze, .resume = cdma_resume, .timeout_cpu_incr = cdma_timeout_cpu_incr, }; static const struct host1x_pushbuffer_ops host1x_pushbuffer_ops = { .init = push_buffer_init, };
gpl-2.0
CyanideL/android_kernel_lge_g3
net/ipv6/ip6_output.c
2336
42026
/* * IPv6 output functions * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on linux/net/ipv4/ip_output.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * A.N.Kuznetsov : airthmetics in fragmentation. * extension headers are implemented. * route changes now work. * ip6_forward does not confuse sniffers. * etc. * * H. von Brand : Added missing #include <linux/string.h> * Imran Patel : frag id should be in NBO * Kazunori MIYAZAWA @USAGI * : add ip6_append_data and related functions * for datagram xmit */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/tcp.h> #include <linux/route.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <net/protocol.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/rawv6.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/checksum.h> #include <linux/mroute6.h> int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); int __ip6_local_out(struct sk_buff *skb) { int len; len = skb->len - sizeof(struct ipv6hdr); if (len > IPV6_MAXPLEN) len = 0; ipv6_hdr(skb)->payload_len = htons(len); return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, dst_output); } int ip6_local_out(struct sk_buff *skb) { int err; err = __ip6_local_out(skb); if (likely(err == 1)) err = dst_output(skb); return err; } EXPORT_SYMBOL_GPL(ip6_local_out); /* dev_loopback_xmit for use with netfilter. 
*/ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) { skb_reset_mac_header(newskb); __skb_pull(newskb, skb_network_offset(newskb)); newskb->pkt_type = PACKET_LOOPBACK; newskb->ip_summed = CHECKSUM_UNNECESSARY; WARN_ON(!skb_dst(newskb)); netif_rx_ni(newskb); return 0; } static int ip6_finish_output2(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; struct neighbour *neigh; skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && ((mroute6_socket(dev_net(dev), skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); /* Do not check for IFF_ALLMULTI; multicast routing is not supported in any case. */ if (newskb) NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, newskb, NULL, newskb->dev, ip6_dev_loopback_xmit); if (ipv6_hdr(skb)->hop_limit == 0) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } } IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST, skb->len); } rcu_read_lock(); neigh = dst_get_neighbour_noref(dst); if (neigh) { int res = neigh_output(neigh, skb); rcu_read_unlock(); return res; } rcu_read_unlock(); IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EINVAL; } static int ip6_finish_output(struct sk_buff *skb) { if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb))) return ip6_fragment(skb, ip6_finish_output2); else return ip6_finish_output2(skb); } int ip6_output(struct sk_buff *skb) { struct net_device *dev = skb_dst(skb)->dev; struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); 
return 0; } return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev, ip6_finish_output, !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } /* * xmit an sk_buff (used by TCP, SCTP and DCCP) */ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, struct ipv6_txoptions *opt, int tclass) { struct net *net = sock_net(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct in6_addr *first_hop = &fl6->daddr; struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr; u8 proto = fl6->flowi6_proto; int seg_len = skb->len; int hlimit = -1; u32 mtu; if (opt) { unsigned int head_room; /* First: exthdrs may take lots of space (~8K for now) MAX_HEADER is not enough. */ head_room = opt->opt_nflen + opt->opt_flen; seg_len += head_room; head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev); if (skb_headroom(skb) < head_room) { struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); if (skb2 == NULL) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -ENOBUFS; } kfree_skb(skb); skb = skb2; skb_set_owner_w(skb, sk); } if (opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); if (opt->opt_nflen) ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop); } skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); hdr = ipv6_hdr(skb); /* * Fill in the IPv6 header */ if (np) hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel; hdr->payload_len = htons(seg_len); hdr->nexthdr = proto; hdr->hop_limit = hlimit; hdr->saddr = fl6->saddr; hdr->daddr = *first_hop; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; mtu = dst_mtu(dst); if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUT, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, dst_output); } if (net_ratelimit()) printk(KERN_DEBUG "IPv6: sending 
pkt_too_big to self\n"); skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } EXPORT_SYMBOL(ip6_xmit); /* * To avoid extra problems ND packets are send through this * routine. It's code duplication but I really want to avoid * extra checks since ipv6_build_header is used by TCP (which * is for us performance critical) */ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr, int proto, int len) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *hdr; skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; skb_reset_network_header(skb); skb_put(skb, sizeof(struct ipv6hdr)); hdr = ipv6_hdr(skb); *(__be32*)hdr = htonl(0x60000000); hdr->payload_len = htons(len); hdr->nexthdr = proto; hdr->hop_limit = np->hop_limit; hdr->saddr = *saddr; hdr->daddr = *daddr; return 0; } static int ip6_call_ra_chain(struct sk_buff *skb, int sel) { struct ip6_ra_chain *ra; struct sock *last = NULL; read_lock(&ip6_ra_lock); for (ra = ip6_ra_chain; ra; ra = ra->next) { struct sock *sk = ra->sk; if (sk && ra->sel == sel && (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == skb->dev->ifindex)) { if (last) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) rawv6_rcv(last, skb2); } last = sk; } } if (last) { rawv6_rcv(last, skb); read_unlock(&ip6_ra_lock); return 1; } read_unlock(&ip6_ra_lock); return 0; } static int ip6_forward_proxy_check(struct sk_buff *skb) { struct ipv6hdr *hdr = ipv6_hdr(skb); u8 nexthdr = hdr->nexthdr; __be16 frag_off; int offset; if (ipv6_ext_hdr(nexthdr)) { offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off); if (offset < 0) return 0; } else offset = sizeof(struct ipv6hdr); if (nexthdr == IPPROTO_ICMPV6) { struct icmp6hdr *icmp6; if (!pskb_may_pull(skb, (skb_network_header(skb) + offset + 1 - skb->data))) return 0; icmp6 = (struct icmp6hdr 
*)(skb_network_header(skb) + offset); switch (icmp6->icmp6_type) { case NDISC_ROUTER_SOLICITATION: case NDISC_ROUTER_ADVERTISEMENT: case NDISC_NEIGHBOUR_SOLICITATION: case NDISC_NEIGHBOUR_ADVERTISEMENT: case NDISC_REDIRECT: /* For reaction involving unicast neighbor discovery * message destined to the proxied address, pass it to * input function. */ return 1; default: break; } } /* * The proxying router can't forward traffic sent to a link-local * address, so signal the sender and discard the packet. This * behavior is clarified by the MIPv6 specification. */ if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) { dst_link_failure(skb); return -1; } return 0; } static inline int ip6_forward_finish(struct sk_buff *skb) { return dst_output(skb); } int ip6_forward(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(dst->dev); u32 mtu; if (net->ipv6.devconf_all->forwarding == 0) goto error; if (skb_warn_if_lro(skb)) goto drop; if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } if (skb->pkt_type != PACKET_HOST) goto drop; skb_forward_csum(skb); /* * We DO NOT make any processing on * RA packets, pushing them to user level AS IS * without ane WARRANTY that application will be able * to interpret them. The reason is that we * cannot make anything clever here. * * We are not end-node, so that if packet contains * AH/ESP, we cannot make anything. * Defragmentation also would be mistake, RA packets * cannot be fragmented, because there is no warranty * that different fragments will go along one path. 
--ANK */ if (opt->ra) { u8 *ptr = skb_network_header(skb) + opt->ra; if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3])) return 0; } /* * check and decrement ttl */ if (hdr->hop_limit <= 1) { /* Force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -ETIMEDOUT; } /* XXX: idev->cnf.proxy_ndp? */ if ((net->ipv6.devconf_all->proxy_ndp == 1 && pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) || net->ipv6.devconf_all->proxy_ndp >= 2) { int proxied = ip6_forward_proxy_check(skb); if (proxied > 0) return ip6_input(skb); else if (proxied < 0) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } } if (!xfrm6_route_forward(skb)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } dst = skb_dst(skb); /* IPv6 specs say nothing about it, but it is clear that we cannot send redirects to source routed frames. We don't send redirects to frames decapsulated from IPsec. */ if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) { struct in6_addr *target = NULL; struct rt6_info *rt; /* * incoming and outgoing devices are the same * send a redirect. */ rt = (struct rt6_info *) dst; if (rt->rt6i_flags & RTF_GATEWAY) target = &rt->rt6i_gateway; else target = &hdr->daddr; if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); /* Limit redirects both by destination (here) and by source (inside ndisc_send_redirect) */ if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) ndisc_send_redirect(skb, target); } else { int addrtype = ipv6_addr_type(&hdr->saddr); /* This check is security critical. 
*/ if (addrtype == IPV6_ADDR_ANY || addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK)) goto error; if (addrtype & IPV6_ADDR_LINKLOCAL) { icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0); goto error; } } mtu = dst_mtu(dst); if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if (skb->len > mtu && !skb_is_gso(skb)) { /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (skb_cow(skb, dst->dev->hard_header_len)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); goto drop; } hdr = ipv6_hdr(skb); /* Mangling hops number delayed to point after skb COW */ hdr->hop_limit--; IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish); error: IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); drop: kfree_skb(skb); return -EINVAL; } static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) { to->pkt_type = from->pkt_type; to->priority = from->priority; to->protocol = from->protocol; skb_dst_drop(to); skb_dst_set(to, dst_clone(skb_dst(from))); to->dev = from->dev; to->mark = from->mark; #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif nf_copy(to, from); #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) to->nf_trace = from->nf_trace; #endif skb_copy_secmark(to, from); } int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { u16 offset = sizeof(struct ipv6hdr); struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); unsigned int packet_len = skb->tail - skb->network_header; int found_rhdr = 0; *nexthdr = &ipv6_hdr(skb)->nexthdr; while (offset + 1 <= packet_len) { switch (**nexthdr) { case 
NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
			/* A Home Address option keeps the scan going past
			 * this Destination Options header. */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default :
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}

/*
 * Pick the 32-bit Identification value for a Fragment header.  When the
 * route has (or can bind) an inet_peer and DST_NOPEER is not set, use the
 * peer's per-destination id sequence; otherwise fall back to a global
 * atomic counter, skipping the value 0 (0 is reserved as "not yet
 * assigned" by callers such as ip6_fragment's frag_id check).
 */
void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
{
	static atomic_t ipv6_fragmentation_id;
	int old, new;

	if (rt && !(rt->dst.flags & DST_NOPEER)) {
		struct inet_peer *peer;

		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		if (peer) {
			fhdr->identification = htonl(inet_getid(peer, 0));
			return;
		}
	}
	do {
		old = atomic_read(&ipv6_fragmentation_id);
		new = old + 1;
		if (!new)
			new = 1;
	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
	fhdr->identification = htonl(new);
}

/*
 * Split @skb into MTU-sized fragments and hand each one to @output.
 * Two strategies: a fast path that reuses an existing, suitably shaped
 * frag_list (each entry becomes one fragment in place), and a slow path
 * that allocates a fresh skb per fragment and copies data into it.
 * Consumes @skb in all cases; returns 0 or a negative errno.
 */
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err=0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb it not generated by a local socket.
	 */
	if (!skb->local_df && skb->len > mtu) {
		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	/* mtu is now the per-fragment payload budget. */
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		/* Fast path only if the head and every list member already
		 * have fragment-compatible geometry and are not shared. */
		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		/* Keep a copy of the unfragmentable part to replicate onto
		 * every fragment. */
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if(!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->dst);
			return 0;
		}

		/* On error, free the fragments not yet handed to output. */
		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->dst);
		return err;

slow_path_clean:
		/* Undo the ownership transfer done above for the members
		 * inspected before the fast path was abandoned. */
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while(left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      hroom + troom, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

/*
 * Helper for ip6_sk_dst_check(): non-zero when the cached route key no
 * longer matches @fl_addr — i.e. it is neither a current host route for
 * the address nor validated by the socket's cached last-used address.
 */
static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}

/*
 * Validate a socket's cached dst against flow @fl6.  Returns the dst if
 * still usable, or releases it and returns NULL so the caller performs a
 * fresh route lookup.
 */
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in not connected
	 * case is not very simple. Take into account,
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which has not this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

/*
 * Core of the dst lookup helpers below: resolve *dst for @fl6 if not
 * already supplied, pick a source address when the flow has none, and
 * (with optimistic DAD) re-route via the default router when the
 * nexthop neighbour is not yet valid and our source is OPTIMISTIC.
 * On failure releases *dst, sets it to NULL and returns a negative errno.
 */
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
#endif
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl6);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl6->saddr)) {
		struct rt6_info *rt = (struct rt6_info *) *dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rcu_read_lock();
	n = dst_get_neighbour_noref(*dst);
	if (n && !(n->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		rcu_read_unlock();
		ifp = ipv6_get_ifaddr(net, &fl6->saddr, (*dst)->dev, 1);
		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			/* Zero daddr so the lookup resolves the default route. */
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	} else {
		rcu_read_unlock();
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry
 * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@can_sleep: we are in a sleepable context
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst,
				      bool can_sleep)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	/* Rewrite the flow for the xfrm lookup: the final (inner)
	 * destination, and a sleepable-context hint for the resolver. */
	if (final_dst)
		fl6->daddr = *final_dst;
	if (can_sleep)
		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);

/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@can_sleep: we are in a sleepable context
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst,
					 bool can_sleep)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	/* Drop the cached dst if it no longer matches this flow. */
	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		fl6->daddr = *final_dst;
	if (can_sleep)
		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

/*
 * UFO path of ip6_append_data(): instead of building one skb per
 * fragment, accumulate the whole (super-MTU) UDP datagram into a single
 * skb on the write queue and let the device segment it (SKB_GSO_UDP).
 * Returns 0 on success or a negative errno.
 */
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu,unsigned int flags,
			struct rt6_info *rt)

{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb,fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
	}

	err = skb_append_datato_frags(sk,skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* Specify the length of each IPv6 datagram fragment.
		 * It has to be a multiple of 8.
		 */
		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
					     sizeof(struct frag_hdr)) & ~7;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(&fhdr, rt);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support do UPD LSO,
	 * so follow normal path
	 */
	kfree_skb(skb);

	return err;
}

/* Duplicate one extension-header block (hdrlen is in 8-octet units,
 * excluding the first); NULL in gives NULL out. */
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

/* Same as ip6_opt_dup() but for a Routing header. */
static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

/*
 * Append @length bytes supplied via @getfrag to the socket's corked
 * write queue, building MTU-shaped skbs as needed.  The first call on an
 * empty queue sets up the cork state (copied options, route, hop limit,
 * traffic class, fragment size); later calls reuse it.  The queued data
 * is sent by ip6_push_pending_frames().  Returns 0 or a negative errno.
 */
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if
	(opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len;
	} else {
		/* Queue already corked: reuse the saved cork state. */
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) +
		    IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM) {
		err = sock_tx_timestamp(sk, &tx_flags);
		if (err)
			goto error;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octects, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (length > mtu) {
		int proto = sk->sk_protocol;
		/* Datagram sockets that asked for dontfrag get an error
		 * instead of local fragmentation. */
		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
			ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}

		if (proto == IPPROTO_UDP &&
		    (rt->dst.dev->features & NETIF_F_UFO)) {

			err = ip6_ufo_append_data(sk, getfrag, from, length,
						  hh_len, fragheaderlen,
						  transhdrlen, mtu, flags, rt);
			if (err)
				goto error;
			return 0;
		}
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MODE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				/* Move the overhang of the previous skb into
				 * this one and fix up its checksum. */
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			/* Scatter/gather device: append into page frags. */
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page &&
			    (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != skb_frag_page(frag)) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					skb_frag_ref(skb, i);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if(i < MAX_SKB_FRAGS) {
				/* Current page exhausted: allocate a fresh
				 * one and remember it on the socket. */
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from,
				    skb_frag_address(frag) + skb_frag_size(frag),
				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			skb_frag_size_add(frag, copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}

/*
 * Drop all cork state set up by ip6_append_data(): free the copied
 * option blocks, release the held dst reference and clear the saved flow.
 */
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
	if (np->cork.opt) {
		kfree(np->cork.opt->dst0opt);
		kfree(np->cork.opt->dst1opt);
		kfree(np->cork.opt->hopopt);
		kfree(np->cork.opt->srcrt);
		kfree(np->cork.opt);
		np->cork.opt = NULL;
	}

	if (inet->cork.base.dst) {
		dst_release(inet->cork.base.dst);
		inet->cork.base.dst = NULL;
		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}

/*
 * Coalesce everything queued by ip6_append_data() into one skb (with a
 * frag_list), prepend options and the IPv6 header from the cork state,
 * and hand it to ip6_local_out().  Always releases the cork state.
 */
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb =
	__skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	/* Chain the remaining queued skbs onto the first skb's frag_list
	 * and fold their accounting into it. */
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	if (np->pmtudisc < IPV6_PMTUDISC_DO)
		skb->local_df = 1;

	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/* Version (6), traffic class and flow label in the first word. */
	*(__be32*)hdr = fl6->flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 * Discard everything queued by ip6_append_data() without sending it,
 * counting each skb that already has a route as an output discard,
 * then release the cork state.
 */
void ip6_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
gpl-2.0
Razdroid/razdroid-kernel
drivers/net/wireless/mwifiex/txrx.c
2336
5819
/*
 * Marvell Wireless LAN device driver: generic TX/RX data handling
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"

/*
 * This function processes the received buffer.
 *
 * Main responsibility of this function is to parse the RxPD to
 * identify the correct interface this packet is headed for and
 * forwarding it to the associated handling function, where the
 * packet will be further processed and sent to kernel/upper layer
 * if required.
 */
int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
			     struct sk_buff *skb)
{
	struct mwifiex_private *priv =
		mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
	struct rxpd *local_rx_pd;
	struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);

	local_rx_pd = (struct rxpd *) (skb->data);
	/* Get the BSS number from rxpd, get corresponding priv */
	priv = mwifiex_get_priv_by_id(adapter, local_rx_pd->bss_num &
				      BSS_NUM_MASK, local_rx_pd->bss_type);
	if (!priv)
		priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);

	if (!priv) {
		dev_err(adapter->dev, "data: priv not found. Drop RX packet\n");
		dev_kfree_skb_any(skb);
		return -1;
	}

	rx_info->bss_num = priv->bss_num;
	rx_info->bss_type = priv->bss_type;

	/* Dispatch by the role of the interface the packet belongs to. */
	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
		return mwifiex_process_uap_rx_packet(priv, skb);

	return mwifiex_process_sta_rx_packet(priv, skb);
}
EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);

/*
 * This function sends a packet to device.
 *
 * It processes the packet to add the TxPD, checks condition and
 * sends the processed packet to firmware for transmission.
 *
 * On successful completion, the function calls the completion callback
 * and logs the time.
 */
int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
		       struct mwifiex_tx_param *tx_param)
{
	int ret = -1;
	struct mwifiex_adapter *adapter = priv->adapter;
	u8 *head_ptr;
	struct txpd *local_tx_pd = NULL;

	/* Prepend the role-specific TxPD descriptor. */
	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
		head_ptr = mwifiex_process_uap_txpd(priv, skb);
	else
		head_ptr = mwifiex_process_sta_txpd(priv, skb);

	if (head_ptr) {
		if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
			local_tx_pd =
				(struct txpd *) (head_ptr + INTF_HEADER_LEN);
		if (adapter->iface_type == MWIFIEX_USB) {
			adapter->data_sent = true;
			/* USB transfers carry no interface header. */
			skb_pull(skb, INTF_HEADER_LEN);
			ret = adapter->if_ops.host_to_card(adapter,
							   MWIFIEX_USB_EP_DATA,
							   skb, NULL);
		} else {
			ret = adapter->if_ops.host_to_card(adapter,
							   MWIFIEX_TYPE_DATA,
							   skb, tx_param);
		}
	}

	switch (ret) {
	case -ENOSR:
		dev_err(adapter->dev, "data: -ENOSR is returned\n");
		break;
	case -EBUSY:
		if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
		    (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) {
			priv->adapter->tx_lock_flag = false;
			if (local_tx_pd)
				local_tx_pd->flags = 0;
		}
		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
		break;
	case -1:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
			ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		break;
	case 0:
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	default:
		break;
	}

	return ret;
}

/*
 * Packet send completion callback handler.
 *
 * It either frees the buffer directly or forwards it to another
 * completion callback which checks conditions, updates statistics,
 * wakes up stalled traffic queue if required, and then frees the buffer.
 */
int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
				struct sk_buff *skb, int aggr, int status)
{
	struct mwifiex_private *priv;
	struct mwifiex_txinfo *tx_info;
	struct netdev_queue *txq;
	int index;

	if (!skb)
		return 0;

	tx_info = MWIFIEX_SKB_TXCB(skb);
	priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
				      tx_info->bss_type);
	if (!priv)
		goto done;

	if (adapter->iface_type == MWIFIEX_USB)
		adapter->data_sent = false;

	mwifiex_set_trans_start(priv->netdev);
	if (!status) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		if (priv->tx_timeout_cnt)
			priv->tx_timeout_cnt = 0;
	} else {
		priv->stats.tx_errors++;
	}

	if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
		atomic_dec_return(&adapter->pending_bridged_pkts);

	if (aggr)
		/* For skb_aggr, do not wake up tx queue */
		goto done;

	atomic_dec(&adapter->tx_pending);

	/* Wake the matching WMM netdev queue once its backlog drains. */
	index = mwifiex_1d_to_wmm_queue[skb->priority];
	if (atomic_dec_return(&priv->wmm_tx_pending[index]) < LOW_TX_PENDING) {
		txq = netdev_get_tx_queue(priv->netdev, index);
		if (netif_tx_queue_stopped(txq)) {
			netif_tx_wake_queue(txq);
			dev_dbg(adapter->dev, "wake queue: %d\n", index);
		}
	}
done:
	dev_kfree_skb_any(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(mwifiex_write_data_complete);
gpl-2.0
garwynn/L710_MA6_Kernel
arch/blackfin/mach-bf533/boards/ip0x.c
2336
7304
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *                2007 David Rowe
 *                2006 Intratrade Ltd.
 *                     Ivan Danov <idanov@gmail.com>
 *                2005 National ICT Australia (NICTA)
 *                     Aidan Williams <aidan@nicta.com.au>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>

/*
 * Name the Board for the /proc/cpuinfo
 */
const char bfin_board_name[] = "IP04/IP08";

/*
 * Driver needs to know address, irq and flag pin.
 */
#if defined(CONFIG_BFIN532_IP0X)
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)

#include <linux/dm9000.h>

/* First DM9000 NIC: address/data windows plus its interrupt pin. */
static struct resource dm9000_resource1[] = {
	{
		.start = 0x20100000,
		.end   = 0x20100000 + 1,
		.flags = IORESOURCE_MEM
	},{
		.start = 0x20100000 + 2,
		.end   = 0x20100000 + 3,
		.flags = IORESOURCE_MEM
	},{
		.start = IRQ_PF15,
		.end   = IRQ_PF15,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE
	}
};

/* Second DM9000 NIC at its own chip-select window and IRQ pin. */
static struct resource dm9000_resource2[] = {
	{
		.start = 0x20200000,
		.end   = 0x20200000 + 1,
		.flags = IORESOURCE_MEM
	},{
		.start = 0x20200000 + 2,
		.end   = 0x20200000 + 3,
		.flags = IORESOURCE_MEM
	},{
		.start = IRQ_PF14,
		.end   = IRQ_PF14,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE
	}
};

/*
 * for the moment we limit ourselves to 16bit IO until some
 * better IO routines can be written and tested
 */
static struct dm9000_plat_data dm9000_platdata1 = {
	.flags = DM9000_PLATF_16BITONLY,
};

static struct platform_device dm9000_device1 = {
	.name = "dm9000",
	.id = 0,
	.num_resources = ARRAY_SIZE(dm9000_resource1),
	.resource = dm9000_resource1,
	.dev = {
		.platform_data = &dm9000_platdata1,
	}
};

static struct dm9000_plat_data dm9000_platdata2 = {
	.flags = DM9000_PLATF_16BITONLY,
};

static struct platform_device dm9000_device2 = {
	.name = "dm9000",
	.id = 1,
	.num_resources = ARRAY_SIZE(dm9000_resource2),
	.resource = dm9000_resource2,
	.dev = {
		.platform_data = &dm9000_platdata2,
	}
};

#endif
#endif

#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
/* all SPI peripherals info goes here */

#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
	.enable_dma = 0,	/* if 1 - block!!! */
	.bits_per_word = 8,
};
#endif

/* Notice: for blackfin, the speed_hz is the value of register
 * SPI_BAUD, not the real baudrate */
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
	{
		.modalias = "mmc_spi",
		.max_speed_hz = 2,
		.bus_num = 1,
		.chip_select = 5,
		.controller_data = &mmc_spi_chip_info,
	},
#endif
};

/* SPI controller data */
static struct bfin5xx_spi_master spi_bfin_master_info = {
	.num_chipselect = 8,
	.enable_dma = 1,  /* master has the ability to do dma transfer */
};

static struct platform_device spi_bfin_master_device = {
	.name = "bfin-spi-master",
	.id = 1, /* Bus number */
	.dev = {
		.platform_data = &spi_bfin_master_info, /* Passed to driver */
	},
};
#endif  /* spi master and devices */

#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
/* UART0: register window, RX/error IRQs and TX/RX DMA channels. */
static struct resource bfin_uart0_resources[] = {
	{
		.start = BFIN_UART_THR,
		.end = BFIN_UART_GCTL+2,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX + 1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART0_ERROR,
		.end = IRQ_UART0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART0_TX,
		.end = CH_UART0_TX,
		.flags = IORESOURCE_DMA,
	},
	{
		.start = CH_UART0_RX,
		.end = CH_UART0_RX,
		.flags = IORESOURCE_DMA,
	},
};

static unsigned short bfin_uart0_peripherals[] = {
	P_UART0_TX, P_UART0_RX, 0
};

static struct platform_device bfin_uart0_device = {
	.name = "bfin-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_uart0_resources),
	.resource = bfin_uart0_resources,
	.dev = {
		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
	},
};
#endif
#endif

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
/* IrDA (SIR) on UART0: shares the UART0 RX IRQ and DMA channel. */
static struct resource bfin_sir0_resources[] = {
	{
		.start = 0xFFC00400,
		.end = 0xFFC004FF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART0_RX,
		.end = CH_UART0_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir0_device = {
	.name = "bfin_sir",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
	.resource = bfin_sir0_resources,
};
#endif
#endif

#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
/* ISP1362 USB host controller: data/command windows plus IRQ pin. */
static struct resource isp1362_hcd_resources[] = {
	{
		.start = 0x20300000,
		.end   = 0x20300000 + 1,
		.flags = IORESOURCE_MEM,
	},{
		.start = 0x20300000 + 2,
		.end   = 0x20300000 + 3,
		.flags = IORESOURCE_MEM,
	},{
		.start = IRQ_PF11,
		.end   = IRQ_PF11,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
	},
};

static struct isp1362_platform_data isp1362_priv = {
	.sel15Kres = 1,
	.clknotstop = 0,
	.oc_enable = 0,		/* external OC */
	.int_act_high = 0,
	.int_edge_triggered = 0,
	.remote_wakeup_connected = 0,
	.no_power_switching = 1,
	.power_switching_mode = 0,
};

static struct platform_device isp1362_hcd_device = {
	.name = "isp1362-hcd",
	.id = 0,
	.dev = {
		.platform_data = &isp1362_priv,
	},
	.num_resources = ARRAY_SIZE(isp1362_hcd_resources),
	.resource = isp1362_hcd_resources,
};
#endif

/* Every device registered for this board, gated by its config option. */
static struct platform_device *ip0x_devices[] __initdata = {
#if defined(CONFIG_BFIN532_IP0X)
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
	&dm9000_device1,
	&dm9000_device2,
#endif
#endif

#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
	&spi_bfin_master_device,
#endif

#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#endif

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
	&bfin_sir0_device,
#endif
#endif

#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
	&isp1362_hcd_device,
#endif
};

/* Register all board devices and the SPI slave table at arch_initcall time. */
static int __init ip0x_init(void)
{
	printk(KERN_INFO "%s(): registering device resources\n", __func__);
	platform_add_devices(ip0x_devices, ARRAY_SIZE(ip0x_devices));
	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));

	return 0;
}

arch_initcall(ip0x_init);

/* Devices needed before the normal initcall sequence (console UART). */
static struct platform_device *ip0x_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#endif
};

void __init native_machine_early_platform_add_devices(void)
{
	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(ip0x_early_devices,
		ARRAY_SIZE(ip0x_early_devices));
}
gpl-2.0
bestmjh47/kernel_msm
arch/powerpc/platforms/powernv/pci-p5ioc2.c
4640
6741
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Currently supports only P5IOC2
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/abs_addr.h>

#include "powernv.h"
#include "pci.h"

/* For now, use a fixed amount of TCE memory for each p5ioc2
 * hub, 16M will do
 */
#define P5IOC2_TCE_MEMORY	0x01000000

#ifdef CONFIG_PCI_MSI
/*
 * Compose the MSI message for one hardware IRQ.  P5IOC2 only supports
 * 64-bit MSIs here (address_hi is non-zero), so 32-bit requests are
 * rejected with -ENXIO.
 */
static int pnv_pci_p5ioc2_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				    unsigned int hwirq, unsigned int is_64,
				    struct msi_msg *msg)
{
	if (WARN_ON(!is_64))
		return -ENXIO;
	msg->data = hwirq - phb->msi_base;
	msg->address_hi = 0x10000000;
	msg->address_lo = 0;

	return 0;
}

/*
 * Read the firmware-provided MSI range for this PHB and set up the
 * allocation bitmap.  Silently does nothing when the property is
 * absent or the PHB is the (unverified) PCI-X flavour.
 */
static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
{
	unsigned int bmap_size;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop)
		return;

	/* Don't do MSI's on p5ioc2 PCI-X as they are not properly
	 * verified in HW
	 */
	if (of_device_is_compatible(phb->hose->dn, "ibm,p5ioc2-pcix"))
		return;
	phb->msi_base = be32_to_cpup(prop);
	phb->msi_count = be32_to_cpup(prop + 1);
	bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
	phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
	if (!phb->msi_map) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}
	phb->msi_setup = pnv_pci_p5ioc2_msi_setup;
	phb->msi32_support = 0;
	pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		phb->msi_count, phb->msi_base);
}
#else
static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */

/*
 * Per-device DMA setup: lazily initialize the PHB's single shared
 * IOMMU table on first use, then point the device at it.
 */
static void __devinit pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb,
						   struct pci_dev *pdev)
{
	if (phb->p5ioc2.iommu_table.it_map == NULL)
		iommu_init_table(&phb->p5ioc2.iommu_table, phb->hose->node);

	set_iommu_table_base(&pdev->dev, &phb->p5ioc2.iommu_table);
}

/*
 * Initialize one p5ioc2 PHB: hand its slice of TCE memory to OPAL,
 * allocate and fill the pnv_phb/pci_controller pair, map its registers,
 * parse "ranges" and hook up MSI and IOMMU setup.
 */
static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np,
					   void *tce_mem, u64 tce_size)
{
	struct pnv_phb *phb;
	const u64 *prop64;
	u64 phb_id;
	int64_t rc;
	static int primary = 1;

	pr_info(" Initializing p5ioc2 PHB %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_devel(" PHB-ID : 0x%016llx\n", phb_id);
	pr_devel(" TCE AT : 0x%016lx\n", __pa(tce_mem));
	pr_devel(" TCE SZ : 0x%016llx\n", tce_size);

	rc = opal_pci_set_phb_tce_memory(phb_id, __pa(tce_mem), tce_size);
	if (rc != OPAL_SUCCESS) {
		pr_err(" Failed to set TCE memory, OPAL error %lld\n", rc);
		return;
	}

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (phb) {
		memset(phb, 0, sizeof(struct pnv_phb));
		phb->hose = pcibios_alloc_controller(np);
	}
	if (!phb || !phb->hose) {
		pr_err(" Failed to allocate PCI controller\n");
		return;
	}

	spin_lock_init(&phb->lock);
	phb->hose->first_busno = 0;
	phb->hose->last_busno = 0xff;
	phb->hose->private_data = phb;
	phb->opal_id = phb_id;
	phb->type = PNV_PHB_P5IOC2;
	phb->model = PNV_PHB_MODEL_P5IOC2;

	phb->regs = of_iomap(np, 0);

	if (phb->regs == NULL)
		pr_err(" Failed to map registers !\n");
	else {
		/* Debug dump of the bridge decode registers */
		pr_devel(" P_BUID = 0x%08x\n", in_be32(phb->regs + 0x100));
		pr_devel(" P_IOSZ = 0x%08x\n", in_be32(phb->regs + 0x1b0));
		pr_devel(" P_IO_ST = 0x%08x\n", in_be32(phb->regs + 0x1e0));
		pr_devel(" P_MEM1_H = 0x%08x\n", in_be32(phb->regs + 0x1a0));
		pr_devel(" P_MEM1_L = 0x%08x\n", in_be32(phb->regs + 0x190));
		pr_devel(" P_MSZ1_L = 0x%08x\n", in_be32(phb->regs + 0x1c0));
		pr_devel(" P_MEM_ST = 0x%08x\n", in_be32(phb->regs + 0x1d0));
		pr_devel(" P_MEM2_H = 0x%08x\n", in_be32(phb->regs + 0x2c0));
		pr_devel(" P_MEM2_L = 0x%08x\n", in_be32(phb->regs + 0x2b0));
		pr_devel(" P_MSZ2_H = 0x%08x\n", in_be32(phb->regs + 0x2d0));
		pr_devel(" P_MSZ2_L = 0x%08x\n", in_be32(phb->regs + 0x2e0));
	}

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(phb->hose, np, primary);
	primary = 0;

	phb->hose->ops = &pnv_pci_ops;

	/* Setup MSI support */
	pnv_pci_init_p5ioc2_msis(phb);

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_p5ioc2_dma_dev_setup;
	pnv_pci_setup_iommu_table(&phb->p5ioc2.iommu_table,
				  tce_mem, tce_size, 0);
}

/*
 * Probe a p5ioc2 IO hub: allocate a fixed 16M TCE pool, give it to
 * OPAL, then split it evenly (rounded down to a power of two) among
 * the child PHBs and initialize each of them.
 */
void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
{
	struct device_node *phbn;
	const u64 *prop64;
	u64 hub_id;
	void *tce_mem;
	uint64_t tce_per_phb;
	int64_t rc;
	int phb_count = 0;

	pr_info("Probing p5ioc2 IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_info(" HUB-ID : 0x%016llx\n", hub_id);

	/* Currently allocate 16M of TCE memory for every Hub
	 *
	 * XXX TODO: Make it chip local if possible
	 */
	tce_mem = __alloc_bootmem(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY,
				  __pa(MAX_DMA_ADDRESS));
	if (!tce_mem) {
		pr_err(" Failed to allocate TCE Memory !\n");
		return;
	}
	pr_debug(" TCE : 0x%016lx..0x%016lx\n",
		 __pa(tce_mem), __pa(tce_mem) + P5IOC2_TCE_MEMORY - 1);
	rc = opal_pci_set_hub_tce_memory(hub_id, __pa(tce_mem),
					 P5IOC2_TCE_MEMORY);
	if (rc != OPAL_SUCCESS) {
		pr_err(" Failed to allocate TCE memory, OPAL error %lld\n", rc);
		return;
	}

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
		    of_device_is_compatible(phbn, "ibm,p5ioc2-pciex"))
			phb_count++;
	}

	/* Calculate how much TCE space we can give per PHB */
	tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count);
	pr_info(" Allocating %lld MB of TCE memory per PHB\n",
		tce_per_phb >> 20);

	/* Initialize PHBs */
	for_each_child_of_node(np, phbn) {
		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
		    of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) {
			pnv_pci_init_p5ioc2_phb(phbn, tce_mem, tce_per_phb);
			tce_mem += tce_per_phb;
		}
	}
}
gpl-2.0
Negamann303/kernel-nk1-negalite-lt02ltespr
drivers/mtd/maps/scb2_flash.c
4896
7611
/*
 * MTD map driver for BIOS Flash on Intel SCB2 boards
 * Copyright (C) 2002 Sun Microsystems, Inc.
 *	Tim Hockin <thockin@sun.com>
 *
 * A few notes on this MTD map:
 *
 * This was developed with a small number of SCB2 boards to test on.
 * Hopefully, Intel has not introducted too many unaccounted variables in the
 * making of this board.
 *
 * The BIOS marks its own memory region as 'reserved' in the e820 map.  We
 * try to request it here, but if it fails, we carry on anyway.
 *
 * This is how the chip is attached, so said the schematic:
 * * a 4 MiB (32 Mib) 16 bit chip
 * * a 1 MiB memory region
 * * A20 and A21 pulled up
 * * D8-D15 ignored
 * What this means is that, while we are addressing bytes linearly, we are
 * really addressing words, and discarding the other byte.  This means that
 * the chip MUST BE at least 2 MiB.  This also means that every block is
 * actually half as big as the chip reports.  It also means that accesses of
 * logical address 0 hit higher-address sections of the chip, not physical 0.
 * One can only hope that these 4MiB x16 chips were a lot cheaper than 1MiB x8
 * chips.
 *
 * This driver assumes the chip is not write-protected by an external signal.
 * As of the this writing, that is true, but may change, just to spite me.
 *
 * The actual BIOS layout has been mostly reverse engineered.  Intel BIOS
 * updates for this board include 10 related (*.bio - &.bi9) binary files and
 * another separate (*.bbo) binary file.  The 10 files are 64k of data + a
 * small header.  If the headers are stripped off, the 10 64k files can be
 * concatenated into a 640k image.  This is your BIOS image, proper.  The
 * separate .bbo file also has a small header.  It is the 'Boot Block'
 * recovery BIOS.  Once the header is stripped, no further prep is needed.
 * As best I can tell, the BIOS is arranged as such:
 * offset 0x00000 to 0x4ffff (320k): unknown - SCSI BIOS, etc?
 * offset 0x50000 to 0xeffff (640k): BIOS proper
 * offset 0xf0000 ty 0xfffff (64k): Boot Block region
 *
 * Intel's BIOS update program flashes the BIOS and Boot Block in separate
 * steps.  Probably a wise thing to do.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#define MODNAME		"scb2_flash"
#define SCB2_ADDR	0xfff00000
#define SCB2_WINDOW	0x00100000

static void __iomem *scb2_ioaddr;
static struct mtd_info *scb2_mtd;
static struct map_info scb2_map = {
	.name =      "SCB2 BIOS Flash",
	.size =      0,
	.bankwidth =  1,
};
/* set when request_mem_region() failed, so we must not release it */
static int region_fail;

/*
 * Correct the CFI-probed geometry for the half-width wiring described
 * in the file header: halve every erase size and trim the erase-region
 * table so it fits in the 1 MiB window.
 */
static int __devinit
scb2_fixup_mtd(struct mtd_info *mtd)
{
	int i;
	int done = 0;
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* barf if this doesn't look right */
	if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) {
		printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
		    cfi->cfiq->InterfaceDesc);
		return -1;
	}

	/* I wasn't here. I didn't see. dwmw2. */

	/* the chip is sometimes bigger than the map - what a waste */
	mtd->size = map->size;

	/*
	 * We only REALLY get half the chip, due to the way it is
	 * wired up - D8-D15 are tossed away.  We read linear bytes,
	 * but in reality we are getting 1/2 of each 16-bit read,
	 * which LOOKS linear to us.  Because CFI code accounts for
	 * things like lock/unlock/erase by eraseregions, we need to
	 * fudge them to reflect this.  Erases go like this:
	 * * send an erase to an address
	 * * the chip samples the address and erases the block
	 * * add the block erasesize to the address and repeat
	 * -- the problem is that addresses are 16-bit addressable
	 * -- we end up erasing every-other block
	 */
	mtd->erasesize /= 2;
	for (i = 0; i < mtd->numeraseregions; i++) {
		struct mtd_erase_region_info *region = &mtd->eraseregions[i];
		region->erasesize /= 2;
	}

	/*
	 * If the chip is bigger than the map, it is wired with the high
	 * address lines pulled up.  This makes us access the top portion of
	 * the chip, so all our erase-region info is wrong.  Start cutting from
	 * the bottom.
	 */
	for (i = 0; !done && i < mtd->numeraseregions; i++) {
		struct mtd_erase_region_info *region = &mtd->eraseregions[i];

		if (region->numblocks * region->erasesize > mtd->size) {
			region->numblocks = ((unsigned long)mtd->size /
						region->erasesize);
			done = 1;
		} else {
			region->numblocks = 0;
		}
		region->offset = 0;
	}

	return 0;
}

/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
#define CSB5_FCR	0x41
#define CSB5_FCR_DECODE_ALL 0x0e

/*
 * PCI probe: enable flash decoding in the CSB5 south bridge, map the
 * 1 MiB window, run a CFI probe and register the resulting MTD after
 * fixing its geometry.  Every failure path undoes what was done.
 */
static int __devinit
scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	u8 reg;

	/* enable decoding of the flash region in the south bridge */
	pci_read_config_byte(dev, CSB5_FCR, &reg);
	pci_write_config_byte(dev, CSB5_FCR, reg | CSB5_FCR_DECODE_ALL);

	if (!request_mem_region(SCB2_ADDR, SCB2_WINDOW, scb2_map.name)) {
		/*
		 * The BIOS seems to mark the flash region as 'reserved'
		 * in the e820 map.  Warn and go about our business.
		 */
		printk(KERN_WARNING MODNAME
		    ": warning - can't reserve rom window, continuing\n");
		region_fail = 1;
	}

	/* remap the IO window (w/o caching) */
	scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
	if (!scb2_ioaddr) {
		printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
		if (!region_fail)
			release_mem_region(SCB2_ADDR, SCB2_WINDOW);
		return -ENOMEM;
	}

	scb2_map.phys = SCB2_ADDR;
	scb2_map.virt = scb2_ioaddr;
	scb2_map.size = SCB2_WINDOW;

	simple_map_init(&scb2_map);

	/* try to find a chip */
	scb2_mtd = do_map_probe("cfi_probe", &scb2_map);

	if (!scb2_mtd) {
		printk(KERN_ERR MODNAME ": flash probe failed!\n");
		iounmap(scb2_ioaddr);
		if (!region_fail)
			release_mem_region(SCB2_ADDR, SCB2_WINDOW);
		return -ENODEV;
	}

	scb2_mtd->owner = THIS_MODULE;
	if (scb2_fixup_mtd(scb2_mtd) < 0) {
		mtd_device_unregister(scb2_mtd);
		map_destroy(scb2_mtd);
		iounmap(scb2_ioaddr);
		if (!region_fail)
			release_mem_region(SCB2_ADDR, SCB2_WINDOW);
		return -ENODEV;
	}

	printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n",
	       (unsigned long long)scb2_mtd->size,
	       (unsigned long long)(SCB2_WINDOW - scb2_mtd->size));

	mtd_device_register(scb2_mtd, NULL, 0);

	return 0;
}

/*
 * PCI remove: lock the flash against writes, then tear down the MTD,
 * the mapping and (if we got it) the memory region.
 */
static void __devexit
scb2_flash_remove(struct pci_dev *dev)
{
	if (!scb2_mtd)
		return;

	/* disable flash writes */
	mtd_lock(scb2_mtd, 0, scb2_mtd->size);

	mtd_device_unregister(scb2_mtd);
	map_destroy(scb2_mtd);

	iounmap(scb2_ioaddr);
	scb2_ioaddr = NULL;

	if (!region_fail)
		release_mem_region(SCB2_ADDR, SCB2_WINDOW);
	pci_set_drvdata(dev, NULL);
}

static struct pci_device_id scb2_flash_pci_ids[] = {
	{
	  .vendor = PCI_VENDOR_ID_SERVERWORKS,
	  .device = PCI_DEVICE_ID_SERVERWORKS_CSB5,
	  .subvendor = PCI_ANY_ID,
	  .subdevice = PCI_ANY_ID
	},
	{ 0, }
};

static struct pci_driver scb2_flash_driver = {
	.name =     "Intel SCB2 BIOS Flash",
	.id_table = scb2_flash_pci_ids,
	.probe =    scb2_flash_probe,
	.remove =   __devexit_p(scb2_flash_remove),
};

static int __init
scb2_flash_init(void)
{
	return pci_register_driver(&scb2_flash_driver);
}

static void __exit
scb2_flash_exit(void)
{
	pci_unregister_driver(&scb2_flash_driver);
}

module_init(scb2_flash_init);
module_exit(scb2_flash_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
MODULE_DESCRIPTION("MTD map driver for Intel SCB2 BIOS Flash");
MODULE_DEVICE_TABLE(pci, scb2_flash_pci_ids);
gpl-2.0
Project-Elite/elite_kernel_m7
drivers/staging/media/easycap/easycap_ioctl.c
4896
73759
/****************************************************************************** * * * easycap_ioctl.c * * * ******************************************************************************/ /* * * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org> * * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /*****************************************************************************/ #include "easycap.h" /*--------------------------------------------------------------------------*/ /* * UNLESS THERE IS A PREMATURE ERROR RETURN THIS ROUTINE UPDATES THE * FOLLOWING: * peasycap->standard_offset * peasycap->inputset[peasycap->input].standard_offset * peasycap->fps * peasycap->usec * peasycap->tolerate * peasycap->skip */ /*---------------------------------------------------------------------------*/ int adjust_standard(struct easycap *peasycap, v4l2_std_id std_id) { struct easycap_standard const *peasycap_standard; u16 reg, set; int ir, rc, need, k; unsigned int itwas, isnow; bool resubmit; if (!peasycap) { SAY("ERROR: peasycap is NULL\n"); return -EFAULT; } if (!peasycap->pusb_device) { SAM("ERROR: peasycap->pusb_device is NULL\n"); return -EFAULT; } peasycap_standard = &easycap_standard[0]; while (0xFFFF != peasycap_standard->mask) { if (std_id == peasycap_standard->v4l2_standard.id) break; peasycap_standard++; } if (0xFFFF == 
peasycap_standard->mask) { peasycap_standard = &easycap_standard[0]; while (0xFFFF != peasycap_standard->mask) { if (std_id & peasycap_standard->v4l2_standard.id) break; peasycap_standard++; } } if (0xFFFF == peasycap_standard->mask) { SAM("ERROR: 0x%08X=std_id: standard not found\n", (unsigned int)std_id); return -EINVAL; } SAM("selected standard: %s\n", &(peasycap_standard->v4l2_standard.name[0])); if (peasycap->standard_offset == peasycap_standard - easycap_standard) { SAM("requested standard already in effect\n"); return 0; } peasycap->standard_offset = peasycap_standard - easycap_standard; for (k = 0; k < INPUT_MANY; k++) { if (!peasycap->inputset[k].standard_offset_ok) { peasycap->inputset[k].standard_offset = peasycap->standard_offset; } } if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) { peasycap->inputset[peasycap->input].standard_offset = peasycap->standard_offset; peasycap->inputset[peasycap->input].standard_offset_ok = 1; } else JOM(8, "%i=peasycap->input\n", peasycap->input); peasycap->fps = peasycap_standard->v4l2_standard.frameperiod.denominator / peasycap_standard->v4l2_standard.frameperiod.numerator; switch (peasycap->fps) { case 6: case 30: { peasycap->ntsc = true; break; } case 5: case 25: { peasycap->ntsc = false; break; } default: { SAM("MISTAKE: %i=frames-per-second\n", peasycap->fps); return -ENOENT; } } JOM(8, "%i frames-per-second\n", peasycap->fps); if (0x8000 & peasycap_standard->mask) { peasycap->skip = 5; peasycap->usec = 1000000 / (2 * (5 * peasycap->fps)); peasycap->tolerate = 1000 * (25 / (5 * peasycap->fps)); } else { peasycap->skip = 0; peasycap->usec = 1000000 / (2 * peasycap->fps); peasycap->tolerate = 1000 * (25 / peasycap->fps); } if (peasycap->video_isoc_streaming) { resubmit = true; easycap_video_kill_urbs(peasycap); } else resubmit = false; /*--------------------------------------------------------------------------*/ /* * SAA7113H DATASHEET PAGE 44, TABLE 42 */ 
/*--------------------------------------------------------------------------*/ need = 0; itwas = 0; reg = 0x00; set = 0x00; switch (peasycap_standard->mask & 0x000F) { case NTSC_M_JP: { reg = 0x0A; set = 0x95; ir = read_saa(peasycap->pusb_device, reg); if (0 > ir) SAM("ERROR: cannot read SAA register 0x%02X\n", reg); else itwas = (unsigned int)ir; rc = write_saa(peasycap->pusb_device, reg, set); if (rc) SAM("ERROR: failed to set SAA register " "0x%02X to 0x%02X for JP standard\n", reg, set); else { isnow = (unsigned int)read_saa(peasycap->pusb_device, reg); if (0 > ir) JOM(8, "SAA register 0x%02X changed " "to 0x%02X\n", reg, isnow); else JOM(8, "SAA register 0x%02X changed " "from 0x%02X to 0x%02X\n", reg, itwas, isnow); } reg = 0x0B; set = 0x48; ir = read_saa(peasycap->pusb_device, reg); if (0 > ir) SAM("ERROR: cannot read SAA register 0x%02X\n", reg); else itwas = (unsigned int)ir; rc = write_saa(peasycap->pusb_device, reg, set); if (rc) SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X " "for JP standard\n", reg, set); else { isnow = (unsigned int)read_saa(peasycap->pusb_device, reg); if (0 > ir) JOM(8, "SAA register 0x%02X changed " "to 0x%02X\n", reg, isnow); else JOM(8, "SAA register 0x%02X changed " "from 0x%02X to 0x%02X\n", reg, itwas, isnow); } /*--------------------------------------------------------------------------*/ /* * NOTE: NO break HERE: RUN ON TO NEXT CASE */ /*--------------------------------------------------------------------------*/ } case NTSC_M: case PAL_BGHIN: { reg = 0x0E; set = 0x01; need = 1; break; } case NTSC_N_443: case PAL_60: { reg = 0x0E; set = 0x11; need = 1; break; } case NTSC_443: case PAL_Nc: { reg = 0x0E; set = 0x21; need = 1; break; } case NTSC_N: case PAL_M: { reg = 0x0E; set = 0x31; need = 1; break; } case SECAM: { reg = 0x0E; set = 0x51; need = 1; break; } default: break; } /*--------------------------------------------------------------------------*/ if (need) { ir = read_saa(peasycap->pusb_device, reg); if (0 > 
ir) SAM("ERROR: failed to read SAA register 0x%02X\n", reg); else itwas = (unsigned int)ir; rc = write_saa(peasycap->pusb_device, reg, set); if (0 != write_saa(peasycap->pusb_device, reg, set)) { SAM("ERROR: failed to set SAA register " "0x%02X to 0x%02X for table 42\n", reg, set); } else { isnow = (unsigned int)read_saa(peasycap->pusb_device, reg); if (0 > ir) JOM(8, "SAA register 0x%02X changed " "to 0x%02X\n", reg, isnow); else JOM(8, "SAA register 0x%02X changed " "from 0x%02X to 0x%02X\n", reg, itwas, isnow); } } /*--------------------------------------------------------------------------*/ /* * SAA7113H DATASHEET PAGE 41 */ /*--------------------------------------------------------------------------*/ reg = 0x08; ir = read_saa(peasycap->pusb_device, reg); if (0 > ir) SAM("ERROR: failed to read SAA register 0x%02X " "so cannot reset\n", reg); else { itwas = (unsigned int)ir; if (peasycap_standard->mask & 0x0001) set = itwas | 0x40 ; else set = itwas & ~0x40 ; rc = write_saa(peasycap->pusb_device, reg, set); if (rc) SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", reg, set); else { isnow = (unsigned int)read_saa(peasycap->pusb_device, reg); if (0 > ir) JOM(8, "SAA register 0x%02X changed to 0x%02X\n", reg, isnow); else JOM(8, "SAA register 0x%02X changed " "from 0x%02X to 0x%02X\n", reg, itwas, isnow); } } /*--------------------------------------------------------------------------*/ /* * SAA7113H DATASHEET PAGE 51, TABLE 57 */ /*---------------------------------------------------------------------------*/ reg = 0x40; ir = read_saa(peasycap->pusb_device, reg); if (0 > ir) SAM("ERROR: failed to read SAA register 0x%02X " "so cannot reset\n", reg); else { itwas = (unsigned int)ir; if (peasycap_standard->mask & 0x0001) set = itwas | 0x80 ; else set = itwas & ~0x80 ; rc = write_saa(peasycap->pusb_device, reg, set); if (rc) SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", reg, set); else { isnow = (unsigned int)read_saa(peasycap->pusb_device, 
reg); if (0 > ir) JOM(8, "SAA register 0x%02X changed to 0x%02X\n", reg, isnow); else JOM(8, "SAA register 0x%02X changed " "from 0x%02X to 0x%02X\n", reg, itwas, isnow); } } /*--------------------------------------------------------------------------*/ /* * SAA7113H DATASHEET PAGE 53, TABLE 66 */ /*--------------------------------------------------------------------------*/ reg = 0x5A; ir = read_saa(peasycap->pusb_device, reg); if (0 > ir) SAM("ERROR: failed to read SAA register 0x%02X but continuing\n", reg); itwas = (unsigned int)ir; if (peasycap_standard->mask & 0x0001) set = 0x0A ; else set = 0x07 ; if (0 != write_saa(peasycap->pusb_device, reg, set)) SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", reg, set); else { isnow = (unsigned int)read_saa(peasycap->pusb_device, reg); if (0 > ir) JOM(8, "SAA register 0x%02X changed " "to 0x%02X\n", reg, isnow); else JOM(8, "SAA register 0x%02X changed " "from 0x%02X to 0x%02X\n", reg, itwas, isnow); } if (resubmit) easycap_video_submit_urbs(peasycap); return 0; } /*****************************************************************************/ /*--------------------------------------------------------------------------*/ /* * THE ALGORITHM FOR RESPONDING TO THE VIDIO_S_FMT IOCTL REQUIRES * A VALID VALUE OF peasycap->standard_offset, OTHERWISE -EBUSY IS RETURNED. * * PROVIDED THE ARGUMENT try IS false AND THERE IS NO PREMATURE ERROR RETURN * THIS ROUTINE UPDATES THE FOLLOWING: * peasycap->format_offset * peasycap->inputset[peasycap->input].format_offset * peasycap->pixelformat * peasycap->height * peasycap->width * peasycap->bytesperpixel * peasycap->byteswaporder * peasycap->decimatepixel * peasycap->frame_buffer_used * peasycap->videofieldamount * peasycap->offerfields * * IF SUCCESSFUL THE FUNCTION RETURNS THE OFFSET IN easycap_format[] * IDENTIFYING THE FORMAT WHICH IS TO RETURNED TO THE USER. * ERRORS RETURN A NEGATIVE NUMBER. 
*/ /*--------------------------------------------------------------------------*/ int adjust_format(struct easycap *peasycap, u32 width, u32 height, u32 pixelformat, int field, bool try) { struct easycap_format *peasycap_format, *peasycap_best_format; u16 mask; struct usb_device *p; int miss, multiplier, best, k; char bf[5], fo[32], *pc; u32 uc; bool resubmit; if (!peasycap) { SAY("ERROR: peasycap is NULL\n"); return -EFAULT; } if (0 > peasycap->standard_offset) { JOM(8, "%i=peasycap->standard_offset\n", peasycap->standard_offset); return -EBUSY; } p = peasycap->pusb_device; if (!p) { SAM("ERROR: peaycap->pusb_device is NULL\n"); return -EFAULT; } pc = &bf[0]; uc = pixelformat; memcpy((void *)pc, (void *)(&uc), 4); bf[4] = 0; mask = 0xFF & easycap_standard[peasycap->standard_offset].mask; SAM("sought: %ix%i,%s(0x%08X),%i=field,0x%02X=std mask\n", width, height, pc, pixelformat, field, mask); switch (field) { case V4L2_FIELD_ANY: { strcpy(&fo[0], "V4L2_FIELD_ANY "); break; } case V4L2_FIELD_NONE: { strcpy(&fo[0], "V4L2_FIELD_NONE"); break; } case V4L2_FIELD_TOP: { strcpy(&fo[0], "V4L2_FIELD_TOP"); break; } case V4L2_FIELD_BOTTOM: { strcpy(&fo[0], "V4L2_FIELD_BOTTOM"); break; } case V4L2_FIELD_INTERLACED: { strcpy(&fo[0], "V4L2_FIELD_INTERLACED"); break; } case V4L2_FIELD_SEQ_TB: { strcpy(&fo[0], "V4L2_FIELD_SEQ_TB"); break; } case V4L2_FIELD_SEQ_BT: { strcpy(&fo[0], "V4L2_FIELD_SEQ_BT"); break; } case V4L2_FIELD_ALTERNATE: { strcpy(&fo[0], "V4L2_FIELD_ALTERNATE"); break; } case V4L2_FIELD_INTERLACED_TB: { strcpy(&fo[0], "V4L2_FIELD_INTERLACED_TB"); break; } case V4L2_FIELD_INTERLACED_BT: { strcpy(&fo[0], "V4L2_FIELD_INTERLACED_BT"); break; } default: { strcpy(&fo[0], "V4L2_FIELD_... 
UNKNOWN "); break; } } SAM("sought: %s\n", &fo[0]); if (V4L2_FIELD_ANY == field) { field = V4L2_FIELD_NONE; SAM("prefer: V4L2_FIELD_NONE=field, was V4L2_FIELD_ANY\n"); } peasycap_best_format = NULL; peasycap_format = &easycap_format[0]; while (0 != peasycap_format->v4l2_format.fmt.pix.width) { JOM(16, ".> %i %i 0x%08X %ix%i\n", peasycap_format->mask & 0x01, peasycap_format->v4l2_format.fmt.pix.field, peasycap_format->v4l2_format.fmt.pix.pixelformat, peasycap_format->v4l2_format.fmt.pix.width, peasycap_format->v4l2_format.fmt.pix.height); if (((peasycap_format->mask & 0x1F) == (mask & 0x1F)) && (peasycap_format->v4l2_format.fmt.pix.field == field) && (peasycap_format->v4l2_format.fmt.pix.pixelformat == pixelformat) && (peasycap_format->v4l2_format.fmt.pix.width == width) && (peasycap_format->v4l2_format.fmt.pix.height == height)) { peasycap_best_format = peasycap_format; break; } peasycap_format++; } if (0 == peasycap_format->v4l2_format.fmt.pix.width) { SAM("cannot do: %ix%i with standard mask 0x%02X\n", width, height, mask); peasycap_format = &easycap_format[0]; best = -1; while (0 != peasycap_format->v4l2_format.fmt.pix.width) { if (((peasycap_format->mask & 0x1F) == (mask & 0x1F)) && (peasycap_format->v4l2_format.fmt.pix.field == field) && (peasycap_format->v4l2_format.fmt.pix.pixelformat == pixelformat)) { miss = abs(peasycap_format->v4l2_format.fmt.pix.width - width); if ((best > miss) || (best < 0)) { best = miss; peasycap_best_format = peasycap_format; if (!miss) break; } } peasycap_format++; } if (-1 == best) { SAM("cannot do %ix... 
with standard mask 0x%02X\n", width, mask); SAM("cannot do ...x%i with standard mask 0x%02X\n", height, mask); SAM(" %ix%i unmatched\n", width, height); return peasycap->format_offset; } } if (!peasycap_best_format) { SAM("MISTAKE: peasycap_best_format is NULL"); return -EINVAL; } peasycap_format = peasycap_best_format; /*...........................................................................*/ if (try) return peasycap_best_format - easycap_format; /*...........................................................................*/ if (false != try) { SAM("MISTAKE: true==try where is should be false\n"); return -EINVAL; } SAM("actioning: %ix%i %s\n", peasycap_format->v4l2_format.fmt.pix.width, peasycap_format->v4l2_format.fmt.pix.height, &peasycap_format->name[0]); peasycap->height = peasycap_format->v4l2_format.fmt.pix.height; peasycap->width = peasycap_format->v4l2_format.fmt.pix.width; peasycap->pixelformat = peasycap_format->v4l2_format.fmt.pix.pixelformat; peasycap->format_offset = peasycap_format - easycap_format; for (k = 0; k < INPUT_MANY; k++) { if (!peasycap->inputset[k].format_offset_ok) { peasycap->inputset[k].format_offset = peasycap->format_offset; } } if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) { peasycap->inputset[peasycap->input].format_offset = peasycap->format_offset; peasycap->inputset[peasycap->input].format_offset_ok = 1; } else JOM(8, "%i=peasycap->input\n", peasycap->input); peasycap->bytesperpixel = (0x00E0 & peasycap_format->mask) >> 5 ; if (0x0100 & peasycap_format->mask) peasycap->byteswaporder = true; else peasycap->byteswaporder = false; if (0x0200 & peasycap_format->mask) peasycap->skip = 5; else peasycap->skip = 0; if (0x0800 & peasycap_format->mask) peasycap->decimatepixel = true; else peasycap->decimatepixel = false; if (0x1000 & peasycap_format->mask) peasycap->offerfields = true; else peasycap->offerfields = false; if (peasycap->decimatepixel) multiplier = 2; else multiplier = 1; peasycap->videofieldamount = 
multiplier * peasycap->width * multiplier * peasycap->height; peasycap->frame_buffer_used = peasycap->bytesperpixel * peasycap->width * peasycap->height; if (peasycap->video_isoc_streaming) { resubmit = true; easycap_video_kill_urbs(peasycap); } else resubmit = false; /*---------------------------------------------------------------------------*/ /* * PAL */ /*---------------------------------------------------------------------------*/ if (0 == (0x01 & peasycap_format->mask)) { if (((720 == peasycap_format->v4l2_format.fmt.pix.width) && (576 == peasycap_format->v4l2_format.fmt.pix.height)) || ((360 == peasycap_format->v4l2_format.fmt.pix.width) && (288 == peasycap_format->v4l2_format.fmt.pix.height))) { if (set_resolution(p, 0x0000, 0x0001, 0x05A0, 0x0121)) { SAM("ERROR: set_resolution() failed\n"); return -EINVAL; } } else if ((704 == peasycap_format->v4l2_format.fmt.pix.width) && (576 == peasycap_format->v4l2_format.fmt.pix.height)) { if (set_resolution(p, 0x0004, 0x0001, 0x0584, 0x0121)) { SAM("ERROR: set_resolution() failed\n"); return -EINVAL; } } else if (((640 == peasycap_format->v4l2_format.fmt.pix.width) && (480 == peasycap_format->v4l2_format.fmt.pix.height)) || ((320 == peasycap_format->v4l2_format.fmt.pix.width) && (240 == peasycap_format->v4l2_format.fmt.pix.height))) { if (set_resolution(p, 0x0014, 0x0020, 0x0514, 0x0110)) { SAM("ERROR: set_resolution() failed\n"); return -EINVAL; } } else { SAM("MISTAKE: bad format, cannot set resolution\n"); return -EINVAL; } /*---------------------------------------------------------------------------*/ /* * NTSC */ /*---------------------------------------------------------------------------*/ } else { if (((720 == peasycap_format->v4l2_format.fmt.pix.width) && (480 == peasycap_format->v4l2_format.fmt.pix.height)) || ((360 == peasycap_format->v4l2_format.fmt.pix.width) && (240 == peasycap_format->v4l2_format.fmt.pix.height))) { if (set_resolution(p, 0x0000, 0x0003, 0x05A0, 0x00F3)) { SAM("ERROR: set_resolution() 
failed\n"); return -EINVAL; } } else if (((640 == peasycap_format->v4l2_format.fmt.pix.width) && (480 == peasycap_format->v4l2_format.fmt.pix.height)) || ((320 == peasycap_format->v4l2_format.fmt.pix.width) && (240 == peasycap_format->v4l2_format.fmt.pix.height))) { if (set_resolution(p, 0x0014, 0x0003, 0x0514, 0x00F3)) { SAM("ERROR: set_resolution() failed\n"); return -EINVAL; } } else { SAM("MISTAKE: bad format, cannot set resolution\n"); return -EINVAL; } } /*---------------------------------------------------------------------------*/ if (resubmit) easycap_video_submit_urbs(peasycap); return peasycap_best_format - easycap_format; } /*****************************************************************************/ int adjust_brightness(struct easycap *peasycap, int value) { unsigned int mood; int i1, k; if (!peasycap) { SAY("ERROR: peasycap is NULL\n"); return -EFAULT; } if (!peasycap->pusb_device) { SAM("ERROR: peasycap->pusb_device is NULL\n"); return -EFAULT; } i1 = 0; while (0xFFFFFFFF != easycap_control[i1].id) { if (V4L2_CID_BRIGHTNESS == easycap_control[i1].id) { if ((easycap_control[i1].minimum > value) || (easycap_control[i1].maximum < value)) value = easycap_control[i1].default_value; if ((easycap_control[i1].minimum <= peasycap->brightness) && (easycap_control[i1].maximum >= peasycap->brightness)) { if (peasycap->brightness == value) { SAM("unchanged brightness at 0x%02X\n", value); return 0; } } peasycap->brightness = value; for (k = 0; k < INPUT_MANY; k++) { if (!peasycap->inputset[k].brightness_ok) peasycap->inputset[k].brightness = peasycap->brightness; } if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) { peasycap->inputset[peasycap->input].brightness = peasycap->brightness; peasycap->inputset[peasycap->input].brightness_ok = 1; } else JOM(8, "%i=peasycap->input\n", peasycap->input); mood = 0x00FF & (unsigned int)peasycap->brightness; if (write_saa(peasycap->pusb_device, 0x0A, mood)) { SAM("WARNING: failed to adjust brightness " "to 
0x%02X\n", mood); return -ENOENT; } SAM("adjusting brightness to 0x%02X\n", mood); return 0; } i1++; } SAM("WARNING: failed to adjust brightness: control not found\n"); return -ENOENT; } /*****************************************************************************/ int adjust_contrast(struct easycap *peasycap, int value) { unsigned int mood; int i1, k; if (!peasycap) { SAY("ERROR: peasycap is NULL\n"); return -EFAULT; } if (!peasycap->pusb_device) { SAM("ERROR: peasycap->pusb_device is NULL\n"); return -EFAULT; } i1 = 0; while (0xFFFFFFFF != easycap_control[i1].id) { if (V4L2_CID_CONTRAST == easycap_control[i1].id) { if ((easycap_control[i1].minimum > value) || (easycap_control[i1].maximum < value)) value = easycap_control[i1].default_value; if ((easycap_control[i1].minimum <= peasycap->contrast) && (easycap_control[i1].maximum >= peasycap->contrast)) { if (peasycap->contrast == value) { SAM("unchanged contrast at 0x%02X\n", value); return 0; } } peasycap->contrast = value; for (k = 0; k < INPUT_MANY; k++) { if (!peasycap->inputset[k].contrast_ok) peasycap->inputset[k].contrast = peasycap->contrast; } if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) { peasycap->inputset[peasycap->input].contrast = peasycap->contrast; peasycap->inputset[peasycap->input].contrast_ok = 1; } else JOM(8, "%i=peasycap->input\n", peasycap->input); mood = 0x00FF & (unsigned int) (peasycap->contrast - 128); if (write_saa(peasycap->pusb_device, 0x0B, mood)) { SAM("WARNING: failed to adjust contrast to " "0x%02X\n", mood); return -ENOENT; } SAM("adjusting contrast to 0x%02X\n", mood); return 0; } i1++; } SAM("WARNING: failed to adjust contrast: control not found\n"); return -ENOENT; } /*****************************************************************************/ int adjust_saturation(struct easycap *peasycap, int value) { unsigned int mood; int i1, k; if (!peasycap) { SAY("ERROR: peasycap is NULL\n"); return -EFAULT; } if (!peasycap->pusb_device) { SAM("ERROR: 
peasycap->pusb_device is NULL\n"); return -EFAULT; } i1 = 0; while (0xFFFFFFFF != easycap_control[i1].id) { if (V4L2_CID_SATURATION == easycap_control[i1].id) { if ((easycap_control[i1].minimum > value) || (easycap_control[i1].maximum < value)) value = easycap_control[i1].default_value; if ((easycap_control[i1].minimum <= peasycap->saturation) && (easycap_control[i1].maximum >= peasycap->saturation)) { if (peasycap->saturation == value) { SAM("unchanged saturation at 0x%02X\n", value); return 0; } } peasycap->saturation = value; for (k = 0; k < INPUT_MANY; k++) { if (!peasycap->inputset[k].saturation_ok) peasycap->inputset[k].saturation = peasycap->saturation; } if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) { peasycap->inputset[peasycap->input].saturation = peasycap->saturation; peasycap->inputset[peasycap->input].saturation_ok = 1; } else JOM(8, "%i=peasycap->input\n", peasycap->input); mood = 0x00FF & (unsigned int) (peasycap->saturation - 128); if (write_saa(peasycap->pusb_device, 0x0C, mood)) { SAM("WARNING: failed to adjust saturation to " "0x%02X\n", mood); return -ENOENT; } SAM("adjusting saturation to 0x%02X\n", mood); return 0; break; } i1++; } SAM("WARNING: failed to adjust saturation: control not found\n"); return -ENOENT; } /*****************************************************************************/ int adjust_hue(struct easycap *peasycap, int value) { unsigned int mood; int i1, i2, k; if (!peasycap) { SAY("ERROR: peasycap is NULL\n"); return -EFAULT; } if (!peasycap->pusb_device) { SAM("ERROR: peasycap->pusb_device is NULL\n"); return -EFAULT; } i1 = 0; while (0xFFFFFFFF != easycap_control[i1].id) { if (V4L2_CID_HUE == easycap_control[i1].id) { if ((easycap_control[i1].minimum > value) || (easycap_control[i1].maximum < value)) value = easycap_control[i1].default_value; if ((easycap_control[i1].minimum <= peasycap->hue) && (easycap_control[i1].maximum >= peasycap->hue)) { if (peasycap->hue == value) { SAM("unchanged hue at 0x%02X\n", 
value); return 0; } } peasycap->hue = value; for (k = 0; k < INPUT_MANY; k++) { if (!peasycap->inputset[k].hue_ok) peasycap->inputset[k].hue = peasycap->hue; } if (0 <= peasycap->input && INPUT_MANY > peasycap->input) { peasycap->inputset[peasycap->input].hue = peasycap->hue; peasycap->inputset[peasycap->input].hue_ok = 1; } else JOM(8, "%i=peasycap->input\n", peasycap->input); i2 = peasycap->hue - 128; mood = 0x00FF & ((int) i2); if (write_saa(peasycap->pusb_device, 0x0D, mood)) { SAM("WARNING: failed to adjust hue to 0x%02X\n", mood); return -ENOENT; } SAM("adjusting hue to 0x%02X\n", mood); return 0; break; } i1++; } SAM("WARNING: failed to adjust hue: control not found\n"); return -ENOENT; } /*****************************************************************************/ static int adjust_volume(struct easycap *peasycap, int value) { s8 mood; int i1; if (!peasycap) { SAY("ERROR: peasycap is NULL\n"); return -EFAULT; } if (!peasycap->pusb_device) { SAM("ERROR: peasycap->pusb_device is NULL\n"); return -EFAULT; } i1 = 0; while (0xFFFFFFFF != easycap_control[i1].id) { if (V4L2_CID_AUDIO_VOLUME == easycap_control[i1].id) { if ((easycap_control[i1].minimum > value) || (easycap_control[i1].maximum < value)) value = easycap_control[i1].default_value; if ((easycap_control[i1].minimum <= peasycap->volume) && (easycap_control[i1].maximum >= peasycap->volume)) { if (peasycap->volume == value) { SAM("unchanged volume at 0x%02X\n", value); return 0; } } peasycap->volume = value; mood = (16 > peasycap->volume) ? 16 : ((31 < peasycap->volume) ? 
31 : (s8) peasycap->volume); if (!easycap_audio_gainset(peasycap->pusb_device, mood)) { SAM("WARNING: failed to adjust volume to " "0x%2X\n", mood); return -ENOENT; } SAM("adjusting volume to 0x%02X\n", mood); return 0; } i1++; } SAM("WARNING: failed to adjust volume: control not found\n"); return -ENOENT; } /*****************************************************************************/ /*---------------------------------------------------------------------------*/ /* * AN ALTERNATIVE METHOD OF MUTING MIGHT SEEM TO BE: * usb_set_interface(peasycap->pusb_device, * peasycap->audio_interface, * peasycap->audio_altsetting_off); * HOWEVER, AFTER THIS COMMAND IS ISSUED ALL SUBSEQUENT URBS RECEIVE STATUS * -ESHUTDOWN. THE HANDLER ROUTINE easyxxx_complete() DECLINES TO RESUBMIT * THE URB AND THE PIPELINE COLLAPSES IRRETRIEVABLY. BEWARE. */ /*---------------------------------------------------------------------------*/ static int adjust_mute(struct easycap *peasycap, int value) { int i1; if (!peasycap) { SAY("ERROR: peasycap is NULL\n"); return -EFAULT; } if (!peasycap->pusb_device) { SAM("ERROR: peasycap->pusb_device is NULL\n"); return -EFAULT; } i1 = 0; while (0xFFFFFFFF != easycap_control[i1].id) { if (V4L2_CID_AUDIO_MUTE == easycap_control[i1].id) { peasycap->mute = value; switch (peasycap->mute) { case 1: { peasycap->audio_idle = 1; SAM("adjusting mute: %i=peasycap->audio_idle\n", peasycap->audio_idle); return 0; } default: { peasycap->audio_idle = 0; SAM("adjusting mute: %i=peasycap->audio_idle\n", peasycap->audio_idle); return 0; } } break; } i1++; } SAM("WARNING: failed to adjust mute: control not found\n"); return -ENOENT; } /*---------------------------------------------------------------------------*/ long easycap_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct easycap *peasycap; struct usb_device *p; int kd; if (!file) { SAY("ERROR: file is NULL\n"); return -ERESTARTSYS; } peasycap = file->private_data; if (!peasycap) { 
SAY("ERROR: peasycap is NULL\n"); return -1; } p = peasycap->pusb_device; if (!p) { SAM("ERROR: peasycap->pusb_device is NULL\n"); return -EFAULT; } kd = easycap_isdongle(peasycap); if (0 <= kd && DONGLE_MANY > kd) { if (mutex_lock_interruptible(&easycapdc60_dongle[kd].mutex_video)) { SAY("ERROR: cannot lock " "easycapdc60_dongle[%i].mutex_video\n", kd); return -ERESTARTSYS; } JOM(4, "locked easycapdc60_dongle[%i].mutex_video\n", kd); /*---------------------------------------------------------------------------*/ /* * MEANWHILE, easycap_usb_disconnect() MAY HAVE FREED POINTER peasycap, * IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL. * IF NECESSARY, BAIL OUT. */ /*---------------------------------------------------------------------------*/ if (kd != easycap_isdongle(peasycap)) return -ERESTARTSYS; if (!file) { SAY("ERROR: file is NULL\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -ERESTARTSYS; } peasycap = file->private_data; if (!peasycap) { SAY("ERROR: peasycap is NULL\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -ERESTARTSYS; } if (!peasycap->pusb_device) { SAM("ERROR: peasycap->pusb_device is NULL\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -ERESTARTSYS; } } else { /*---------------------------------------------------------------------------*/ /* * IF easycap_usb_disconnect() HAS ALREADY FREED POINTER peasycap BEFORE THE * ATTEMPT TO ACQUIRE THE SEMAPHORE, isdongle() WILL HAVE FAILED. BAIL OUT. 
*/ /*---------------------------------------------------------------------------*/ return -ERESTARTSYS; } /*---------------------------------------------------------------------------*/ switch (cmd) { case VIDIOC_QUERYCAP: { struct v4l2_capability v4l2_capability; char version[16], *p1, *p2; int i, rc, k[3]; long lng; JOM(8, "VIDIOC_QUERYCAP\n"); if (16 <= strlen(EASYCAP_DRIVER_VERSION)) { SAM("ERROR: bad driver version string\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } strcpy(&version[0], EASYCAP_DRIVER_VERSION); for (i = 0; i < 3; i++) k[i] = 0; p2 = &version[0]; i = 0; while (*p2) { p1 = p2; while (*p2 && ('.' != *p2)) p2++; if (*p2) *p2++ = 0; if (3 > i) { rc = (int) strict_strtol(p1, 10, &lng); if (rc) { SAM("ERROR: %i=strict_strtol(%s,.,,)\n", rc, p1); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } k[i] = (int)lng; } i++; } memset(&v4l2_capability, 0, sizeof(struct v4l2_capability)); strlcpy(&v4l2_capability.driver[0], "easycap", sizeof(v4l2_capability.driver)); v4l2_capability.capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE; v4l2_capability.version = KERNEL_VERSION(k[0], k[1], k[2]); JOM(8, "v4l2_capability.version=(%i,%i,%i)\n", k[0], k[1], k[2]); strlcpy(&v4l2_capability.card[0], "EasyCAP DC60", sizeof(v4l2_capability.card)); if (usb_make_path(peasycap->pusb_device, &v4l2_capability.bus_info[0], sizeof(v4l2_capability.bus_info)) < 0) { strlcpy(&v4l2_capability.bus_info[0], "EasyCAP bus_info", sizeof(v4l2_capability.bus_info)); JOM(8, "%s=v4l2_capability.bus_info\n", &v4l2_capability.bus_info[0]); } if (copy_to_user((void __user *)arg, &v4l2_capability, sizeof(struct v4l2_capability))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_ENUMINPUT: { struct v4l2_input v4l2_input; u32 index; JOM(8, "VIDIOC_ENUMINPUT\n"); if 
(copy_from_user(&v4l2_input, (void __user *)arg, sizeof(struct v4l2_input))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } index = v4l2_input.index; memset(&v4l2_input, 0, sizeof(struct v4l2_input)); switch (index) { case 0: { v4l2_input.index = index; strcpy(&v4l2_input.name[0], "CVBS0"); v4l2_input.type = V4L2_INPUT_TYPE_CAMERA; v4l2_input.audioset = 0x01; v4l2_input.tuner = 0; v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | V4L2_STD_NTSC ; v4l2_input.status = 0; JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]); break; } case 1: { v4l2_input.index = index; strcpy(&v4l2_input.name[0], "CVBS1"); v4l2_input.type = V4L2_INPUT_TYPE_CAMERA; v4l2_input.audioset = 0x01; v4l2_input.tuner = 0; v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | V4L2_STD_NTSC; v4l2_input.status = 0; JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]); break; } case 2: { v4l2_input.index = index; strcpy(&v4l2_input.name[0], "CVBS2"); v4l2_input.type = V4L2_INPUT_TYPE_CAMERA; v4l2_input.audioset = 0x01; v4l2_input.tuner = 0; v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | V4L2_STD_NTSC ; v4l2_input.status = 0; JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]); break; } case 3: { v4l2_input.index = index; strcpy(&v4l2_input.name[0], "CVBS3"); v4l2_input.type = V4L2_INPUT_TYPE_CAMERA; v4l2_input.audioset = 0x01; v4l2_input.tuner = 0; v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | V4L2_STD_NTSC ; v4l2_input.status = 0; JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]); break; } case 4: { v4l2_input.index = index; strcpy(&v4l2_input.name[0], "CVBS4"); v4l2_input.type = V4L2_INPUT_TYPE_CAMERA; v4l2_input.audioset = 0x01; v4l2_input.tuner = 0; v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | V4L2_STD_NTSC ; v4l2_input.status = 0; JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]); break; } case 5: { v4l2_input.index = index; strcpy(&v4l2_input.name[0], "S-VIDEO"); v4l2_input.type = V4L2_INPUT_TYPE_CAMERA; v4l2_input.audioset = 0x01; v4l2_input.tuner = 0; 
v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | V4L2_STD_NTSC ; v4l2_input.status = 0; JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]); break; } default: { JOM(8, "%i=index: exhausts inputs\n", index); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } } if (copy_to_user((void __user *)arg, &v4l2_input, sizeof(struct v4l2_input))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_G_INPUT: { u32 index; JOM(8, "VIDIOC_G_INPUT\n"); index = (u32)peasycap->input; JOM(8, "user is told: %i\n", index); if (copy_to_user((void __user *)arg, &index, sizeof(u32))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_S_INPUT: { u32 index; int rc; JOM(8, "VIDIOC_S_INPUT\n"); if (0 != copy_from_user(&index, (void __user *)arg, sizeof(u32))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } JOM(8, "user requests input %i\n", index); if ((int)index == peasycap->input) { SAM("requested input already in effect\n"); break; } if ((0 > index) || (INPUT_MANY <= index)) { JOM(8, "ERROR: bad requested input: %i\n", index); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } rc = easycap_newinput(peasycap, (int)index); if (0 == rc) { JOM(8, "newinput(.,%i) OK\n", (int)index); } else { SAM("ERROR: newinput(.,%i) returned %i\n", (int)index, rc); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_ENUMAUDIO: { JOM(8, "VIDIOC_ENUMAUDIO\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_ENUMAUDOUT: { struct v4l2_audioout v4l2_audioout; JOM(8, "VIDIOC_ENUMAUDOUT\n"); if 
(copy_from_user(&v4l2_audioout, (void __user *)arg, sizeof(struct v4l2_audioout))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } if (0 != v4l2_audioout.index) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } memset(&v4l2_audioout, 0, sizeof(struct v4l2_audioout)); v4l2_audioout.index = 0; strcpy(&v4l2_audioout.name[0], "Soundtrack"); if (copy_to_user((void __user *)arg, &v4l2_audioout, sizeof(struct v4l2_audioout))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_QUERYCTRL: { int i1; struct v4l2_queryctrl v4l2_queryctrl; JOM(8, "VIDIOC_QUERYCTRL\n"); if (0 != copy_from_user(&v4l2_queryctrl, (void __user *)arg, sizeof(struct v4l2_queryctrl))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } i1 = 0; while (0xFFFFFFFF != easycap_control[i1].id) { if (easycap_control[i1].id == v4l2_queryctrl.id) { JOM(8, "VIDIOC_QUERYCTRL %s=easycap_control[%i]" ".name\n", &easycap_control[i1].name[0], i1); memcpy(&v4l2_queryctrl, &easycap_control[i1], sizeof(struct v4l2_queryctrl)); break; } i1++; } if (0xFFFFFFFF == easycap_control[i1].id) { JOM(8, "%i=index: exhausts controls\n", i1); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } if (copy_to_user((void __user *)arg, &v4l2_queryctrl, sizeof(struct v4l2_queryctrl))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_QUERYMENU: { JOM(8, "VIDIOC_QUERYMENU unsupported\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_G_CTRL: { struct v4l2_control *pv4l2_control; JOM(8, "VIDIOC_G_CTRL\n"); pv4l2_control = memdup_user((void __user *)arg, sizeof(struct v4l2_control)); if (IS_ERR(pv4l2_control)) { 
SAM("ERROR: copy from user failed\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return PTR_ERR(pv4l2_control); } switch (pv4l2_control->id) { case V4L2_CID_BRIGHTNESS: { pv4l2_control->value = peasycap->brightness; JOM(8, "user enquires brightness: %i\n", pv4l2_control->value); break; } case V4L2_CID_CONTRAST: { pv4l2_control->value = peasycap->contrast; JOM(8, "user enquires contrast: %i\n", pv4l2_control->value); break; } case V4L2_CID_SATURATION: { pv4l2_control->value = peasycap->saturation; JOM(8, "user enquires saturation: %i\n", pv4l2_control->value); break; } case V4L2_CID_HUE: { pv4l2_control->value = peasycap->hue; JOM(8, "user enquires hue: %i\n", pv4l2_control->value); break; } case V4L2_CID_AUDIO_VOLUME: { pv4l2_control->value = peasycap->volume; JOM(8, "user enquires volume: %i\n", pv4l2_control->value); break; } case V4L2_CID_AUDIO_MUTE: { if (1 == peasycap->mute) pv4l2_control->value = true; else pv4l2_control->value = false; JOM(8, "user enquires mute: %i\n", pv4l2_control->value); break; } default: { SAM("ERROR: unknown V4L2 control: 0x%08X=id\n", pv4l2_control->id); kfree(pv4l2_control); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } } if (copy_to_user((void __user *)arg, pv4l2_control, sizeof(struct v4l2_control))) { kfree(pv4l2_control); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } kfree(pv4l2_control); break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_S_CTRL: { struct v4l2_control v4l2_control; JOM(8, "VIDIOC_S_CTRL\n"); if (0 != copy_from_user(&v4l2_control, (void __user *)arg, sizeof(struct v4l2_control))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } switch (v4l2_control.id) { case V4L2_CID_BRIGHTNESS: { JOM(8, "user requests brightness %i\n", v4l2_control.value); if (0 != adjust_brightness(peasycap, v4l2_control.value)) ; break; } case V4L2_CID_CONTRAST: { JOM(8, "user requests contrast %i\n", 
v4l2_control.value); if (0 != adjust_contrast(peasycap, v4l2_control.value)) ; break; } case V4L2_CID_SATURATION: { JOM(8, "user requests saturation %i\n", v4l2_control.value); if (0 != adjust_saturation(peasycap, v4l2_control.value)) ; break; } case V4L2_CID_HUE: { JOM(8, "user requests hue %i\n", v4l2_control.value); if (0 != adjust_hue(peasycap, v4l2_control.value)) ; break; } case V4L2_CID_AUDIO_VOLUME: { JOM(8, "user requests volume %i\n", v4l2_control.value); if (0 != adjust_volume(peasycap, v4l2_control.value)) ; break; } case V4L2_CID_AUDIO_MUTE: { int mute; JOM(8, "user requests mute %i\n", v4l2_control.value); if (v4l2_control.value) mute = 1; else mute = 0; if (0 != adjust_mute(peasycap, mute)) SAM("WARNING: failed to adjust mute to %i\n", mute); break; } default: { SAM("ERROR: unknown V4L2 control: 0x%08X=id\n", v4l2_control.id); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_S_EXT_CTRLS: { JOM(8, "VIDIOC_S_EXT_CTRLS unsupported\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_ENUM_FMT: { u32 index; struct v4l2_fmtdesc v4l2_fmtdesc; JOM(8, "VIDIOC_ENUM_FMT\n"); if (0 != copy_from_user(&v4l2_fmtdesc, (void __user *)arg, sizeof(struct v4l2_fmtdesc))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } index = v4l2_fmtdesc.index; memset(&v4l2_fmtdesc, 0, sizeof(struct v4l2_fmtdesc)); v4l2_fmtdesc.index = index; v4l2_fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; switch (index) { case 0: { v4l2_fmtdesc.flags = 0; strcpy(&v4l2_fmtdesc.description[0], "uyvy"); v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_UYVY; JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]); break; } case 1: { v4l2_fmtdesc.flags = 0; strcpy(&v4l2_fmtdesc.description[0], "yuy2"); v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_YUYV; JOM(8, 
"%i=index: %s\n", index, &v4l2_fmtdesc.description[0]); break; } case 2: { v4l2_fmtdesc.flags = 0; strcpy(&v4l2_fmtdesc.description[0], "rgb24"); v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_RGB24; JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]); break; } case 3: { v4l2_fmtdesc.flags = 0; strcpy(&v4l2_fmtdesc.description[0], "rgb32"); v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_RGB32; JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]); break; } case 4: { v4l2_fmtdesc.flags = 0; strcpy(&v4l2_fmtdesc.description[0], "bgr24"); v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_BGR24; JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]); break; } case 5: { v4l2_fmtdesc.flags = 0; strcpy(&v4l2_fmtdesc.description[0], "bgr32"); v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_BGR32; JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]); break; } default: { JOM(8, "%i=index: exhausts formats\n", index); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } } if (copy_to_user((void __user *)arg, &v4l2_fmtdesc, sizeof(struct v4l2_fmtdesc))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* * THE RESPONSE TO VIDIOC_ENUM_FRAMESIZES MUST BE CONDITIONED ON THE * THE CURRENT STANDARD, BECAUSE THAT IS WHAT gstreamer EXPECTS. BEWARE. 
*/ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_ENUM_FRAMESIZES: { u32 index; struct v4l2_frmsizeenum v4l2_frmsizeenum; JOM(8, "VIDIOC_ENUM_FRAMESIZES\n"); if (0 != copy_from_user(&v4l2_frmsizeenum, (void __user *)arg, sizeof(struct v4l2_frmsizeenum))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } index = v4l2_frmsizeenum.index; v4l2_frmsizeenum.type = (u32) V4L2_FRMSIZE_TYPE_DISCRETE; if (peasycap->ntsc) { switch (index) { case 0: { v4l2_frmsizeenum.discrete.width = 640; v4l2_frmsizeenum.discrete.height = 480; JOM(8, "%i=index: %ix%i\n", index, (int)(v4l2_frmsizeenum. discrete.width), (int)(v4l2_frmsizeenum. discrete.height)); break; } case 1: { v4l2_frmsizeenum.discrete.width = 320; v4l2_frmsizeenum.discrete.height = 240; JOM(8, "%i=index: %ix%i\n", index, (int)(v4l2_frmsizeenum. discrete.width), (int)(v4l2_frmsizeenum. discrete.height)); break; } case 2: { v4l2_frmsizeenum.discrete.width = 720; v4l2_frmsizeenum.discrete.height = 480; JOM(8, "%i=index: %ix%i\n", index, (int)(v4l2_frmsizeenum. discrete.width), (int)(v4l2_frmsizeenum. discrete.height)); break; } case 3: { v4l2_frmsizeenum.discrete.width = 360; v4l2_frmsizeenum.discrete.height = 240; JOM(8, "%i=index: %ix%i\n", index, (int)(v4l2_frmsizeenum. discrete.width), (int)(v4l2_frmsizeenum. discrete.height)); break; } default: { JOM(8, "%i=index: exhausts framesizes\n", index); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } } } else { switch (index) { case 0: { v4l2_frmsizeenum.discrete.width = 640; v4l2_frmsizeenum.discrete.height = 480; JOM(8, "%i=index: %ix%i\n", index, (int)(v4l2_frmsizeenum. discrete.width), (int)(v4l2_frmsizeenum. discrete.height)); break; } case 1: { v4l2_frmsizeenum.discrete.width = 320; v4l2_frmsizeenum.discrete.height = 240; JOM(8, "%i=index: %ix%i\n", index, (int)(v4l2_frmsizeenum. discrete.width), (int)(v4l2_frmsizeenum. 
discrete.height)); break; } case 2: { v4l2_frmsizeenum.discrete.width = 704; v4l2_frmsizeenum.discrete.height = 576; JOM(8, "%i=index: %ix%i\n", index, (int)(v4l2_frmsizeenum. discrete.width), (int)(v4l2_frmsizeenum. discrete.height)); break; } case 3: { v4l2_frmsizeenum.discrete.width = 720; v4l2_frmsizeenum.discrete.height = 576; JOM(8, "%i=index: %ix%i\n", index, (int)(v4l2_frmsizeenum. discrete.width), (int)(v4l2_frmsizeenum. discrete.height)); break; } case 4: { v4l2_frmsizeenum.discrete.width = 360; v4l2_frmsizeenum.discrete.height = 288; JOM(8, "%i=index: %ix%i\n", index, (int)(v4l2_frmsizeenum. discrete.width), (int)(v4l2_frmsizeenum. discrete.height)); break; } default: { JOM(8, "%i=index: exhausts framesizes\n", index); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } } } if (copy_to_user((void __user *)arg, &v4l2_frmsizeenum, sizeof(struct v4l2_frmsizeenum))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* * THE RESPONSE TO VIDIOC_ENUM_FRAMEINTERVALS MUST BE CONDITIONED ON THE * THE CURRENT STANDARD, BECAUSE THAT IS WHAT gstreamer EXPECTS. BEWARE. 
*/ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_ENUM_FRAMEINTERVALS: { u32 index; int denominator; struct v4l2_frmivalenum v4l2_frmivalenum; JOM(8, "VIDIOC_ENUM_FRAMEINTERVALS\n"); if (peasycap->fps) denominator = peasycap->fps; else { if (peasycap->ntsc) denominator = 30; else denominator = 25; } if (0 != copy_from_user(&v4l2_frmivalenum, (void __user *)arg, sizeof(struct v4l2_frmivalenum))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } index = v4l2_frmivalenum.index; v4l2_frmivalenum.type = (u32) V4L2_FRMIVAL_TYPE_DISCRETE; switch (index) { case 0: { v4l2_frmivalenum.discrete.numerator = 1; v4l2_frmivalenum.discrete.denominator = denominator; JOM(8, "%i=index: %i/%i\n", index, (int)(v4l2_frmivalenum.discrete.numerator), (int)(v4l2_frmivalenum.discrete.denominator)); break; } case 1: { v4l2_frmivalenum.discrete.numerator = 1; v4l2_frmivalenum.discrete.denominator = denominator/5; JOM(8, "%i=index: %i/%i\n", index, (int)(v4l2_frmivalenum.discrete.numerator), (int)(v4l2_frmivalenum.discrete.denominator)); break; } default: { JOM(8, "%i=index: exhausts frameintervals\n", index); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } } if (copy_to_user((void __user *)arg, &v4l2_frmivalenum, sizeof(struct v4l2_frmivalenum))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_G_FMT: { struct v4l2_format *pv4l2_format; struct v4l2_pix_format *pv4l2_pix_format; JOM(8, "VIDIOC_G_FMT\n"); pv4l2_format = kzalloc(sizeof(struct v4l2_format), GFP_KERNEL); if (!pv4l2_format) { SAM("ERROR: out of memory\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -ENOMEM; } pv4l2_pix_format = kzalloc(sizeof(struct v4l2_pix_format), GFP_KERNEL); if (!pv4l2_pix_format) { SAM("ERROR: out of memory\n"); kfree(pv4l2_format); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); 
return -ENOMEM; } if (0 != copy_from_user(pv4l2_format, (void __user *)arg, sizeof(struct v4l2_format))) { kfree(pv4l2_format); kfree(pv4l2_pix_format); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } if (pv4l2_format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { kfree(pv4l2_format); kfree(pv4l2_pix_format); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } memset(pv4l2_pix_format, 0, sizeof(struct v4l2_pix_format)); pv4l2_format->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; memcpy(&pv4l2_format->fmt.pix, &easycap_format[peasycap->format_offset] .v4l2_format.fmt.pix, sizeof(struct v4l2_pix_format)); JOM(8, "user is told: %s\n", &easycap_format[peasycap->format_offset].name[0]); if (copy_to_user((void __user *)arg, pv4l2_format, sizeof(struct v4l2_format))) { kfree(pv4l2_format); kfree(pv4l2_pix_format); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } kfree(pv4l2_format); kfree(pv4l2_pix_format); break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_TRY_FMT: case VIDIOC_S_FMT: { struct v4l2_format v4l2_format; struct v4l2_pix_format v4l2_pix_format; bool try; int best_format; if (VIDIOC_TRY_FMT == cmd) { JOM(8, "VIDIOC_TRY_FMT\n"); try = true; } else { JOM(8, "VIDIOC_S_FMT\n"); try = false; } if (0 != copy_from_user(&v4l2_format, (void __user *)arg, sizeof(struct v4l2_format))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } best_format = adjust_format(peasycap, v4l2_format.fmt.pix.width, v4l2_format.fmt.pix.height, v4l2_format.fmt.pix.pixelformat, v4l2_format.fmt.pix.field, try); if (0 > best_format) { if (-EBUSY == best_format) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EBUSY; } JOM(8, "WARNING: adjust_format() returned %i\n", best_format); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -ENOENT; } /*...........................................................................*/ memset(&v4l2_pix_format, 0, sizeof(struct 
v4l2_pix_format)); v4l2_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; memcpy(&(v4l2_format.fmt.pix), &(easycap_format[best_format].v4l2_format.fmt.pix), sizeof(v4l2_pix_format)); JOM(8, "user is told: %s\n", &easycap_format[best_format].name[0]); if (copy_to_user((void __user *)arg, &v4l2_format, sizeof(struct v4l2_format))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_CROPCAP: { struct v4l2_cropcap v4l2_cropcap; JOM(8, "VIDIOC_CROPCAP\n"); if (0 != copy_from_user(&v4l2_cropcap, (void __user *)arg, sizeof(struct v4l2_cropcap))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } if (v4l2_cropcap.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) JOM(8, "v4l2_cropcap.type != V4L2_BUF_TYPE_VIDEO_CAPTURE\n"); memset(&v4l2_cropcap, 0, sizeof(struct v4l2_cropcap)); v4l2_cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; v4l2_cropcap.bounds.left = 0; v4l2_cropcap.bounds.top = 0; v4l2_cropcap.bounds.width = peasycap->width; v4l2_cropcap.bounds.height = peasycap->height; v4l2_cropcap.defrect.left = 0; v4l2_cropcap.defrect.top = 0; v4l2_cropcap.defrect.width = peasycap->width; v4l2_cropcap.defrect.height = peasycap->height; v4l2_cropcap.pixelaspect.numerator = 1; v4l2_cropcap.pixelaspect.denominator = 1; JOM(8, "user is told: %ix%i\n", peasycap->width, peasycap->height); if (copy_to_user((void __user *)arg, &v4l2_cropcap, sizeof(struct v4l2_cropcap))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_G_CROP: case VIDIOC_S_CROP: { JOM(8, "VIDIOC_G_CROP|VIDIOC_S_CROP unsupported\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_QUERYSTD: { JOM(8, "VIDIOC_QUERYSTD: " "EasyCAP is incapable of detecting standard\n"); 
mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; break; } /*-------------------------------------------------------------------*/ /* * THE MANIPULATIONS INVOLVING last0,last1,last2,last3 * CONSTITUTE A WORKAROUND * FOR WHAT APPEARS TO BE * A BUG IN 64-BIT mplayer. * NOT NEEDED, BUT HOPEFULLY HARMLESS, FOR 32-BIT mplayer. */ /*------------------------------------------------------------------*/ case VIDIOC_ENUMSTD: { int last0 = -1, last1 = -1, last2 = -1, last3 = -1; struct v4l2_standard v4l2_standard; u32 index; struct easycap_standard const *peasycap_standard; JOM(8, "VIDIOC_ENUMSTD\n"); if (0 != copy_from_user(&v4l2_standard, (void __user *)arg, sizeof(struct v4l2_standard))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } index = v4l2_standard.index; last3 = last2; last2 = last1; last1 = last0; last0 = index; if ((index == last3) && (index == last2) && (index == last1) && (index == last0)) { index++; last3 = last2; last2 = last1; last1 = last0; last0 = index; } memset(&v4l2_standard, 0, sizeof(struct v4l2_standard)); peasycap_standard = &easycap_standard[0]; while (0xFFFF != peasycap_standard->mask) { if ((int)(peasycap_standard - &easycap_standard[0]) == index) break; peasycap_standard++; } if (0xFFFF == peasycap_standard->mask) { JOM(8, "%i=index: exhausts standards\n", index); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } JOM(8, "%i=index: %s\n", index, &(peasycap_standard->v4l2_standard.name[0])); memcpy(&v4l2_standard, &(peasycap_standard->v4l2_standard), sizeof(struct v4l2_standard)); v4l2_standard.index = index; if (copy_to_user((void __user *)arg, &v4l2_standard, sizeof(struct v4l2_standard))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_G_STD: { v4l2_std_id std_id; struct easycap_standard const *peasycap_standard; JOM(8, "VIDIOC_G_STD\n"); if (0 > 
peasycap->standard_offset) { JOM(8, "%i=peasycap->standard_offset\n", peasycap->standard_offset); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EBUSY; } if (0 != copy_from_user(&std_id, (void __user *)arg, sizeof(v4l2_std_id))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } peasycap_standard = &easycap_standard[peasycap->standard_offset]; std_id = peasycap_standard->v4l2_standard.id; JOM(8, "user is told: %s\n", &peasycap_standard->v4l2_standard.name[0]); if (copy_to_user((void __user *)arg, &std_id, sizeof(v4l2_std_id))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_S_STD: { v4l2_std_id std_id; int rc; JOM(8, "VIDIOC_S_STD\n"); if (0 != copy_from_user(&std_id, (void __user *)arg, sizeof(v4l2_std_id))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } JOM(8, "User requests standard: 0x%08X%08X\n", (int)((std_id & (((v4l2_std_id)0xFFFFFFFF) << 32)) >> 32), (int)(std_id & ((v4l2_std_id)0xFFFFFFFF))); rc = adjust_standard(peasycap, std_id); if (0 > rc) { JOM(8, "WARNING: adjust_standard() returned %i\n", rc); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -ENOENT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_REQBUFS: { int nbuffers; struct v4l2_requestbuffers v4l2_requestbuffers; JOM(8, "VIDIOC_REQBUFS\n"); if (0 != copy_from_user(&v4l2_requestbuffers, (void __user *)arg, sizeof(struct v4l2_requestbuffers))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } if (v4l2_requestbuffers.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } if (v4l2_requestbuffers.memory != V4L2_MEMORY_MMAP) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } nbuffers = v4l2_requestbuffers.count; JOM(8, " User requests %i buffers ...\n", nbuffers); if 
(nbuffers < 2) nbuffers = 2; if (nbuffers > FRAME_BUFFER_MANY) nbuffers = FRAME_BUFFER_MANY; if (v4l2_requestbuffers.count == nbuffers) { JOM(8, " ... agree to %i buffers\n", nbuffers); } else { JOM(8, " ... insist on %i buffers\n", nbuffers); v4l2_requestbuffers.count = nbuffers; } peasycap->frame_buffer_many = nbuffers; if (copy_to_user((void __user *)arg, &v4l2_requestbuffers, sizeof(struct v4l2_requestbuffers))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_QUERYBUF: { u32 index; struct v4l2_buffer v4l2_buffer; JOM(8, "VIDIOC_QUERYBUF\n"); if (peasycap->video_eof) { JOM(8, "returning -EIO because %i=video_eof\n", peasycap->video_eof); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EIO; } if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, sizeof(struct v4l2_buffer))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } index = v4l2_buffer.index; if (index < 0 || index >= peasycap->frame_buffer_many) return -EINVAL; memset(&v4l2_buffer, 0, sizeof(struct v4l2_buffer)); v4l2_buffer.index = index; v4l2_buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; v4l2_buffer.bytesused = peasycap->frame_buffer_used; v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | peasycap->done[index] | peasycap->queued[index]; v4l2_buffer.field = V4L2_FIELD_NONE; v4l2_buffer.memory = V4L2_MEMORY_MMAP; v4l2_buffer.m.offset = index * FRAME_BUFFER_SIZE; v4l2_buffer.length = FRAME_BUFFER_SIZE; JOM(16, " %10i=index\n", v4l2_buffer.index); JOM(16, " 0x%08X=type\n", v4l2_buffer.type); JOM(16, " %10i=bytesused\n", v4l2_buffer.bytesused); JOM(16, " 0x%08X=flags\n", v4l2_buffer.flags); JOM(16, " %10i=field\n", v4l2_buffer.field); JOM(16, " %10li=timestamp.tv_usec\n", (long)v4l2_buffer.timestamp.tv_usec); JOM(16, " %10i=sequence\n", 
v4l2_buffer.sequence); JOM(16, " 0x%08X=memory\n", v4l2_buffer.memory); JOM(16, " %10i=m.offset\n", v4l2_buffer.m.offset); JOM(16, " %10i=length\n", v4l2_buffer.length); if (copy_to_user((void __user *)arg, &v4l2_buffer, sizeof(struct v4l2_buffer))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_QBUF: { struct v4l2_buffer v4l2_buffer; JOM(8, "VIDIOC_QBUF\n"); if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, sizeof(struct v4l2_buffer))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } if (v4l2_buffer.memory != V4L2_MEMORY_MMAP) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } if (v4l2_buffer.index < 0 || v4l2_buffer.index >= peasycap->frame_buffer_many) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED; peasycap->done[v4l2_buffer.index] = 0; peasycap->queued[v4l2_buffer.index] = V4L2_BUF_FLAG_QUEUED; if (copy_to_user((void __user *)arg, &v4l2_buffer, sizeof(struct v4l2_buffer))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } JOM(8, "..... 
user queueing frame buffer %i\n", (int)v4l2_buffer.index); peasycap->frame_lock = 0; break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_DQBUF: { struct timeval timeval, timeval2; int i, j; struct v4l2_buffer v4l2_buffer; int rcdq; u16 input; JOM(8, "VIDIOC_DQBUF\n"); if ((peasycap->video_idle) || (peasycap->video_eof)) { JOM(8, "returning -EIO because " "%i=video_idle %i=video_eof\n", peasycap->video_idle, peasycap->video_eof); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EIO; } if (copy_from_user(&v4l2_buffer, (void __user *)arg, sizeof(struct v4l2_buffer))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } if (peasycap->offerfields) { /*---------------------------------------------------*/ /* * IN ITS 50 "fps" MODE tvtime SEEMS ALWAYS TO REQUEST * V4L2_FIELD_BOTTOM */ /*---------------------------------------------------*/ if (V4L2_FIELD_TOP == v4l2_buffer.field) JOM(8, "user wants V4L2_FIELD_TOP\n"); else if (V4L2_FIELD_BOTTOM == v4l2_buffer.field) JOM(8, "user wants V4L2_FIELD_BOTTOM\n"); else if (V4L2_FIELD_ANY == v4l2_buffer.field) JOM(8, "user wants V4L2_FIELD_ANY\n"); else JOM(8, "user wants V4L2_FIELD_...UNKNOWN: %i\n", v4l2_buffer.field); } if (!peasycap->video_isoc_streaming) { JOM(16, "returning -EIO because video urbs not streaming\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EIO; } /*-------------------------------------------------------------------*/ /* * IF THE USER HAS PREVIOUSLY CALLED easycap_poll(), * AS DETERMINED BY FINDING * THE FLAG peasycap->polled SET, THERE MUST BE * NO FURTHER WAIT HERE. 
IN THIS * CASE, JUST CHOOSE THE FRAME INDICATED BY peasycap->frame_read */ /*-------------------------------------------------------------------*/ if (!peasycap->polled) { do { rcdq = easycap_video_dqbuf(peasycap, 0); if (-EIO == rcdq) { JOM(8, "returning -EIO because " "dqbuf() returned -EIO\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EIO; } } while (0 != rcdq); } else { if (peasycap->video_eof) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EIO; } } if (V4L2_BUF_FLAG_DONE != peasycap->done[peasycap->frame_read]) { JOM(8, "V4L2_BUF_FLAG_DONE != 0x%08X\n", peasycap->done[peasycap->frame_read]); } peasycap->polled = 0; if (!(peasycap->isequence % 10)) { for (i = 0; i < 179; i++) peasycap->merit[i] = peasycap->merit[i+1]; peasycap->merit[179] = merit_saa(peasycap->pusb_device); j = 0; for (i = 0; i < 180; i++) j += peasycap->merit[i]; if (90 < j) { SAM("easycap driver shutting down " "on condition blue\n"); peasycap->video_eof = 1; peasycap->audio_eof = 1; } } v4l2_buffer.index = peasycap->frame_read; v4l2_buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; v4l2_buffer.bytesused = peasycap->frame_buffer_used; v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE; if (peasycap->offerfields) v4l2_buffer.field = V4L2_FIELD_BOTTOM; else v4l2_buffer.field = V4L2_FIELD_NONE; do_gettimeofday(&timeval); timeval2 = timeval; v4l2_buffer.timestamp = timeval2; v4l2_buffer.sequence = peasycap->isequence++; v4l2_buffer.memory = V4L2_MEMORY_MMAP; v4l2_buffer.m.offset = v4l2_buffer.index * FRAME_BUFFER_SIZE; v4l2_buffer.length = FRAME_BUFFER_SIZE; JOM(16, " %10i=index\n", v4l2_buffer.index); JOM(16, " 0x%08X=type\n", v4l2_buffer.type); JOM(16, " %10i=bytesused\n", v4l2_buffer.bytesused); JOM(16, " 0x%08X=flags\n", v4l2_buffer.flags); JOM(16, " %10i=field\n", v4l2_buffer.field); JOM(16, " %10li=timestamp.tv_sec\n", (long)v4l2_buffer.timestamp.tv_sec); JOM(16, " %10li=timestamp.tv_usec\n", (long)v4l2_buffer.timestamp.tv_usec); JOM(16, " 
%10i=sequence\n", v4l2_buffer.sequence); JOM(16, " 0x%08X=memory\n", v4l2_buffer.memory); JOM(16, " %10i=m.offset\n", v4l2_buffer.m.offset); JOM(16, " %10i=length\n", v4l2_buffer.length); if (copy_to_user((void __user *)arg, &v4l2_buffer, sizeof(struct v4l2_buffer))) { mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } input = peasycap->frame_buffer[peasycap->frame_read][0].input; if (0x08 & input) { JOM(8, "user is offered frame buffer %i, input %i\n", peasycap->frame_read, (0x07 & input)); } else { JOM(8, "user is offered frame buffer %i\n", peasycap->frame_read); } peasycap->frame_lock = 1; JOM(8, "%i=peasycap->frame_fill\n", peasycap->frame_fill); if (peasycap->frame_read == peasycap->frame_fill) { if (peasycap->frame_lock) { JOM(8, "WORRY: filling frame buffer " "while offered to user\n"); } } break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_STREAMON: { int i; JOM(8, "VIDIOC_STREAMON\n"); peasycap->isequence = 0; for (i = 0; i < 180; i++) peasycap->merit[i] = 0; if (!peasycap->pusb_device) { SAM("ERROR: peasycap->pusb_device is NULL\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } easycap_video_submit_urbs(peasycap); peasycap->video_idle = 0; peasycap->audio_idle = 0; peasycap->video_eof = 0; peasycap->audio_eof = 0; break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_STREAMOFF: { JOM(8, "VIDIOC_STREAMOFF\n"); if (!peasycap->pusb_device) { SAM("ERROR: peasycap->pusb_device is NULL\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } peasycap->video_idle = 1; peasycap->audio_idle = 1; /*---------------------------------------------------------------------------*/ /* * IF THE WAIT QUEUES ARE NOT CLEARED IN RESPONSE TO THE STREAMOFF COMMAND * THE USERSPACE PROGRAM, E.G. mplayer, MAY HANG ON EXIT. BEWARE. 
*/ /*---------------------------------------------------------------------------*/ JOM(8, "calling wake_up on wq_video and wq_audio\n"); wake_up_interruptible(&(peasycap->wq_video)); if (peasycap->psubstream) snd_pcm_period_elapsed(peasycap->psubstream); break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_G_PARM: { struct v4l2_streamparm *pv4l2_streamparm; JOM(8, "VIDIOC_G_PARM\n"); pv4l2_streamparm = memdup_user((void __user *)arg, sizeof(struct v4l2_streamparm)); if (IS_ERR(pv4l2_streamparm)) { SAM("ERROR: copy from user failed\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return PTR_ERR(pv4l2_streamparm); } if (pv4l2_streamparm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { kfree(pv4l2_streamparm); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } pv4l2_streamparm->parm.capture.capability = 0; pv4l2_streamparm->parm.capture.capturemode = 0; pv4l2_streamparm->parm.capture.timeperframe.numerator = 1; if (peasycap->fps) { pv4l2_streamparm->parm.capture.timeperframe. denominator = peasycap->fps; } else { if (peasycap->ntsc) { pv4l2_streamparm->parm.capture.timeperframe. denominator = 30; } else { pv4l2_streamparm->parm.capture.timeperframe. 
denominator = 25; } } pv4l2_streamparm->parm.capture.readbuffers = peasycap->frame_buffer_many; pv4l2_streamparm->parm.capture.extendedmode = 0; if (copy_to_user((void __user *)arg, pv4l2_streamparm, sizeof(struct v4l2_streamparm))) { kfree(pv4l2_streamparm); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EFAULT; } kfree(pv4l2_streamparm); break; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_S_PARM: { JOM(8, "VIDIOC_S_PARM unsupported\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_G_AUDIO: { JOM(8, "VIDIOC_G_AUDIO unsupported\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_S_AUDIO: { JOM(8, "VIDIOC_S_AUDIO unsupported\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_S_TUNER: { JOM(8, "VIDIOC_S_TUNER unsupported\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_G_FBUF: case VIDIOC_S_FBUF: case VIDIOC_OVERLAY: { JOM(8, "VIDIOC_G_FBUF|VIDIOC_S_FBUF|VIDIOC_OVERLAY unsupported\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ case VIDIOC_G_TUNER: { JOM(8, "VIDIOC_G_TUNER unsupported\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } case VIDIOC_G_FREQUENCY: case VIDIOC_S_FREQUENCY: { JOM(8, "VIDIOC_G_FREQUENCY|VIDIOC_S_FREQUENCY unsupported\n"); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -EINVAL; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ default: { JOM(8, "ERROR: 
unrecognized V4L2 IOCTL command: 0x%08X\n", cmd); mutex_unlock(&easycapdc60_dongle[kd].mutex_video); return -ENOIOCTLCMD; } } mutex_unlock(&easycapdc60_dongle[kd].mutex_video); JOM(4, "unlocked easycapdc60_dongle[%i].mutex_video\n", kd); return 0; } /*****************************************************************************/
gpl-2.0
flar2/evita-ElementalX
drivers/staging/comedi/drivers/cb_pcidas.c
4896
57863
/* comedi/drivers/cb_pcidas.c Developed by Ivan Martinez and Frank Mori Hess, with valuable help from David Schleef and the rest of the Comedi developers comunity. Copyright (C) 2001-2003 Ivan Martinez <imr@oersted.dtu.dk> Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1997-8 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: cb_pcidas Description: MeasurementComputing PCI-DAS series with the AMCC S5933 PCI controller Author: Ivan Martinez <imr@oersted.dtu.dk>, Frank Mori Hess <fmhess@users.sourceforge.net> Updated: 2003-3-11 Devices: [Measurement Computing] PCI-DAS1602/16 (cb_pcidas), PCI-DAS1602/16jr, PCI-DAS1602/12, PCI-DAS1200, PCI-DAS1200jr, PCI-DAS1000, PCI-DAS1001, PCI_DAS1002 Status: There are many reports of the driver being used with most of the supported cards. Despite no detailed log is maintained, it can be said that the driver is quite tested and stable. The boards may be autocalibrated using the comedi_calibrate utility. Configuration options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first supported PCI device found will be used. 
For commands, the scanned channels must be consecutive (i.e. 4-5-6-7, 2-3-4,...), and must all have the same range and aref. AI Triggering: For start_src == TRIG_EXT, the A/D EXTERNAL TRIGGER IN (pin 45) is used. For 1602 series, the start_arg is interpreted as follows: start_arg == 0 => gated triger (level high) start_arg == CR_INVERT => gated triger (level low) start_arg == CR_EDGE => Rising edge start_arg == CR_EDGE | CR_INVERT => Falling edge For the other boards the trigger will be done on rising edge */ /* TODO: analog triggering on 1602 series */ #include "../comedidev.h" #include <linux/delay.h> #include <linux/interrupt.h> #include "8253.h" #include "8255.h" #include "amcc_s5933.h" #include "comedi_pci.h" #include "comedi_fc.h" #undef CB_PCIDAS_DEBUG /* disable debugging code */ /* #define CB_PCIDAS_DEBUG enable debugging code */ /* PCI vendor number of ComputerBoards/MeasurementComputing */ #define PCI_VENDOR_ID_CB 0x1307 #define TIMER_BASE 100 /* 10MHz master clock */ #define AI_BUFFER_SIZE 1024 /* maximum fifo size of any supported board */ #define AO_BUFFER_SIZE 1024 /* maximum fifo size of any supported board */ #define NUM_CHANNELS_8800 8 #define NUM_CHANNELS_7376 1 #define NUM_CHANNELS_8402 2 #define NUM_CHANNELS_DAC08 1 /* PCI-DAS base addresses */ /* indices of base address regions */ #define S5933_BADRINDEX 0 #define CONT_STAT_BADRINDEX 1 #define ADC_FIFO_BADRINDEX 2 #define PACER_BADRINDEX 3 #define AO_BADRINDEX 4 /* sizes of io regions */ #define CONT_STAT_SIZE 10 #define ADC_FIFO_SIZE 4 #define PACER_SIZE 12 #define AO_SIZE 4 /* Control/Status registers */ #define INT_ADCFIFO 0 /* INTERRUPT / ADC FIFO register */ #define INT_EOS 0x1 /* interrupt end of scan */ #define INT_FHF 0x2 /* interrupt fifo half full */ #define INT_FNE 0x3 /* interrupt fifo not empty */ #define INT_MASK 0x3 /* mask of interrupt select bits */ #define INTE 0x4 /* interrupt enable */ #define DAHFIE 0x8 /* dac half full interrupt enable */ #define EOAIE 0x10 /* end of 
acquisition interrupt enable */ #define DAHFI 0x20 /* dac half full read status / write interrupt clear */ #define EOAI 0x40 /* read end of acq. interrupt status / write clear */ #define INT 0x80 /* read interrupt status / write clear */ #define EOBI 0x200 /* read end of burst interrupt status */ #define ADHFI 0x400 /* read half-full interrupt status */ #define ADNEI 0x800 /* read fifo not empty interrupt latch status */ #define ADNE 0x1000 /* read, fifo not empty (realtime, not latched) status */ #define DAEMIE 0x1000 /* write, dac empty interrupt enable */ #define LADFUL 0x2000 /* read fifo overflow / write clear */ #define DAEMI 0x4000 /* dac fifo empty interrupt status / write clear */ #define ADCMUX_CONT 2 /* ADC CHANNEL MUX AND CONTROL register */ #define BEGIN_SCAN(x) ((x) & 0xf) #define END_SCAN(x) (((x) & 0xf) << 4) #define GAIN_BITS(x) (((x) & 0x3) << 8) #define UNIP 0x800 /* Analog front-end unipolar for range */ #define SE 0x400 /* Inputs in single-ended mode */ #define PACER_MASK 0x3000 /* pacer source bits */ #define PACER_INT 0x1000 /* internal pacer */ #define PACER_EXT_FALL 0x2000 /* external falling edge */ #define PACER_EXT_RISE 0x3000 /* external rising edge */ #define EOC 0x4000 /* adc not busy */ #define TRIG_CONTSTAT 4 /* TRIGGER CONTROL/STATUS register */ #define SW_TRIGGER 0x1 /* software start trigger */ #define EXT_TRIGGER 0x2 /* external start trigger */ #define ANALOG_TRIGGER 0x3 /* external analog trigger */ #define TRIGGER_MASK 0x3 /* mask of bits that determine start trigger */ #define TGPOL 0x04 /* invert the edge/level of the external trigger (1602 only) */ #define TGSEL 0x08 /* if set edge triggered, otherwise level trigerred (1602 only) */ #define TGEN 0x10 /* enable external start trigger */ #define BURSTE 0x20 /* burst mode enable */ #define XTRCL 0x80 /* clear external trigger */ #define CALIBRATION_REG 6 /* CALIBRATION register */ #define SELECT_8800_BIT 0x100 /* select 8800 caldac */ #define SELECT_TRIMPOT_BIT 0x200 /* 
select ad7376 trim pot */ #define SELECT_DAC08_BIT 0x400 /* select dac08 caldac */ #define CAL_SRC_BITS(x) (((x) & 0x7) << 11) #define CAL_EN_BIT 0x4000 /* read calibration source instead of analog input channel 0 */ #define SERIAL_DATA_IN_BIT 0x8000 /* serial data stream going to 8800 and 7376 */ #define DAC_CSR 0x8 /* dac control and status register */ enum dac_csr_bits { DACEN = 0x2, /* dac enable */ DAC_MODE_UPDATE_BOTH = 0x80, /* update both dacs when dac0 is written */ }; static inline unsigned int DAC_RANGE(unsigned int channel, unsigned int range) { return (range & 0x3) << (8 + 2 * (channel & 0x1)); } static inline unsigned int DAC_RANGE_MASK(unsigned int channel) { return 0x3 << (8 + 2 * (channel & 0x1)); }; /* bits for 1602 series only */ enum dac_csr_bits_1602 { DAC_EMPTY = 0x1, /* dac fifo empty, read, write clear */ DAC_START = 0x4, /* start/arm dac fifo operations */ DAC_PACER_MASK = 0x18, /* bits that set dac pacer source */ DAC_PACER_INT = 0x8, /* dac internal pacing */ DAC_PACER_EXT_FALL = 0x10, /* dac external pacing, falling edge */ DAC_PACER_EXT_RISE = 0x18, /* dac external pacing, rising edge */ }; static inline unsigned int DAC_CHAN_EN(unsigned int channel) { return 1 << (5 + (channel & 0x1)); /* enable channel 0 or 1 */ }; /* analog input fifo */ #define ADCDATA 0 /* ADC DATA register */ #define ADCFIFOCLR 2 /* ADC FIFO CLEAR */ /* pacer, counter, dio registers */ #define ADC8254 0 #define DIO_8255 4 #define DAC8254 8 /* analog output registers for 100x, 1200 series */ static inline unsigned int DAC_DATA_REG(unsigned int channel) { return 2 * (channel & 0x1); } /* analog output registers for 1602 series*/ #define DACDATA 0 /* DAC DATA register */ #define DACFIFOCLR 2 /* DAC FIFO CLEAR */ /* bit in hexadecimal representation of range index that indicates unipolar input range */ #define IS_UNIPOLAR 0x4 /* analog input ranges for most boards */ static const struct comedi_lrange cb_pcidas_ranges = { 8, { BIP_RANGE(10), BIP_RANGE(5), 
BIP_RANGE(2.5), BIP_RANGE(1.25), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25) } }; /* pci-das1001 input ranges */ static const struct comedi_lrange cb_pcidas_alt_ranges = { 8, { BIP_RANGE(10), BIP_RANGE(1), BIP_RANGE(0.1), BIP_RANGE(0.01), UNI_RANGE(10), UNI_RANGE(1), UNI_RANGE(0.1), UNI_RANGE(0.01) } }; /* analog output ranges */ static const struct comedi_lrange cb_pcidas_ao_ranges = { 4, { BIP_RANGE(5), BIP_RANGE(10), UNI_RANGE(5), UNI_RANGE(10), } }; enum trimpot_model { AD7376, AD8402, }; struct cb_pcidas_board { const char *name; unsigned short device_id; int ai_se_chans; /* Inputs in single-ended mode */ int ai_diff_chans; /* Inputs in differential mode */ int ai_bits; /* analog input resolution */ int ai_speed; /* fastest conversion period in ns */ int ao_nchan; /* number of analog out channels */ int has_ao_fifo; /* analog output has fifo */ int ao_scan_speed; /* analog output speed for 1602 series (for a scan, not conversion) */ int fifo_size; /* number of samples fifo can hold */ const struct comedi_lrange *ranges; enum trimpot_model trimpot; unsigned has_dac08:1; unsigned has_ai_trig_gated:1; /* Tells if the AI trigger can be gated */ unsigned has_ai_trig_invert:1; /* Tells if the AI trigger can be inverted */ }; static const struct cb_pcidas_board cb_pcidas_boards[] = { { .name = "pci-das1602/16", .device_id = 0x1, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 2, .has_ao_fifo = 1, .ao_scan_speed = 10000, .fifo_size = 512, .ranges = &cb_pcidas_ranges, .trimpot = AD8402, .has_dac08 = 1, .has_ai_trig_gated = 1, .has_ai_trig_invert = 1, }, { .name = "pci-das1200", .device_id = 0xF, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 3200, .ao_nchan = 2, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1602/12", .device_id = 0x10, .ai_se_chans = 16, .ai_diff_chans = 8, 
.ai_bits = 12, .ai_speed = 3200, .ao_nchan = 2, .has_ao_fifo = 1, .ao_scan_speed = 4000, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 1, .has_ai_trig_invert = 1, }, { .name = "pci-das1200/jr", .device_id = 0x19, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 3200, .ao_nchan = 0, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1602/16/jr", .device_id = 0x1C, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 16, .ai_speed = 5000, .ao_nchan = 0, .has_ao_fifo = 0, .fifo_size = 512, .ranges = &cb_pcidas_ranges, .trimpot = AD8402, .has_dac08 = 1, .has_ai_trig_gated = 1, .has_ai_trig_invert = 1, }, { .name = "pci-das1000", .device_id = 0x4C, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 4000, .ao_nchan = 0, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1001", .device_id = 0x1a, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 6800, .ao_nchan = 2, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_alt_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, { .name = "pci-das1002", .device_id = 0x1b, .ai_se_chans = 16, .ai_diff_chans = 8, .ai_bits = 12, .ai_speed = 6800, .ao_nchan = 2, .has_ao_fifo = 0, .fifo_size = 1024, .ranges = &cb_pcidas_ranges, .trimpot = AD7376, .has_dac08 = 0, .has_ai_trig_gated = 0, .has_ai_trig_invert = 0, }, }; static DEFINE_PCI_DEVICE_TABLE(cb_pcidas_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0001) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x000f) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0010) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x0019) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x001c) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 0x004c) }, { PCI_DEVICE(PCI_VENDOR_ID_CB, 
0x001a) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CB, 0x001b) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, cb_pcidas_pci_table);

/*
 * Useful for shorthand access to the particular board structure.
 * dev->board_ptr is set during attach to the matching entry of
 * cb_pcidas_boards[].
 */
#define thisboard ((const struct cb_pcidas_board *)dev->board_ptr)

/* this structure is for data unique to this hardware driver. If
   several hardware drivers keep similar information in this structure,
   feel free to suggest moving the variable to the struct comedi_device
   struct. */
struct cb_pcidas_private {
	/* would be useful for a PCI device */
	struct pci_dev *pci_dev;
	/* base addresses of the card's I/O regions (BARs 0-4) */
	unsigned long s5933_config;
	unsigned long control_status;
	unsigned long adc_fifo;
	unsigned long pacer_counter_dio;
	unsigned long ao_registers;
	/* divisors of master clock for analog input pacing */
	unsigned int divisor1;
	unsigned int divisor2;
	/* number of analog input samples remaining */
	volatile unsigned int count;
	/* bits to write to interrupt/adcfifo register */
	volatile unsigned int adc_fifo_bits;
	/* bits to write to amcc s5933 interrupt control/status register */
	volatile unsigned int s5933_intcsr_bits;
	/* bits to write to ao control and status register */
	volatile unsigned int ao_control_bits;
	/* bounce buffers sized for the largest fifo of any supported board */
	short ai_buffer[AI_BUFFER_SIZE];
	short ao_buffer[AO_BUFFER_SIZE];
	/* divisors of master clock for analog output pacing */
	unsigned int ao_divisor1;
	unsigned int ao_divisor2;
	/* number of analog output samples remaining */
	volatile unsigned int ao_count;
	/* remember what the analog outputs are set to, to allow readback */
	int ao_value[2];
	/* for readback of caldac */
	unsigned int caldac_value[NUM_CHANNELS_8800];
	/* for readback of trimpot */
	unsigned int trimpot_value[NUM_CHANNELS_8402];
	/* last value written to the dac08 caldac */
	unsigned int dac08_value;
	/* selected calibration source (CAL_SRC_BITS argument) */
	unsigned int calibration_source;
};

/*
 * most drivers define the following macro to make it easy to
 * access the private structure.
*/
#define devpriv ((struct cb_pcidas_private *)dev->private)

/*
 * The struct comedi_driver structure tells the Comedi core module
 * which functions to call to configure/deconfigure (attach/detach)
 * the board, and also about the kernel module that contains
 * the device code.
 */
static int cb_pcidas_attach(struct comedi_device *dev,
			    struct comedi_devconfig *it);
static int cb_pcidas_detach(struct comedi_device *dev);
static struct comedi_driver driver_cb_pcidas = {
	.driver_name = "cb_pcidas",
	.module = THIS_MODULE,
	.attach = cb_pcidas_attach,
	.detach = cb_pcidas_detach,
};

/* analog input: single-sample read and channel configuration */
static int cb_pcidas_ai_rinsn(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data);
static int ai_config_insn(struct comedi_device *dev,
			  struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data);
/* analog output: single-sample writes (with and without fifo) + readback */
static int cb_pcidas_ao_nofifo_winsn(struct comedi_device *dev,
				     struct comedi_subdevice *s,
				     struct comedi_insn *insn,
				     unsigned int *data);
static int cb_pcidas_ao_fifo_winsn(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned int *data);
static int cb_pcidas_ao_readback_insn(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data);
/* streaming (asynchronous command) support and interrupt handling */
static int cb_pcidas_ai_cmd(struct comedi_device *dev,
			    struct comedi_subdevice *s);
static int cb_pcidas_ai_cmdtest(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_cmd *cmd);
static int cb_pcidas_ao_cmd(struct comedi_device *dev,
			    struct comedi_subdevice *s);
static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
				struct comedi_subdevice *subdev,
				unsigned int trig_num);
static int cb_pcidas_ao_cmdtest(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_cmd *cmd);
static irqreturn_t cb_pcidas_interrupt(int irq, void *d);
static void handle_ao_interrupt(struct comedi_device *dev,
				unsigned int status);
static int cb_pcidas_cancel(struct comedi_device *dev,
			    struct comedi_subdevice *s);
static int cb_pcidas_ao_cancel(struct comedi_device *dev,
			       struct comedi_subdevice *s);
static void cb_pcidas_load_counters(struct comedi_device *dev,
				    unsigned int *ns, int round_flags);
/* calibration support: eeprom, caldacs and trim pots */
static int eeprom_read_insn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data);
static int caldac_read_insn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data);
static int caldac_write_insn(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn, unsigned int *data);
static int trimpot_read_insn(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn, unsigned int *data);
static int cb_pcidas_trimpot_write(struct comedi_device *dev,
				   unsigned int channel, unsigned int value);
static int trimpot_write_insn(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data);
static int dac08_read_insn(struct comedi_device *dev,
			   struct comedi_subdevice *s,
			   struct comedi_insn *insn, unsigned int *data);
static int dac08_write(struct comedi_device *dev, unsigned int value);
static int dac08_write_insn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data);
static int caldac_8800_write(struct comedi_device *dev, unsigned int address,
			     uint8_t value);
static int trimpot_7376_write(struct comedi_device *dev, uint8_t value);
static int trimpot_8402_write(struct comedi_device *dev, unsigned int channel,
			      uint8_t value);
static int nvram_read(struct comedi_device *dev, unsigned int address,
		      uint8_t *data);

/*
 * CALIBRATION register bits that route the currently selected
 * calibration source to analog input channel 0.
 */
static inline unsigned int cal_enable_bits(struct comedi_device *dev)
{
	return CAL_EN_BIT | CAL_SRC_BITS(devpriv->calibration_source);
}

/*
 * Attach is called by the Comedi core to configure the driver
 * for a particular board.
 */
static int cb_pcidas_attach(struct comedi_device *dev,
			    struct comedi_devconfig *it)
{
	struct comedi_subdevice *s;
	struct pci_dev *pcidev = NULL;
	int index;
	int i;

	/*
	 * Allocate the private structure area.
	 */
	if (alloc_private(dev, sizeof(struct cb_pcidas_private)) < 0)
		return -ENOMEM;

	/*
	 * Probe the device to determine what device in the series it is.
	 */
	for_each_pci_dev(pcidev) {
		/* is it not a computer boards card? */
		if (pcidev->vendor != PCI_VENDOR_ID_CB)
			continue;
		/* loop through cards supported by this driver */
		for (index = 0; index < ARRAY_SIZE(cb_pcidas_boards); index++) {
			if (cb_pcidas_boards[index].device_id != pcidev->device)
				continue;
			/* was a particular bus/slot requested? */
			if (it->options[0] || it->options[1]) {
				/* are we on the wrong bus/slot? */
				if (pcidev->bus->number != it->options[0] ||
				    PCI_SLOT(pcidev->devfn) != it->options[1]) {
					continue;
				}
			}
			devpriv->pci_dev = pcidev;
			dev->board_ptr = cb_pcidas_boards + index;
			goto found;
		}
	}

	dev_err(dev->hw_dev,
		"No supported ComputerBoards/MeasurementComputing card found on requested position\n");
	return -EIO;

found:
	dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n",
		cb_pcidas_boards[index].name, pcidev->bus->number,
		PCI_SLOT(pcidev->devfn));

	/*
	 * Enable PCI device and reserve I/O ports.
	 */
	if (comedi_pci_enable(pcidev, "cb_pcidas")) {
		dev_err(dev->hw_dev,
			"Failed to enable PCI device and request regions\n");
		return -EIO;
	}
	/*
	 * Initialize devpriv->control_status and devpriv->adc_fifo to point to
	 * their base address.
	 */
	devpriv->s5933_config =
	    pci_resource_start(devpriv->pci_dev, S5933_BADRINDEX);
	devpriv->control_status =
	    pci_resource_start(devpriv->pci_dev, CONT_STAT_BADRINDEX);
	devpriv->adc_fifo =
	    pci_resource_start(devpriv->pci_dev, ADC_FIFO_BADRINDEX);
	devpriv->pacer_counter_dio =
	    pci_resource_start(devpriv->pci_dev, PACER_BADRINDEX);
	if (thisboard->ao_nchan) {
		devpriv->ao_registers =
		    pci_resource_start(devpriv->pci_dev, AO_BADRINDEX);
	}
	/* disable and clear interrupts on amcc s5933 before requesting the
	 * (shared) irq, so no stale interrupt fires during setup */
	outl(INTCSR_INBOX_INTR_STATUS,
	     devpriv->s5933_config + AMCC_OP_REG_INTCSR);

	/* get irq */
	if (request_irq(devpriv->pci_dev->irq, cb_pcidas_interrupt,
			IRQF_SHARED, "cb_pcidas", dev)) {
		dev_dbg(dev->hw_dev, "unable to allocate irq %d\n",
			devpriv->pci_dev->irq);
		return -EINVAL;
	}
	dev->irq = devpriv->pci_dev->irq;

	/* Initialize dev->board_name */
	dev->board_name = thisboard->name;

	/*
	 * Allocate the subdevice structures.
	 */
	if (alloc_subdevices(dev, 7) < 0)
		return -ENOMEM;

	s = dev->subdevices + 0;
	/* analog input subdevice */
	dev->read_subdev = s;
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ;
	/* WARNING: Number of inputs in differential mode is ignored */
	s->n_chan = thisboard->ai_se_chans;
	s->len_chanlist = thisboard->ai_se_chans;
	s->maxdata = (1 << thisboard->ai_bits) - 1;
	s->range_table = thisboard->ranges;
	s->insn_read = cb_pcidas_ai_rinsn;
	s->insn_config = ai_config_insn;
	s->do_cmd = cb_pcidas_ai_cmd;
	s->do_cmdtest = cb_pcidas_ai_cmdtest;
	s->cancel = cb_pcidas_cancel;

	/* analog output subdevice */
	s = dev->subdevices + 1;
	if (thisboard->ao_nchan) {
		s->type = COMEDI_SUBD_AO;
		s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
		s->n_chan = thisboard->ao_nchan;
		/* analog out resolution is the same as analog input resolution,
		 * so use ai_bits */
		s->maxdata = (1 << thisboard->ai_bits) - 1;
		s->range_table = &cb_pcidas_ao_ranges;
		s->insn_read = cb_pcidas_ao_readback_insn;
		if (thisboard->has_ao_fifo) {
			dev->write_subdev = s;
			s->subdev_flags |=
			    SDF_CMD_WRITE;
			s->insn_write = cb_pcidas_ao_fifo_winsn;
			s->do_cmdtest = cb_pcidas_ao_cmdtest;
			s->do_cmd = cb_pcidas_ao_cmd;
			s->cancel = cb_pcidas_ao_cancel;
		} else {
			s->insn_write = cb_pcidas_ao_nofifo_winsn;
		}
	} else {
		s->type = COMEDI_SUBD_UNUSED;
	}

	/* 8255 */
	s = dev->subdevices + 2;
	subdev_8255_init(dev, s, NULL, devpriv->pacer_counter_dio + DIO_8255);

	/* serial EEPROM, */
	s = dev->subdevices + 3;
	s->type = COMEDI_SUBD_MEMORY;
	s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
	s->n_chan = 256;
	s->maxdata = 0xff;
	s->insn_read = eeprom_read_insn;

	/* 8800 caldac */
	s = dev->subdevices + 4;
	s->type = COMEDI_SUBD_CALIB;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
	s->n_chan = NUM_CHANNELS_8800;
	s->maxdata = 0xff;
	s->insn_read = caldac_read_insn;
	s->insn_write = caldac_write_insn;
	/* start all caldac channels at mid-scale */
	for (i = 0; i < s->n_chan; i++)
		caldac_8800_write(dev, i, s->maxdata / 2);

	/* trim potentiometer */
	s = dev->subdevices + 5;
	s->type = COMEDI_SUBD_CALIB;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
	if (thisboard->trimpot == AD7376) {
		s->n_chan = NUM_CHANNELS_7376;
		s->maxdata = 0x7f;
	} else {
		s->n_chan = NUM_CHANNELS_8402;
		s->maxdata = 0xff;
	}
	s->insn_read = trimpot_read_insn;
	s->insn_write = trimpot_write_insn;
	for (i = 0; i < s->n_chan; i++)
		cb_pcidas_trimpot_write(dev, i, s->maxdata / 2);

	/* dac08 caldac */
	s = dev->subdevices + 6;
	if (thisboard->has_dac08) {
		s->type = COMEDI_SUBD_CALIB;
		s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
		s->n_chan = NUM_CHANNELS_DAC08;
		s->insn_read = dac08_read_insn;
		s->insn_write = dac08_write_insn;
		s->maxdata = 0xff;
		dac08_write(dev, s->maxdata / 2);
	} else
		s->type = COMEDI_SUBD_UNUSED;

	/* make sure mailbox 4 is empty */
	inl(devpriv->s5933_config + AMCC_OP_REG_IMB4);
	/* Set bits to enable incoming mailbox interrupts on amcc s5933.
	 */
	devpriv->s5933_intcsr_bits =
	    INTCSR_INBOX_BYTE(3) | INTCSR_INBOX_SELECT(3) |
	    INTCSR_INBOX_FULL_INT;
	/* clear and enable interrupt on amcc s5933 */
	outl(devpriv->s5933_intcsr_bits | INTCSR_INBOX_INTR_STATUS,
	     devpriv->s5933_config + AMCC_OP_REG_INTCSR);

	return 1;
}

/*
 * cb_pcidas_detach is called to deconfigure a device.  It should deallocate
 * resources.
 * This function is also called when _attach() fails, so it should be
 * careful not to release resources that were not necessarily
 * allocated by _attach().  dev->private and dev->subdevices are
 * deallocated automatically by the core.
 */
static int cb_pcidas_detach(struct comedi_device *dev)
{
	if (devpriv) {
		if (devpriv->s5933_config) {
			/* disable and clear interrupts on amcc s5933 */
			outl(INTCSR_INBOX_INTR_STATUS,
			     devpriv->s5933_config + AMCC_OP_REG_INTCSR);
#ifdef CB_PCIDAS_DEBUG
			dev_dbg(dev->hw_dev, "detaching, incsr is 0x%x\n",
				inl(devpriv->s5933_config +
				    AMCC_OP_REG_INTCSR));
#endif
		}
	}
	if (dev->irq)
		free_irq(dev->irq, dev);
	if (dev->subdevices)
		subdev_8255_cleanup(dev, dev->subdevices + 2);
	if (devpriv && devpriv->pci_dev) {
		if (devpriv->s5933_config)
			comedi_pci_disable(devpriv->pci_dev);
		pci_dev_put(devpriv->pci_dev);
	}

	return 0;
}

/*
 * "instructions" read/write data in "one-shot" or "software-triggered"
 * mode.
*/ static int cb_pcidas_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n, i; unsigned int bits; static const int timeout = 10000; int channel; /* enable calibration input if appropriate */ if (insn->chanspec & CR_ALT_SOURCE) { outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG); channel = 0; } else { outw(0, devpriv->control_status + CALIBRATION_REG); channel = CR_CHAN(insn->chanspec); } /* set mux limits and gain */ bits = BEGIN_SCAN(channel) | END_SCAN(channel) | GAIN_BITS(CR_RANGE(insn->chanspec)); /* set unipolar/bipolar */ if (CR_RANGE(insn->chanspec) & IS_UNIPOLAR) bits |= UNIP; /* set singleended/differential */ if (CR_AREF(insn->chanspec) != AREF_DIFF) bits |= SE; outw(bits, devpriv->control_status + ADCMUX_CONT); /* clear fifo */ outw(0, devpriv->adc_fifo + ADCFIFOCLR); /* convert n samples */ for (n = 0; n < insn->n; n++) { /* trigger conversion */ outw(0, devpriv->adc_fifo + ADCDATA); /* wait for conversion to end */ /* return -ETIMEDOUT if there is a timeout */ for (i = 0; i < timeout; i++) { if (inw(devpriv->control_status + ADCMUX_CONT) & EOC) break; } if (i == timeout) return -ETIMEDOUT; /* read data */ data[n] = inw(devpriv->adc_fifo + ADCDATA); } /* return the number of samples read/written */ return n; } static int ai_config_calibration_source(struct comedi_device *dev, unsigned int *data) { static const int num_calibration_sources = 8; unsigned int source = data[1]; if (source >= num_calibration_sources) { dev_err(dev->hw_dev, "invalid calibration source: %i\n", source); return -EINVAL; } devpriv->calibration_source = source; return 2; } static int ai_config_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int id = data[0]; switch (id) { case INSN_CONFIG_ALT_SOURCE: return ai_config_calibration_source(dev, data); break; default: return -EINVAL; break; } return -EINVAL; } /* analog output insn for 
 pcidas-1000 and 1200 series */
static int cb_pcidas_ao_nofifo_winsn(struct comedi_device *dev,
				     struct comedi_subdevice *s,
				     struct comedi_insn *insn,
				     unsigned int *data)
{
	int channel;
	unsigned long flags;

	/* set channel and range */
	channel = CR_CHAN(insn->chanspec);
	spin_lock_irqsave(&dev->spinlock, flags);
	devpriv->ao_control_bits &=
	    ~DAC_MODE_UPDATE_BOTH & ~DAC_RANGE_MASK(channel);
	devpriv->ao_control_bits |=
	    DACEN | DAC_RANGE(channel, CR_RANGE(insn->chanspec));
	outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	/* remember value for readback */
	devpriv->ao_value[channel] = data[0];
	/* send data */
	outw(data[0], devpriv->ao_registers + DAC_DATA_REG(channel));

	return 1;
}

/* analog output insn for pcidas-1602 series */
static int cb_pcidas_ao_fifo_winsn(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned int *data)
{
	int channel;
	unsigned long flags;

	/* clear dac fifo */
	outw(0, devpriv->ao_registers + DACFIFOCLR);

	/* set channel and range */
	channel = CR_CHAN(insn->chanspec);
	spin_lock_irqsave(&dev->spinlock, flags);
	devpriv->ao_control_bits &=
	    ~DAC_CHAN_EN(0) & ~DAC_CHAN_EN(1) & ~DAC_RANGE_MASK(channel) &
	    ~DAC_PACER_MASK;
	devpriv->ao_control_bits |=
	    DACEN | DAC_RANGE(channel, CR_RANGE(insn->chanspec)) |
	    DAC_CHAN_EN(channel) | DAC_START;
	outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	/* remember value for readback */
	devpriv->ao_value[channel] = data[0];
	/* send data */
	outw(data[0], devpriv->ao_registers + DACDATA);

	return 1;
}

/* analog output readback insn */
/* XXX loses track of analog output value after an analog output command
 * is executed */
static int cb_pcidas_ao_readback_insn(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_insn *insn,
				      unsigned int *data)
{
	data[0] = devpriv->ao_value[CR_CHAN(insn->chanspec)];

	return 1;
}

/* Read one byte from the serial EEPROM; channel number is the address. */
static int eeprom_read_insn(struct
			    comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	uint8_t nvram_data;
	int retval;

	retval = nvram_read(dev, CR_CHAN(insn->chanspec), &nvram_data);
	if (retval < 0)
		return retval;

	data[0] = nvram_data;

	return 1;
}

/* Write one 8800 caldac channel. */
static int caldac_write_insn(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn, unsigned int *data)
{
	const unsigned int channel = CR_CHAN(insn->chanspec);

	return caldac_8800_write(dev, channel, data[0]);
}

/* Readback of the cached 8800 caldac value (the hardware is write-only). */
static int caldac_read_insn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	data[0] = devpriv->caldac_value[CR_CHAN(insn->chanspec)];

	return 1;
}

/* 1602/16 pregain offset */
static int dac08_write(struct comedi_device *dev, unsigned int value)
{
	/* skip the hardware sequence if the value is unchanged */
	if (devpriv->dac08_value == value)
		return 1;

	devpriv->dac08_value = value;

	/* strobe the value in: data, data + select, data again */
	outw(cal_enable_bits(dev) | (value & 0xff),
	     devpriv->control_status + CALIBRATION_REG);
	udelay(1);
	outw(cal_enable_bits(dev) | SELECT_DAC08_BIT | (value & 0xff),
	     devpriv->control_status + CALIBRATION_REG);
	udelay(1);
	outw(cal_enable_bits(dev) | (value & 0xff),
	     devpriv->control_status + CALIBRATION_REG);
	udelay(1);

	return 1;
}

static int dac08_write_insn(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	return dac08_write(dev, data[0]);
}

/* Readback of the cached dac08 value (the hardware is write-only). */
static int dac08_read_insn(struct comedi_device *dev,
			   struct comedi_subdevice *s,
			   struct comedi_insn *insn, unsigned int *data)
{
	data[0] = devpriv->dac08_value;

	return 1;
}

/* Dispatch a trimpot write to the chip this board actually carries. */
static int cb_pcidas_trimpot_write(struct comedi_device *dev,
				   unsigned int channel, unsigned int value)
{
	/* skip the hardware sequence if the value is unchanged */
	if (devpriv->trimpot_value[channel] == value)
		return 1;

	devpriv->trimpot_value[channel] = value;
	switch (thisboard->trimpot) {
	case AD7376:
		trimpot_7376_write(dev, value);
		break;
	case AD8402:
		trimpot_8402_write(dev, channel, value);
		break;
	default:
		comedi_error(dev, "driver bug?");
		return -1;
		break;
	}

	return 1;
}

static int trimpot_write_insn(struct
comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data)
{
	unsigned int channel = CR_CHAN(insn->chanspec);

	return cb_pcidas_trimpot_write(dev, channel, data[0]);
}

/* Readback of the cached trimpot value (the hardware is write-only). */
static int trimpot_read_insn(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn, unsigned int *data)
{
	unsigned int channel = CR_CHAN(insn->chanspec);

	data[0] = devpriv->trimpot_value[channel];

	return 1;
}

static int cb_pcidas_ai_cmdtest(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;
	int i, gain, start_chan;

	/* cmdtest tests a particular command to see if it is valid.
	 * Using the cmdtest ioctl, a user can create a valid cmd
	 * and then have it executed by the cmd ioctl.
	 *
	 * cmdtest returns 1,2,3,4 or 0, depending on which tests
	 * the command passes. */

	/* step 1: make sure trigger sources are trivially valid */

	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW | TRIG_EXT;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;

	tmp = cmd->scan_begin_src;
	cmd->scan_begin_src &= TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	tmp = cmd->convert_src;
	cmd->convert_src &= TRIG_TIMER | TRIG_NOW | TRIG_EXT;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err)
		return 1;

	/* step 2: make sure trigger sources are unique and mutually
	 * compatible */

	if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT)
		err++;
	if (cmd->scan_begin_src != TRIG_FOLLOW &&
	    cmd->scan_begin_src != TRIG_TIMER &&
	    cmd->scan_begin_src != TRIG_EXT)
		err++;
	if (cmd->convert_src != TRIG_TIMER &&
	    cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_NOW)
		err++;
	if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
		err++;

	/* make sure trigger sources are compatible with each other */
	if (cmd->scan_begin_src == TRIG_FOLLOW && cmd->convert_src == TRIG_NOW)
		err++;
	if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->convert_src != TRIG_NOW)
		err++;
	if (cmd->start_src == TRIG_EXT &&
	    (cmd->convert_src == TRIG_EXT || cmd->scan_begin_src == TRIG_EXT))
		err++;

	if (err)
		return 2;

	/* step 3: make sure arguments are trivially compatible */

	switch (cmd->start_src) {
	case TRIG_EXT:
		/* External trigger, only CR_EDGE and CR_INVERT flags allowed */
		if ((cmd->start_arg
		     & (CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT))) != 0) {
			cmd->start_arg &=
			    ~(CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT));
			err++;
		}
		if (!thisboard->has_ai_trig_invert &&
		    (cmd->start_arg & CR_INVERT)) {
			cmd->start_arg &= (CR_FLAGS_MASK & ~CR_INVERT);
			err++;
		}
		break;
	default:
		if (cmd->start_arg != 0) {
			cmd->start_arg = 0;
			err++;
		}
		break;
	}

	if (cmd->scan_begin_src == TRIG_TIMER) {
		if (cmd->scan_begin_arg <
		    thisboard->ai_speed * cmd->chanlist_len) {
			cmd->scan_begin_arg =
			    thisboard->ai_speed * cmd->chanlist_len;
			err++;
		}
	}
	if (cmd->convert_src == TRIG_TIMER) {
		if (cmd->convert_arg < thisboard->ai_speed) {
			cmd->convert_arg = thisboard->ai_speed;
			err++;
		}
	}

	if (cmd->scan_end_arg != cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}
	if (cmd->stop_src == TRIG_NONE) {
		/* TRIG_NONE */
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
	}

	if (err)
		return 3;

	/* step 4: fix up any arguments (round timer args to what the 8254
	 * cascade can actually produce) */

	if (cmd->scan_begin_src == TRIG_TIMER) {
		tmp = cmd->scan_begin_arg;
		i8253_cascade_ns_to_timer_2div(TIMER_BASE,
					       &(devpriv->divisor1),
					       &(devpriv->divisor2),
					       &(cmd->scan_begin_arg),
					       cmd->flags & TRIG_ROUND_MASK);
		if (tmp != cmd->scan_begin_arg)
			err++;
	}
	if (cmd->convert_src == TRIG_TIMER) {
		tmp = cmd->convert_arg;
		i8253_cascade_ns_to_timer_2div(TIMER_BASE,
					       &(devpriv->divisor1),
					       &(devpriv->divisor2),
					       &(cmd->convert_arg),
					       cmd->flags & TRIG_ROUND_MASK);
		if (tmp != cmd->convert_arg)
			err++;
	}

	if (err)
		return 4;

	/* check channel/gain list against card's limitations */
	if (cmd->chanlist) {
		gain = CR_RANGE(cmd->chanlist[0]);
		start_chan = CR_CHAN(cmd->chanlist[0]);
		for (i = 1; i < cmd->chanlist_len; i++) {
			if (CR_CHAN(cmd->chanlist[i]) !=
			    (start_chan + i) % s->n_chan) {
				comedi_error(dev,
					     "entries in chanlist must be consecutive channels, counting upwards\n");
				err++;
			}
			if (CR_RANGE(cmd->chanlist[i]) != gain) {
				comedi_error(dev,
					     "entries in chanlist must all have the same gain\n");
				err++;
			}
		}
	}

	if (err)
		return 5;

	return 0;
}

static int cb_pcidas_ai_cmd(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	unsigned int bits;
	unsigned long flags;

	/* make sure CAL_EN_BIT is disabled */
	outw(0, devpriv->control_status + CALIBRATION_REG);
	/* initialize before settings pacer source and count values */
	outw(0, devpriv->control_status + TRIG_CONTSTAT);
	/* clear fifo */
	outw(0, devpriv->adc_fifo + ADCFIFOCLR);

	/* set mux limits, gain and pacer source */
	bits = BEGIN_SCAN(CR_CHAN(cmd->chanlist[0])) |
	    END_SCAN(CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1])) |
	    GAIN_BITS(CR_RANGE(cmd->chanlist[0]));
	/* set unipolar/bipolar */
	if (CR_RANGE(cmd->chanlist[0]) & IS_UNIPOLAR)
		bits |= UNIP;
	/* set singleended/differential */
	if (CR_AREF(cmd->chanlist[0]) != AREF_DIFF)
		bits |= SE;
	/* set pacer source */
	if (cmd->convert_src == TRIG_EXT || cmd->scan_begin_src == TRIG_EXT)
		bits |= PACER_EXT_RISE;
	else
		bits |= PACER_INT;
	outw(bits, devpriv->control_status + ADCMUX_CONT);

#ifdef CB_PCIDAS_DEBUG
	dev_dbg(dev->hw_dev, "comedi: sent 0x%x to adcmux control\n", bits);
#endif

	/* load counters */
	if (cmd->convert_src == TRIG_TIMER)
		cb_pcidas_load_counters(dev, &cmd->convert_arg,
					cmd->flags & TRIG_ROUND_MASK);
	else if (cmd->scan_begin_src == TRIG_TIMER)
		cb_pcidas_load_counters(dev, &cmd->scan_begin_arg,
					cmd->flags & TRIG_ROUND_MASK);

	/* set number of conversions */
	if (cmd->stop_src == TRIG_COUNT)
		devpriv->count = cmd->chanlist_len * cmd->stop_arg;
	/* enable interrupts */
	spin_lock_irqsave(&dev->spinlock, flags);
	devpriv->adc_fifo_bits |= INTE;
	devpriv->adc_fifo_bits &= ~INT_MASK;
	if (cmd->flags & TRIG_WAKE_EOS) {
		if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1)
			devpriv->adc_fifo_bits |= INT_EOS;	/* interrupt end of burst */
		else
			devpriv->adc_fifo_bits |= INT_FNE;	/* interrupt fifo not empty */
	} else {
		devpriv->adc_fifo_bits |= INT_FHF;	/* interrupt fifo half full */
	}
#ifdef CB_PCIDAS_DEBUG
	dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n",
		devpriv->adc_fifo_bits);
#endif
	/* enable (and clear) interrupts */
	outw(devpriv->adc_fifo_bits | EOAI | INT | LADFUL,
	     devpriv->control_status + INT_ADCFIFO);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	/* set start trigger and burst mode */
	bits = 0;
	if (cmd->start_src == TRIG_NOW)
		bits |= SW_TRIGGER;
	else if (cmd->start_src == TRIG_EXT) {
		bits |= EXT_TRIGGER | TGEN | XTRCL;
		if (thisboard->has_ai_trig_invert
		    && (cmd->start_arg & CR_INVERT))
			bits |= TGPOL;
		if (thisboard->has_ai_trig_gated && (cmd->start_arg & CR_EDGE))
			bits |= TGSEL;
	} else {
		comedi_error(dev, "bug!");
		return -1;
	}
	if (cmd->convert_src == TRIG_NOW && cmd->chanlist_len > 1)
		bits |= BURSTE;
	outw(bits, devpriv->control_status + TRIG_CONTSTAT);
#ifdef CB_PCIDAS_DEBUG
	dev_dbg(dev->hw_dev, "comedi: sent 0x%x to trig control\n", bits);
#endif

	return 0;
}

static int cb_pcidas_ao_cmdtest(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;

	/* cmdtest tests a particular command to see if it is valid.
	 * Using the cmdtest ioctl, a user can create a valid cmd
	 * and then have it executed by the cmd ioctl.
	 *
	 * cmdtest returns 1,2,3,4 or 0, depending on which tests
	 * the command passes.
*/ /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_INT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_NOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ if (cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->scan_begin_arg < thisboard->ao_scan_speed) { cmd->scan_begin_arg = thisboard->ao_scan_speed; err++; } } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_NONE) { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->ao_divisor1), &(devpriv->ao_divisor2), &(cmd->scan_begin_arg), cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->scan_begin_arg) err++; } if (err) return 4; /* check channel/gain list against card's limitations */ if (cmd->chanlist && cmd->chanlist_len > 1) { if (CR_CHAN(cmd->chanlist[0]) != 0 || CR_CHAN(cmd->chanlist[1]) != 1) { comedi_error(dev, "channels must be ordered channel 0, channel 1 in chanlist\n"); err++; } } if (err) return 5; return 
0; } static int cb_pcidas_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int i; unsigned long flags; /* set channel limits, gain */ spin_lock_irqsave(&dev->spinlock, flags); for (i = 0; i < cmd->chanlist_len; i++) { /* enable channel */ devpriv->ao_control_bits |= DAC_CHAN_EN(CR_CHAN(cmd->chanlist[i])); /* set range */ devpriv->ao_control_bits |= DAC_RANGE(CR_CHAN(cmd->chanlist[i]), CR_RANGE(cmd-> chanlist[i])); } /* disable analog out before settings pacer source and count values */ outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); spin_unlock_irqrestore(&dev->spinlock, flags); /* clear fifo */ outw(0, devpriv->ao_registers + DACFIFOCLR); /* load counters */ if (cmd->scan_begin_src == TRIG_TIMER) { i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->ao_divisor1), &(devpriv->ao_divisor2), &(cmd->scan_begin_arg), cmd->flags); /* Write the values of ctr1 and ctr2 into counters 1 and 2 */ i8254_load(devpriv->pacer_counter_dio + DAC8254, 0, 1, devpriv->ao_divisor1, 2); i8254_load(devpriv->pacer_counter_dio + DAC8254, 0, 2, devpriv->ao_divisor2, 2); } /* set number of conversions */ if (cmd->stop_src == TRIG_COUNT) devpriv->ao_count = cmd->chanlist_len * cmd->stop_arg; /* set pacer source */ spin_lock_irqsave(&dev->spinlock, flags); switch (cmd->scan_begin_src) { case TRIG_TIMER: devpriv->ao_control_bits |= DAC_PACER_INT; break; case TRIG_EXT: devpriv->ao_control_bits |= DAC_PACER_EXT_RISE; break; default: spin_unlock_irqrestore(&dev->spinlock, flags); comedi_error(dev, "error setting dac pacer source"); return -1; break; } spin_unlock_irqrestore(&dev->spinlock, flags); async->inttrig = cb_pcidas_ao_inttrig; return 0; } static int cb_pcidas_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trig_num) { unsigned int num_bytes, num_points = thisboard->fifo_size; struct comedi_async *async = s->async; struct comedi_cmd *cmd = 
&s->async->cmd; unsigned long flags; if (trig_num != 0) return -EINVAL; /* load up fifo */ if (cmd->stop_src == TRIG_COUNT && devpriv->ao_count < num_points) num_points = devpriv->ao_count; num_bytes = cfc_read_array_from_buffer(s, devpriv->ao_buffer, num_points * sizeof(short)); num_points = num_bytes / sizeof(short); if (cmd->stop_src == TRIG_COUNT) devpriv->ao_count -= num_points; /* write data to board's fifo */ outsw(devpriv->ao_registers + DACDATA, devpriv->ao_buffer, num_bytes); /* enable dac half-full and empty interrupts */ spin_lock_irqsave(&dev->spinlock, flags); devpriv->adc_fifo_bits |= DAEMIE | DAHFIE; #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits); #endif /* enable and clear interrupts */ outw(devpriv->adc_fifo_bits | DAEMI | DAHFI, devpriv->control_status + INT_ADCFIFO); /* start dac */ devpriv->ao_control_bits |= DAC_START | DACEN | DAC_EMPTY; outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "comedi: sent 0x%x to dac control\n", devpriv->ao_control_bits); #endif spin_unlock_irqrestore(&dev->spinlock, flags); async->inttrig = NULL; return 0; } static irqreturn_t cb_pcidas_interrupt(int irq, void *d) { struct comedi_device *dev = (struct comedi_device *)d; struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async; int status, s5933_status; int half_fifo = thisboard->fifo_size / 2; unsigned int num_samples, i; static const int timeout = 10000; unsigned long flags; if (dev->attached == 0) return IRQ_NONE; async = s->async; async->events = 0; s5933_status = inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR); #ifdef CB_PCIDAS_DEBUG dev_dbg(dev->hw_dev, "intcsr 0x%x\n", s5933_status); dev_dbg(dev->hw_dev, "mbef 0x%x\n", inl(devpriv->s5933_config + AMCC_OP_REG_MBEF)); #endif if ((INTCSR_INTR_ASSERTED & s5933_status) == 0) return IRQ_NONE; /* make sure mailbox 4 is empty */ inl_p(devpriv->s5933_config + AMCC_OP_REG_IMB4); /* 
 clear interrupt on amcc s5933 */
	outl(devpriv->s5933_intcsr_bits | INTCSR_INBOX_INTR_STATUS,
	     devpriv->s5933_config + AMCC_OP_REG_INTCSR);

	status = inw(devpriv->control_status + INT_ADCFIFO);
#ifdef CB_PCIDAS_DEBUG
	if ((status & (INT | EOAI | LADFUL | DAHFI | DAEMI)) == 0)
		comedi_error(dev, "spurious interrupt");
#endif

	/* check for analog output interrupt */
	if (status & (DAHFI | DAEMI))
		handle_ao_interrupt(dev, status);

	/* check for analog input interrupts */
	/* if fifo half-full */
	if (status & ADHFI) {
		/* read data */
		num_samples = half_fifo;
		if (async->cmd.stop_src == TRIG_COUNT &&
		    num_samples > devpriv->count) {
			num_samples = devpriv->count;
		}
		insw(devpriv->adc_fifo + ADCDATA, devpriv->ai_buffer,
		     num_samples);
		cfc_write_array_to_buffer(s, devpriv->ai_buffer,
					  num_samples * sizeof(short));
		devpriv->count -= num_samples;
		if (async->cmd.stop_src == TRIG_COUNT &&
		    devpriv->count == 0) {
			/* end of acquisition */
			async->events |= COMEDI_CB_EOA;
			cb_pcidas_cancel(dev, s);
		}
		/* clear half-full interrupt latch */
		spin_lock_irqsave(&dev->spinlock, flags);
		outw(devpriv->adc_fifo_bits | INT,
		     devpriv->control_status + INT_ADCFIFO);
		spin_unlock_irqrestore(&dev->spinlock, flags);
		/* else if fifo not empty */
	} else if (status & (ADNEI | EOBI)) {
		for (i = 0; i < timeout; i++) {
			/* break if fifo is empty */
			if ((ADNE & inw(devpriv->control_status +
					INT_ADCFIFO)) == 0)
				break;
			cfc_write_to_buffer(s, inw(devpriv->adc_fifo));
			if (async->cmd.stop_src == TRIG_COUNT &&
			    --devpriv->count == 0) {
				/* end of acquisition */
				cb_pcidas_cancel(dev, s);
				async->events |= COMEDI_CB_EOA;
				break;
			}
		}
		/* clear not-empty interrupt latch */
		spin_lock_irqsave(&dev->spinlock, flags);
		outw(devpriv->adc_fifo_bits | INT,
		     devpriv->control_status + INT_ADCFIFO);
		spin_unlock_irqrestore(&dev->spinlock, flags);
	} else if (status & EOAI) {
		comedi_error(dev,
			     "bug! encountered end of acquisition interrupt?");
		/* clear EOA interrupt latch */
		spin_lock_irqsave(&dev->spinlock, flags);
		outw(devpriv->adc_fifo_bits | EOAI,
		     devpriv->control_status + INT_ADCFIFO);
		spin_unlock_irqrestore(&dev->spinlock, flags);
	}
	/* check for fifo overflow */
	if (status & LADFUL) {
		comedi_error(dev, "fifo overflow");
		/* clear overflow interrupt latch */
		spin_lock_irqsave(&dev->spinlock, flags);
		outw(devpriv->adc_fifo_bits | LADFUL,
		     devpriv->control_status + INT_ADCFIFO);
		spin_unlock_irqrestore(&dev->spinlock, flags);
		cb_pcidas_cancel(dev, s);
		async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
	}

	comedi_event(dev, s);

	return IRQ_HANDLED;
}

/* Service the analog output side of the interrupt: refill the DAC fifo on
 * half-full, detect underflow/completion on empty. */
static void handle_ao_interrupt(struct comedi_device *dev,
				unsigned int status)
{
	struct comedi_subdevice *s = dev->write_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	unsigned int half_fifo = thisboard->fifo_size / 2;
	unsigned int num_points;
	unsigned long flags;

	async->events = 0;

	if (status & DAEMI) {
		/* clear dac empty interrupt latch */
		spin_lock_irqsave(&dev->spinlock, flags);
		outw(devpriv->adc_fifo_bits | DAEMI,
		     devpriv->control_status + INT_ADCFIFO);
		spin_unlock_irqrestore(&dev->spinlock, flags);
		if (inw(devpriv->ao_registers + DAC_CSR) & DAC_EMPTY) {
			/* fifo ran dry with data still pending: underflow */
			if (cmd->stop_src == TRIG_NONE ||
			    (cmd->stop_src == TRIG_COUNT
			     && devpriv->ao_count)) {
				comedi_error(dev, "dac fifo underflow");
				cb_pcidas_ao_cancel(dev, s);
				async->events |= COMEDI_CB_ERROR;
			}
			async->events |= COMEDI_CB_EOA;
		}
	} else if (status & DAHFI) {
		unsigned int num_bytes;

		/* figure out how many points we are writing to fifo */
		num_points = half_fifo;
		if (cmd->stop_src == TRIG_COUNT &&
		    devpriv->ao_count < num_points)
			num_points = devpriv->ao_count;
		num_bytes =
		    cfc_read_array_from_buffer(s, devpriv->ao_buffer,
					       num_points * sizeof(short));
		num_points = num_bytes / sizeof(short);

		if (async->cmd.stop_src == TRIG_COUNT)
			devpriv->ao_count -= num_points;
		/* write data to board's fifo */
		outsw(devpriv->ao_registers + DACDATA,
		      devpriv->ao_buffer, num_points);
		/* clear half-full interrupt latch */
		spin_lock_irqsave(&dev->spinlock, flags);
		outw(devpriv->adc_fifo_bits | DAHFI,
		     devpriv->control_status + INT_ADCFIFO);
		spin_unlock_irqrestore(&dev->spinlock, flags);
	}

	comedi_event(dev, s);
}

/* cancel analog input command */
static int cb_pcidas_cancel(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->spinlock, flags);
	/* disable interrupts */
	devpriv->adc_fifo_bits &= ~INTE & ~EOAIE;
	outw(devpriv->adc_fifo_bits, devpriv->control_status + INT_ADCFIFO);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	/* disable start trigger source and burst mode */
	outw(0, devpriv->control_status + TRIG_CONTSTAT);
	/* software pacer source */
	outw(0, devpriv->control_status + ADCMUX_CONT);

	return 0;
}

/* cancel analog output command */
static int cb_pcidas_ao_cancel(struct comedi_device *dev,
			       struct comedi_subdevice *s)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->spinlock, flags);
	/* disable interrupts */
	devpriv->adc_fifo_bits &= ~DAHFIE & ~DAEMIE;
	outw(devpriv->adc_fifo_bits, devpriv->control_status + INT_ADCFIFO);

	/* disable output */
	devpriv->ao_control_bits &= ~DACEN & ~DAC_PACER_MASK;
	outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
	spin_unlock_irqrestore(&dev->spinlock, flags);

	return 0;
}

/* Program the 8254 cascade (counters 1 and 2) for the analog input pacer;
 * *ns is rounded in place to the achievable period. */
static void cb_pcidas_load_counters(struct comedi_device *dev,
				    unsigned int *ns, int rounding_flags)
{
	i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1),
				       &(devpriv->divisor2), ns,
				       rounding_flags & TRIG_ROUND_MASK);

	/* Write the values of ctr1 and ctr2 into counters 1 and 2 */
	i8254_load(devpriv->pacer_counter_dio + ADC8254, 0, 1,
		   devpriv->divisor1, 2);
	i8254_load(devpriv->pacer_counter_dio + ADC8254, 0, 2,
		   devpriv->divisor2, 2);
}

/* Bit-bang a serial bitstream (MSB first) into the calibration register,
 * keeping the supplied register_bits asserted throughout. */
static void write_calibration_bitstream(struct comedi_device *dev,
					unsigned int register_bits,
					unsigned int bitstream,
					unsigned int bitstream_length)
{
	static const int write_delay = 1;
	unsigned int bit;

	for
(bit = 1 << (bitstream_length - 1); bit; bit >>= 1) {
		if (bitstream & bit)
			register_bits |= SERIAL_DATA_IN_BIT;
		else
			register_bits &= ~SERIAL_DATA_IN_BIT;
		/* settle time before presenting the next data bit */
		udelay(write_delay);
		outw(register_bits, devpriv->control_status + CALIBRATION_REG);
	}
}

/*
 * Write @value to channel @address of the 8800 caldac.
 *
 * Returns -1 for an out-of-range channel, otherwise 1.  The cache in
 * devpriv->caldac_value[] is used to skip redundant writes.
 * NOTE(review): success is reported as 1 (not 0), and 1 is also
 * returned when the write was skipped as redundant; callers appear to
 * ignore the distinction.
 */
static int caldac_8800_write(struct comedi_device *dev, unsigned int address,
			     uint8_t value)
{
	static const int num_caldac_channels = 8;
	static const int bitstream_length = 11;
	/* 3-bit channel address in bits 10..8, 8-bit value below */
	unsigned int bitstream = ((address & 0x7) << 8) | value;
	static const int caldac_8800_udelay = 1;

	if (address >= num_caldac_channels) {
		comedi_error(dev, "illegal caldac channel");
		return -1;
	}

	if (value == devpriv->caldac_value[address])
		return 1;

	devpriv->caldac_value[address] = value;

	write_calibration_bitstream(dev, cal_enable_bits(dev), bitstream,
				    bitstream_length);

	/* pulse the 8800 chip select to latch the shifted-in word */
	udelay(caldac_8800_udelay);
	outw(cal_enable_bits(dev) | SELECT_8800_BIT,
	     devpriv->control_status + CALIBRATION_REG);
	udelay(caldac_8800_udelay);
	outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG);

	return 1;
}

/*
 * Write the 7-bit @value to the AD7376 trimpot.  Always returns 0.
 */
static int trimpot_7376_write(struct comedi_device *dev, uint8_t value)
{
	static const int bitstream_length = 7;
	unsigned int bitstream = value & 0x7f;
	unsigned int register_bits;
	static const int ad7376_udelay = 1;

	/* assert the trimpot select before clocking data out */
	register_bits = cal_enable_bits(dev) | SELECT_TRIMPOT_BIT;
	udelay(ad7376_udelay);
	outw(register_bits, devpriv->control_status + CALIBRATION_REG);

	write_calibration_bitstream(dev, register_bits, bitstream,
				    bitstream_length);

	/* deselect to latch the new wiper setting */
	udelay(ad7376_udelay);
	outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG);

	return 0;
}

/* For 1602/16 only
 * ch 0 : adc gain
 * ch 1 : adc postgain offset */
static int trimpot_8402_write(struct comedi_device *dev, unsigned int channel,
			      uint8_t value)
{
	static const int bitstream_length = 10;
	/* 2-bit channel select in bits 9..8, 8-bit value below */
	unsigned int bitstream = ((channel & 0x3) << 8) | (value & 0xff);
	unsigned int register_bits;
	static const int ad8402_udelay = 1;

	register_bits = cal_enable_bits(dev) | SELECT_TRIMPOT_BIT;
	udelay(ad8402_udelay);
outw(register_bits, devpriv->control_status + CALIBRATION_REG);

	write_calibration_bitstream(dev, register_bits, bitstream,
				    bitstream_length);

	/* deselect the trimpot to latch the new wiper setting */
	udelay(ad8402_udelay);
	outw(cal_enable_bits(dev), devpriv->control_status + CALIBRATION_REG);

	return 0;
}

/*
 * Poll the AMCC S5933 until its nvram interface reports not-busy.
 * Returns 0 when ready, -1 after ~1000 x 1us polls without success.
 */
static int wait_for_nvram_ready(unsigned long s5933_base_addr)
{
	static const int timeout = 1000;
	unsigned int i;

	for (i = 0; i < timeout; i++) {
		if ((inb(s5933_base_addr + AMCC_OP_REG_MCSR_NVCMD) &
		     MCSR_NV_BUSY) == 0)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * Read one byte at @address from the S5933 serial nvram into @data.
 * Returns 0 on success or -ETIMEDOUT if the controller stays busy
 * before or after the read command.
 */
static int nvram_read(struct comedi_device *dev, unsigned int address,
		      uint8_t *data)
{
	unsigned long iobase = devpriv->s5933_config;

	if (wait_for_nvram_ready(iobase) < 0)
		return -ETIMEDOUT;

	/* load the 16-bit nvram address, low byte then high byte */
	outb(MCSR_NV_ENABLE | MCSR_NV_LOAD_LOW_ADDR,
	     iobase + AMCC_OP_REG_MCSR_NVCMD);
	outb(address & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
	outb(MCSR_NV_ENABLE | MCSR_NV_LOAD_HIGH_ADDR,
	     iobase + AMCC_OP_REG_MCSR_NVCMD);
	outb((address >> 8) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
	/* issue the read command and fetch the data byte */
	outb(MCSR_NV_ENABLE | MCSR_NV_READ, iobase + AMCC_OP_REG_MCSR_NVCMD);

	if (wait_for_nvram_ready(iobase) < 0)
		return -ETIMEDOUT;

	*data = inb(iobase + AMCC_OP_REG_MCSR_NVDATA);

	return 0;
}

/*
 * A convenient macro that defines init_module() and cleanup_module(),
 * as necessary.
*/ static int __devinit driver_cb_pcidas_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_cb_pcidas.driver_name); } static void __devexit driver_cb_pcidas_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_cb_pcidas_pci_driver = { .id_table = cb_pcidas_pci_table, .probe = &driver_cb_pcidas_pci_probe, .remove = __devexit_p(&driver_cb_pcidas_pci_remove) }; static int __init driver_cb_pcidas_init_module(void) { int retval; retval = comedi_driver_register(&driver_cb_pcidas); if (retval < 0) return retval; driver_cb_pcidas_pci_driver.name = (char *)driver_cb_pcidas.driver_name; return pci_register_driver(&driver_cb_pcidas_pci_driver); } static void __exit driver_cb_pcidas_cleanup_module(void) { pci_unregister_driver(&driver_cb_pcidas_pci_driver); comedi_driver_unregister(&driver_cb_pcidas); } module_init(driver_cb_pcidas_init_module); module_exit(driver_cb_pcidas_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
Tegra4/android_kernel_hp_ranger
drivers/media/dvb/dvb-usb/dw2102.c
4896
47307
/* DVB USB framework compliant Linux driver for the * DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101, * TeVii S600, S630, S650, S660, S480, * Prof 1100, 7500, * Geniatech SU3000 Cards * Copyright (C) 2008-2011 Igor M. Liplianin (liplianin@me.by) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "dw2102.h" #include "si21xx.h" #include "stv0299.h" #include "z0194a.h" #include "stv0288.h" #include "stb6000.h" #include "eds1547.h" #include "cx24116.h" #include "tda1002x.h" #include "mt312.h" #include "zl10039.h" #include "ds3000.h" #include "stv0900.h" #include "stv6110.h" #include "stb6100.h" #include "stb6100_proc.h" #ifndef USB_PID_DW2102 #define USB_PID_DW2102 0x2102 #endif #ifndef USB_PID_DW2104 #define USB_PID_DW2104 0x2104 #endif #ifndef USB_PID_DW3101 #define USB_PID_DW3101 0x3101 #endif #ifndef USB_PID_CINERGY_S #define USB_PID_CINERGY_S 0x0064 #endif #ifndef USB_PID_TEVII_S630 #define USB_PID_TEVII_S630 0xd630 #endif #ifndef USB_PID_TEVII_S650 #define USB_PID_TEVII_S650 0xd650 #endif #ifndef USB_PID_TEVII_S660 #define USB_PID_TEVII_S660 0xd660 #endif #ifndef USB_PID_TEVII_S480_1 #define USB_PID_TEVII_S480_1 0xd481 #endif #ifndef USB_PID_TEVII_S480_2 #define USB_PID_TEVII_S480_2 0xd482 #endif #ifndef USB_PID_PROF_1100 #define USB_PID_PROF_1100 0xb012 #endif #define DW210X_READ_MSG 0 #define DW210X_WRITE_MSG 1 #define REG_1F_SYMBOLRATE_BYTE0 0x1f #define REG_20_SYMBOLRATE_BYTE1 0x20 #define REG_21_SYMBOLRATE_BYTE2 0x21 /* on my own*/ #define DW2102_VOLTAGE_CTRL (0x1800) #define SU3000_STREAM_CTRL (0x1900) #define DW2102_RC_QUERY (0x1a00) #define DW2102_LED_CTRL (0x1b00) #define err_str "did not find the firmware file. (%s) " \ "Please see linux/Documentation/dvb/ for more details " \ "on firmware-problems." 
/* legacy remote keymap: key table plus its length */
struct rc_map_dvb_usb_table_table {
	struct rc_map_table *rc_keys;
	int rc_keys_size;
};

/* per-device state for the Geniatech SU3000 */
struct su3000_state {
	u8 initialized;	/* set once the board reset command has been issued */
};

/* per-device state for TeVii S6x0-style boards */
struct s6x0_state {
	/* demod's original set_voltage op, chained by s660_set_voltage() */
	int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
};

/* debug */
static int dvb_usb_dw2102_debug;
module_param_named(debug, dvb_usb_dw2102_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))."
						DVB_USB_DEBUG_STATUS);

/* keymaps */
static int ir_keymap;
module_param_named(keymap, ir_keymap, int, 0644);
MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs ..."
			" 256=none");

/* demod probe */
static int demod_probe = 1;
module_param_named(demod, demod_probe, int, 0644);
MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 "
			"4=stv0903+stb6100(or-able)).");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/*
 * Issue a vendor control transfer on endpoint 0.
 *
 * @flags selects the direction (DW210X_READ_MSG / DW210X_WRITE_MSG);
 * @data is staged through a kmalloc'd bounce buffer in both directions.
 * Returns the usb_control_msg() result (bytes transferred or -errno),
 * or -ENOMEM if the bounce buffer allocation fails.
 */
static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value,
			u16 index, u8 *data, u16 len, int flags)
{
	int ret;
	u8 *u8buf;

	unsigned int pipe = (flags == DW210X_READ_MSG) ?
			usb_rcvctrlpipe(dev, 0) : usb_sndctrlpipe(dev, 0);
	u8 request_type = (flags == DW210X_READ_MSG) ?
USB_DIR_IN : USB_DIR_OUT; u8buf = kmalloc(len, GFP_KERNEL); if (!u8buf) return -ENOMEM; if (flags == DW210X_WRITE_MSG) memcpy(u8buf, data, len); ret = usb_control_msg(dev, pipe, request, request_type | USB_TYPE_VENDOR, value, index , u8buf, len, 2000); if (flags == DW210X_READ_MSG) memcpy(data, u8buf, len); kfree(u8buf); return ret; } /* I2C */ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i = 0, ret = 0; u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0}; u16 value; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: /* read stv0299 register */ value = msg[0].buf[0];/* register */ for (i = 0; i < msg[1].len; i++) { ret = dw210x_op_rw(d->udev, 0xb5, value + i, 0, buf6, 2, DW210X_READ_MSG); msg[1].buf[i] = buf6[0]; } break; case 1: switch (msg[0].addr) { case 0x68: /* write to stv0299 register */ buf6[0] = 0x2a; buf6[1] = msg[0].buf[0]; buf6[2] = msg[0].buf[1]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 3, DW210X_WRITE_MSG); break; case 0x60: if (msg[0].flags == 0) { /* write to tuner pll */ buf6[0] = 0x2c; buf6[1] = 5; buf6[2] = 0xc0; buf6[3] = msg[0].buf[0]; buf6[4] = msg[0].buf[1]; buf6[5] = msg[0].buf[2]; buf6[6] = msg[0].buf[3]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 7, DW210X_WRITE_MSG); } else { /* read from tuner */ ret = dw210x_op_rw(d->udev, 0xb5, 0, 0, buf6, 1, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; } break; case (DW2102_RC_QUERY): ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case (DW2102_VOLTAGE_CTRL): buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); 
int ret = 0; u8 buf6[] = {0, 0, 0, 0, 0, 0, 0}; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: /* read si2109 register by number */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; buf6[2] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); /* read si2109 register */ ret = dw210x_op_rw(d->udev, 0xc3, 0xd0, 0, buf6, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, buf6 + 2, msg[1].len); break; case 1: switch (msg[0].addr) { case 0x68: /* write to si2109 register */ buf6[0] = msg[0].addr << 1; buf6[1] = msg[0].len; memcpy(buf6 + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6, msg[0].len + 2, DW210X_WRITE_MSG); break; case(DW2102_RC_QUERY): ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, buf6, 2, DW210X_READ_MSG); msg[0].buf[0] = buf6[0]; msg[0].buf[1] = buf6[1]; break; case(DW2102_VOLTAGE_CTRL): buf6[0] = 0x30; buf6[1] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, buf6, 2, DW210X_WRITE_MSG); break; } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[msg[1].len + 2], obuf[3]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ ret = dw210x_op_rw(d->udev, 0xc3, 0xd1 , 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x68: { /* write to register */ u8 obuf[msg[0].len + 2]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 
0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case 0x61: { /* write to tuner */ u8 obuf[msg[0].len + 2]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case(DW2102_RC_QUERY): { u8 ibuf[2]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf , 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } } break; } mutex_unlock(&d->i2c_mutex); return num; } static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0; int len, i, j; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case(DW2102_RC_QUERY): { u8 ibuf[2]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf , 2); break; } case(DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 0x30; obuf[1] = msg[j].buf[0]; ret = dw210x_op_rw(d->udev, 0xb2, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /*case 0x55: cx24116 case 0x6a: stv0903 case 0x68: ds3000, stv0903 case 0x60: ts2020, stv6110, stb6100 */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[msg[j].len + 2]; ret = dw210x_op_rw(d->udev, 0xc3, (msg[j].addr << 1) + 1, 0, ibuf, msg[j].len + 2, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 2, msg[j].len); mdelay(10); } else if (((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) || ((msg[j].buf[0] == 0xf7) && (msg[j].addr == 0x55))) { /* write firmware */ u8 obuf[19]; obuf[0] = msg[j].addr << 1; obuf[1] = (msg[j].len > 15 ? 17 : msg[j].len); obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 
16 : len)); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else { /* write registers */ u8 obuf[msg[j].len + 2]; obuf[0] = msg[j].addr << 1; obuf[1] = msg[j].len; memcpy(obuf + 2, msg[j].buf, msg[j].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); } break; } } } mutex_unlock(&d->i2c_mutex); return num; } static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret = 0, i; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 2: { /* read */ /* first write first register number */ u8 ibuf[msg[1].len + 2], obuf[3]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; obuf[2] = msg[0].buf[0]; ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); /* second read registers */ ret = dw210x_op_rw(d->udev, 0xc3, 0x19 , 0, ibuf, msg[1].len + 2, DW210X_READ_MSG); memcpy(msg[1].buf, ibuf + 2, msg[1].len); break; } case 1: switch (msg[0].addr) { case 0x60: case 0x0c: { /* write to register */ u8 obuf[msg[0].len + 2]; obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; memcpy(obuf + 2, msg[0].buf, msg[0].len); ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, obuf, msg[0].len + 2, DW210X_WRITE_MSG); break; } case(DW2102_RC_QUERY): { u8 ibuf[2]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 2, DW210X_READ_MSG); memcpy(msg[0].buf, ibuf , 2); break; } } break; } for (i = 0; i < num; i++) { deb_xfer("%02x:%02x: %s ", i, msg[i].addr, msg[i].flags == 0 ? 
">>>" : "<<<"); debug_dump(msg[i].buf, msg[i].len, deb_xfer); } mutex_unlock(&d->i2c_mutex); return num; } static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct usb_device *udev; int ret = 0; int len, i, j; if (!d) return -ENODEV; udev = d->udev; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (j = 0; j < num; j++) { switch (msg[j].addr) { case (DW2102_RC_QUERY): { u8 ibuf[5]; ret = dw210x_op_rw(d->udev, 0xb8, 0, 0, ibuf, 5, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf + 3, 2); break; } case (DW2102_VOLTAGE_CTRL): { u8 obuf[2]; obuf[0] = 1; obuf[1] = msg[j].buf[1];/* off-on */ ret = dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); obuf[0] = 3; obuf[1] = msg[j].buf[0];/* 13v-18v */ ret = dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } case (DW2102_LED_CTRL): { u8 obuf[2]; obuf[0] = 5; obuf[1] = msg[j].buf[0]; ret = dw210x_op_rw(d->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); break; } /*case 0x55: cx24116 case 0x6a: stv0903 case 0x68: ds3000, stv0903 case 0x60: ts2020, stv6110, stb6100 case 0xa0: eeprom */ default: { if (msg[j].flags == I2C_M_RD) { /* read registers */ u8 ibuf[msg[j].len]; ret = dw210x_op_rw(d->udev, 0x91, 0, 0, ibuf, msg[j].len, DW210X_READ_MSG); memcpy(msg[j].buf, ibuf, msg[j].len); break; } else if ((msg[j].buf[0] == 0xb0) && (msg[j].addr == 0x68)) { /* write firmware */ u8 obuf[19]; obuf[0] = (msg[j].len > 16 ? 18 : msg[j].len + 1); obuf[1] = msg[j].addr << 1; obuf[2] = msg[j].buf[0]; len = msg[j].len - 1; i = 1; do { memcpy(obuf + 3, msg[j].buf + i, (len > 16 ? 16 : len)); ret = dw210x_op_rw(d->udev, 0x80, 0, 0, obuf, (len > 16 ? 
16 : len) + 3, DW210X_WRITE_MSG); i += 16; len -= 16; } while (len > 0); } else if (j < (num - 1)) { /* write register addr before read */ u8 obuf[msg[j].len + 2]; obuf[0] = msg[j + 1].len; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); ret = dw210x_op_rw(d->udev, udev->descriptor.idProduct == 0x7500 ? 0x92 : 0x90, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } else { /* write registers */ u8 obuf[msg[j].len + 2]; obuf[0] = msg[j].len + 1; obuf[1] = (msg[j].addr << 1); memcpy(obuf + 2, msg[j].buf, msg[j].len); ret = dw210x_op_rw(d->udev, 0x80, 0, 0, obuf, msg[j].len + 2, DW210X_WRITE_MSG); break; } break; } } } mutex_unlock(&d->i2c_mutex); return num; } static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); u8 obuf[0x40], ibuf[0x40]; if (!d) return -ENODEV; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; switch (num) { case 1: switch (msg[0].addr) { case SU3000_STREAM_CTRL: obuf[0] = msg[0].buf[0] + 0x36; obuf[1] = 3; obuf[2] = 0; if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0) err("i2c transfer failed."); break; case DW2102_RC_QUERY: obuf[0] = 0x10; if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0) err("i2c transfer failed."); msg[0].buf[1] = ibuf[0]; msg[0].buf[0] = ibuf[1]; break; default: /* always i2c write*/ obuf[0] = 0x08; obuf[1] = msg[0].addr; obuf[2] = msg[0].len; memcpy(&obuf[3], msg[0].buf, msg[0].len); if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3, ibuf, 1, 0) < 0) err("i2c transfer failed."); } break; case 2: /* always i2c read */ obuf[0] = 0x09; obuf[1] = msg[0].len; obuf[2] = msg[1].len; obuf[3] = msg[0].addr; memcpy(&obuf[4], msg[0].buf, msg[0].len); if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4, ibuf, msg[1].len + 1, 0) < 0) err("i2c transfer failed."); memcpy(msg[1].buf, &ibuf[1], msg[1].len); break; default: warn("more than 2 i2c messages at a time is not handled yet."); break; } 
mutex_unlock(&d->i2c_mutex);

	return num;
}

/* every adapter variant speaks plain I2C only */
static u32 dw210x_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C;
}

/* one i2c_algorithm per board family, all sharing the functionality op */
static struct i2c_algorithm dw2102_i2c_algo = {
	.master_xfer = dw2102_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static struct i2c_algorithm dw2102_serit_i2c_algo = {
	.master_xfer = dw2102_serit_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static struct i2c_algorithm dw2102_earda_i2c_algo = {
	.master_xfer = dw2102_earda_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static struct i2c_algorithm dw2104_i2c_algo = {
	.master_xfer = dw2104_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static struct i2c_algorithm dw3101_i2c_algo = {
	.master_xfer = dw3101_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static struct i2c_algorithm s6x0_i2c_algo = {
	.master_xfer = s6x0_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

static struct i2c_algorithm su3000_i2c_algo = {
	.master_xfer = su3000_i2c_transfer,
	.functionality = dw210x_i2c_func,
};

/*
 * Read the 6-byte MAC address out of the device eeprom (bytes 8..13)
 * using vendor request 0xb6.  Each 16-byte eeprom line is dumped when
 * xfer debugging is enabled.  Returns 0 on success, -1 on a failed
 * eeprom read.
 */
static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
{
	int i;
	u8 ibuf[] = {0, 0};
	u8 eeprom[256], eepromline[16];

	for (i = 0; i < 256; i++) {
		if (dw210x_op_rw(d->udev, 0xb6, 0xa0 , i, ibuf, 2,
				DW210X_READ_MSG) < 0) {
			err("read eeprom failed.");
			return -1;
		} else {
			eepromline[i%16] = ibuf[0];
			eeprom[i] = ibuf[0];
		}
		if ((i % 16) == 15) {
			deb_xfer("%02x: ", i - 15);
			debug_dump(eepromline, 16, deb_xfer);
		}
	}
	memcpy(mac, eeprom + 8, 6);
	return 0;
};	/* NOTE(review): stray ';' after function body -- harmless, but unconventional */

/*
 * Read the MAC address (eeprom bytes 16..21) on S6x0 boards by driving
 * the eeprom at i2c address 0xa0>>1 through s6x0_i2c_transfer().
 * Returns 0 on success, -1 on a failed eeprom read.
 */
static int s6x0_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
{
	int i, ret;
	u8 ibuf[] = { 0 }, obuf[] = { 0 };
	u8 eeprom[256], eepromline[16];
	struct i2c_msg msg[] = {
		{
			.addr = 0xa0 >> 1,
			.flags = 0,
			.buf = obuf,
			.len = 1,
		}, {
			.addr = 0xa0 >> 1,
			.flags = I2C_M_RD,
			.buf = ibuf,
			.len = 1,
		}
	};

	for (i = 0; i < 256; i++) {
		obuf[0] = i;
		ret = s6x0_i2c_transfer(&d->i2c_adap, msg, 2);
		if (ret != 2) {
			err("read eeprom failed.");
			return -1;
		} else {
			eepromline[i % 16] = ibuf[0];
			eeprom[i] = ibuf[0];
		}

		if ((i % 16) == 15) {
deb_xfer("%02x: ", i - 15); debug_dump(eepromline, 16, deb_xfer); } } memcpy(mac, eeprom + 16, 6); return 0; }; static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { static u8 command_start[] = {0x00}; static u8 command_stop[] = {0x01}; struct i2c_msg msg = { .addr = SU3000_STREAM_CTRL, .flags = 0, .buf = onoff ? command_start : command_stop, .len = 1 }; i2c_transfer(&adap->dev->i2c_adap, &msg, 1); return 0; } static int su3000_power_ctrl(struct dvb_usb_device *d, int i) { struct su3000_state *state = (struct su3000_state *)d->priv; u8 obuf[] = {0xde, 0}; info("%s: %d, initialized %d\n", __func__, i, state->initialized); if (i && !state->initialized) { state->initialized = 1; /* reset board */ dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0); } return 0; } static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) { int i; u8 obuf[] = { 0x1f, 0xf0 }; u8 ibuf[] = { 0 }; struct i2c_msg msg[] = { { .addr = 0x51, .flags = 0, .buf = obuf, .len = 2, }, { .addr = 0x51, .flags = I2C_M_RD, .buf = ibuf, .len = 1, } }; for (i = 0; i < 6; i++) { obuf[1] = 0xf0 + i; if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) break; else mac[i] = ibuf[0]; debug_dump(mac, 6, printk); } return 0; } static int su3000_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { info("%s\n", __func__); *cold = 0; return 0; } static int dw210x_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { static u8 command_13v[] = {0x00, 0x01}; static u8 command_18v[] = {0x01, 0x01}; static u8 command_off[] = {0x00, 0x00}; struct i2c_msg msg = { .addr = DW2102_VOLTAGE_CTRL, .flags = 0, .buf = command_off, .len = 2, }; struct dvb_usb_adapter *udev_adap = (struct dvb_usb_adapter *)(fe->dvb->priv); if (voltage == SEC_VOLTAGE_18) msg.buf = command_18v; else if (voltage == SEC_VOLTAGE_13) msg.buf = command_13v; i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1); return 0; } static int 
s660_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { struct dvb_usb_adapter *d = (struct dvb_usb_adapter *)(fe->dvb->priv); struct s6x0_state *st = (struct s6x0_state *)d->dev->priv; dw210x_set_voltage(fe, voltage); if (st->old_set_voltage) st->old_set_voltage(fe, voltage); return 0; } static void dw210x_led_ctrl(struct dvb_frontend *fe, int offon) { static u8 led_off[] = { 0 }; static u8 led_on[] = { 1 }; struct i2c_msg msg = { .addr = DW2102_LED_CTRL, .flags = 0, .buf = led_off, .len = 1 }; struct dvb_usb_adapter *udev_adap = (struct dvb_usb_adapter *)(fe->dvb->priv); if (offon) msg.buf = led_on; i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1); } static struct stv0299_config sharp_z0194a_config = { .demod_address = 0x68, .inittab = sharp_z0194a_inittab, .mclk = 88000000UL, .invert = 1, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = sharp_z0194a_set_symbol_rate, }; static struct cx24116_config dw2104_config = { .demod_address = 0x55, .mpg_clk_pos_pol = 0x01, }; static struct si21xx_config serit_sp1511lhb_config = { .demod_address = 0x68, .min_delay_ms = 100, }; static struct tda10023_config dw3101_tda10023_config = { .demod_address = 0x0c, .invert = 1, }; static struct mt312_config zl313_config = { .demod_address = 0x0e, }; static struct ds3000_config dw2104_ds3000_config = { .demod_address = 0x68, }; static struct stv0900_config dw2104a_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ .diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, }; static struct stb6100_config dw2104a_stb6100_config = { .tuner_address = 0x60, .refclock = 27000000, }; static struct stv0900_config dw2104_stv0900_config = { .demod_address = 0x68, .demod_mode = 0, .xtal = 8000000, .clkmode = 3, .diseqc_mode = 2, .tun1_maddress = 0, .tun1_adc = 1,/* 1 Vpp */ 
.path1_mode = 3, }; static struct stv6110_config dw2104_stv6110_config = { .i2c_address = 0x60, .mclk = 16000000, .clk_div = 1, }; static struct stv0900_config prof_7500_stv0900_config = { .demod_address = 0x6a, .demod_mode = 0, .xtal = 27000000, .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */ .diseqc_mode = 2,/* 2/3 PWM */ .tun1_maddress = 0,/* 0x60 */ .tun1_adc = 0,/* 2 Vpp */ .path1_mode = 3, .tun1_type = 3, .set_lock_led = dw210x_led_ctrl, }; static struct ds3000_config su3000_ds3000_config = { .demod_address = 0x68, .ci_mode = 1, }; static int dw2104_frontend_attach(struct dvb_usb_adapter *d) { struct dvb_tuner_ops *tuner_ops = NULL; if (demod_probe & 4) { d->fe_adap[0].fe = dvb_attach(stv0900_attach, &dw2104a_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe_adap[0].fe != NULL) { if (dvb_attach(stb6100_attach, d->fe_adap[0].fe, &dw2104a_stb6100_config, &d->dev->i2c_adap)) { tuner_ops = &d->fe_adap[0].fe->ops.tuner_ops; tuner_ops->set_frequency = stb6100_set_freq; tuner_ops->get_frequency = stb6100_get_freq; tuner_ops->set_bandwidth = stb6100_set_bandw; tuner_ops->get_bandwidth = stb6100_get_bandw; d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STB6100!\n"); return 0; } } } if (demod_probe & 2) { d->fe_adap[0].fe = dvb_attach(stv0900_attach, &dw2104_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe_adap[0].fe != NULL) { if (dvb_attach(stv6110_attach, d->fe_adap[0].fe, &dw2104_stv6110_config, &d->dev->i2c_adap)) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached STV0900+STV6110A!\n"); return 0; } } } if (demod_probe & 1) { d->fe_adap[0].fe = dvb_attach(cx24116_attach, &dw2104_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached cx24116!\n"); return 0; } } d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; 
info("Attached DS3000!\n"); return 0; } return -EIO; } static struct dvb_usb_device_properties dw2102_properties; static struct dvb_usb_device_properties dw2104_properties; static struct dvb_usb_device_properties s6x0_properties; static int dw2102_frontend_attach(struct dvb_usb_adapter *d) { if (dw2102_properties.i2c_algo == &dw2102_serit_i2c_algo) { /*dw2102_properties.adapter->tuner_attach = NULL;*/ d->fe_adap[0].fe = dvb_attach(si21xx_attach, &serit_sp1511lhb_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached si21xx!\n"); return 0; } } if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) { d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { if (dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61, &d->dev->i2c_adap)) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0288!\n"); return 0; } } } if (dw2102_properties.i2c_algo == &dw2102_i2c_algo) { /*dw2102_properties.adapter->tuner_attach = dw2102_tuner_attach;*/ d->fe_adap[0].fe = dvb_attach(stv0299_attach, &sharp_z0194a_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached stv0299!\n"); return 0; } } return -EIO; } static int dw3101_frontend_attach(struct dvb_usb_adapter *d) { d->fe_adap[0].fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config, &d->dev->i2c_adap, 0x48); if (d->fe_adap[0].fe != NULL) { info("Attached tda10023!\n"); return 0; } return -EIO; } static int zl100313_frontend_attach(struct dvb_usb_adapter *d) { d->fe_adap[0].fe = dvb_attach(mt312_attach, &zl313_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe != NULL) { if (dvb_attach(zl10039_attach, d->fe_adap[0].fe, 0x60, &d->dev->i2c_adap)) { d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; info("Attached zl100313+zl10039!\n"); return 0; } } return -EIO; } static int stv0288_frontend_attach(struct 
dvb_usb_adapter *d) { u8 obuf[] = {7, 1}; d->fe_adap[0].fe = dvb_attach(stv0288_attach, &earda_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe == NULL) return -EIO; if (NULL == dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61, &d->dev->i2c_adap)) return -EIO; d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); info("Attached stv0288+stb6000!\n"); return 0; } static int ds3000_frontend_attach(struct dvb_usb_adapter *d) { struct s6x0_state *st = (struct s6x0_state *)d->dev->priv; u8 obuf[] = {7, 1}; d->fe_adap[0].fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config, &d->dev->i2c_adap); if (d->fe_adap[0].fe == NULL) return -EIO; st->old_set_voltage = d->fe_adap[0].fe->ops.set_voltage; d->fe_adap[0].fe->ops.set_voltage = s660_set_voltage; dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); info("Attached ds3000+ds2020!\n"); return 0; } static int prof_7500_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[] = {7, 1}; d->fe_adap[0].fe = dvb_attach(stv0900_attach, &prof_7500_stv0900_config, &d->dev->i2c_adap, 0); if (d->fe_adap[0].fe == NULL) return -EIO; d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage; dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG); info("Attached STV0900+STB6100A!\n"); return 0; } static int su3000_frontend_attach(struct dvb_usb_adapter *d) { u8 obuf[3] = { 0xe, 0x80, 0 }; u8 ibuf[] = { 0 }; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0xe; obuf[1] = 0x83; obuf[2] = 0; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0xe; obuf[1] = 0x83; obuf[2] = 1; if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0) err("command 0x0e transfer failed."); obuf[0] = 0x51; if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0) err("command 0x51 transfer failed."); d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config, 
&d->dev->i2c_adap); if (d->fe_adap[0].fe == NULL) return -EIO; info("Attached DS3000!\n"); return 0; } static int dw2102_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_OPERA1); return 0; } static int dw3101_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_TUA6034); return 0; } static struct rc_map_table rc_map_dw210x_table[] = { { 0xf80a, KEY_POWER2 }, /*power*/ { 0xf80c, KEY_MUTE }, /*mute*/ { 0xf811, KEY_1 }, { 0xf812, KEY_2 }, { 0xf813, KEY_3 }, { 0xf814, KEY_4 }, { 0xf815, KEY_5 }, { 0xf816, KEY_6 }, { 0xf817, KEY_7 }, { 0xf818, KEY_8 }, { 0xf819, KEY_9 }, { 0xf810, KEY_0 }, { 0xf81c, KEY_CHANNELUP }, /*ch+*/ { 0xf80f, KEY_CHANNELDOWN }, /*ch-*/ { 0xf81a, KEY_VOLUMEUP }, /*vol+*/ { 0xf80e, KEY_VOLUMEDOWN }, /*vol-*/ { 0xf804, KEY_RECORD }, /*rec*/ { 0xf809, KEY_FAVORITES }, /*fav*/ { 0xf808, KEY_REWIND }, /*rewind*/ { 0xf807, KEY_FASTFORWARD }, /*fast*/ { 0xf80b, KEY_PAUSE }, /*pause*/ { 0xf802, KEY_ESC }, /*cancel*/ { 0xf803, KEY_TAB }, /*tab*/ { 0xf800, KEY_UP }, /*up*/ { 0xf81f, KEY_OK }, /*ok*/ { 0xf801, KEY_DOWN }, /*down*/ { 0xf805, KEY_CAMERA }, /*cap*/ { 0xf806, KEY_STOP }, /*stop*/ { 0xf840, KEY_ZOOM }, /*full*/ { 0xf81e, KEY_TV }, /*tvmode*/ { 0xf81b, KEY_LAST }, /*recall*/ }; static struct rc_map_table rc_map_tevii_table[] = { { 0xf80a, KEY_POWER }, { 0xf80c, KEY_MUTE }, { 0xf811, KEY_1 }, { 0xf812, KEY_2 }, { 0xf813, KEY_3 }, { 0xf814, KEY_4 }, { 0xf815, KEY_5 }, { 0xf816, KEY_6 }, { 0xf817, KEY_7 }, { 0xf818, KEY_8 }, { 0xf819, KEY_9 }, { 0xf810, KEY_0 }, { 0xf81c, KEY_MENU }, { 0xf80f, KEY_VOLUMEDOWN }, { 0xf81a, KEY_LAST }, { 0xf80e, KEY_OPEN }, { 0xf804, KEY_RECORD }, { 0xf809, KEY_VOLUMEUP }, { 0xf808, KEY_CHANNELUP }, { 0xf807, KEY_PVR }, { 0xf80b, KEY_TIME }, { 0xf802, KEY_RIGHT }, { 0xf803, KEY_LEFT }, { 0xf800, KEY_UP }, { 0xf81f, KEY_OK }, { 0xf801, KEY_DOWN }, { 0xf805, KEY_TUNER 
}, { 0xf806, KEY_CHANNELDOWN }, { 0xf840, KEY_PLAYPAUSE }, { 0xf81e, KEY_REWIND }, { 0xf81b, KEY_FAVORITES }, { 0xf81d, KEY_BACK }, { 0xf84d, KEY_FASTFORWARD }, { 0xf844, KEY_EPG }, { 0xf84c, KEY_INFO }, { 0xf841, KEY_AB }, { 0xf843, KEY_AUDIO }, { 0xf845, KEY_SUBTITLE }, { 0xf84a, KEY_LIST }, { 0xf846, KEY_F1 }, { 0xf847, KEY_F2 }, { 0xf85e, KEY_F3 }, { 0xf85c, KEY_F4 }, { 0xf852, KEY_F5 }, { 0xf85a, KEY_F6 }, { 0xf856, KEY_MODE }, { 0xf858, KEY_SWITCHVIDEOMODE }, }; static struct rc_map_table rc_map_tbs_table[] = { { 0xf884, KEY_POWER }, { 0xf894, KEY_MUTE }, { 0xf887, KEY_1 }, { 0xf886, KEY_2 }, { 0xf885, KEY_3 }, { 0xf88b, KEY_4 }, { 0xf88a, KEY_5 }, { 0xf889, KEY_6 }, { 0xf88f, KEY_7 }, { 0xf88e, KEY_8 }, { 0xf88d, KEY_9 }, { 0xf892, KEY_0 }, { 0xf896, KEY_CHANNELUP }, { 0xf891, KEY_CHANNELDOWN }, { 0xf893, KEY_VOLUMEUP }, { 0xf88c, KEY_VOLUMEDOWN }, { 0xf883, KEY_RECORD }, { 0xf898, KEY_PAUSE }, { 0xf899, KEY_OK }, { 0xf89a, KEY_SHUFFLE }, { 0xf881, KEY_UP }, { 0xf890, KEY_LEFT }, { 0xf882, KEY_RIGHT }, { 0xf888, KEY_DOWN }, { 0xf895, KEY_FAVORITES }, { 0xf897, KEY_SUBTITLE }, { 0xf89d, KEY_ZOOM }, { 0xf89f, KEY_EXIT }, { 0xf89e, KEY_MENU }, { 0xf89c, KEY_EPG }, { 0xf880, KEY_PREVIOUS }, { 0xf89b, KEY_MODE } }; static struct rc_map_table rc_map_su3000_table[] = { { 0x25, KEY_POWER }, /* right-bottom Red */ { 0x0a, KEY_MUTE }, /* -/-- */ { 0x01, KEY_1 }, { 0x02, KEY_2 }, { 0x03, KEY_3 }, { 0x04, KEY_4 }, { 0x05, KEY_5 }, { 0x06, KEY_6 }, { 0x07, KEY_7 }, { 0x08, KEY_8 }, { 0x09, KEY_9 }, { 0x00, KEY_0 }, { 0x20, KEY_UP }, /* CH+ */ { 0x21, KEY_DOWN }, /* CH+ */ { 0x12, KEY_VOLUMEUP }, /* Brightness Up */ { 0x13, KEY_VOLUMEDOWN },/* Brightness Down */ { 0x1f, KEY_RECORD }, { 0x17, KEY_PLAY }, { 0x16, KEY_PAUSE }, { 0x0b, KEY_STOP }, { 0x27, KEY_FASTFORWARD },/* >> */ { 0x26, KEY_REWIND }, /* << */ { 0x0d, KEY_OK }, /* Mute */ { 0x11, KEY_LEFT }, /* VOL- */ { 0x10, KEY_RIGHT }, /* VOL+ */ { 0x29, KEY_BACK }, /* button under 9 */ { 0x2c, KEY_MENU }, /* TTX */ { 
0x2b, KEY_EPG }, /* EPG */ { 0x1e, KEY_RED }, /* OSD */ { 0x0e, KEY_GREEN }, /* Window */ { 0x2d, KEY_YELLOW }, /* button under << */ { 0x0f, KEY_BLUE }, /* bottom yellow button */ { 0x14, KEY_AUDIO }, /* Snapshot */ { 0x38, KEY_TV }, /* TV/Radio */ { 0x0c, KEY_ESC } /* upper Red button */ }; static struct rc_map_dvb_usb_table_table keys_tables[] = { { rc_map_dw210x_table, ARRAY_SIZE(rc_map_dw210x_table) }, { rc_map_tevii_table, ARRAY_SIZE(rc_map_tevii_table) }, { rc_map_tbs_table, ARRAY_SIZE(rc_map_tbs_table) }, { rc_map_su3000_table, ARRAY_SIZE(rc_map_su3000_table) }, }; static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { struct rc_map_table *keymap = d->props.rc.legacy.rc_map_table; int keymap_size = d->props.rc.legacy.rc_map_size; u8 key[2]; struct i2c_msg msg = { .addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key, .len = 2 }; int i; /* override keymap */ if ((ir_keymap > 0) && (ir_keymap <= ARRAY_SIZE(keys_tables))) { keymap = keys_tables[ir_keymap - 1].rc_keys ; keymap_size = keys_tables[ir_keymap - 1].rc_keys_size; } else if (ir_keymap > ARRAY_SIZE(keys_tables)) return 0; /* none */ *state = REMOTE_NO_KEY_PRESSED; if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) { for (i = 0; i < keymap_size ; i++) { if (rc5_data(&keymap[i]) == msg.buf[0]) { *state = REMOTE_KEY_PRESSED; *event = keymap[i].keycode; break; } } if ((*state) == REMOTE_KEY_PRESSED) deb_rc("%s: found rc key: %x, %x, event: %x\n", __func__, key[0], key[1], (*event)); else if (key[0] != 0xff) deb_rc("%s: unknown rc key: %x, %x\n", __func__, key[0], key[1]); } return 0; } enum dw2102_table_entry { CYPRESS_DW2102, CYPRESS_DW2101, CYPRESS_DW2104, TEVII_S650, TERRATEC_CINERGY_S, CYPRESS_DW3101, TEVII_S630, PROF_1100, TEVII_S660, PROF_7500, GENIATECH_SU3000, TERRATEC_CINERGY_S2, TEVII_S480_1, TEVII_S480_2, X3M_SPC1400HD, }; static struct usb_device_id dw2102_table[] = { [CYPRESS_DW2102] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2102)}, [CYPRESS_DW2101] = 
{USB_DEVICE(USB_VID_CYPRESS, 0x2101)}, [CYPRESS_DW2104] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2104)}, [TEVII_S650] = {USB_DEVICE(0x9022, USB_PID_TEVII_S650)}, [TERRATEC_CINERGY_S] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)}, [CYPRESS_DW3101] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)}, [TEVII_S630] = {USB_DEVICE(0x9022, USB_PID_TEVII_S630)}, [PROF_1100] = {USB_DEVICE(0x3011, USB_PID_PROF_1100)}, [TEVII_S660] = {USB_DEVICE(0x9022, USB_PID_TEVII_S660)}, [PROF_7500] = {USB_DEVICE(0x3034, 0x7500)}, [GENIATECH_SU3000] = {USB_DEVICE(0x1f4d, 0x3000)}, [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00a8)}, [TEVII_S480_1] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_1)}, [TEVII_S480_2] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_2)}, [X3M_SPC1400HD] = {USB_DEVICE(0x1f4d, 0x3100)}, { } }; MODULE_DEVICE_TABLE(usb, dw2102_table); static int dw2102_load_firmware(struct usb_device *dev, const struct firmware *frmwr) { u8 *b, *p; int ret = 0, i; u8 reset; u8 reset16[] = {0, 0, 0, 0, 0, 0, 0}; const struct firmware *fw; const char *fw_2101 = "dvb-usb-dw2101.fw"; switch (dev->descriptor.idProduct) { case 0x2101: ret = request_firmware(&fw, fw_2101, &dev->dev); if (ret != 0) { err(err_str, fw_2101); return ret; } break; default: fw = frmwr; break; } info("start downloading DW210X firmware"); p = kmalloc(fw->size, GFP_KERNEL); reset = 1; /*stop the CPU*/ dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, DW210X_WRITE_MSG); if (p != NULL) { memcpy(p, fw->data, fw->size); for (i = 0; i < fw->size; i += 0x40) { b = (u8 *) p + i; if (dw210x_op_rw(dev, 0xa0, i, 0, b , 0x40, DW210X_WRITE_MSG) != 0x40) { err("error while transferring firmware"); ret = -EINVAL; break; } } /* restart the CPU */ reset = 0; if (ret || dw210x_op_rw(dev, 0xa0, 0x7f92, 0, &reset, 1, DW210X_WRITE_MSG) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } if (ret || dw210x_op_rw(dev, 0xa0, 0xe600, 0, &reset, 1, 
DW210X_WRITE_MSG) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } /* init registers */ switch (dev->descriptor.idProduct) { case USB_PID_TEVII_S650: dw2104_properties.rc.legacy.rc_map_table = rc_map_tevii_table; dw2104_properties.rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tevii_table); case USB_PID_DW2104: reset = 1; dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1, DW210X_WRITE_MSG); /* break omitted intentionally */ case USB_PID_DW3101: reset = 0; dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, DW210X_WRITE_MSG); break; case USB_PID_CINERGY_S: case USB_PID_DW2102: dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2, DW210X_READ_MSG); /* check STV0299 frontend */ dw210x_op_rw(dev, 0xb5, 0, 0, &reset16[0], 2, DW210X_READ_MSG); if ((reset16[0] == 0xa1) || (reset16[0] == 0x80)) { dw2102_properties.i2c_algo = &dw2102_i2c_algo; dw2102_properties.adapter->fe[0].tuner_attach = &dw2102_tuner_attach; break; } else { /* check STV0288 frontend */ reset16[0] = 0xd0; reset16[1] = 1; reset16[2] = 0; dw210x_op_rw(dev, 0xc2, 0, 0, &reset16[0], 3, DW210X_WRITE_MSG); dw210x_op_rw(dev, 0xc3, 0xd1, 0, &reset16[0], 3, DW210X_READ_MSG); if (reset16[2] == 0x11) { dw2102_properties.i2c_algo = &dw2102_earda_i2c_algo; break; } } case 0x2101: dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2, DW210X_READ_MSG); dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7, DW210X_READ_MSG); dw210x_op_rw(dev, 0xba, 0x0000, 0, &reset16[0], 7, DW210X_READ_MSG); dw210x_op_rw(dev, 0xb9, 0x0000, 0, &reset16[0], 2, DW210X_READ_MSG); break; } msleep(100); kfree(p); } return ret; } static struct dvb_usb_device_properties dw2102_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dw2102.fw", .no_reconnect = 1, .i2c_algo = &dw2102_serit_i2c_algo, .rc.legacy = { .rc_map_table = rc_map_dw210x_table, .rc_map_size = ARRAY_SIZE(rc_map_dw210x_table), .rc_interval = 150, .rc_query = 
dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = dw2102_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 3, .devices = { {"DVBWorld DVB-S 2102 USB2.0", {&dw2102_table[CYPRESS_DW2102], NULL}, {NULL}, }, {"DVBWorld DVB-S 2101 USB2.0", {&dw2102_table[CYPRESS_DW2101], NULL}, {NULL}, }, {"TerraTec Cinergy S USB", {&dw2102_table[TERRATEC_CINERGY_S], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties dw2104_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dw2104.fw", .no_reconnect = 1, .i2c_algo = &dw2104_i2c_algo, .rc.legacy = { .rc_map_table = rc_map_dw210x_table, .rc_map_size = ARRAY_SIZE(rc_map_dw210x_table), .rc_interval = 150, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = dw2104_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 2, .devices = { { "DVBWorld DW2104 USB2.0", {&dw2102_table[CYPRESS_DW2104], NULL}, {NULL}, }, { "TeVii S650 USB2.0", {&dw2102_table[TEVII_S650], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties dw3101_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dw3101.fw", .no_reconnect = 1, .i2c_algo = &dw3101_i2c_algo, .rc.legacy = { .rc_map_table = rc_map_dw210x_table, .rc_map_size = ARRAY_SIZE(rc_map_dw210x_table), .rc_interval = 150, .rc_query = dw2102_rc_query, }, 
.generic_bulk_ctrl_endpoint = 0x81, /* parameter for the MPEG2-data transfer */ .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = dw210x_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = dw3101_frontend_attach, .tuner_attach = dw3101_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 1, .devices = { { "DVBWorld DVB-C 3101 USB2.0", {&dw2102_table[CYPRESS_DW3101], NULL}, {NULL}, }, } }; static struct dvb_usb_device_properties s6x0_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct s6x0_state), .firmware = "dvb-usb-s630.fw", .no_reconnect = 1, .i2c_algo = &s6x0_i2c_algo, .rc.legacy = { .rc_map_table = rc_map_tevii_table, .rc_map_size = ARRAY_SIZE(rc_map_tevii_table), .rc_interval = 150, .rc_query = dw2102_rc_query, }, .generic_bulk_ctrl_endpoint = 0x81, .num_adapters = 1, .download_firmware = dw2102_load_firmware, .read_mac_address = s6x0_read_mac_address, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = zl100313_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .num_device_descs = 1, .devices = { {"TeVii S630 USB", {&dw2102_table[TEVII_S630], NULL}, {NULL}, }, } }; struct dvb_usb_device_properties *p1100; static struct dvb_usb_device_description d1100 = { "Prof 1100 USB ", {&dw2102_table[PROF_1100], NULL}, {NULL}, }; struct dvb_usb_device_properties *s660; static struct dvb_usb_device_description d660 = { "TeVii S660 USB", {&dw2102_table[TEVII_S660], NULL}, {NULL}, }; static struct dvb_usb_device_description d480_1 = { "TeVii S480.1 USB", {&dw2102_table[TEVII_S480_1], NULL}, {NULL}, }; static struct dvb_usb_device_description d480_2 = { "TeVii S480.2 USB", {&dw2102_table[TEVII_S480_2], NULL}, {NULL}, }; struct dvb_usb_device_properties *p7500; static struct 
dvb_usb_device_description d7500 = { "Prof 7500 USB DVB-S2", {&dw2102_table[PROF_7500], NULL}, {NULL}, }; static struct dvb_usb_device_properties su3000_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .size_of_priv = sizeof(struct su3000_state), .power_ctrl = su3000_power_ctrl, .num_adapters = 1, .identify_state = su3000_identify_state, .i2c_algo = &su3000_i2c_algo, .rc.legacy = { .rc_map_table = rc_map_su3000_table, .rc_map_size = ARRAY_SIZE(rc_map_su3000_table), .rc_interval = 150, .rc_query = dw2102_rc_query, }, .read_mac_address = su3000_read_mac_address, .generic_bulk_ctrl_endpoint = 0x01, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = su3000_streaming_ctrl, .frontend_attach = su3000_frontend_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 4096, } } } }}, } }, .num_device_descs = 3, .devices = { { "SU3000HD DVB-S USB2.0", { &dw2102_table[GENIATECH_SU3000], NULL }, { NULL }, }, { "Terratec Cinergy S2 USB HD", { &dw2102_table[TERRATEC_CINERGY_S2], NULL }, { NULL }, }, { "X3M TV SPC1400HD PCI", { &dw2102_table[X3M_SPC1400HD], NULL }, { NULL }, }, } }; static int dw2102_probe(struct usb_interface *intf, const struct usb_device_id *id) { p1100 = kmemdup(&s6x0_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!p1100) return -ENOMEM; /* copy default structure */ /* fill only different fields */ p1100->firmware = "dvb-usb-p1100.fw"; p1100->devices[0] = d1100; p1100->rc.legacy.rc_map_table = rc_map_tbs_table; p1100->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table); p1100->adapter->fe[0].frontend_attach = stv0288_frontend_attach; s660 = kmemdup(&s6x0_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!s660) { kfree(p1100); return -ENOMEM; } s660->firmware = "dvb-usb-s660.fw"; s660->num_device_descs = 3; s660->devices[0] = d660; s660->devices[1] = d480_1; s660->devices[2] = d480_2; s660->adapter->fe[0].frontend_attach = 
ds3000_frontend_attach; p7500 = kmemdup(&s6x0_properties, sizeof(struct dvb_usb_device_properties), GFP_KERNEL); if (!p7500) { kfree(p1100); kfree(s660); return -ENOMEM; } p7500->firmware = "dvb-usb-p7500.fw"; p7500->devices[0] = d7500; p7500->rc.legacy.rc_map_table = rc_map_tbs_table; p7500->rc.legacy.rc_map_size = ARRAY_SIZE(rc_map_tbs_table); p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach; if (0 == dvb_usb_device_init(intf, &dw2102_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &dw2104_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &dw3101_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &s6x0_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, p1100, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, s660, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, p7500, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &su3000_properties, THIS_MODULE, NULL, adapter_nr)) return 0; return -ENODEV; } static struct usb_driver dw2102_driver = { .name = "dw2102", .probe = dw2102_probe, .disconnect = dvb_usb_device_exit, .id_table = dw2102_table, }; module_usb_driver(dw2102_driver); MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by"); MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104," " DVB-C 3101 USB2.0," " TeVii S600, S630, S650, S660, S480," " Prof 1100, 7500 USB2.0," " Geniatech SU3000 devices"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL");
gpl-2.0
Kurre/kernel_exynos_KK
drivers/net/wireless/b43/bus.c
5152
7022
/* Broadcom B43 wireless driver Bus abstraction layer Copyright (c) 2011 Rafał Miłecki <zajec5@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "bus.h" /* BCMA */ #ifdef CONFIG_B43_BCMA static int b43_bus_bcma_bus_may_powerdown(struct b43_bus_dev *dev) { return 0; /* bcma_bus_may_powerdown(dev->bdev->bus); */ } static int b43_bus_bcma_bus_powerup(struct b43_bus_dev *dev, bool dynamic_pctl) { return 0; /* bcma_bus_powerup(dev->sdev->bus, dynamic_pctl); */ } static int b43_bus_bcma_device_is_enabled(struct b43_bus_dev *dev) { return bcma_core_is_enabled(dev->bdev); } static void b43_bus_bcma_device_enable(struct b43_bus_dev *dev, u32 core_specific_flags) { bcma_core_enable(dev->bdev, core_specific_flags); } static void b43_bus_bcma_device_disable(struct b43_bus_dev *dev, u32 core_specific_flags) { bcma_core_disable(dev->bdev, core_specific_flags); } static u16 b43_bus_bcma_read16(struct b43_bus_dev *dev, u16 offset) { return bcma_read16(dev->bdev, offset); } static u32 b43_bus_bcma_read32(struct b43_bus_dev *dev, u16 offset) { return bcma_read32(dev->bdev, offset); } static void b43_bus_bcma_write16(struct b43_bus_dev *dev, u16 offset, u16 value) { bcma_write16(dev->bdev, offset, value); } static void b43_bus_bcma_write32(struct b43_bus_dev *dev, u16 offset, u32 value) { 
bcma_write32(dev->bdev, offset, value); } static void b43_bus_bcma_block_read(struct b43_bus_dev *dev, void *buffer, size_t count, u16 offset, u8 reg_width) { bcma_block_read(dev->bdev, buffer, count, offset, reg_width); } static void b43_bus_bcma_block_write(struct b43_bus_dev *dev, const void *buffer, size_t count, u16 offset, u8 reg_width) { bcma_block_write(dev->bdev, buffer, count, offset, reg_width); } struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core) { struct b43_bus_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; dev->bus_type = B43_BUS_BCMA; dev->bdev = core; dev->bus_may_powerdown = b43_bus_bcma_bus_may_powerdown; dev->bus_powerup = b43_bus_bcma_bus_powerup; dev->device_is_enabled = b43_bus_bcma_device_is_enabled; dev->device_enable = b43_bus_bcma_device_enable; dev->device_disable = b43_bus_bcma_device_disable; dev->read16 = b43_bus_bcma_read16; dev->read32 = b43_bus_bcma_read32; dev->write16 = b43_bus_bcma_write16; dev->write32 = b43_bus_bcma_write32; dev->block_read = b43_bus_bcma_block_read; dev->block_write = b43_bus_bcma_block_write; dev->dev = &core->dev; dev->dma_dev = core->dma_dev; dev->irq = core->irq; /* dev->board_vendor = core->bus->boardinfo.vendor; dev->board_type = core->bus->boardinfo.type; dev->board_rev = core->bus->boardinfo.rev; */ dev->chip_id = core->bus->chipinfo.id; dev->chip_rev = core->bus->chipinfo.rev; dev->chip_pkg = core->bus->chipinfo.pkg; dev->bus_sprom = &core->bus->sprom; dev->core_id = core->id.id; dev->core_rev = core->id.rev; return dev; } #endif /* CONFIG_B43_BCMA */ /* SSB */ #ifdef CONFIG_B43_SSB static int b43_bus_ssb_bus_may_powerdown(struct b43_bus_dev *dev) { return ssb_bus_may_powerdown(dev->sdev->bus); } static int b43_bus_ssb_bus_powerup(struct b43_bus_dev *dev, bool dynamic_pctl) { return ssb_bus_powerup(dev->sdev->bus, dynamic_pctl); } static int b43_bus_ssb_device_is_enabled(struct b43_bus_dev *dev) { return ssb_device_is_enabled(dev->sdev); } static void 
b43_bus_ssb_device_enable(struct b43_bus_dev *dev, u32 core_specific_flags) { ssb_device_enable(dev->sdev, core_specific_flags); } static void b43_bus_ssb_device_disable(struct b43_bus_dev *dev, u32 core_specific_flags) { ssb_device_disable(dev->sdev, core_specific_flags); } static u16 b43_bus_ssb_read16(struct b43_bus_dev *dev, u16 offset) { return ssb_read16(dev->sdev, offset); } static u32 b43_bus_ssb_read32(struct b43_bus_dev *dev, u16 offset) { return ssb_read32(dev->sdev, offset); } static void b43_bus_ssb_write16(struct b43_bus_dev *dev, u16 offset, u16 value) { ssb_write16(dev->sdev, offset, value); } static void b43_bus_ssb_write32(struct b43_bus_dev *dev, u16 offset, u32 value) { ssb_write32(dev->sdev, offset, value); } static void b43_bus_ssb_block_read(struct b43_bus_dev *dev, void *buffer, size_t count, u16 offset, u8 reg_width) { ssb_block_read(dev->sdev, buffer, count, offset, reg_width); } static void b43_bus_ssb_block_write(struct b43_bus_dev *dev, const void *buffer, size_t count, u16 offset, u8 reg_width) { ssb_block_write(dev->sdev, buffer, count, offset, reg_width); } struct b43_bus_dev *b43_bus_dev_ssb_init(struct ssb_device *sdev) { struct b43_bus_dev *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; dev->bus_type = B43_BUS_SSB; dev->sdev = sdev; dev->bus_may_powerdown = b43_bus_ssb_bus_may_powerdown; dev->bus_powerup = b43_bus_ssb_bus_powerup; dev->device_is_enabled = b43_bus_ssb_device_is_enabled; dev->device_enable = b43_bus_ssb_device_enable; dev->device_disable = b43_bus_ssb_device_disable; dev->read16 = b43_bus_ssb_read16; dev->read32 = b43_bus_ssb_read32; dev->write16 = b43_bus_ssb_write16; dev->write32 = b43_bus_ssb_write32; dev->block_read = b43_bus_ssb_block_read; dev->block_write = b43_bus_ssb_block_write; dev->dev = sdev->dev; dev->dma_dev = sdev->dma_dev; dev->irq = sdev->irq; dev->board_vendor = sdev->bus->boardinfo.vendor; dev->board_type = sdev->bus->boardinfo.type; dev->board_rev = 
sdev->bus->boardinfo.rev; dev->chip_id = sdev->bus->chip_id; dev->chip_rev = sdev->bus->chip_rev; dev->chip_pkg = sdev->bus->chip_package; dev->bus_sprom = &sdev->bus->sprom; dev->core_id = sdev->id.coreid; dev->core_rev = sdev->id.revision; return dev; } #endif /* CONFIG_B43_SSB */ void *b43_bus_get_wldev(struct b43_bus_dev *dev) { switch (dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: return bcma_get_drvdata(dev->bdev); #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: return ssb_get_drvdata(dev->sdev); #endif } return NULL; } void b43_bus_set_wldev(struct b43_bus_dev *dev, void *wldev) { switch (dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: bcma_set_drvdata(dev->bdev, wldev); break; #endif #ifdef CONFIG_B43_SSB case B43_BUS_SSB: ssb_set_drvdata(dev->sdev, wldev); break; #endif } }
gpl-2.0
mahirkukreja/delos3geurkernel
drivers/gpu/drm/ttm/ttm_memory.c
5408
14273
/************************************************************************** * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. 
* **************************************************************************/ #define pr_fmt(fmt) "[TTM] " fmt #include "ttm/ttm_memory.h" #include "ttm/ttm_module.h" #include "ttm/ttm_page_alloc.h" #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/slab.h> #define TTM_MEMORY_ALLOC_RETRIES 4 struct ttm_mem_zone { struct kobject kobj; struct ttm_mem_global *glob; const char *name; uint64_t zone_mem; uint64_t emer_mem; uint64_t max_mem; uint64_t swap_limit; uint64_t used_mem; }; static struct attribute ttm_mem_sys = { .name = "zone_memory", .mode = S_IRUGO }; static struct attribute ttm_mem_emer = { .name = "emergency_memory", .mode = S_IRUGO | S_IWUSR }; static struct attribute ttm_mem_max = { .name = "available_memory", .mode = S_IRUGO | S_IWUSR }; static struct attribute ttm_mem_swap = { .name = "swap_limit", .mode = S_IRUGO | S_IWUSR }; static struct attribute ttm_mem_used = { .name = "used_memory", .mode = S_IRUGO }; static void ttm_mem_zone_kobj_release(struct kobject *kobj) { struct ttm_mem_zone *zone = container_of(kobj, struct ttm_mem_zone, kobj); pr_info("Zone %7s: Used memory at exit: %llu kiB\n", zone->name, (unsigned long long)zone->used_mem >> 10); kfree(zone); } static ssize_t ttm_mem_zone_show(struct kobject *kobj, struct attribute *attr, char *buffer) { struct ttm_mem_zone *zone = container_of(kobj, struct ttm_mem_zone, kobj); uint64_t val = 0; spin_lock(&zone->glob->lock); if (attr == &ttm_mem_sys) val = zone->zone_mem; else if (attr == &ttm_mem_emer) val = zone->emer_mem; else if (attr == &ttm_mem_max) val = zone->max_mem; else if (attr == &ttm_mem_swap) val = zone->swap_limit; else if (attr == &ttm_mem_used) val = zone->used_mem; spin_unlock(&zone->glob->lock); return snprintf(buffer, PAGE_SIZE, "%llu\n", (unsigned long long) val >> 10); } static void ttm_check_swapping(struct ttm_mem_global *glob); static ssize_t ttm_mem_zone_store(struct kobject *kobj, 
struct attribute *attr, const char *buffer, size_t size) { struct ttm_mem_zone *zone = container_of(kobj, struct ttm_mem_zone, kobj); int chars; unsigned long val; uint64_t val64; chars = sscanf(buffer, "%lu", &val); if (chars == 0) return size; val64 = val; val64 <<= 10; spin_lock(&zone->glob->lock); if (val64 > zone->zone_mem) val64 = zone->zone_mem; if (attr == &ttm_mem_emer) { zone->emer_mem = val64; if (zone->max_mem > val64) zone->max_mem = val64; } else if (attr == &ttm_mem_max) { zone->max_mem = val64; if (zone->emer_mem < val64) zone->emer_mem = val64; } else if (attr == &ttm_mem_swap) zone->swap_limit = val64; spin_unlock(&zone->glob->lock); ttm_check_swapping(zone->glob); return size; } static struct attribute *ttm_mem_zone_attrs[] = { &ttm_mem_sys, &ttm_mem_emer, &ttm_mem_max, &ttm_mem_swap, &ttm_mem_used, NULL }; static const struct sysfs_ops ttm_mem_zone_ops = { .show = &ttm_mem_zone_show, .store = &ttm_mem_zone_store }; static struct kobj_type ttm_mem_zone_kobj_type = { .release = &ttm_mem_zone_kobj_release, .sysfs_ops = &ttm_mem_zone_ops, .default_attrs = ttm_mem_zone_attrs, }; static void ttm_mem_global_kobj_release(struct kobject *kobj) { struct ttm_mem_global *glob = container_of(kobj, struct ttm_mem_global, kobj); kfree(glob); } static struct kobj_type ttm_mem_glob_kobj_type = { .release = &ttm_mem_global_kobj_release, }; static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, bool from_wq, uint64_t extra) { unsigned int i; struct ttm_mem_zone *zone; uint64_t target; for (i = 0; i < glob->num_zones; ++i) { zone = glob->zones[i]; if (from_wq) target = zone->swap_limit; else if (capable(CAP_SYS_ADMIN)) target = zone->emer_mem; else target = zone->max_mem; target = (extra > target) ? 0ULL : target; if (zone->used_mem > target) return true; } return false; } /** * At this point we only support a single shrink callback. * Extend this if needed, perhaps using a linked list of callbacks. 
* Note that this function is reentrant: * many threads may try to swap out at any given time. */ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, uint64_t extra) { int ret; struct ttm_mem_shrink *shrink; spin_lock(&glob->lock); if (glob->shrink == NULL) goto out; while (ttm_zones_above_swap_target(glob, from_wq, extra)) { shrink = glob->shrink; spin_unlock(&glob->lock); ret = shrink->do_shrink(shrink); spin_lock(&glob->lock); if (unlikely(ret != 0)) goto out; } out: spin_unlock(&glob->lock); } static void ttm_shrink_work(struct work_struct *work) { struct ttm_mem_global *glob = container_of(work, struct ttm_mem_global, work); ttm_shrink(glob, true, 0ULL); } static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, const struct sysinfo *si) { struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); uint64_t mem; int ret; if (unlikely(!zone)) return -ENOMEM; mem = si->totalram - si->totalhigh; mem *= si->mem_unit; zone->name = "kernel"; zone->zone_mem = mem; zone->max_mem = mem >> 1; zone->emer_mem = (mem >> 1) + (mem >> 2); zone->swap_limit = zone->max_mem - (mem >> 3); zone->used_mem = 0; zone->glob = glob; glob->zone_kernel = zone; ret = kobject_init_and_add( &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); if (unlikely(ret != 0)) { kobject_put(&zone->kobj); return ret; } glob->zones[glob->num_zones++] = zone; return 0; } #ifdef CONFIG_HIGHMEM static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, const struct sysinfo *si) { struct ttm_mem_zone *zone; uint64_t mem; int ret; if (si->totalhigh == 0) return 0; zone = kzalloc(sizeof(*zone), GFP_KERNEL); if (unlikely(!zone)) return -ENOMEM; mem = si->totalram; mem *= si->mem_unit; zone->name = "highmem"; zone->zone_mem = mem; zone->max_mem = mem >> 1; zone->emer_mem = (mem >> 1) + (mem >> 2); zone->swap_limit = zone->max_mem - (mem >> 3); zone->used_mem = 0; zone->glob = glob; glob->zone_highmem = zone; ret = kobject_init_and_add( &zone->kobj, 
&ttm_mem_zone_kobj_type, &glob->kobj, zone->name); if (unlikely(ret != 0)) { kobject_put(&zone->kobj); return ret; } glob->zones[glob->num_zones++] = zone; return 0; } #else static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, const struct sysinfo *si) { struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); uint64_t mem; int ret; if (unlikely(!zone)) return -ENOMEM; mem = si->totalram; mem *= si->mem_unit; /** * No special dma32 zone needed. */ if (mem <= ((uint64_t) 1ULL << 32)) { kfree(zone); return 0; } /* * Limit max dma32 memory to 4GB for now * until we can figure out how big this * zone really is. */ mem = ((uint64_t) 1ULL << 32); zone->name = "dma32"; zone->zone_mem = mem; zone->max_mem = mem >> 1; zone->emer_mem = (mem >> 1) + (mem >> 2); zone->swap_limit = zone->max_mem - (mem >> 3); zone->used_mem = 0; zone->glob = glob; glob->zone_dma32 = zone; ret = kobject_init_and_add( &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); if (unlikely(ret != 0)) { kobject_put(&zone->kobj); return ret; } glob->zones[glob->num_zones++] = zone; return 0; } #endif int ttm_mem_global_init(struct ttm_mem_global *glob) { struct sysinfo si; int ret; int i; struct ttm_mem_zone *zone; spin_lock_init(&glob->lock); glob->swap_queue = create_singlethread_workqueue("ttm_swap"); INIT_WORK(&glob->work, ttm_shrink_work); init_waitqueue_head(&glob->queue); ret = kobject_init_and_add( &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting"); if (unlikely(ret != 0)) { kobject_put(&glob->kobj); return ret; } si_meminfo(&si); ret = ttm_mem_init_kernel_zone(glob, &si); if (unlikely(ret != 0)) goto out_no_zone; #ifdef CONFIG_HIGHMEM ret = ttm_mem_init_highmem_zone(glob, &si); if (unlikely(ret != 0)) goto out_no_zone; #else ret = ttm_mem_init_dma32_zone(glob, &si); if (unlikely(ret != 0)) goto out_no_zone; #endif for (i = 0; i < glob->num_zones; ++i) { zone = glob->zones[i]; pr_info("Zone %7s: Available graphics memory: %llu kiB\n", 
zone->name, (unsigned long long)zone->max_mem >> 10); } ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); return 0; out_no_zone: ttm_mem_global_release(glob); return ret; } EXPORT_SYMBOL(ttm_mem_global_init); void ttm_mem_global_release(struct ttm_mem_global *glob) { unsigned int i; struct ttm_mem_zone *zone; /* let the page allocator first stop the shrink work. */ ttm_page_alloc_fini(); ttm_dma_page_alloc_fini(); flush_workqueue(glob->swap_queue); destroy_workqueue(glob->swap_queue); glob->swap_queue = NULL; for (i = 0; i < glob->num_zones; ++i) { zone = glob->zones[i]; kobject_del(&zone->kobj); kobject_put(&zone->kobj); } kobject_del(&glob->kobj); kobject_put(&glob->kobj); } EXPORT_SYMBOL(ttm_mem_global_release); static void ttm_check_swapping(struct ttm_mem_global *glob) { bool needs_swapping = false; unsigned int i; struct ttm_mem_zone *zone; spin_lock(&glob->lock); for (i = 0; i < glob->num_zones; ++i) { zone = glob->zones[i]; if (zone->used_mem > zone->swap_limit) { needs_swapping = true; break; } } spin_unlock(&glob->lock); if (unlikely(needs_swapping)) (void)queue_work(glob->swap_queue, &glob->work); } static void ttm_mem_global_free_zone(struct ttm_mem_global *glob, struct ttm_mem_zone *single_zone, uint64_t amount) { unsigned int i; struct ttm_mem_zone *zone; spin_lock(&glob->lock); for (i = 0; i < glob->num_zones; ++i) { zone = glob->zones[i]; if (single_zone && zone != single_zone) continue; zone->used_mem -= amount; } spin_unlock(&glob->lock); } void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount) { return ttm_mem_global_free_zone(glob, NULL, amount); } EXPORT_SYMBOL(ttm_mem_global_free); static int ttm_mem_global_reserve(struct ttm_mem_global *glob, struct ttm_mem_zone *single_zone, uint64_t amount, bool reserve) { uint64_t limit; int ret = -ENOMEM; unsigned int i; struct ttm_mem_zone *zone; spin_lock(&glob->lock); for (i = 0; i < 
glob->num_zones; ++i) { zone = glob->zones[i]; if (single_zone && zone != single_zone) continue; limit = (capable(CAP_SYS_ADMIN)) ? zone->emer_mem : zone->max_mem; if (zone->used_mem > limit) goto out_unlock; } if (reserve) { for (i = 0; i < glob->num_zones; ++i) { zone = glob->zones[i]; if (single_zone && zone != single_zone) continue; zone->used_mem += amount; } } ret = 0; out_unlock: spin_unlock(&glob->lock); ttm_check_swapping(glob); return ret; } static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob, struct ttm_mem_zone *single_zone, uint64_t memory, bool no_wait, bool interruptible) { int count = TTM_MEMORY_ALLOC_RETRIES; while (unlikely(ttm_mem_global_reserve(glob, single_zone, memory, true) != 0)) { if (no_wait) return -ENOMEM; if (unlikely(count-- == 0)) return -ENOMEM; ttm_shrink(glob, false, memory + (memory >> 2) + 16); } return 0; } int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, bool no_wait, bool interruptible) { /** * Normal allocations of kernel memory are registered in * all zones. */ return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, interruptible); } EXPORT_SYMBOL(ttm_mem_global_alloc); int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, struct page *page, bool no_wait, bool interruptible) { struct ttm_mem_zone *zone = NULL; /** * Page allocations may be registed in a single zone * only if highmem or !dma32. 
*/ #ifdef CONFIG_HIGHMEM if (PageHighMem(page) && glob->zone_highmem != NULL) zone = glob->zone_highmem; #else if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) zone = glob->zone_kernel; #endif return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait, interruptible); } void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page) { struct ttm_mem_zone *zone = NULL; #ifdef CONFIG_HIGHMEM if (PageHighMem(page) && glob->zone_highmem != NULL) zone = glob->zone_highmem; #else if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) zone = glob->zone_kernel; #endif ttm_mem_global_free_zone(glob, zone, PAGE_SIZE); } size_t ttm_round_pot(size_t size) { if ((size & (size - 1)) == 0) return size; else if (size > PAGE_SIZE) return PAGE_ALIGN(size); else { size_t tmp_size = 4; while (tmp_size < size) tmp_size <<= 1; return tmp_size; } return 0; } EXPORT_SYMBOL(ttm_round_pot);
gpl-2.0
rukin5197/android_kernel_htc_msm7x30
drivers/s390/char/tape_char.c
7968
11925
/* * drivers/s390/char/tape_char.c * character device frontend for tape device driver * * S390 and zSeries version * Copyright IBM Corp. 2001,2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #define KMSG_COMPONENT "tape" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/mtio.h> #include <linux/compat.h> #include <asm/uaccess.h> #define TAPE_DBF_AREA tape_core_dbf #include "tape.h" #include "tape_std.h" #include "tape_class.h" #define TAPECHAR_MAJOR 0 /* get dynamic major */ /* * file operation structure for tape character frontend */ static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *); static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *); static int tapechar_open(struct inode *,struct file *); static int tapechar_release(struct inode *,struct file *); static long tapechar_ioctl(struct file *, unsigned int, unsigned long); #ifdef CONFIG_COMPAT static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long); #endif static const struct file_operations tape_fops = { .owner = THIS_MODULE, .read = tapechar_read, .write = tapechar_write, .unlocked_ioctl = tapechar_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = tapechar_compat_ioctl, #endif .open = tapechar_open, .release = tapechar_release, .llseek = no_llseek, }; static int tapechar_major = TAPECHAR_MAJOR; /* * This function is called for every new tapedevice */ int tapechar_setup_device(struct tape_device * device) { char device_name[20]; sprintf(device_name, "ntibm%i", device->first_minor / 2); device->nt = register_tape_dev( &device->cdev->dev, MKDEV(tapechar_major, device->first_minor), &tape_fops, device_name, "non-rewinding" ); device_name[0] = 'r'; device->rt = register_tape_dev( &device->cdev->dev, MKDEV(tapechar_major, device->first_minor 
+ 1), &tape_fops, device_name, "rewinding" ); return 0; } void tapechar_cleanup_device(struct tape_device *device) { unregister_tape_dev(&device->cdev->dev, device->rt); device->rt = NULL; unregister_tape_dev(&device->cdev->dev, device->nt); device->nt = NULL; } static int tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) { struct idal_buffer *new; if (device->char_data.idal_buf != NULL && device->char_data.idal_buf->size == block_size) return 0; if (block_size > MAX_BLOCKSIZE) { DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n", block_size, MAX_BLOCKSIZE); return -EINVAL; } /* The current idal buffer is not correct. Allocate a new one. */ new = idal_buffer_alloc(block_size, 0); if (IS_ERR(new)) return -ENOMEM; if (device->char_data.idal_buf != NULL) idal_buffer_free(device->char_data.idal_buf); device->char_data.idal_buf = new; return 0; } /* * Tape device read function */ static ssize_t tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) { struct tape_device *device; struct tape_request *request; size_t block_size; int rc; DBF_EVENT(6, "TCHAR:read\n"); device = (struct tape_device *) filp->private_data; /* * If the tape isn't terminated yet, do it now. And since we then * are at the end of the tape there wouldn't be anything to read * anyways. So we return immediately. */ if(device->required_tapemarks) { return tape_std_terminate_write(device); } /* Find out block size to use */ if (device->char_data.block_size != 0) { if (count < device->char_data.block_size) { DBF_EVENT(3, "TCHAR:read smaller than block " "size was requested\n"); return -EINVAL; } block_size = device->char_data.block_size; } else { block_size = count; } rc = tapechar_check_idalbuffer(device, block_size); if (rc) return rc; #ifdef CONFIG_S390_TAPE_BLOCK /* Changes position. */ device->blk_data.medium_changed = 1; #endif DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); /* Let the discipline build the ccw chain. 
*/ request = device->discipline->read_block(device, block_size); if (IS_ERR(request)) return PTR_ERR(request); /* Execute it. */ rc = tape_do_io(device, request); if (rc == 0) { rc = block_size - request->rescnt; DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc); /* Copy data from idal buffer to user space. */ if (idal_buffer_to_user(device->char_data.idal_buf, data, rc) != 0) rc = -EFAULT; } tape_free_request(request); return rc; } /* * Tape device write function */ static ssize_t tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) { struct tape_device *device; struct tape_request *request; size_t block_size; size_t written; int nblocks; int i, rc; DBF_EVENT(6, "TCHAR:write\n"); device = (struct tape_device *) filp->private_data; /* Find out block size and number of blocks */ if (device->char_data.block_size != 0) { if (count < device->char_data.block_size) { DBF_EVENT(3, "TCHAR:write smaller than block " "size was requested\n"); return -EINVAL; } block_size = device->char_data.block_size; nblocks = count / block_size; } else { block_size = count; nblocks = 1; } rc = tapechar_check_idalbuffer(device, block_size); if (rc) return rc; #ifdef CONFIG_S390_TAPE_BLOCK /* Changes position. */ device->blk_data.medium_changed = 1; #endif DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size); DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks); /* Let the discipline build the ccw chain. */ request = device->discipline->write_block(device, block_size); if (IS_ERR(request)) return PTR_ERR(request); rc = 0; written = 0; for (i = 0; i < nblocks; i++) { /* Copy data from user space to idal buffer. 
*/ if (idal_buffer_from_user(device->char_data.idal_buf, data, block_size)) { rc = -EFAULT; break; } rc = tape_do_io(device, request); if (rc) break; DBF_EVENT(6, "TCHAR:wbytes: %lx\n", block_size - request->rescnt); written += block_size - request->rescnt; if (request->rescnt != 0) break; data += block_size; } tape_free_request(request); if (rc == -ENOSPC) { /* * Ok, the device has no more space. It has NOT written * the block. */ if (device->discipline->process_eov) device->discipline->process_eov(device); if (written > 0) rc = 0; } /* * After doing a write we always need two tapemarks to correctly * terminate the tape (one to terminate the file, the second to * flag the end of recorded data. * Since process_eov positions the tape in front of the written * tapemark it doesn't hurt to write two marks again. */ if (!rc) device->required_tapemarks = 2; return rc ? rc : written; } /* * Character frontend tape device open function. */ static int tapechar_open (struct inode *inode, struct file *filp) { struct tape_device *device; int minor, rc; DBF_EVENT(6, "TCHAR:open: %i:%i\n", imajor(filp->f_path.dentry->d_inode), iminor(filp->f_path.dentry->d_inode)); if (imajor(filp->f_path.dentry->d_inode) != tapechar_major) return -ENODEV; minor = iminor(filp->f_path.dentry->d_inode); device = tape_find_device(minor / TAPE_MINORS_PER_DEV); if (IS_ERR(device)) { DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n"); return PTR_ERR(device); } rc = tape_open(device); if (rc == 0) { filp->private_data = device; nonseekable_open(inode, filp); } else tape_put_device(device); return rc; } /* * Character frontend tape device release function. */ static int tapechar_release(struct inode *inode, struct file *filp) { struct tape_device *device; DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode)); device = (struct tape_device *) filp->private_data; /* * If this is the rewinding tape minor then rewind. In that case we * write all required tapemarks. 
Otherwise only one to terminate the * file. */ if ((iminor(inode) & 1) != 0) { if (device->required_tapemarks) tape_std_terminate_write(device); tape_mtop(device, MTREW, 1); } else { if (device->required_tapemarks > 1) { if (tape_mtop(device, MTWEOF, 1) == 0) device->required_tapemarks--; } } if (device->char_data.idal_buf != NULL) { idal_buffer_free(device->char_data.idal_buf); device->char_data.idal_buf = NULL; } tape_release(device); filp->private_data = NULL; tape_put_device(device); return 0; } /* * Tape device io controls. */ static int __tapechar_ioctl(struct tape_device *device, unsigned int no, unsigned long data) { int rc; if (no == MTIOCTOP) { struct mtop op; if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0) return -EFAULT; if (op.mt_count < 0) return -EINVAL; /* * Operations that change tape position should write final * tapemarks. */ switch (op.mt_op) { case MTFSF: case MTBSF: case MTFSR: case MTBSR: case MTREW: case MTOFFL: case MTEOM: case MTRETEN: case MTBSFM: case MTFSFM: case MTSEEK: #ifdef CONFIG_S390_TAPE_BLOCK device->blk_data.medium_changed = 1; #endif if (device->required_tapemarks) tape_std_terminate_write(device); default: ; } rc = tape_mtop(device, op.mt_op, op.mt_count); if (op.mt_op == MTWEOF && rc == 0) { if (op.mt_count > device->required_tapemarks) device->required_tapemarks = 0; else device->required_tapemarks -= op.mt_count; } return rc; } if (no == MTIOCPOS) { /* MTIOCPOS: query the tape position. */ struct mtpos pos; rc = tape_mtop(device, MTTELL, 1); if (rc < 0) return rc; pos.mt_blkno = rc; if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0) return -EFAULT; return 0; } if (no == MTIOCGET) { /* MTIOCGET: query the tape drive status. 
*/ struct mtget get; memset(&get, 0, sizeof(get)); get.mt_type = MT_ISUNKNOWN; get.mt_resid = 0 /* device->devstat.rescnt */; get.mt_dsreg = device->tape_state; /* FIXME: mt_gstat, mt_erreg, mt_fileno */ get.mt_gstat = 0; get.mt_erreg = 0; get.mt_fileno = 0; get.mt_gstat = device->tape_generic_status; if (device->medium_state == MS_LOADED) { rc = tape_mtop(device, MTTELL, 1); if (rc < 0) return rc; if (rc == 0) get.mt_gstat |= GMT_BOT(~0); get.mt_blkno = rc; } if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0) return -EFAULT; return 0; } /* Try the discipline ioctl function. */ if (device->discipline->ioctl_fn == NULL) return -EINVAL; return device->discipline->ioctl_fn(device, no, data); } static long tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data) { struct tape_device *device; long rc; DBF_EVENT(6, "TCHAR:ioct\n"); device = (struct tape_device *) filp->private_data; mutex_lock(&device->mutex); rc = __tapechar_ioctl(device, no, data); mutex_unlock(&device->mutex); return rc; } #ifdef CONFIG_COMPAT static long tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data) { struct tape_device *device = filp->private_data; int rval = -ENOIOCTLCMD; unsigned long argp; /* The 'arg' argument of any ioctl function may only be used for * pointers because of the compat pointer conversion. * Consider this when adding new ioctls. */ argp = (unsigned long) compat_ptr(data); if (device->discipline->ioctl_fn) { mutex_lock(&device->mutex); rval = device->discipline->ioctl_fn(device, no, argp); mutex_unlock(&device->mutex); if (rval == -EINVAL) rval = -ENOIOCTLCMD; } return rval; } #endif /* CONFIG_COMPAT */ /* * Initialize character device frontend. */ int tapechar_init (void) { dev_t dev; if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0) return -1; tapechar_major = MAJOR(dev); return 0; } /* * cleanup */ void tapechar_exit(void) { unregister_chrdev_region(MKDEV(tapechar_major, 0), 256); }
gpl-2.0
m4734/mysql_pio
storage/ndb/src/ndbapi/NdbWaitGroup.cpp
33
7757
/* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <ndb_global.h> #include <stdlib.h> #include "NdbWaitGroup.hpp" #include "WakeupHandler.hpp" #include "ndb_cluster_connection.hpp" #include "TransporterFacade.hpp" #include "ndb_cluster_connection_impl.hpp" #include "NdbImpl.hpp" int round_up(int num, int factor) { return num + factor - 1 - (num - 1) % factor; } NdbWaitGroup::NdbWaitGroup(Ndb_cluster_connection *_conn, int ndbs) : m_pos_new(0), m_pos_wait(0), m_pos_ready(0), m_multiWaitHandler(0), m_pos_overflow(0), m_nodeId(0), m_active_version(0), m_conn(_conn) { const int pointers_per_cache_line = NDB_CL / sizeof(Ndb *); /* round array size up to a whole cache line */ m_array_size = round_up(ndbs, pointers_per_cache_line); /* m_pos is used in the version 1 api */ m_pos = m_array_size; /* overflow list is 1/8 of array, also rounded up */ m_overflow_size = m_array_size / 8; m_overflow_size = round_up(m_overflow_size, pointers_per_cache_line); /* Return point is somewhere in the array */ m_pos_return = m_array_size / 3; /* Allocate the main array and the overflow list */ m_array = (Ndb **) calloc(m_array_size, sizeof(Ndb *)); m_overflow = (Ndb **) calloc(m_overflow_size, sizeof(Ndb *)); /* Call into the TransporterFacade to set up wakeups */ bool rc = m_conn->m_impl.m_transporter_facade->setupWakeup(); 
require(rc); /* Get a new Ndb object to be the dedicated "wakeup object" for the group */ m_wakeNdb = new Ndb(m_conn); require(m_wakeNdb); m_wakeNdb->init(1); m_nodeId = m_wakeNdb->theNode; /* Get a wakeup handler */ m_multiWaitHandler = new MultiNdbWakeupHandler(m_wakeNdb); require(m_multiWaitHandler); } NdbWaitGroup::~NdbWaitGroup() { delete m_multiWaitHandler; delete m_wakeNdb; free(m_array); free(m_overflow); } void NdbWaitGroup::wakeup() { m_conn->m_impl.m_transporter_facade->requestWakeup(); } /* Old-API addNdb() */ bool NdbWaitGroup::addNdb(Ndb *ndb) { if(unlikely(ndb->theNode != Uint32(m_nodeId))) { return false; // Ndb belongs to wrong ndb_cluster_connection } if(unlikely(m_pos == 0)) { return false; // array is full } m_array[--m_pos] = ndb; return true; } /* Old-API version of wait(). It is single-threaded without any concurrent push(). */ int NdbWaitGroup::wait(Ndb ** & arrayHead /* out */, Uint32 timeout_millis, int min_ndbs) { int nready; int nwait = m_array_size - m_pos; Ndb ** ndblist = m_array + m_pos; arrayHead = NULL; m_active_version = 1; int wait_rc = m_multiWaitHandler->waitForInput(ndblist, nwait, min_ndbs, timeout_millis, &nready); if(wait_rc == 0) { arrayHead = ndblist; m_pos += nready; return nready; } return wait_rc ? -1 : nready; } /* Version 2 API */ /* QUEUE A = Array m_array MAX = Array Size m_array_size RETURNPOINT = Some point between 0 and MAX m_pos_return N = New (recently pushed to list) NC = New Cursor m_pos_new W = Waiting (on NDB network i/o) WC = Waiting Cursor m_pos_wait R = Returned (from NDB, ready to poll) RC = Returned Cursor m_pos_ready init: NC = WC = RC = 0. 
push: A[NC] = X NC += 1 # NC is index of next new item If(NC == MAX) List is full wait: # Maintenance tasks: (1) If list is full, resize (2) If NC > RETURNPOINT, shift list downwad so A[WC] becomes A[0] # Wait for all the newly arrived items nwait = NC - WC nready = waitForInput(WC, nwait) WC += nready # WC is start index of the next wait pop: IF (RC != WC) RETURNVAL = A[RC] RC += 1 # RC is index of next ready item Many threads can push and pop; only one thread can use wait. */ int NdbWaitGroup::push(Ndb *ndb) { if(unlikely(ndb->theNode != Uint32(m_nodeId))) { return -1; } lock(); if(unlikely(m_pos_new == m_array_size)) // array is full { if(unlikely(m_pos_overflow == m_overflow_size)) // overflow list is full { m_overflow_size *= 2; assert(m_overflow_size < NDBWAITGROUP_MAX_SIZE); m_overflow = (Ndb **) realloc(m_overflow, m_overflow_size * sizeof(Ndb*)); } m_overflow[m_pos_overflow++] = ndb; } else { m_array[m_pos_new++] = ndb; // common case } unlock(); return 0; } /* wait() takes the lock before and after wait (not during). In 7.2, shifting or resizing the list requires a PollGuard, but in 7.3, the underlying wakeupHandler will only touch the array during wait() so no lock is needed. 
*/ int NdbWaitGroup::wait(Uint32 timeout_millis, int pct_ready) { int nready, nwait; m_active_version = 2; assert(pct_ready >=0 && pct_ready <= 100); lock(); /* Resize list if full */ if(unlikely(m_pos_new == m_array_size)) { resize_list(); } /* On last pop, if list has advanced past return point, shift back to 0 */ if(m_pos_ready && /* Not at zero */ m_pos_ready == m_pos_wait && /* Cannot currently pop */ m_pos_new > m_pos_return) /* NC > RETURNPOINT */ { for(Uint32 i = m_pos_wait; i < m_pos_new; i++) { m_array[i - m_pos_wait] = m_array[i]; } m_pos_new -= m_pos_wait; m_pos_ready = m_pos_wait = 0; } /* Number of items to wait for */ nwait = m_pos_new - m_pos_wait; unlock(); /********** ENTER WAIT **********/ int min_ndbs = nwait * pct_ready / 100 ; if(min_ndbs == 0 && pct_ready > 0) min_ndbs = 1; Ndb ** arrayHead = m_array + m_pos_wait; m_multiWaitHandler->waitForInput(arrayHead, nwait, min_ndbs, timeout_millis, &nready); /********** EXIT WAIT *********/ lock(); m_pos_wait += nready; unlock(); return nready; } Ndb * NdbWaitGroup::pop() { Ndb * r = 0; lock(); if(m_pos_ready < m_pos_wait) { r = m_array[m_pos_ready++]; } unlock(); return r; } /* Private internal methods */ void NdbWaitGroup::resize_list() { Uint32 size_required = m_array_size + m_pos_overflow + 1; while(m_array_size < size_required) { m_array_size *= 2; m_pos_return *= 2; } assert(m_array_size < NDBWAITGROUP_MAX_SIZE); /* Reallocate */ m_array = (Ndb **) realloc(m_array, m_array_size * sizeof(Ndb *)); /* Copy from the overflow list to the new list. */ while(m_pos_overflow) { m_array[m_pos_new++] = m_overflow[--m_pos_overflow]; } }
gpl-2.0
cupertinomiranda/binutils_new
sim/rl78/gdb-if.c
33
12599
/* gdb-if.c -- sim interface to GDB. Copyright (C) 2011-2015 Free Software Foundation, Inc. Contributed by Red Hat, Inc. This file is part of the GNU simulators. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "config.h" #include <stdio.h> #include <assert.h> #include <signal.h> #include <string.h> #include <ctype.h> #include <stdlib.h> #include "ansidecl.h" #include "gdb/callback.h" #include "gdb/remote-sim.h" #include "gdb/signals.h" #include "gdb/sim-rl78.h" #include "cpu.h" #include "mem.h" #include "load.h" #include "trace.h" /* Ideally, we'd wrap up all the minisim's data structures in an object and pass that around. However, neither GDB nor run needs that ability. So we just have one instance, that lives in global variables, and each time we open it, we re-initialize it. */ struct sim_state { const char *message; }; static struct sim_state the_minisim = { "This is the sole rl78 minisim instance." }; static int open; static struct host_callback_struct *host_callbacks; /* Open an instance of the sim. For this sim, only one instance is permitted. If sim_open() is called multiple times, the sim will be reset. */ SIM_DESC sim_open (SIM_OPEN_KIND kind, struct host_callback_struct *callback, struct bfd *abfd, char **argv) { if (open) fprintf (stderr, "rl78 minisim: re-opened sim\n"); /* The 'run' interface doesn't use this function, so we don't care about KIND; it's always SIM_OPEN_DEBUG. 
*/ if (kind != SIM_OPEN_DEBUG) fprintf (stderr, "rl78 minisim: sim_open KIND != SIM_OPEN_DEBUG: %d\n", kind); /* We use this for the load command. Perhaps someday, it'll be used for syscalls too. */ host_callbacks = callback; /* We don't expect any command-line arguments. */ init_cpu (); trace = 0; sim_disasm_init (abfd); open = 1; while (argv != NULL && *argv != NULL) { if (strcmp (*argv, "g10") == 0 || strcmp (*argv, "-Mg10") == 0) { fprintf (stderr, "rl78 g10 support enabled.\n"); rl78_g10_mode = 1; g13_multiply = 0; g14_multiply = 0; mem_set_mirror (0, 0xf8000, 4096); break; } if (strcmp (*argv, "g13") == 0 || strcmp (*argv, "-Mg13") == 0) { fprintf (stderr, "rl78 g13 support enabled.\n"); rl78_g10_mode = 0; g13_multiply = 1; g14_multiply = 0; break; } if (strcmp (*argv, "g14") == 0 || strcmp (*argv, "-Mg14") == 0) { fprintf (stderr, "rl78 g14 support enabled.\n"); rl78_g10_mode = 0; g13_multiply = 0; g14_multiply = 1; break; } argv++; } return &the_minisim; } /* Verify the sim descriptor. Just print a message if the descriptor doesn't match. Nothing bad will happen if the descriptor doesn't match because all of the state is global. But if it doesn't match, that means there's a problem with the caller. */ static void check_desc (SIM_DESC sd) { if (sd != &the_minisim) fprintf (stderr, "rl78 minisim: desc != &the_minisim\n"); } /* Close the sim. */ void sim_close (SIM_DESC sd, int quitting) { check_desc (sd); /* Not much to do. At least free up our memory. */ init_mem (); open = 0; } /* Open the program to run; print a message if the program cannot be opened. */ static bfd * open_objfile (const char *filename) { bfd *prog = bfd_openr (filename, 0); if (!prog) { fprintf (stderr, "Can't read %s\n", filename); return 0; } if (!bfd_check_format (prog, bfd_object)) { fprintf (stderr, "%s not a rl78 program\n", filename); return 0; } return prog; } /* Load a program. 
*/ SIM_RC sim_load (SIM_DESC sd, const char *prog, struct bfd *abfd, int from_tty) { check_desc (sd); if (!abfd) abfd = open_objfile (prog); if (!abfd) return SIM_RC_FAIL; rl78_load (abfd, host_callbacks, "sim"); return SIM_RC_OK; } /* Create inferior. */ SIM_RC sim_create_inferior (SIM_DESC sd, struct bfd *abfd, char **argv, char **env) { check_desc (sd); if (abfd) rl78_load (abfd, 0, "sim"); return SIM_RC_OK; } /* Read memory. */ int sim_read (SIM_DESC sd, SIM_ADDR mem, unsigned char *buf, int length) { check_desc (sd); if (mem >= MEM_SIZE) return 0; else if (mem + length > MEM_SIZE) length = MEM_SIZE - mem; mem_get_blk (mem, buf, length); return length; } /* Write memory. */ int sim_write (SIM_DESC sd, SIM_ADDR mem, const unsigned char *buf, int length) { check_desc (sd); if (mem >= MEM_SIZE) return 0; else if (mem + length > MEM_SIZE) length = MEM_SIZE - mem; mem_put_blk (mem, buf, length); return length; } /* Read the LENGTH bytes at BUF as an little-endian value. */ static SI get_le (unsigned char *buf, int length) { SI acc = 0; while (--length >= 0) acc = (acc << 8) + buf[length]; return acc; } /* Store VAL as a little-endian value in the LENGTH bytes at BUF. */ static void put_le (unsigned char *buf, int length, SI val) { int i; for (i = 0; i < length; i++) { buf[i] = val & 0xff; val >>= 8; } } /* Verify that REGNO is in the proper range. Return 0 if not and something non-zero if so. */ static int check_regno (enum sim_rl78_regnum regno) { return 0 <= regno && regno < sim_rl78_num_regs; } /* Return the size of the register REGNO. */ static size_t reg_size (enum sim_rl78_regnum regno) { size_t size; if (regno == sim_rl78_pc_regnum) size = 4; else size = 1; return size; } /* Return the register address associated with the register specified by REGNO. 
*/

static unsigned long
reg_addr (enum sim_rl78_regnum regno)
{
  /* The four general-register banks are memory-mapped at fixed
     addresses; each bank holds eight one-byte registers.  */
  if (sim_rl78_bank0_r0_regnum <= regno && regno <= sim_rl78_bank0_r7_regnum)
    return 0xffef8 + (regno - sim_rl78_bank0_r0_regnum);
  if (sim_rl78_bank1_r0_regnum <= regno && regno <= sim_rl78_bank1_r7_regnum)
    return 0xffef0 + (regno - sim_rl78_bank1_r0_regnum);
  if (sim_rl78_bank2_r0_regnum <= regno && regno <= sim_rl78_bank2_r7_regnum)
    return 0xffee8 + (regno - sim_rl78_bank2_r0_regnum);
  if (sim_rl78_bank3_r0_regnum <= regno && regno <= sim_rl78_bank3_r7_regnum)
    return 0xffee0 + (regno - sim_rl78_bank3_r0_regnum);

  /* Single memory-mapped special registers.  */
  switch (regno)
    {
    case sim_rl78_psw_regnum:
      return 0xffffa;
    case sim_rl78_es_regnum:
      return 0xffffd;
    case sim_rl78_cs_regnum:
      return 0xffffc;
    /* Note: We can't handle PC here because it's not memory mapped.  */
    case sim_rl78_spl_regnum:
      return 0xffff8;
    case sim_rl78_sph_regnum:
      return 0xffff9;
    case sim_rl78_pmc_regnum:
      return 0xffffe;
    case sim_rl78_mem_regnum:
      return 0xfffff;
    default:
      break;
    }

  return 0;
}

/* Fetch the contents of the register specified by REGNO, placing the
   contents in BUF.  The length LENGTH must match the sim's internal
   notion of the register's size.  */

int
sim_fetch_register (SIM_DESC sd, int regno, unsigned char *buf, int length)
{
  size_t regsize;
  SI regval;

  check_desc (sd);

  if (!check_regno (regno))
    return 0;

  regsize = reg_size (regno);
  if (length != regsize)
    return 0;

  /* PC lives in its own variable; everything else is read straight
     out of the memory-mapped register file.  */
  regval = (regno == sim_rl78_pc_regnum) ? pc : memory[reg_addr (regno)];

  put_le (buf, length, regval);
  return regsize;
}

/* Store the value stored in BUF to the register REGNO.  The length
   LENGTH must match the sim's internal notion of the register size.
*/ int sim_store_register (SIM_DESC sd, int regno, unsigned char *buf, int length) { size_t size; SI val; check_desc (sd); if (!check_regno (regno)) return -1; size = reg_size (regno); if (length != size) return -1; val = get_le (buf, length); if (regno == sim_rl78_pc_regnum) { pc = val; /* The rl78 program counter is 20 bits wide. Ensure that GDB hasn't picked up any stray bits. This has occurred when performing a GDB "return" command in which the return address is obtained from a 32-bit container on the stack. */ assert ((pc & ~0x0fffff) == 0); } else memory[reg_addr (regno)] = val; return size; } /* Print out message associated with "info target". */ void sim_info (SIM_DESC sd, int verbose) { check_desc (sd); printf ("The rl78 minisim doesn't collect any statistics.\n"); } static volatile int stop; static enum sim_stop reason; int siggnal; /* Given a signal number used by the rl78 bsp (that is, newlib), return the corresponding signal numbers. */ int rl78_signal_to_target (int sig) { switch (sig) { case 4: return GDB_SIGNAL_ILL; case 5: return GDB_SIGNAL_TRAP; case 10: return GDB_SIGNAL_BUS; case 11: return GDB_SIGNAL_SEGV; case 24: return GDB_SIGNAL_XCPU; break; case 2: return GDB_SIGNAL_INT; case 8: return GDB_SIGNAL_FPE; break; case 6: return GDB_SIGNAL_ABRT; } return 0; } /* Take a step return code RC and set up the variables consulted by sim_stop_reason appropriately. */ void handle_step (int rc) { if (RL78_STEPPED (rc) || RL78_HIT_BREAK (rc)) { reason = sim_stopped; siggnal = GDB_SIGNAL_TRAP; } else if (RL78_STOPPED (rc)) { reason = sim_stopped; siggnal = rl78_signal_to_target (RL78_STOP_SIG (rc)); } else { assert (RL78_EXITED (rc)); reason = sim_exited; siggnal = RL78_EXIT_STATUS (rc); } } /* Resume execution after a stop. 
*/

void
sim_resume (SIM_DESC sd, int step, int sig_to_deliver)
{
  int rc;

  check_desc (sd);

  if (sig_to_deliver != 0)
    {
      fprintf (stderr,
               "Warning: the rl78 minisim does not implement "
               "signal delivery yet.\n" "Resuming with no signal.\n");
    }

  /* We don't clear 'stop' here, because then we would miss interrupts
     that arrived on the way here.  Instead, we clear the flag in
     sim_stop_reason, after GDB has disabled the interrupt signal
     handler.  */
  for (;;)
    {
      if (stop)
        {
          stop = 0;
          reason = sim_stopped;
          siggnal = GDB_SIGNAL_INT;
          break;
        }

      rc = setjmp (decode_jmp_buf);
      if (rc == 0)
        rc = decode_opcode ();

      if (!RL78_STEPPED (rc) || step)
        {
          handle_step (rc);
          break;
        }
    }
}

/* Stop the sim.  */

int
sim_stop (SIM_DESC sd)
{
  stop = 1;

  return 1;
}

/* Fetch the stop reason and signal.  */

void
sim_stop_reason (SIM_DESC sd, enum sim_stop *reason_p, int *sigrc_p)
{
  check_desc (sd);

  *reason_p = reason;
  *sigrc_p = siggnal;
}

/* Execute the sim-specific command associated with GDB's "sim ..."
   command.

   Fixes relative to the original:
   - CMD was passed to strdup() BEFORE the CMD == NULL check: undefined
     behavior on a null command.
   - The tokenizing loop reset P to CMD, which (a) discarded the
     strdup'd buffer so free(P) freed the wrong pointer and leaked the
     copy, (b) wrote the '\0' terminator into the caller's const
     string, and (c) threw away the leading-whitespace skip.  All
     tokenizing now happens inside the duplicated buffer.
   - The command word was compared with strcmp(CMD, ...), i.e. against
     the whole untokenized string, so "sim trace on" never matched
     "trace".  The comparison now uses the isolated command word.
   - isspace() is given an unsigned char, avoiding UB for negative
     char values.  */

void
sim_do_command (SIM_DESC sd, const char *cmd)
{
  char *dup, *word, *p;
  const char *args;

  check_desc (sd);

  if (cmd == NULL)
    cmd = "";

  /* Work on a private copy so we can null-terminate the command word
     without touching the caller's string.  */
  dup = strdup (cmd);
  p = dup;

  /* Skip leading whitespace.  */
  while (isspace ((unsigned char) *p))
    p++;
  word = p;

  /* Find the extent of the command word.  */
  while (*p != '\0' && !isspace ((unsigned char) *p))
    p++;

  /* Null-terminate the command word, and record the start of any
     further arguments.  */
  if (*p != '\0')
    {
      *p = '\0';
      args = p + 1;
      while (isspace ((unsigned char) *args))
        args++;
    }
  else
    args = p;

  if (strcmp (word, "trace") == 0)
    {
      if (strcmp (args, "on") == 0)
        trace = 1;
      else if (strcmp (args, "off") == 0)
        trace = 0;
      else
        printf ("The 'sim trace' command expects 'on' or 'off' "
                "as an argument.\n");
    }
  else if (strcmp (word, "verbose") == 0)
    {
      if (strcmp (args, "on") == 0)
        verbose = 1;
      else if (strcmp (args, "noisy") == 0)
        verbose = 2;
      else if (strcmp (args, "off") == 0)
        verbose = 0;
      else
        printf ("The 'sim verbose' command expects 'on', 'noisy', or 'off'"
                " as an argument.\n");
    }
  else
    printf ("The 'sim' command expects either 'trace' or 'verbose'"
            " as a subcommand.\n");

  free (dup);
}

/* Stub for command completion.  */

char **
sim_complete_command (SIM_DESC sd, const char *text, const char *word)
{
  return NULL;
}
gpl-2.0
ResurrectionRemix-Devices/android_kernel_samsung_smdk4412
arch/arm/mach-exynos/tmu.c
33
42544
/* linux/arch/arm/mach-exynos/tmu.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * EXYNOS4 - Thermal Management support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/kobject.h> #ifdef CONFIG_EXYNOS4_EXPORT_TEMP #include <linux/exynos4_export_temp.h> #endif #include <asm/irq.h> #include <mach/regs-tmu.h> #include <mach/cpufreq.h> #include <mach/map.h> #include <mach/smc.h> #include <plat/s5p-tmu.h> #include <plat/map-s5p.h> #include <plat/gpio-cfg.h> #include <plat/cpu.h> #include <mach/asv.h> #ifdef CONFIG_BUSFREQ_OPP #include <mach/busfreq_exynos4.h> #include <mach/dev.h> #endif #ifdef CONFIG_EXYNOS4_EXPORT_TEMP static unsigned int tmu_curr_temperature; #endif static enum { ENABLE_TEMP_MON = 0x1, ENABLE_TEST_MODE = 0x2, } enable_mask = ENABLE_TEMP_MON | ENABLE_TEST_MODE; module_param_named(enable_mask, enable_mask, uint, 0644); #define ENABLE_DBGMASK (ENABLE_TEMP_MON | ENABLE_TEST_MODE) /* for factory mode */ #define CONFIG_TMU_SYSFS /* flags that throttling or trippint is treated */ #define THROTTLE_FLAG (0x1 << 0) #define WARNING_FLAG (0x1 << 1) #define TRIPPING_FLAG (0x1 << 2) #define MEM_THROTTLE_FLAG (0x1 << 4) #define TIMING_AREF_OFFSET 0x30 static struct workqueue_struct *tmu_monitor_wq; static DEFINE_MUTEX(tmu_lock); #if (defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)) #if defined(CONFIG_VIDEO_MALI400MP) extern int mali_voltage_lock_init(void); extern int 
mali_voltage_lock_push(int lock_vol); extern int mali_voltage_lock_pop(void); #endif #define CONFIG_TC_VOLTAGE /* Temperature compensated voltage */ #endif static unsigned int get_curr_temp(struct s5p_tmu_info *info) { unsigned char curr_temp_code; int temperature; if (!info) return -EAGAIN; /* After reading temperature code from register, compensating * its value and calculating celsius temperatue, * get current temperatue. */ curr_temp_code = __raw_readl(info->tmu_base + EXYNOS4_TMU_CURRENT_TEMP) & 0xff; /* Check range of temprature code with curr_temp_code & efusing info */ pr_debug("CURRENT_TEMP = 0x%02x\n", curr_temp_code); #if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412) /* temperature code range are between min 10 and 125 */ if ((info->te1 - curr_temp_code) > 15 || (curr_temp_code - info->te1) > 100) #else /* temperature code range are between min 25 and 125 */ if ((curr_temp_code - info->te1) < 0 || (curr_temp_code - info->te1) > 100) #endif pr_warning("temperature code is in inaccurate -->" "check if vdd_18_ts is on\n" "or surrounding temp is low.\n"); /* compensate and calculate current temperature */ temperature = curr_temp_code - info->te1 + TMU_DC_VALUE; if (temperature < 0) { /* if temperature lower than 0 degree, set 0 degree */ pr_info("current temp is %d celsius degree.\n" "so, set to 0 celsius degree!\n", temperature); temperature = 0; } #ifdef CONFIG_EXYNOS4_EXPORT_TEMP tmu_curr_temperature = temperature; #endif return (unsigned int)temperature; } #ifdef CONFIG_EXYNOS4_EXPORT_TEMP unsigned int get_exynos4_temperature(void) { return tmu_curr_temperature; } EXPORT_SYMBOL(get_exynos4_temperature); #endif static ssize_t show_temperature(struct device *dev, struct device_attribute *attr, char *buf) { struct s5p_tmu_info *info = dev_get_drvdata(dev); unsigned int temperature; if (!dev) return -ENODEV; mutex_lock(&tmu_lock); temperature = get_curr_temp(info); mutex_unlock(&tmu_lock); return sprintf(buf, "%u\n", temperature); } static 
ssize_t show_tmu_state(struct device *dev, struct device_attribute *attr, char *buf) { struct s5p_tmu_info *info = dev_get_drvdata(dev); if (!dev) return -ENODEV; return sprintf(buf, "%d\n", info->tmu_state); } static ssize_t show_lot_id(struct device *dev, struct device_attribute *attr, char *buf) { u32 id1 = 0; u32 id2 = 0; id1 = __raw_readl(S5P_VA_CHIPID + 0x14); id2 = __raw_readl(S5P_VA_CHIPID + 0x18); return sprintf(buf, "%08x-%08x\n", id1, id2); } static DEVICE_ATTR(temperature, 0444, show_temperature, NULL); static DEVICE_ATTR(tmu_state, 0444, show_tmu_state, NULL); static DEVICE_ATTR(lot_id, 0444, show_lot_id, NULL); static void print_temperature_params(struct s5p_tmu_info *info) { struct s5p_platform_tmu *pdata = info->dev->platform_data; pr_info("** temperature set value **\n" "1st throttling stop_temp = %u, start_temp = %u\n" "2nd throttling stop_temp = %u, start_tmep = %u\n" "tripping temp = %u, s/w emergency temp = %u\n" "mem throttling stop_temp = %u, start_temp = %u\n", pdata->ts.stop_1st_throttle, pdata->ts.start_1st_throttle, pdata->ts.stop_2nd_throttle, pdata->ts.start_2nd_throttle, pdata->ts.start_tripping, pdata->ts.start_emergency, pdata->ts.stop_mem_throttle, pdata->ts.start_mem_throttle); #if defined(CONFIG_TC_VOLTAGE) pr_info("tc_voltage stop_temp = %u, start_temp = %u\n", pdata->ts.stop_tc, pdata->ts.start_tc); #endif } unsigned int get_refresh_interval(unsigned int freq_ref, unsigned int refresh_nsec) { unsigned int uRlk, refresh = 0; /* * uRlk = FIN / 100000; * refresh_usec = (unsigned int)(fMicrosec * 10); * uRegVal = ((unsigned int)(uRlk * uMicroSec / 100)) - 1; * refresh = * (unsigned int)(freq_ref * (unsigned int)(refresh_usec * 10) / 100) - 1; */ uRlk = freq_ref / 1000000; refresh = ((unsigned int)(uRlk * refresh_nsec / 1000)); pr_info("@@@ get_refresh_interval = 0x%02x\n", refresh); return refresh; } struct tmu_early_param { int set_ts; struct temperature_params ts; int set_lock; unsigned cpufreq_level_1st_throttle; unsigned 
cpufreq_level_2nd_throttle; int set_rate; unsigned int sampling_rate; unsigned int monitor_rate; }; static struct tmu_early_param tmu_in; static int tmu_print_temp_on_off; static int __init get_temperature_params(char *str) { int ints[11]; unsigned int mask = (enable_mask & ENABLE_DBGMASK); if (!(mask & ENABLE_TEST_MODE)) return -EPERM; get_options(str, ARRAY_SIZE(ints), ints); /* output the input value */ pr_info("tmu_test=%s\n", str); if (ints[0]) tmu_in.set_ts = 1; if (ints[0] > 0) tmu_in.ts.stop_1st_throttle = (unsigned int)ints[1]; if (ints[0] > 1) tmu_in.ts.start_1st_throttle = (unsigned int)ints[2]; if (ints[0] > 2) tmu_in.ts.stop_2nd_throttle = (unsigned int)ints[3]; if (ints[0] > 3) tmu_in.ts.start_2nd_throttle = (unsigned int)ints[4]; if (ints[0] > 4) tmu_in.ts.start_tripping = (unsigned int)ints[5]; if (ints[0] > 5) tmu_in.ts.start_emergency = (unsigned int)ints[6]; if (ints[0] > 6) tmu_in.ts.stop_mem_throttle = (unsigned int)ints[7]; if (ints[0] > 7) tmu_in.ts.start_mem_throttle = (unsigned int)ints[8]; /* output the input value */ pr_info("-->1st throttling temp: start[%u], stop[%u]\n" "-->2nd throttling temp: start[%u], stop[%u]\n" "-->trpping temp[%u], emergency temp[%u]\n" "-->mem throttling temp: start[%u], stop[%u]\n", tmu_in.ts.start_1st_throttle, tmu_in.ts.stop_1st_throttle, tmu_in.ts.start_2nd_throttle, tmu_in.ts.stop_2nd_throttle, tmu_in.ts.start_tripping, tmu_in.ts.start_emergency, tmu_in.ts.start_mem_throttle, tmu_in.ts.stop_mem_throttle); #ifdef CONFIG_TC_VOLTAGE if (ints[0] > 8) tmu_in.ts.stop_tc = (unsigned int)ints[9]; if (ints[0] > 9) tmu_in.ts.start_tc = (unsigned int)ints[10]; pr_info("-->temp compensate : start[%u], stop[%u]\n", tmu_in.ts.start_tc, tmu_in.ts.stop_tc); #endif return 0; } early_param("tmu_test", get_temperature_params); static int __init get_cpufreq_limit_param(char *str) { int ints[3]; unsigned int mask = (enable_mask & ENABLE_DBGMASK); if (!(mask & ENABLE_TEST_MODE)) return -EPERM; get_options(str, ARRAY_SIZE(ints), 
ints); /* output the input value */ pr_info("cpu_level=%s\n", str); if (ints[0]) tmu_in.set_lock = 1; if (ints[0] > 0) tmu_in.cpufreq_level_1st_throttle = (unsigned int)ints[1]; if (ints[0] > 1) tmu_in.cpufreq_level_2nd_throttle = (unsigned int)ints[2]; pr_info("--> cpufreq_limit: 1st cpu_level = %u, 2nd cpu_level = %u\n", tmu_in.cpufreq_level_1st_throttle, tmu_in.cpufreq_level_2nd_throttle); return 0; } early_param("cpu_level", get_cpufreq_limit_param); static int __init get_sampling_rate_param(char *str) { int ints[3]; unsigned int mask = (enable_mask & ENABLE_DBGMASK); if (!(mask & ENABLE_TEST_MODE)) return -EPERM; get_options(str, ARRAY_SIZE(ints), ints); /* output the input value */ pr_info("tmu_sampling_rate=%s\n", str); if (ints[0]) tmu_in.set_rate = 1; if (ints[0] > 0) tmu_in.sampling_rate = (unsigned int)ints[1]; if (ints[0] > 1) tmu_in.monitor_rate = (unsigned int)ints[2]; pr_info("--> sampling_rate = %u ms, monitor_rate = %u ms\n", tmu_in.sampling_rate, tmu_in.monitor_rate); return 0; } early_param("tmu_sampling_rate", get_sampling_rate_param); static void exynos4_poll_cur_temp(struct work_struct *work) { unsigned int cur_temp; struct delayed_work *delayed_work = to_delayed_work(work); struct s5p_tmu_info *info = container_of(delayed_work, struct s5p_tmu_info, monitor); unsigned int mask = (enable_mask & ENABLE_DBGMASK); mutex_lock(&tmu_lock); if (mask & ENABLE_TEMP_MON) { cur_temp = get_curr_temp(info); if (tmu_print_temp_on_off) pr_info("curr temp in polling_interval = %u state = %d\n", cur_temp, info->tmu_state); else pr_debug("curr temp in polling_interval = %u\n", cur_temp); } queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor, info->monitor_period); mutex_unlock(&tmu_lock); } static ssize_t tmu_show_print_state(struct device *dev, struct device_attribute *attr, char *buf) { int ret; ret = sprintf(buf, "[TMU] tmu_print_temp_on_off=%d\n" , tmu_print_temp_on_off); return ret; } static ssize_t tmu_store_print_state(struct device *dev, struct 
device_attribute *attr, const char *buf, size_t count) { int ret = 0; if (!strncmp(buf, "0", 1)) { tmu_print_temp_on_off = 0; ret = 0; } else if (!strncmp(buf, "1", 1)) { tmu_print_temp_on_off = 1; ret = 1; } else { dev_err(dev, "Invalid cmd !!\n"); return -EINVAL; } return ret; } static DEVICE_ATTR(print_state, S_IRUGO | S_IWUSR,\ tmu_show_print_state, tmu_store_print_state); void set_refresh_rate(unsigned int auto_refresh) { /* * uRlk = FIN / 100000; * refresh_usec = (unsigned int)(fMicrosec * 10); * uRegVal = ((unsigned int)(uRlk * uMicroSec / 100)) - 1; */ pr_debug("set_auto_refresh = 0x%02x\n", auto_refresh); #ifdef CONFIG_ARCH_EXYNOS4 #ifdef CONFIG_ARM_TRUSTZONE exynos_smc(SMC_CMD_REG, SMC_REG_ID_SFR_W((EXYNOS4_PA_DMC0_4212 + TIMING_AREF_OFFSET)), auto_refresh, 0); exynos_smc(SMC_CMD_REG, SMC_REG_ID_SFR_W((EXYNOS4_PA_DMC1_4212 + TIMING_AREF_OFFSET)), auto_refresh, 0); #else /* change auto refresh period in TIMING_AREF register of dmc0 */ __raw_writel(auto_refresh, S5P_VA_DMC0 + TIMING_AREF_OFFSET); /* change auto refresh period in TIMING_AREF regisger of dmc1 */ __raw_writel(auto_refresh, S5P_VA_DMC1 + TIMING_AREF_OFFSET); #endif #else /* CONFIG_ARCH_EXYNOS4 */ #ifdef CONFIG_ARM_TRUSTZONE exynos_smc(SMC_CMD_REG, SMC_REG_ID_SFR_W((EXYNOS5_PA_DMC + TIMING_AREF_OFFSET)), auto_refresh, 0); #else /* change auto refresh period in TIMING_AREF register of dmc */ __raw_writel(auto_refresh, S5P_VA_DMC0 + TIMING_AREF_OFFSET); #endif #endif /* CONFIG_ARCH_EXYNOS4 */ } static void set_temperature_params(struct s5p_tmu_info *info) { struct s5p_platform_tmu *data = info->dev->platform_data; /* In the tmu_test mode, change temperature_params value * input data. 
*/ if (tmu_in.set_ts) data->ts = tmu_in.ts; if (tmu_in.set_lock) { info->cpufreq_level_1st_throttle = tmu_in.cpufreq_level_1st_throttle; info->cpufreq_level_2nd_throttle = tmu_in.cpufreq_level_2nd_throttle; } if (tmu_in.set_rate) { info->sampling_rate = usecs_to_jiffies(tmu_in.sampling_rate * 1000); info->monitor_period = usecs_to_jiffies(tmu_in.monitor_rate * 1000); } print_temperature_params(info); } static int notify_change_of_tmu_state(struct s5p_tmu_info *info) { char temp_buf[20]; char *envp[2]; int env_offset = 0; snprintf(temp_buf, sizeof(temp_buf), "TMUSTATE=%d", info->tmu_state); envp[env_offset++] = temp_buf; envp[env_offset] = NULL; pr_info("%s: uevent: %d, name = %s\n", __func__, info->tmu_state, temp_buf); return kobject_uevent_env(&info->dev->kobj, KOBJ_CHANGE, envp); } static void exynos_interrupt_enable(struct s5p_tmu_info *info, int enable) { static unsigned int save; if (!save) save = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTEN); if (enable) __raw_writel(save, info->tmu_base + EXYNOS4_TMU_INTEN); else __raw_writel(0x0, info->tmu_base + EXYNOS4_TMU_INTEN); } #if defined(CONFIG_TC_VOLTAGE) /** * exynos_tc_volt - locks or frees vdd_arm, vdd_mif/int and vdd_g3d for * temperature compensation. * * This function limits or free voltage of cpufreq, busfreq, and mali driver * according to 2nd arguments. */ static int exynos_tc_volt(struct s5p_tmu_info *info, int enable) { struct s5p_platform_tmu *data; static int usage; int ret = 0; if (!info || !(info->dev)) return -EPERM; data = info->dev->platform_data; if (enable == usage) { pr_debug("TMU: already is %s.\n", enable ? 
"locked" : "unlocked"); return 0; } if (enable) { ret = exynos_cpufreq_lock(DVFS_LOCK_ID_TMU, info->cpulevel_tc); if (ret) goto err_lock; #ifdef CONFIG_BUSFREQ_OPP ret = dev_lock(info->bus_dev, info->dev, info->busfreq_tc); if (ret) goto err_lock; #endif #if defined(CONFIG_VIDEO_MALI400MP) ret = mali_voltage_lock_push(data->temp_compensate.g3d_volt); if (ret < 0) { pr_err("TMU: g3d_push error: %u uV\n", data->temp_compensate.g3d_volt); goto err_lock; } #endif } else { exynos_cpufreq_lock_free(DVFS_LOCK_ID_TMU); #ifdef CONFIG_BUSFREQ_OPP ret = dev_unlock(info->bus_dev, info->dev); if (ret) goto err_unlock; #endif #if defined(CONFIG_VIDEO_MALI400MP) ret = mali_voltage_lock_pop(); if (ret < 0) { pr_err("TMU: g3d_pop error\n"); goto err_unlock; } #endif } usage = enable; pr_info("TMU: %s is ok!\n", enable ? "lock" : "unlock"); return ret; err_lock: err_unlock: pr_err("TMU: %s is fail.\n", enable ? "lock" : "unlock"); return ret; } #endif static void exynos4_handler_tmu_state(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct s5p_tmu_info *info = container_of(delayed_work, struct s5p_tmu_info, polling); struct s5p_platform_tmu *data = info->dev->platform_data; unsigned int cur_temp; static int auto_refresh_changed; static int check_handle; int trend = 0; mutex_lock(&tmu_lock); cur_temp = get_curr_temp(info); trend = cur_temp - info->last_temperature; pr_debug("curr_temp = %u, temp_diff = %d\n", cur_temp, trend); switch (info->tmu_state) { #if defined(CONFIG_TC_VOLTAGE) case TMU_STATUS_TC: /* lock has priority than unlock */ if (cur_temp <= data->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) pr_err("TMU: lock error!\n"); } else if (cur_temp >= data->ts.stop_tc) { if (exynos_tc_volt(info, 0) < 0) { pr_err("TMU: unlock error!\n"); } else { info->tmu_state = TMU_STATUS_NORMAL; pr_info("change state: tc -> normal.\n"); } } /* free if upper limit is locked */ if (check_handle) { exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU); 
check_handle = 0; } break; #endif case TMU_STATUS_NORMAL: /* 1. change state: 1st-throttling */ if (cur_temp >= data->ts.start_1st_throttle) { info->tmu_state = TMU_STATUS_THROTTLED; pr_info("change state: normal->throttle.\n"); #if defined(CONFIG_TC_VOLTAGE) /* check whether temp compesation need or not */ } else if (cur_temp <= data->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) { pr_err("TMU: lock error!\n"); } else { info->tmu_state = TMU_STATUS_TC; pr_info("change state: normal->tc.\n"); } #endif /* 2. polling end and uevent */ } else if ((cur_temp <= data->ts.stop_1st_throttle) && (cur_temp <= data->ts.stop_mem_throttle)) { if (check_handle & THROTTLE_FLAG) { exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU); check_handle &= ~(THROTTLE_FLAG); } pr_debug("check_handle = %d\n", check_handle); notify_change_of_tmu_state(info); pr_info("normal: free cpufreq_limit & interrupt enable.\n"); /* clear to prevent from interfupt by peindig bit */ __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR); exynos_interrupt_enable(info, 1); enable_irq(info->irq); mutex_unlock(&tmu_lock); return; } break; case TMU_STATUS_THROTTLED: /* 1. change state: 2nd-throttling or warning */ if (cur_temp >= data->ts.start_2nd_throttle) { info->tmu_state = TMU_STATUS_WARNING; pr_info("change state: 1st throttle->2nd throttle.\n"); #if defined(CONFIG_TC_VOLTAGE) /* check whether temp compesation need or not */ } else if (cur_temp <= data->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) pr_err("TMU: lock error!\n"); else info->tmu_state = TMU_STATUS_TC; #endif /* 2. 
cpufreq limitation and uevent */ } else if ((cur_temp >= data->ts.start_1st_throttle) && !(check_handle & THROTTLE_FLAG)) { if (check_handle & WARNING_FLAG) { exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU); check_handle &= ~(WARNING_FLAG); } exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, info->cpufreq_level_1st_throttle); check_handle |= THROTTLE_FLAG; pr_debug("check_handle = %d\n", check_handle); notify_change_of_tmu_state(info); pr_info("throttling: set cpufreq upper limit.\n"); /* 3. change state: normal */ } else if ((cur_temp <= data->ts.stop_1st_throttle) && (trend < 0)) { info->tmu_state = TMU_STATUS_NORMAL; pr_info("change state: 1st throttle->normal.\n"); } break; case TMU_STATUS_WARNING: /* 1. change state: tripping */ if (cur_temp >= data->ts.start_tripping) { info->tmu_state = TMU_STATUS_TRIPPED; pr_info("change state: 2nd throttle->trip\n"); #if defined(CONFIG_TC_VOLTAGE) /* check whether temp compesation need or not */ } else if (cur_temp <= data->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) pr_err("TMU: lock error!\n"); else info->tmu_state = TMU_STATUS_TC; #endif /* 2. cpufreq limitation and uevent */ } else if ((cur_temp >= data->ts.start_2nd_throttle) && !(check_handle & WARNING_FLAG)) { if (check_handle & THROTTLE_FLAG) { exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU); check_handle &= ~(THROTTLE_FLAG); } exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU, info->cpufreq_level_2nd_throttle); check_handle |= WARNING_FLAG; pr_debug("check_handle = %d\n", check_handle); notify_change_of_tmu_state(info); pr_info("2nd throttle: cpufreq is limited.\n"); /* 3. change state: 1st-throttling */ } else if ((cur_temp <= data->ts.stop_2nd_throttle) && (trend < 0)) { info->tmu_state = TMU_STATUS_THROTTLED; pr_info("change state: 2nd throttle->1st throttle, " "and release cpufreq upper limit.\n"); } break; case TMU_STATUS_TRIPPED: /* 1. 
call uevent to shut-down */ if ((cur_temp >= data->ts.start_tripping) && (trend > 0) && !(check_handle & TRIPPING_FLAG)) { notify_change_of_tmu_state(info); pr_info("tripping: on waiting shutdown.\n"); check_handle |= TRIPPING_FLAG; pr_debug("check_handle = %d\n", check_handle); #if defined(CONFIG_TC_VOLTAGE) /* check whether temp compesation need or not */ } else if (cur_temp <= data->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) pr_err("TMU: lock error!\n"); else info->tmu_state = TMU_STATUS_TC; #endif /* 2. change state: 2nd-throttling or warning */ } else if ((cur_temp <= data->ts.stop_2nd_throttle) && (trend < 0)) { info->tmu_state = TMU_STATUS_WARNING; pr_info("change state: trip->2nd throttle, " "Check! occured only test mode.\n"); } /* 3. chip protection: kernel panic as SW workaround */ if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) { panic("Emergency!!!! tripping is not treated!\n"); /* clear to prevent from interfupt by peindig bit */ __raw_writel(INTCLEARALL, info->tmu_state + EXYNOS4_TMU_INTCLEAR); enable_irq(info->irq); mutex_unlock(&tmu_lock); return; } break; case TMU_STATUS_INIT: /* sned tmu initial status to platform */ disable_irq(info->irq); if (cur_temp >= data->ts.start_tripping) info->tmu_state = TMU_STATUS_TRIPPED; #if defined(CONFIG_TC_VOLTAGE) /* check whether temp compesation need or not */ else if (cur_temp <= data->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) pr_err("TMU: lock error!\n"); else info->tmu_state = TMU_STATUS_TC; } #endif else if (cur_temp >= data->ts.start_2nd_throttle) info->tmu_state = TMU_STATUS_WARNING; else if (cur_temp >= data->ts.start_1st_throttle) info->tmu_state = TMU_STATUS_THROTTLED; else if (cur_temp <= data->ts.stop_1st_throttle) info->tmu_state = TMU_STATUS_NORMAL; notify_change_of_tmu_state(info); pr_info("%s: inform to init state to platform.\n", __func__); break; default: pr_warn("Bug: checked tmu_state.\n"); if (cur_temp >= data->ts.start_tripping) info->tmu_state = TMU_STATUS_TRIPPED; 
#if defined(CONFIG_TC_VOLTAGE) /* check whether temp compesation need or not */ else if (cur_temp <= data->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) pr_err("TMU: lock error!\n"); else info->tmu_state = TMU_STATUS_TC; } #endif else info->tmu_state = TMU_STATUS_WARNING; break; } /* end */ /* memory throttling */ if (cur_temp >= data->ts.start_mem_throttle) { if (!(auto_refresh_changed) && (trend > 0)) { pr_info("set auto_refresh 1.95us\n"); set_refresh_rate(info->auto_refresh_tq0); auto_refresh_changed = 1; } } else if (cur_temp <= (data->ts.stop_mem_throttle)) { if ((auto_refresh_changed) && (trend < 0)) { pr_info("set auto_refresh 3.9us\n"); set_refresh_rate(info->auto_refresh_normal); auto_refresh_changed = 0; } } info->last_temperature = cur_temp; /* reschedule the next work */ queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate); mutex_unlock(&tmu_lock); return; } static int exynos4210_tmu_init(struct s5p_tmu_info *info) { struct s5p_platform_tmu *data = info->dev->platform_data; unsigned int tmp; unsigned int temp_code_threshold; unsigned int temp_code_throttle, temp_code_warning, temp_code_trip; /* To compensate temperature sensor * get trim informatoin and save to struct tmu_info */ tmp = __raw_readl(info->tmu_base + EXYNOS4_TMU_TRIMINFO); info->te1 = tmp & TMU_TRIMINFO_MASK; info->te2 = ((tmp >> 8) & TMU_TRIMINFO_MASK); /* check boundary the triminfo */ if ((EFUSE_MIN_VALUE > info->te1) || (info->te1 > EFUSE_MAX_VALUE) || (info->te2 != 0)) info->te1 = EFUSE_AVG_VALUE; pr_info("%s: triminfo = 0x%08x, low 8bit = 0x%02x, high 24 bit = 0x%06x\n", __func__, tmp, info->te1, info->te2); /* Need to initial regsiter setting after getting parameter info */ /* [28:23] vref [11:8] slope - Tunning parameter */ __raw_writel(VREF_SLOPE, info->tmu_base + EXYNOS4_TMU_CONTROL); /* Convert celsius temperature value to temperature code value * such as threshold_level, 1st throttle, 2nd throttle, * tripping temperature. 
*/ temp_code_threshold = data->ts.stop_1st_throttle + info->te1 - TMU_DC_VALUE; temp_code_throttle = data->ts.start_1st_throttle - data->ts.stop_1st_throttle; temp_code_warning = data->ts.start_2nd_throttle - data->ts.stop_1st_throttle; temp_code_trip = data->ts.start_tripping - data->ts.stop_1st_throttle; /* Set interrupt trigger level */ __raw_writel(temp_code_threshold, info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP); __raw_writel(temp_code_throttle, info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0); __raw_writel(temp_code_warning, info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1); __raw_writel(temp_code_trip, info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2); __raw_writel(TRIGGER_LEV_MAX, info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3); pr_info("THD_TEMP:0x%02x: TRIG_LEV0: 0x%02x\n" "TRIG_LEV1: 0x%02x TRIG_LEV2: 0x%02x, TRIG_LEV3: 0x%02x\n", __raw_readl(info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP), __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0), __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1), __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2), __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3)); mdelay(50); /* Need to initial regsiter setting after getting parameter info */ /* [28:23] vref [11:8] slope - Tunning parameter */ __raw_writel(VREF_SLOPE, info->tmu_base + EXYNOS4_TMU_CONTROL); /* TMU core enable */ tmp = __raw_readl(info->tmu_base + EXYNOS4_TMU_CONTROL); tmp |= TMUCORE_ENABLE; __raw_writel(tmp, info->tmu_base + EXYNOS4_TMU_CONTROL); /* check interrupt status register */ pr_debug("tmu interrupt status: 0x%02x\n", __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT)); /* LEV0 LEV1 LEV2 interrupt enable */ __raw_writel(INTEN0 | INTEN1 | INTEN2, info->tmu_base + EXYNOS4_TMU_INTEN); return 0; } static int exynos4x12_tmu_init(struct s5p_tmu_info *info) { struct s5p_platform_tmu *data = info->dev->platform_data; unsigned int tmp; unsigned char temp_code_throttle, temp_code_warning, temp_code_trip; /* To compensate temperature sensor, * set 
triminfo control register & get trim informatoin * and save to struct tmu_info */ tmp = __raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRIMINFO_CONROL); tmp |= TMU_RELOAD; __raw_writel(tmp, info->tmu_base + EXYNOS4x12_TMU_TRIMINFO_CONROL); mdelay(1); tmp = __raw_readl(info->tmu_base + EXYNOS4_TMU_TRIMINFO); info->te1 = tmp & TMU_TRIMINFO_MASK; /* In case of non e-fusing chip, s/w workaround */ if (tmp == 0) info->te1 = 0x37; pr_debug("%s: triminfo reg = 0x%08x, value = %u\n", __func__, tmp, info->te1); /* Convert celsius temperature value to temperature code value * such as 1st throttle, 2nd throttle, tripping temperature. * its ranges are between 25 cesius(0x32) to 125 cesius4(0x96) */ temp_code_throttle = data->ts.start_1st_throttle + info->te1 - TMU_DC_VALUE; temp_code_warning = data->ts.start_2nd_throttle + info->te1 - TMU_DC_VALUE; temp_code_trip = data->ts.start_tripping + info->te1 - TMU_DC_VALUE; pr_debug("temp_code_throttle: %u, temp_code_warning: %u\n" "temp_code_trip: %u, info->te1 = %u\n", temp_code_throttle, temp_code_warning, temp_code_trip, info->te1); /* Set interrupt trigger level */ tmp = ((0xFF << 24) | (temp_code_trip << 16) | (temp_code_warning << 8) | (temp_code_throttle << 0)); __raw_writel(tmp, info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE); pr_debug("THD_TEMP_RISE: 0x%08x\n", __raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE)); #if defined(CONFIG_TC_VOLTAGE) /* Get set temperature for tc_voltage and set falling interrupt * trigger level */ tmp = (data->ts.start_tc + info->te1 - TMU_DC_VALUE) << 0; __raw_writel(tmp, info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL); pr_debug("THD_TEMP_FALL: 0x%08x\n", __raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL)); #endif /* TMU core enable */ tmp = __raw_readl(info->tmu_base + EXYNOS4_TMU_CONTROL); tmp |= (TMUCORE_ENABLE | (0x6 << 20)); /* MUX_ADDR : 110b */ __raw_writel(tmp, info->tmu_base + EXYNOS4_TMU_CONTROL); /* Because temperature sensing time is appro 940us, * tmu 
is enabled and 1st valid sample can get 1ms after. */ mdelay(1); /* check interrupt status register */ pr_debug("tmu interrupt status: 0x%08x\n", __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT)); /* THRESHOLD_TEMP_RISE0, RISE1, RISE2 interrupt enable */ __raw_writel(INTEN_RISE0 | INTEN_RISE1 | INTEN_RISE2, info->tmu_base + EXYNOS4_TMU_INTEN); #if defined(CONFIG_TC_VOLTAGE) tmp = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTEN); tmp |= INTEN_FALL0; __raw_writel(tmp, info->tmu_base + EXYNOS4_TMU_INTEN); #endif return 0; } static int tmu_initialize(struct platform_device *pdev) { struct s5p_tmu_info *info = platform_get_drvdata(pdev); unsigned int tmp; unsigned ret; /* check if sensing is idle */ tmp = (__raw_readl(info->tmu_base + EXYNOS4_TMU_STATUS) & 0x1); if (!tmp) { pr_err("failed to start tmu driver\n"); return -ENOENT; } if (soc_is_exynos4210()) ret = exynos4210_tmu_init(info); else ret = exynos4x12_tmu_init(info); return ret; } static irqreturn_t exynos4x12_tmu_irq_handler(int irq, void *id) { struct s5p_tmu_info *info = id; unsigned int status; disable_irq_nosync(irq); status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT) & 0x1FFFF; pr_info("EXYNOS4x12_tmu interrupt: INTSTAT = 0x%08x\n", status); /* To handle multiple interrupt pending, * interrupt by high temperature are serviced with priority. 
*/ #if defined(CONFIG_TC_VOLTAGE) if (status & INTSTAT_FALL0) { info->tmu_state = TMU_STATUS_TC; __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR); exynos_interrupt_enable(info, 0); } else if (status & INTSTAT_RISE2) { info->tmu_state = TMU_STATUS_TRIPPED; __raw_writel(INTCLEAR_RISE2, info->tmu_base + EXYNOS4_TMU_INTCLEAR); #else if (status & INTSTAT_RISE2) { info->tmu_state = TMU_STATUS_TRIPPED; __raw_writel(INTCLEAR_RISE2, info->tmu_base + EXYNOS4_TMU_INTCLEAR); #endif } else if (status & INTSTAT_RISE1) { info->tmu_state = TMU_STATUS_WARNING; __raw_writel(INTCLEAR_RISE1, info->tmu_base + EXYNOS4_TMU_INTCLEAR); } else if (status & INTSTAT_RISE0) { info->tmu_state = TMU_STATUS_THROTTLED; __raw_writel(INTCLEAR_RISE0, info->tmu_base + EXYNOS4_TMU_INTCLEAR); } else { pr_err("%s: interrupt error\n", __func__); __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR); queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate / 2); return -ENODEV; } /* read current temperature & save */ info->last_temperature = get_curr_temp(info); queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate); return IRQ_HANDLED; } static irqreturn_t exynos4210_tmu_irq_handler(int irq, void *id) { struct s5p_tmu_info *info = id; unsigned int status; disable_irq_nosync(irq); status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT); pr_info("EXYNOS4212_tmu interrupt: INTSTAT = 0x%08x\n", status); /* To handle multiple interrupt pending, * interrupt by high temperature are serviced with priority. 
*/ if (status & TMU_INTSTAT2) { info->tmu_state = TMU_STATUS_TRIPPED; __raw_writel(INTCLEAR2, info->tmu_base + EXYNOS4_TMU_INTCLEAR); } else if (status & TMU_INTSTAT1) { info->tmu_state = TMU_STATUS_WARNING; __raw_writel(INTCLEAR1, info->tmu_base + EXYNOS4_TMU_INTCLEAR); } else if (status & TMU_INTSTAT0) { info->tmu_state = TMU_STATUS_THROTTLED; __raw_writel(INTCLEAR0, info->tmu_base + EXYNOS4_TMU_INTCLEAR); } else { pr_err("%s: interrupt error\n", __func__); __raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR); queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate / 2); return -ENODEV; } /* read current temperature & save */ info->last_temperature = get_curr_temp(info); queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate); return IRQ_HANDLED; } #ifdef CONFIG_TMU_SYSFS static ssize_t s5p_tmu_show_curr_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct s5p_tmu_info *info = dev_get_drvdata(dev); unsigned int curr_temp; curr_temp = get_curr_temp(info); curr_temp *= 10; pr_info("curr temp = %d\n", curr_temp); return sprintf(buf, "%d\n", curr_temp); } static DEVICE_ATTR(curr_temp, S_IRUGO, s5p_tmu_show_curr_temp, NULL); #endif static int __devinit s5p_tmu_probe(struct platform_device *pdev) { struct s5p_tmu_info *info; struct s5p_platform_tmu *pdata; struct resource *res; unsigned int mask = (enable_mask & ENABLE_DBGMASK); int ret = 0; pr_debug("%s: probe=%p\n", __func__, pdev); info = kzalloc(sizeof(struct s5p_tmu_info), GFP_KERNEL); if (!info) { dev_err(&pdev->dev, "failed to alloc memory!\n"); ret = -ENOMEM; goto err_nomem; } platform_set_drvdata(pdev, info); info->dev = &pdev->dev; info->tmu_state = TMU_STATUS_INIT; /* set cpufreq limit level at 1st_throttle & 2nd throttle */ pdata = info->dev->platform_data; if (pdata->cpufreq.limit_1st_throttle) exynos_cpufreq_get_level(pdata->cpufreq.limit_1st_throttle, &info->cpufreq_level_1st_throttle); if (pdata->cpufreq.limit_2nd_throttle) 
exynos_cpufreq_get_level(pdata->cpufreq.limit_2nd_throttle, &info->cpufreq_level_2nd_throttle); pr_info("@@@ %s: cpufreq_limit: 1st_throttle: %u, 2nd_throttle = %u\n", __func__, info->cpufreq_level_1st_throttle, info->cpufreq_level_2nd_throttle); #if defined(CONFIG_TC_VOLTAGE) /* Temperature compensated voltage */ if (exynos_find_cpufreq_level_by_volt(pdata->temp_compensate.arm_volt, &info->cpulevel_tc) < 0) { dev_err(&pdev->dev, "cpufreq_get_level error\n"); ret = -EINVAL; goto err_nores; } #ifdef CONFIG_BUSFREQ_OPP /* To lock bus frequency in OPP mode */ info->bus_dev = dev_get("exynos-busfreq"); if (info->bus_dev < 0) { dev_err(&pdev->dev, "Failed to get_dev\n"); ret = -EINVAL; goto err_nores; } if (exynos4x12_find_busfreq_by_volt(pdata->temp_compensate.bus_volt, &info->busfreq_tc)) { dev_err(&pdev->dev, "get_busfreq_value error\n"); ret = -EINVAL; goto err_nores; } #endif pr_info("%s: cpufreq_level[%u], busfreq_value[%u]\n", __func__, info->cpulevel_tc, info->busfreq_tc); #endif /* Map auto_refresh_rate of normal & tq0 mode */ info->auto_refresh_tq0 = get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_TQ0); info->auto_refresh_normal = get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_NORMAL); /* To poll current temp, set sampling rate to ONE second sampling */ info->sampling_rate = usecs_to_jiffies(1000 * 1000); /* 10sec monitroing */ info->monitor_period = usecs_to_jiffies(10000 * 1000); /* support test mode */ if (mask & ENABLE_TEST_MODE) set_temperature_params(info); else print_temperature_params(info); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get memory region resource\n"); ret = -ENODEV; goto err_nores; } info->ioarea = request_mem_region(res->start, res->end-res->start + 1, pdev->name); if (!(info->ioarea)) { dev_err(&pdev->dev, "failed to reserve memory region\n"); ret = -EBUSY; goto err_nores; } info->tmu_base = ioremap(res->start, (res->end - res->start) + 1); if (!(info->tmu_base)) { 
dev_err(&pdev->dev, "failed ioremap()\n"); ret = -ENOMEM; goto err_nomap; } tmu_monitor_wq = create_freezable_workqueue(dev_name(&pdev->dev)); if (!tmu_monitor_wq) { pr_info("Creation of tmu_monitor_wq failed\n"); ret = -ENOMEM; goto err_wq; } /* To support periodic temprature monitoring */ if (mask & ENABLE_TEMP_MON) { INIT_DELAYED_WORK_DEFERRABLE(&info->monitor, exynos4_poll_cur_temp); queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor, info->monitor_period); } INIT_DELAYED_WORK_DEFERRABLE(&info->polling, exynos4_handler_tmu_state); info->irq = platform_get_irq(pdev, 0); if (info->irq < 0) { dev_err(&pdev->dev, "no irq for thermal %d\n", info->irq); ret = -EINVAL; goto err_irq; } if (soc_is_exynos4210()) ret = request_irq(info->irq, exynos4210_tmu_irq_handler, IRQF_DISABLED, "s5p-tmu interrupt", info); else ret = request_irq(info->irq, exynos4x12_tmu_irq_handler, IRQF_DISABLED, "s5p-tmu interrupt", info); if (ret) { dev_err(&pdev->dev, "request_irq is failed. %d\n", ret); goto err_irq; } ret = device_create_file(&pdev->dev, &dev_attr_temperature); if (ret != 0) { pr_err("Failed to create temperatue file: %d\n", ret); goto err_sysfs_file1; } ret = device_create_file(&pdev->dev, &dev_attr_tmu_state); if (ret != 0) { pr_err("Failed to create tmu_state file: %d\n", ret); goto err_sysfs_file2; } ret = device_create_file(&pdev->dev, &dev_attr_lot_id); if (ret != 0) { pr_err("Failed to create lot id file: %d\n", ret); goto err_sysfs_file3; } ret = tmu_initialize(pdev); if (ret) goto err_init; #ifdef CONFIG_TMU_SYSFS ret = device_create_file(&pdev->dev, &dev_attr_curr_temp); if (ret < 0) { dev_err(&pdev->dev, "Failed to create sysfs group\n"); goto err_init; } #endif #ifdef CONFIG_TMU_DEBUG ret = device_create_file(&pdev->dev, &dev_attr_print_state); if (ret) { dev_err(&pdev->dev, "Failed to create tmu sysfs group\n\n"); return ret; } #endif #if defined(CONFIG_TC_VOLTAGE) /* s/w workaround for fast service when interrupt is not occured, * such as current temp is 
lower than tc interrupt temperature * or current temp is continuosly increased. */ if (get_curr_temp(info) <= pdata->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) pr_err("TMU: lock error!\n"); } #if defined(CONFIG_VIDEO_MALI400MP) if (mali_voltage_lock_init()) pr_err("Failed to initialize mail voltage lock.\n"); #endif #endif /* initialize tmu_state */ queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, info->sampling_rate); return ret; err_init: device_remove_file(&pdev->dev, &dev_attr_lot_id); err_sysfs_file3: device_remove_file(&pdev->dev, &dev_attr_tmu_state); err_sysfs_file2: device_remove_file(&pdev->dev, &dev_attr_temperature); err_sysfs_file1: if (info->irq >= 0) free_irq(info->irq, info); err_irq: destroy_workqueue(tmu_monitor_wq); err_wq: iounmap(info->tmu_base); err_nomap: release_resource(info->ioarea); kfree(info->ioarea); err_nores: kfree(info); info = NULL; err_nomem: dev_err(&pdev->dev, "initialization failed.\n"); return ret; } static int __devinit s5p_tmu_remove(struct platform_device *pdev) { struct s5p_tmu_info *info = platform_get_drvdata(pdev); cancel_delayed_work(&info->polling); destroy_workqueue(tmu_monitor_wq); device_remove_file(&pdev->dev, &dev_attr_temperature); device_remove_file(&pdev->dev, &dev_attr_tmu_state); if (info->irq >= 0) free_irq(info->irq, info); iounmap(info->tmu_base); release_resource(info->ioarea); kfree(info->ioarea); kfree(info); info = NULL; pr_info("%s is removed\n", dev_name(&pdev->dev)); return 0; } #ifdef CONFIG_PM static int s5p_tmu_suspend(struct platform_device *pdev, pm_message_t state) { struct s5p_tmu_info *info = platform_get_drvdata(pdev); if (!info) return -EAGAIN; /* save register value */ info->reg_save[0] = __raw_readl(info->tmu_base + EXYNOS4_TMU_CONTROL); info->reg_save[1] = __raw_readl(info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL); info->reg_save[2] = __raw_readl(info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0); info->reg_save[3] = __raw_readl(info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1); 
info->reg_save[4] = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTEN); if (soc_is_exynos4210()) { info->reg_save[5] = __raw_readl(info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP); info->reg_save[6] = __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0); info->reg_save[7] = __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1); info->reg_save[8] = __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2); info->reg_save[9] = __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3); } else { info->reg_save[5] = __raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE); #if defined(CONFIG_TC_VOLTAGE) info->reg_save[6] = __raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL); #endif } disable_irq(info->irq); return 0; } static int s5p_tmu_resume(struct platform_device *pdev) { struct s5p_tmu_info *info = platform_get_drvdata(pdev); struct s5p_platform_tmu *data; if (!info || !(info->dev)) return -EAGAIN; data = info->dev->platform_data; /* restore tmu register value */ __raw_writel(info->reg_save[0], info->tmu_base + EXYNOS4_TMU_CONTROL); __raw_writel(info->reg_save[1], info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL); __raw_writel(info->reg_save[2], info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0); __raw_writel(info->reg_save[3], info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1); if (soc_is_exynos4210()) { __raw_writel(info->reg_save[5], info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP); __raw_writel(info->reg_save[6], info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0); __raw_writel(info->reg_save[7], info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1); __raw_writel(info->reg_save[8], info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2); __raw_writel(info->reg_save[9], info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3); } else { __raw_writel(info->reg_save[5], info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE); #if defined(CONFIG_TC_VOLTAGE) __raw_writel(info->reg_save[6], info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL); #endif } __raw_writel(info->reg_save[4], info->tmu_base + 
EXYNOS4_TMU_INTEN); #if defined(CONFIG_TC_VOLTAGE) /* s/w workaround for fast service when interrupt is not occured, * such as current temp is lower than tc interrupt temperature * or current temp is continuosly increased.. */ mdelay(1); if (get_curr_temp(info) <= data->ts.start_tc) { if (exynos_tc_volt(info, 1) < 0) pr_err("TMU: lock error!\n"); } #endif /* Find out tmu_state after wakeup */ queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, 0); return 0; } #else #define s5p_tmu_suspend NULL #define s5p_tmu_resume NULL #endif static struct platform_driver s5p_tmu_driver = { .probe = s5p_tmu_probe, .remove = s5p_tmu_remove, .suspend = s5p_tmu_suspend, .resume = s5p_tmu_resume, .driver = { .name = "s5p-tmu", .owner = THIS_MODULE, }, }; static int __init s5p_tmu_driver_init(void) { return platform_driver_register(&s5p_tmu_driver); } static void __exit s5p_tmu_driver_exit(void) { platform_driver_unregister(&s5p_tmu_driver); } late_initcall(s5p_tmu_driver_init); module_exit(s5p_tmu_driver_exit);
gpl-2.0
sunny-wyb/xen-4.1.2
tools/firmware/etherboot/ipxe/src/arch/i386/image/eltorito.c
33
9006
/* * Copyright (C) 2007 Michael Brown <mbrown@fensystems.co.uk>. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ FILE_LICENCE ( GPL2_OR_LATER ); /** * @file * * El Torito bootable ISO image format * */ #include <stdint.h> #include <errno.h> #include <assert.h> #include <realmode.h> #include <bootsector.h> #include <int13.h> #include <gpxe/uaccess.h> #include <gpxe/image.h> #include <gpxe/segment.h> #include <gpxe/ramdisk.h> #include <gpxe/init.h> #define ISO9660_BLKSIZE 2048 #define ELTORITO_VOL_DESC_OFFSET ( 17 * ISO9660_BLKSIZE ) /** An El Torito Boot Record Volume Descriptor */ struct eltorito_vol_desc { /** Boot record indicator; must be 0 */ uint8_t record_indicator; /** ISO-9660 identifier; must be "CD001" */ uint8_t iso9660_id[5]; /** Version, must be 1 */ uint8_t version; /** Boot system indicator; must be "EL TORITO SPECIFICATION" */ uint8_t system_indicator[32]; /** Unused */ uint8_t unused[32]; /** Boot catalog sector */ uint32_t sector; } __attribute__ (( packed )); /** An El Torito Boot Catalog Validation Entry */ struct eltorito_validation_entry { /** Header ID; must be 1 */ uint8_t header_id; /** Platform ID * * 0 = 80x86 * 1 = PowerPC * 2 = Mac */ uint8_t platform_id; /** Reserved */ uint16_t reserved; /** ID string */ uint8_t id_string[24]; /** Checksum word */ uint16_t checksum; /** Signature; must be 0xaa55 */ uint16_t signature; } 
__attribute__ (( packed )); /** A bootable entry in the El Torito Boot Catalog */ struct eltorito_boot_entry { /** Boot indicator * * Must be @c ELTORITO_BOOTABLE for a bootable ISO image */ uint8_t indicator; /** Media type * */ uint8_t media_type; /** Load segment */ uint16_t load_segment; /** System type */ uint8_t filesystem; /** Unused */ uint8_t reserved_a; /** Sector count */ uint16_t length; /** Starting sector */ uint32_t start; /** Unused */ uint8_t reserved_b[20]; } __attribute__ (( packed )); /** Boot indicator for a bootable ISO image */ #define ELTORITO_BOOTABLE 0x88 /** El Torito media types */ enum eltorito_media_type { /** No emulation */ ELTORITO_NO_EMULATION = 0, }; struct image_type eltorito_image_type __image_type ( PROBE_NORMAL ); /** * Calculate 16-bit word checksum * * @v data Data to checksum * @v len Length (in bytes, must be even) * @ret sum Checksum */ static unsigned int word_checksum ( void *data, size_t len ) { uint16_t *words; uint16_t sum = 0; for ( words = data ; len ; words++, len -= 2 ) { sum += *words; } return sum; } /** * Execute El Torito image * * @v image El Torito image * @ret rc Return status code */ static int eltorito_exec ( struct image *image ) { struct ramdisk ramdisk; struct int13_drive int13_drive; unsigned int load_segment = image->priv.ul; unsigned int load_offset = ( load_segment ? 
0 : 0x7c00 ); int rc; memset ( &ramdisk, 0, sizeof ( ramdisk ) ); init_ramdisk ( &ramdisk, image->data, image->len, ISO9660_BLKSIZE ); memset ( &int13_drive, 0, sizeof ( int13_drive ) ); int13_drive.blockdev = &ramdisk.blockdev; register_int13_drive ( &int13_drive ); if ( ( rc = call_bootsector ( load_segment, load_offset, int13_drive.drive ) ) != 0 ) { DBGC ( image, "ElTorito %p boot failed: %s\n", image, strerror ( rc ) ); goto err; } rc = -ECANCELED; /* -EIMPOSSIBLE */ err: unregister_int13_drive ( &int13_drive ); return rc; } /** * Read and verify El Torito Boot Record Volume Descriptor * * @v image El Torito file * @ret catalog_offset Offset of Boot Catalog * @ret rc Return status code */ static int eltorito_read_voldesc ( struct image *image, unsigned long *catalog_offset ) { static const struct eltorito_vol_desc vol_desc_signature = { .record_indicator = 0, .iso9660_id = "CD001", .version = 1, .system_indicator = "EL TORITO SPECIFICATION", }; struct eltorito_vol_desc vol_desc; /* Sanity check */ if ( image->len < ( ELTORITO_VOL_DESC_OFFSET + ISO9660_BLKSIZE ) ) { DBGC ( image, "ElTorito %p too short\n", image ); return -ENOEXEC; } /* Read and verify Boot Record Volume Descriptor */ copy_from_user ( &vol_desc, image->data, ELTORITO_VOL_DESC_OFFSET, sizeof ( vol_desc ) ); if ( memcmp ( &vol_desc, &vol_desc_signature, offsetof ( typeof ( vol_desc ), sector ) ) != 0 ) { DBGC ( image, "ElTorito %p invalid Boot Record Volume " "Descriptor\n", image ); return -ENOEXEC; } *catalog_offset = ( vol_desc.sector * ISO9660_BLKSIZE ); DBGC ( image, "ElTorito %p boot catalog at offset %#lx\n", image, *catalog_offset ); return 0; } /** * Read and verify El Torito Boot Catalog * * @v image El Torito file * @v catalog_offset Offset of Boot Catalog * @ret boot_entry El Torito boot entry * @ret rc Return status code */ static int eltorito_read_catalog ( struct image *image, unsigned long catalog_offset, struct eltorito_boot_entry *boot_entry ) { struct eltorito_validation_entry 
validation_entry; /* Sanity check */ if ( image->len < ( catalog_offset + ISO9660_BLKSIZE ) ) { DBGC ( image, "ElTorito %p bad boot catalog offset %#lx\n", image, catalog_offset ); return -ENOEXEC; } /* Read and verify the Validation Entry of the Boot Catalog */ copy_from_user ( &validation_entry, image->data, catalog_offset, sizeof ( validation_entry ) ); if ( word_checksum ( &validation_entry, sizeof ( validation_entry ) ) != 0 ) { DBGC ( image, "ElTorito %p bad Validation Entry checksum\n", image ); return -ENOEXEC; } /* Read and verify the Initial/Default entry */ copy_from_user ( boot_entry, image->data, ( catalog_offset + sizeof ( validation_entry ) ), sizeof ( *boot_entry ) ); if ( boot_entry->indicator != ELTORITO_BOOTABLE ) { DBGC ( image, "ElTorito %p not bootable\n", image ); return -ENOEXEC; } if ( boot_entry->media_type != ELTORITO_NO_EMULATION ) { DBGC ( image, "ElTorito %p cannot support media type %d\n", image, boot_entry->media_type ); return -ENOTSUP; } DBGC ( image, "ElTorito %p media type %d segment %04x\n", image, boot_entry->media_type, boot_entry->load_segment ); return 0; } /** * Load El Torito virtual disk image into memory * * @v image El Torito file * @v boot_entry El Torito boot entry * @ret rc Return status code */ static int eltorito_load_disk ( struct image *image, struct eltorito_boot_entry *boot_entry ) { unsigned long start = ( boot_entry->start * ISO9660_BLKSIZE ); unsigned long length = ( boot_entry->length * ISO9660_BLKSIZE ); unsigned int load_segment; userptr_t buffer; int rc; /* Sanity check */ if ( image->len < ( start + length ) ) { DBGC ( image, "ElTorito %p virtual disk lies outside image\n", image ); return -ENOEXEC; } DBGC ( image, "ElTorito %p virtual disk at %#lx+%#lx\n", image, start, length ); /* Calculate load address */ load_segment = boot_entry->load_segment; buffer = real_to_user ( load_segment, ( load_segment ? 
0 : 0x7c00 ) ); /* Verify and prepare segment */ if ( ( rc = prep_segment ( buffer, length, length ) ) != 0 ) { DBGC ( image, "ElTorito %p could not prepare segment: %s\n", image, strerror ( rc ) ); return rc; } /* Copy image to segment */ memcpy_user ( buffer, 0, image->data, start, length ); return 0; } /** * Load El Torito image into memory * * @v image El Torito file * @ret rc Return status code */ static int eltorito_load ( struct image *image ) { struct eltorito_boot_entry boot_entry; unsigned long bootcat_offset; int rc; /* Read Boot Record Volume Descriptor, if present */ if ( ( rc = eltorito_read_voldesc ( image, &bootcat_offset ) ) != 0 ) return rc; /* This is an El Torito image, valid or otherwise */ if ( ! image->type ) image->type = &eltorito_image_type; /* Read Boot Catalog */ if ( ( rc = eltorito_read_catalog ( image, bootcat_offset, &boot_entry ) ) != 0 ) return rc; /* Load Virtual Disk image */ if ( ( rc = eltorito_load_disk ( image, &boot_entry ) ) != 0 ) return rc; /* Record load segment in image private data field */ image->priv.ul = boot_entry.load_segment; return 0; } /** El Torito image type */ struct image_type eltorito_image_type __image_type ( PROBE_NORMAL ) = { .name = "El Torito", .load = eltorito_load, .exec = eltorito_exec, };
gpl-2.0
Combitech/simcom-linux-kernel
arch/arm/mach-pxa/tavorevb.c
545
11329
/* * linux/arch/arm/mach-pxa/tavorevb.c * * Support for the Marvell PXA930 Evaluation Board * * Copyright (C) 2007-2008 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * publishhed by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/smc91x.h> #include <linux/pwm_backlight.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/pxa930.h> #include <mach/pxafb.h> #include <mach/pxa27x_keypad.h> #include "devices.h" #include "generic.h" /* Tavor EVB MFP configurations */ static mfp_cfg_t tavorevb_mfp_cfg[] __initdata = { /* Ethernet */ DF_nCS1_nCS3, GPIO47_GPIO, /* LCD */ GPIO23_LCD_DD0, GPIO24_LCD_DD1, GPIO25_LCD_DD2, GPIO26_LCD_DD3, GPIO27_LCD_DD4, GPIO28_LCD_DD5, GPIO29_LCD_DD6, GPIO44_LCD_DD7, GPIO21_LCD_CS, GPIO22_LCD_CS2, GPIO17_LCD_FCLK_RD, GPIO18_LCD_LCLK_A0, GPIO19_LCD_PCLK_WR, /* LCD Backlight */ GPIO43_PWM3, /* primary backlight */ GPIO32_PWM0, /* secondary backlight */ /* Keypad */ GPIO0_KP_MKIN_0, GPIO2_KP_MKIN_1, GPIO4_KP_MKIN_2, GPIO6_KP_MKIN_3, GPIO8_KP_MKIN_4, GPIO10_KP_MKIN_5, GPIO12_KP_MKIN_6, GPIO1_KP_MKOUT_0, GPIO3_KP_MKOUT_1, GPIO5_KP_MKOUT_2, GPIO7_KP_MKOUT_3, GPIO9_KP_MKOUT_4, GPIO11_KP_MKOUT_5, GPIO13_KP_MKOUT_6, GPIO14_KP_DKIN_2, GPIO15_KP_DKIN_3, }; #define TAVOREVB_ETH_PHYS (0x14000000) static struct resource smc91x_resources[] = { [0] = { .start = (TAVOREVB_ETH_PHYS + 0x300), .end = (TAVOREVB_ETH_PHYS + 0xfffff), .flags = IORESOURCE_MEM, }, [1] = { .start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO47)), .end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO47)), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct smc91x_platdata tavorevb_smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_USE_DMA, }; 
static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &tavorevb_smc91x_info, }, }; #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) static unsigned int tavorevb_matrix_key_map[] = { /* KEY(row, col, key_code) */ KEY(0, 4, KEY_A), KEY(0, 5, KEY_B), KEY(0, 6, KEY_C), KEY(1, 4, KEY_E), KEY(1, 5, KEY_F), KEY(1, 6, KEY_G), KEY(2, 4, KEY_I), KEY(2, 5, KEY_J), KEY(2, 6, KEY_K), KEY(3, 4, KEY_M), KEY(3, 5, KEY_N), KEY(3, 6, KEY_O), KEY(4, 5, KEY_R), KEY(4, 6, KEY_S), KEY(5, 4, KEY_U), KEY(5, 4, KEY_V), KEY(5, 6, KEY_W), KEY(6, 4, KEY_Y), KEY(6, 5, KEY_Z), KEY(0, 3, KEY_0), KEY(2, 0, KEY_1), KEY(2, 1, KEY_2), KEY(2, 2, KEY_3), KEY(2, 3, KEY_4), KEY(1, 0, KEY_5), KEY(1, 1, KEY_6), KEY(1, 2, KEY_7), KEY(1, 3, KEY_8), KEY(0, 2, KEY_9), KEY(6, 6, KEY_SPACE), KEY(0, 0, KEY_KPASTERISK), /* * */ KEY(0, 1, KEY_KPDOT), /* # */ KEY(4, 1, KEY_UP), KEY(4, 3, KEY_DOWN), KEY(4, 0, KEY_LEFT), KEY(4, 2, KEY_RIGHT), KEY(6, 0, KEY_HOME), KEY(3, 2, KEY_END), KEY(6, 1, KEY_DELETE), KEY(5, 2, KEY_BACK), KEY(6, 3, KEY_CAPSLOCK), /* KEY_LEFTSHIFT), */ KEY(4, 4, KEY_ENTER), /* scroll push */ KEY(6, 2, KEY_ENTER), /* keypad action */ KEY(3, 1, KEY_SEND), KEY(5, 3, KEY_RECORD), KEY(5, 0, KEY_VOLUMEUP), KEY(5, 1, KEY_VOLUMEDOWN), KEY(3, 0, KEY_F22), /* soft1 */ KEY(3, 3, KEY_F23), /* soft2 */ }; static struct pxa27x_keypad_platform_data tavorevb_keypad_info = { .matrix_key_rows = 7, .matrix_key_cols = 7, .matrix_key_map = tavorevb_matrix_key_map, .matrix_key_map_size = ARRAY_SIZE(tavorevb_matrix_key_map), .debounce_interval = 30, }; static void __init tavorevb_init_keypad(void) { pxa_set_keypad_info(&tavorevb_keypad_info); } #else static inline void tavorevb_init_keypad(void) {} #endif /* CONFIG_KEYBOARD_PXA27x || CONFIG_KEYBOARD_PXA27x_MODULE */ #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static struct platform_pwm_backlight_data 
tavorevb_backlight_data[] = { [0] = { /* primary backlight */ .pwm_id = 2, .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 100000, }, [1] = { /* secondary backlight */ .pwm_id = 0, .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 100000, }, }; static struct platform_device tavorevb_backlight_devices[] = { [0] = { .name = "pwm-backlight", .id = 0, .dev = { .platform_data = &tavorevb_backlight_data[0], }, }, [1] = { .name = "pwm-backlight", .id = 1, .dev = { .platform_data = &tavorevb_backlight_data[1], }, }, }; static uint16_t panel_init[] = { /* DSTB OUT */ SMART_CMD(0x00), SMART_CMD_NOOP, SMART_DELAY(1), SMART_CMD(0x00), SMART_CMD_NOOP, SMART_DELAY(1), SMART_CMD(0x00), SMART_CMD_NOOP, SMART_DELAY(1), /* STB OUT */ SMART_CMD(0x00), SMART_CMD(0x1D), SMART_DAT(0x00), SMART_DAT(0x05), SMART_DELAY(1), /* P-ON Init sequence */ SMART_CMD(0x00), /* OSC ON */ SMART_CMD(0x00), SMART_DAT(0x00), SMART_DAT(0x01), SMART_CMD(0x00), SMART_CMD(0x01), /* SOURCE DRIVER SHIFT DIRECTION and display RAM setting */ SMART_DAT(0x01), SMART_DAT(0x27), SMART_CMD(0x00), SMART_CMD(0x02), /* LINE INV */ SMART_DAT(0x02), SMART_DAT(0x00), SMART_CMD(0x00), SMART_CMD(0x03), /* IF mode(1) */ SMART_DAT(0x01), /* 8bit smart mode(8-8),high speed write mode */ SMART_DAT(0x30), SMART_CMD(0x07), SMART_CMD(0x00), /* RAM Write Mode */ SMART_DAT(0x00), SMART_DAT(0x03), SMART_CMD(0x00), /* DISPLAY Setting, 262K, fixed(NO scroll), no split screen */ SMART_CMD(0x07), SMART_DAT(0x40), /* 16/18/19 BPP */ SMART_DAT(0x00), SMART_CMD(0x00), SMART_CMD(0x08), /* BP, FP Seting, BP=2H, FP=3H */ SMART_DAT(0x03), SMART_DAT(0x02), SMART_CMD(0x00), SMART_CMD(0x0C), /* IF mode(2), using internal clock & MPU */ SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x00), SMART_CMD(0x0D), /* Frame setting, 1Min. 
Frequence, 16CLK */ SMART_DAT(0x00), SMART_DAT(0x10), SMART_CMD(0x00), SMART_CMD(0x12), /* Timing(1),ASW W=4CLK, ASW ST=1CLK */ SMART_DAT(0x03), SMART_DAT(0x02), SMART_CMD(0x00), SMART_CMD(0x13), /* Timing(2),OEV ST=0.5CLK, OEV ED=1CLK */ SMART_DAT(0x01), SMART_DAT(0x02), SMART_CMD(0x00), SMART_CMD(0x14), /* Timing(3), ASW HOLD=0.5CLK */ SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x00), SMART_CMD(0x15), /* Timing(4), CKV ST=0CLK, CKV ED=1CLK */ SMART_DAT(0x20), SMART_DAT(0x00), SMART_CMD(0x00), SMART_CMD(0x1C), SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x03), SMART_CMD(0x00), SMART_DAT(0x04), SMART_DAT(0x03), SMART_CMD(0x03), SMART_CMD(0x01), SMART_DAT(0x03), SMART_DAT(0x04), SMART_CMD(0x03), SMART_CMD(0x02), SMART_DAT(0x04), SMART_DAT(0x03), SMART_CMD(0x03), SMART_CMD(0x03), SMART_DAT(0x03), SMART_DAT(0x03), SMART_CMD(0x03), SMART_CMD(0x04), SMART_DAT(0x01), SMART_DAT(0x01), SMART_CMD(0x03), SMART_CMD(0x05), SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x04), SMART_CMD(0x02), SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x04), SMART_CMD(0x03), SMART_DAT(0x01), SMART_DAT(0x3F), SMART_DELAY(0), /* DISP RAM setting: 240*320 */ SMART_CMD(0x04), /* HADDR, START 0 */ SMART_CMD(0x06), SMART_DAT(0x00), SMART_DAT(0x00), /* x1,3 */ SMART_CMD(0x04), /* HADDR, END 4 */ SMART_CMD(0x07), SMART_DAT(0x00), SMART_DAT(0xEF), /* x2, 7 */ SMART_CMD(0x04), /* VADDR, START 8 */ SMART_CMD(0x08), SMART_DAT(0x00), /* y1, 10 */ SMART_DAT(0x00), /* y1, 11 */ SMART_CMD(0x04), /* VADDR, END 12 */ SMART_CMD(0x09), SMART_DAT(0x01), /* y2, 14 */ SMART_DAT(0x3F), /* y2, 15 */ SMART_CMD(0x02), /* RAM ADDR SETTING 16 */ SMART_CMD(0x00), SMART_DAT(0x00), SMART_DAT(0x00), /* x1, 19 */ SMART_CMD(0x02), /* RAM ADDR SETTING 20 */ SMART_CMD(0x01), SMART_DAT(0x00), /* y1, 22 */ SMART_DAT(0x00), /* y1, 23 */ }; static uint16_t panel_on[] = { /* Power-IC ON */ SMART_CMD(0x01), SMART_CMD(0x02), SMART_DAT(0x07), SMART_DAT(0x7D), SMART_CMD(0x01), SMART_CMD(0x03), SMART_DAT(0x00), SMART_DAT(0x05), 
SMART_CMD(0x01), SMART_CMD(0x04), SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x01), SMART_CMD(0x05), SMART_DAT(0x00), SMART_DAT(0x15), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xC0), SMART_DAT(0x10), SMART_DELAY(30), /* DISP ON */ SMART_CMD(0x01), SMART_CMD(0x01), SMART_DAT(0x00), SMART_DAT(0x01), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xFF), SMART_DAT(0xFE), SMART_DELAY(150), }; static uint16_t panel_off[] = { SMART_CMD(0x00), SMART_CMD(0x1E), SMART_DAT(0x00), SMART_DAT(0x0A), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xFF), SMART_DAT(0xEE), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xF8), SMART_DAT(0x12), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xE8), SMART_DAT(0x11), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xC0), SMART_DAT(0x11), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0x40), SMART_DAT(0x11), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0x00), SMART_DAT(0x10), }; static uint16_t update_framedata[] = { /* write ram */ SMART_CMD(0x02), SMART_CMD(0x02), /* write frame data */ SMART_CMD_WRITE_FRAME, }; static void ltm020d550_lcd_power(int on, struct fb_var_screeninfo *var) { struct fb_info *info = container_of(var, struct fb_info, var); if (on) { pxafb_smart_queue(info, ARRAY_AND_SIZE(panel_init)); pxafb_smart_queue(info, ARRAY_AND_SIZE(panel_on)); } else { pxafb_smart_queue(info, ARRAY_AND_SIZE(panel_off)); } if (pxafb_smart_flush(info)) pr_err("%s: timed out\n", __func__); } static void ltm020d550_update(struct fb_info *info) { pxafb_smart_queue(info, ARRAY_AND_SIZE(update_framedata)); pxafb_smart_flush(info); } static struct pxafb_mode_info toshiba_ltm020d550_modes[] = { [0] = { .xres = 240, .yres = 320, .bpp = 16, .a0csrd_set_hld = 30, .a0cswr_set_hld = 30, .wr_pulse_width = 30, .rd_pulse_width = 170, .op_hold_time = 30, .cmd_inh_time = 60, /* L_LCLK_A0 and L_LCLK_RD active low */ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, }; static struct pxafb_mach_info tavorevb_lcd_info = { .modes = toshiba_ltm020d550_modes, .num_modes = 
1, .lcd_conn = LCD_SMART_PANEL_8BPP | LCD_PCLK_EDGE_FALL, .pxafb_lcd_power = ltm020d550_lcd_power, .smart_update = ltm020d550_update, }; static void __init tavorevb_init_lcd(void) { platform_device_register(&tavorevb_backlight_devices[0]); platform_device_register(&tavorevb_backlight_devices[1]); set_pxa_fb_info(&tavorevb_lcd_info); } #else static inline void tavorevb_init_lcd(void) {} #endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */ static void __init tavorevb_init(void) { /* initialize MFP configurations */ pxa3xx_mfp_config(ARRAY_AND_SIZE(tavorevb_mfp_cfg)); platform_device_register(&smc91x_device); tavorevb_init_lcd(); tavorevb_init_keypad(); } MACHINE_START(TAVOREVB, "PXA930 Evaluation Board (aka TavorEVB)") /* Maintainer: Eric Miao <eric.miao@marvell.com> */ .phys_io = 0x40000000, .boot_params = 0xa0000100, .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc, .map_io = pxa_map_io, .init_irq = pxa3xx_init_irq, .timer = &pxa_timer, .init_machine = tavorevb_init, MACHINE_END
gpl-2.0
LouZiffer/HTC_Kingdom_Kernel_2.6.35.10
arch/x86/boot/compressed/misc.c
801
8379
/* * misc.c * * This is a collection of several routines from gzip-1.0.3 * adapted for Linux. * * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 * puts by Nick Holloway 1993, better puts by Martin Mares 1995 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996 */ /* * we have to be careful, because no indirections are allowed here, and * paravirt_ops is a kind of one. As it will only run in baremetal anyway, * we just keep it from happening */ #undef CONFIG_PARAVIRT #ifdef CONFIG_X86_32 #define _ASM_X86_DESC_H 1 #endif #include <linux/linkage.h> #include <linux/screen_info.h> #include <linux/elf.h> #include <linux/io.h> #include <asm/page.h> #include <asm/boot.h> #include <asm/bootparam.h> /* WARNING!! * This code is compiled with -fPIC and it is relocated dynamically * at run time, but no relocation processing is performed. * This means that it is not safe to place pointers in static structures. */ /* * Getting to provable safe in place decompression is hard. * Worst case behaviours need to be analyzed. * Background information: * * The file layout is: * magic[2] * method[1] * flags[1] * timestamp[4] * extraflags[1] * os[1] * compressed data blocks[N] * crc[4] orig_len[4] * * resulting in 18 bytes of non compressed data overhead. * * Files divided into blocks * 1 bit (last block flag) * 2 bits (block type) * * 1 block occurs every 32K -1 bytes or when there 50% compression * has been achieved. The smallest block type encoding is always used. * * stored: * 32 bits length in bytes. * * fixed: * magic fixed tree. * symbols. * * dynamic: * dynamic tree encoding. * symbols. * * * The buffer for decompression in place is the length of the * uncompressed data, plus a small amount extra to keep the algorithm safe. * The compressed data is placed at the end of the buffer. The output * pointer is placed at the start of the buffer and the input pointer * is placed where the compressed data starts. 
Problems will occur * when the output pointer overruns the input pointer. * * The output pointer can only overrun the input pointer if the input * pointer is moving faster than the output pointer. A condition only * triggered by data whose compressed form is larger than the uncompressed * form. * * The worst case at the block level is a growth of the compressed data * of 5 bytes per 32767 bytes. * * The worst case internal to a compressed block is very hard to figure. * The worst case can at least be boundined by having one bit that represents * 32764 bytes and then all of the rest of the bytes representing the very * very last byte. * * All of which is enough to compute an amount of extra data that is required * to be safe. To avoid problems at the block level allocating 5 extra bytes * per 32767 bytes of data is sufficient. To avoind problems internal to a * block adding an extra 32767 bytes (the worst case uncompressed block size) * is sufficient, to ensure that in the worst case the decompressed data for * block will stop the byte before the compressed data for a block begins. * To avoid problems with the compressed data's meta information an extra 18 * bytes are needed. Leading to the formula: * * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size. * * Adding 8 bytes per 32K is a bit excessive but much easier to calculate. * Adding 32768 instead of 32767 just makes for round numbers. * Adding the decompressor_size is necessary as it musht live after all * of the data as well. Last I measured the decompressor is about 14K. * 10K of actual data and 4K of bss. 
* */ /* * gzip declarations */ #define STATIC static #undef memset #undef memcpy #define memzero(s, n) memset((s), 0, (n)) static void error(char *m); /* * This is set up by the setup-routine at boot-time */ static struct boot_params *real_mode; /* Pointer to real-mode data */ static int quiet; void *memset(void *s, int c, size_t n); void *memcpy(void *dest, const void *src, size_t n); static void __putstr(int, const char *); #define putstr(__x) __putstr(0, __x) #ifdef CONFIG_X86_64 #define memptr long #else #define memptr unsigned #endif static memptr free_mem_ptr; static memptr free_mem_end_ptr; static char *vidmem; static int vidport; static int lines, cols; #ifdef CONFIG_KERNEL_GZIP #include "../../../../lib/decompress_inflate.c" #endif #ifdef CONFIG_KERNEL_BZIP2 #include "../../../../lib/decompress_bunzip2.c" #endif #ifdef CONFIG_KERNEL_LZMA #include "../../../../lib/decompress_unlzma.c" #endif #ifdef CONFIG_KERNEL_LZO #include "../../../../lib/decompress_unlzo.c" #endif static void scroll(void) { int i; memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2); for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2) vidmem[i] = ' '; } static void __putstr(int error, const char *s) { int x, y, pos; char c; #ifndef CONFIG_X86_VERBOSE_BOOTUP if (!error) return; #endif if (real_mode->screen_info.orig_video_mode == 0 && lines == 0 && cols == 0) return; x = real_mode->screen_info.orig_x; y = real_mode->screen_info.orig_y; while ((c = *s++) != '\0') { if (c == '\n') { x = 0; if (++y >= lines) { scroll(); y--; } } else { vidmem[(x + cols * y) * 2] = c; if (++x >= cols) { x = 0; if (++y >= lines) { scroll(); y--; } } } } real_mode->screen_info.orig_x = x; real_mode->screen_info.orig_y = y; pos = (x + cols * y) * 2; /* Update cursor position */ outb(14, vidport); outb(0xff & (pos >> 9), vidport+1); outb(15, vidport); outb(0xff & (pos >> 1), vidport+1); } void *memset(void *s, int c, size_t n) { int i; char *ss = s; for (i = 0; i < n; i++) ss[i] = c; return s; } 
void *memcpy(void *dest, const void *src, size_t n) { int i; const char *s = src; char *d = dest; for (i = 0; i < n; i++) d[i] = s[i]; return dest; } static void error(char *x) { __putstr(1, "\n\n"); __putstr(1, x); __putstr(1, "\n\n -- System halted"); while (1) asm("hlt"); } static void parse_elf(void *output) { #ifdef CONFIG_X86_64 Elf64_Ehdr ehdr; Elf64_Phdr *phdrs, *phdr; #else Elf32_Ehdr ehdr; Elf32_Phdr *phdrs, *phdr; #endif void *dest; int i; memcpy(&ehdr, output, sizeof(ehdr)); if (ehdr.e_ident[EI_MAG0] != ELFMAG0 || ehdr.e_ident[EI_MAG1] != ELFMAG1 || ehdr.e_ident[EI_MAG2] != ELFMAG2 || ehdr.e_ident[EI_MAG3] != ELFMAG3) { error("Kernel is not a valid ELF file"); return; } if (!quiet) putstr("Parsing ELF... "); phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum); if (!phdrs) error("Failed to allocate space for phdrs"); memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum); for (i = 0; i < ehdr.e_phnum; i++) { phdr = &phdrs[i]; switch (phdr->p_type) { case PT_LOAD: #ifdef CONFIG_RELOCATABLE dest = output; dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); #else dest = (void *)(phdr->p_paddr); #endif memcpy(dest, output + phdr->p_offset, phdr->p_filesz); break; default: /* Ignore other PT_* */ break; } } } asmlinkage void decompress_kernel(void *rmode, memptr heap, unsigned char *input_data, unsigned long input_len, unsigned char *output) { real_mode = rmode; if (real_mode->hdr.loadflags & QUIET_FLAG) quiet = 1; if (real_mode->screen_info.orig_video_mode == 7) { vidmem = (char *) 0xb0000; vidport = 0x3b4; } else { vidmem = (char *) 0xb8000; vidport = 0x3d4; } lines = real_mode->screen_info.orig_video_lines; cols = real_mode->screen_info.orig_video_cols; free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) error("Destination address inappropriately aligned"); #ifdef CONFIG_X86_64 if (heap > 0x3fffffffffffUL) error("Destination address too large"); #else if (heap > 
((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff)) error("Destination address too large"); #endif #ifndef CONFIG_RELOCATABLE if ((unsigned long)output != LOAD_PHYSICAL_ADDR) error("Wrong destination address"); #endif if (!quiet) putstr("\nDecompressing Linux... "); decompress(input_data, input_len, NULL, NULL, output, NULL, error); parse_elf(output); if (!quiet) putstr("done.\nBooting the kernel.\n"); return; }
gpl-2.0
djvoleur/kernel_samsung_exynos7420
drivers/hwmon/iio_hwmon.c
2081
4938
/* Hwmon client for industrial I/O devices * * Copyright (c) 2011 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/of.h> #include <linux/hwmon-sysfs.h> #include <linux/iio/consumer.h> #include <linux/iio/types.h> /** * struct iio_hwmon_state - device instance state * @channels: filled with array of channels from iio * @num_channels: number of channels in channels (saves counting twice) * @hwmon_dev: associated hwmon device * @attr_group: the group of attributes * @attrs: null terminated array of attribute pointers. */ struct iio_hwmon_state { struct iio_channel *channels; int num_channels; struct device *hwmon_dev; struct attribute_group attr_group; struct attribute **attrs; }; /* * Assumes that IIO and hwmon operate in the same base units. * This is supposed to be true, but needs verification for * new channel types. 
*/ static ssize_t iio_hwmon_read_val(struct device *dev, struct device_attribute *attr, char *buf) { int result; int ret; struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); struct iio_hwmon_state *state = dev_get_drvdata(dev); ret = iio_read_channel_processed(&state->channels[sattr->index], &result); if (ret < 0) return ret; return sprintf(buf, "%d\n", result); } static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { const char *name = "iio_hwmon"; if (dev->of_node && dev->of_node->name) name = dev->of_node->name; return sprintf(buf, "%s\n", name); } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static int iio_hwmon_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct iio_hwmon_state *st; struct sensor_device_attribute *a; int ret, i; int in_i = 1, temp_i = 1, curr_i = 1; enum iio_chan_type type; struct iio_channel *channels; channels = iio_channel_get_all(dev); if (IS_ERR(channels)) return PTR_ERR(channels); st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); if (st == NULL) { ret = -ENOMEM; goto error_release_channels; } st->channels = channels; /* count how many attributes we have */ while (st->channels[st->num_channels].indio_dev) st->num_channels++; st->attrs = devm_kzalloc(dev, sizeof(*st->attrs) * (st->num_channels + 2), GFP_KERNEL); if (st->attrs == NULL) { ret = -ENOMEM; goto error_release_channels; } for (i = 0; i < st->num_channels; i++) { a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL); if (a == NULL) { ret = -ENOMEM; goto error_release_channels; } sysfs_attr_init(&a->dev_attr.attr); ret = iio_get_channel_type(&st->channels[i], &type); if (ret < 0) goto error_release_channels; switch (type) { case IIO_VOLTAGE: a->dev_attr.attr.name = kasprintf(GFP_KERNEL, "in%d_input", in_i++); break; case IIO_TEMP: a->dev_attr.attr.name = kasprintf(GFP_KERNEL, "temp%d_input", temp_i++); break; case IIO_CURRENT: a->dev_attr.attr.name = kasprintf(GFP_KERNEL, "curr%d_input", curr_i++); break; 
default: ret = -EINVAL; goto error_release_channels; } if (a->dev_attr.attr.name == NULL) { ret = -ENOMEM; goto error_release_channels; } a->dev_attr.show = iio_hwmon_read_val; a->dev_attr.attr.mode = S_IRUGO; a->index = i; st->attrs[i] = &a->dev_attr.attr; } st->attrs[st->num_channels] = &dev_attr_name.attr; st->attr_group.attrs = st->attrs; platform_set_drvdata(pdev, st); ret = sysfs_create_group(&dev->kobj, &st->attr_group); if (ret < 0) goto error_release_channels; st->hwmon_dev = hwmon_device_register(dev); if (IS_ERR(st->hwmon_dev)) { ret = PTR_ERR(st->hwmon_dev); goto error_remove_group; } return 0; error_remove_group: sysfs_remove_group(&dev->kobj, &st->attr_group); error_release_channels: iio_channel_release_all(channels); return ret; } static int iio_hwmon_remove(struct platform_device *pdev) { struct iio_hwmon_state *st = platform_get_drvdata(pdev); hwmon_device_unregister(st->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &st->attr_group); iio_channel_release_all(st->channels); return 0; } static struct of_device_id iio_hwmon_of_match[] = { { .compatible = "iio-hwmon", }, { } }; static struct platform_driver __refdata iio_hwmon_driver = { .driver = { .name = "iio_hwmon", .owner = THIS_MODULE, .of_match_table = iio_hwmon_of_match, }, .probe = iio_hwmon_probe, .remove = iio_hwmon_remove, }; module_platform_driver(iio_hwmon_driver); MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>"); MODULE_DESCRIPTION("IIO to hwmon driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
TeamRegular/android_kernel_alcatel_msm8916-stock
net/openvswitch/vport-internal_dev.c
2081
6339
/* * Copyright (c) 2007-2012 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA */ #include <linux/hardirq.h> #include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> #include <net/dst.h> #include <net/xfrm.h> #include "datapath.h" #include "vport-internal_dev.h" #include "vport-netdev.h" struct internal_dev { struct vport *vport; }; static struct internal_dev *internal_dev_priv(struct net_device *netdev) { return netdev_priv(netdev); } /* This function is only called by the kernel network layer.*/ static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct vport *vport = ovs_internal_dev_get_vport(netdev); struct ovs_vport_stats vport_stats; ovs_vport_get_stats(vport, &vport_stats); /* The tx and rx stats need to be swapped because the * switch and host OS have opposite perspectives. */ stats->rx_packets = vport_stats.tx_packets; stats->tx_packets = vport_stats.rx_packets; stats->rx_bytes = vport_stats.tx_bytes; stats->tx_bytes = vport_stats.rx_bytes; stats->rx_errors = vport_stats.tx_errors; stats->tx_errors = vport_stats.rx_errors; stats->rx_dropped = vport_stats.tx_dropped; stats->tx_dropped = vport_stats.rx_dropped; return stats; } /* Called with rcu_read_lock_bh. 
*/ static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) { rcu_read_lock(); ovs_vport_receive(internal_dev_priv(netdev)->vport, skb); rcu_read_unlock(); return 0; } static int internal_dev_open(struct net_device *netdev) { netif_start_queue(netdev); return 0; } static int internal_dev_stop(struct net_device *netdev) { netif_stop_queue(netdev); return 0; } static void internal_dev_getinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { strlcpy(info->driver, "openvswitch", sizeof(info->driver)); } static const struct ethtool_ops internal_dev_ethtool_ops = { .get_drvinfo = internal_dev_getinfo, .get_link = ethtool_op_get_link, }; static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu) { if (new_mtu < 68) return -EINVAL; netdev->mtu = new_mtu; return 0; } static void internal_dev_destructor(struct net_device *dev) { struct vport *vport = ovs_internal_dev_get_vport(dev); ovs_vport_free(vport); free_netdev(dev); } static const struct net_device_ops internal_dev_netdev_ops = { .ndo_open = internal_dev_open, .ndo_stop = internal_dev_stop, .ndo_start_xmit = internal_dev_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = internal_dev_change_mtu, .ndo_get_stats64 = internal_dev_get_stats, }; static void do_setup(struct net_device *netdev) { ether_setup(netdev); netdev->netdev_ops = &internal_dev_netdev_ops; netdev->priv_flags &= ~IFF_TX_SKB_SHARING; netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netdev->destructor = internal_dev_destructor; SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops); netdev->tx_queue_len = 0; netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO; netdev->vlan_features = netdev->features; netdev->features |= NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features = netdev->features & ~NETIF_F_LLTX; eth_hw_addr_random(netdev); } static struct vport *internal_dev_create(const struct vport_parms *parms) { struct vport *vport; struct netdev_vport 
*netdev_vport; struct internal_dev *internal_dev; int err; vport = ovs_vport_alloc(sizeof(struct netdev_vport), &ovs_internal_vport_ops, parms); if (IS_ERR(vport)) { err = PTR_ERR(vport); goto error; } netdev_vport = netdev_vport_priv(vport); netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev), parms->name, do_setup); if (!netdev_vport->dev) { err = -ENOMEM; goto error_free_vport; } dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp)); internal_dev = internal_dev_priv(netdev_vport->dev); internal_dev->vport = vport; /* Restrict bridge port to current netns. */ if (vport->port_no == OVSP_LOCAL) netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL; rtnl_lock(); err = register_netdevice(netdev_vport->dev); if (err) goto error_free_netdev; dev_set_promiscuity(netdev_vport->dev, 1); rtnl_unlock(); netif_start_queue(netdev_vport->dev); return vport; error_free_netdev: rtnl_unlock(); free_netdev(netdev_vport->dev); error_free_vport: ovs_vport_free(vport); error: return ERR_PTR(err); } static void internal_dev_destroy(struct vport *vport) { struct netdev_vport *netdev_vport = netdev_vport_priv(vport); netif_stop_queue(netdev_vport->dev); rtnl_lock(); dev_set_promiscuity(netdev_vport->dev, -1); /* unregister_netdevice() waits for an RCU grace period. 
*/ unregister_netdevice(netdev_vport->dev); rtnl_unlock(); } static int internal_dev_recv(struct vport *vport, struct sk_buff *skb) { struct net_device *netdev = netdev_vport_priv(vport)->dev; int len; len = skb->len; skb_dst_drop(skb); nf_reset(skb); secpath_reset(skb); skb->dev = netdev; skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, netdev); netif_rx(skb); return len; } const struct vport_ops ovs_internal_vport_ops = { .type = OVS_VPORT_TYPE_INTERNAL, .create = internal_dev_create, .destroy = internal_dev_destroy, .get_name = ovs_netdev_get_name, .send = internal_dev_recv, }; int ovs_is_internal_dev(const struct net_device *netdev) { return netdev->netdev_ops == &internal_dev_netdev_ops; } struct vport *ovs_internal_dev_get_vport(struct net_device *netdev) { if (!ovs_is_internal_dev(netdev)) return NULL; return internal_dev_priv(netdev)->vport; }
gpl-2.0
lioux/AK-tuna
drivers/usb/gadget/f_sourcesink.c
2593
14432
/* * f_sourcesink.c - USB peripheral source/sink configuration driver * * Copyright (C) 2003-2008 David Brownell * Copyright (C) 2008 by Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define VERBOSE_DEBUG */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/device.h> #include "g_zero.h" #include "gadget_chips.h" /* * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral * controller drivers. * * This just sinks bulk packets OUT to the peripheral and sources them IN * to the host, optionally with specific data patterns for integrity tests. * As such it supports basic functionality and load tests. * * In terms of control messaging, this supports all the standard requests * plus two that support control-OUT tests. If the optional "autoresume" * mode is enabled, it provides good functional coverage for the "USBCV" * test harness from USB-IF. * * Note that because this doesn't queue more than one request at a time, * some other function must be used to test queueing logic. The network * link (g_ether) is the best overall option for that, since its TX and RX * queues are relatively independent, will receive a range of packet sizes, * and can often be made to run out completely. Those issues are important * when stress testing peripheral controller drivers. 
 *
 * This is currently packaged as a configuration driver, which can't be
 * combined with other functions to make composite devices.  However, it
 * can be combined with other independent configurations.
 */

/*
 * Per-instance state: the composite-framework function object plus the
 * two bulk endpoints this test function drives.
 */
struct f_sourcesink {
	struct usb_function	function;

	struct usb_ep		*in_ep;		/* sources data IN to the host */
	struct usb_ep		*out_ep;	/* sinks data OUT from the host */
};

/* Map the embedded usb_function back to its containing f_sourcesink. */
static inline struct f_sourcesink *func_to_ss(struct usb_function *f)
{
	return container_of(f, struct f_sourcesink, function);
}

/*
 * Module parameter selecting the bulk data pattern used for both
 * generation (IN) and verification (OUT):
 *   0 = all zeroes (default), 1 = repeating mod-63 byte sequence.
 */
static unsigned pattern;
module_param(pattern, uint, 0);
MODULE_PARM_DESC(pattern, "0 = all zeroes, 1 = mod63 ");

/*-------------------------------------------------------------------------*/

/* One vendor-specific interface with two bulk endpoints. */
static struct usb_interface_descriptor source_sink_intf = {
	.bLength =		sizeof source_sink_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* full speed support: */

static struct usb_endpoint_descriptor fs_source_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor fs_sink_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *fs_source_sink_descs[] = {
	(struct usb_descriptor_header *) &source_sink_intf,
	(struct usb_descriptor_header *) &fs_sink_desc,
	(struct usb_descriptor_header *) &fs_source_desc,
	NULL,
};

/* high speed support: */

/*
 * NOTE: bEndpointAddress is deliberately left zero here; it is copied
 * from the autoconfigured full-speed descriptors in sourcesink_bind().
 */
static struct usb_endpoint_descriptor hs_source_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_endpoint_descriptor hs_sink_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_descriptor_header *hs_source_sink_descs[] = {
	(struct usb_descriptor_header *) &source_sink_intf,
	(struct usb_descriptor_header *) &hs_source_desc,
	(struct usb_descriptor_header *) &hs_sink_desc,
	NULL,
};

/* function-specific strings: */

static struct usb_string strings_sourcesink[] = {
	[0].s = "source and sink data",
	{  }			/* end of list */
};

static struct usb_gadget_strings stringtab_sourcesink = {
	.language	= 0x0409,	/* en-us */
	.strings	= strings_sourcesink,
};

static struct usb_gadget_strings *sourcesink_strings[] = {
	&stringtab_sourcesink,
	NULL,
};

/*-------------------------------------------------------------------------*/

/*
 * Bind callback: claim an interface id and autoconfigure the two bulk
 * endpoints against the full-speed descriptors; mirror their addresses
 * into the high-speed descriptors on dual-speed hardware.
 * Returns 0 on success or a negative errno.
 */
static int __init
sourcesink_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_sourcesink	*ss = func_to_ss(f);
	int	id;

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	source_sink_intf.bInterfaceNumber = id;

	/* allocate endpoints */
	ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc);
	if (!ss->in_ep) {
autoconf_fail:
		ERROR(cdev, "%s: can't autoconfigure on %s\n",
			f->name, cdev->gadget->name);
		return -ENODEV;
	}
	ss->in_ep->driver_data = cdev;	/* claim */

	ss->out_ep = usb_ep_autoconfig(cdev->gadget, &fs_sink_desc);
	if (!ss->out_ep)
		goto autoconf_fail;
	ss->out_ep->driver_data = cdev;	/* claim */

	/* support high speed hardware */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		hs_source_desc.bEndpointAddress =
				fs_source_desc.bEndpointAddress;
		hs_sink_desc.bEndpointAddress =
				fs_sink_desc.bEndpointAddress;
		f->hs_descriptors = hs_source_sink_descs;
	}

	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
		gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
		f->name, ss->in_ep->name, ss->out_ep->name);
	return 0;
}

/* Unbind callback: release the per-instance state. */
static void
sourcesink_unbind(struct usb_configuration *c, struct usb_function *f)
{
	kfree(func_to_ss(f));
}

/*
 * Verify an OUT transfer against the configured test pattern.
 * On the first mismatching byte, halt the OUT endpoint and return
 * -EINVAL; returns 0 when all req->actual bytes match.
 */
static int check_read_data(struct f_sourcesink *ss, struct usb_request *req)
{
	unsigned		i;
	u8			*buf = req->buf;
	struct usb_composite_dev *cdev = ss->function.config->cdev;

	for (i = 0; i < req->actual; i++, buf++) {
		switch (pattern) {

		/* all-zeroes has no synchronization issues */
		case 0:
			if (*buf == 0)
				continue;
			break;

		/* "mod63" stays in sync with short-terminated transfers,
		 * OR otherwise when host and gadget agree on how large
		 * each usb transfer request should be.  Resync is done
		 * with set_interface or set_config.  (We *WANT* it to
		 * get quickly out of sync if controllers or their drivers
		 * stutter for any reason, including buffer duplication...)
		 */
		case 1:
			if (*buf == (u8)(i % 63))
				continue;
			break;
		}
		ERROR(cdev, "bad OUT byte, buf[%d] = %d\n", i, *buf);
		usb_ep_set_halt(ss->out_ep);
		return -EINVAL;
	}
	return 0;
}

/* Fill an IN request's buffer with the configured test pattern. */
static void reinit_write_data(struct usb_ep *ep, struct usb_request *req)
{
	unsigned	i;
	u8		*buf = req->buf;

	switch (pattern) {
	case 0:
		memset(req->buf, 0, req->length);
		break;
	case 1:
		for (i = 0; i < req->length; i++)
			*buf++ = (u8) (i % 63);
		break;
	}
}

/*
 * Completion handler shared by both endpoints.  On normal completion
 * the request is refilled (IN) or checked (OUT) and requeued, keeping
 * one request perpetually in flight per endpoint; on disconnect-style
 * errors the request is freed instead.
 */
static void source_sink_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_sourcesink	*ss = ep->driver_data;
	struct usb_composite_dev *cdev = ss->function.config->cdev;
	int			status = req->status;

	switch (status) {

	case 0:				/* normal completion? */
		if (ep == ss->out_ep) {
			check_read_data(ss, req);
			/* 0x55 poison detects stale data on the next read */
			memset(req->buf, 0x55, req->length);
		} else
			reinit_write_data(ep, req);
		break;

	/* this endpoint is normally active while we're configured */
	case -ECONNABORTED:		/* hardware forced ep reset */
	case -ECONNRESET:		/* request dequeued */
	case -ESHUTDOWN:		/* disconnect from host */
		VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status,
				req->actual, req->length);
		if (ep == ss->out_ep)
			check_read_data(ss, req);
		free_ep_req(ep, req);
		return;

	case -EOVERFLOW:		/* buffer overrun on read means that
					 * we didn't provide a big enough
					 * buffer.
					 */
	default:
#if 1
		DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name,
				status, req->actual, req->length);
#endif
	case -EREMOTEIO:		/* short read */
		break;
	}

	/* resubmit so the endpoint keeps streaming */
	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		ERROR(cdev, "kill %s: resubmit %d bytes --> %d\n",
				ep->name, req->length, status);
		usb_ep_set_halt(ep);
		/* FIXME recover later ... somehow */
	}
}

/*
 * Allocate, initialize, and queue the first request on one endpoint.
 * @is_in selects the IN (source) or OUT (sink) endpoint.
 * Returns 0 on success or a negative errno (the request is freed on
 * queue failure).
 */
static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in)
{
	struct usb_ep		*ep;
	struct usb_request	*req;
	int			status;

	ep = is_in ? ss->in_ep : ss->out_ep;
	req = alloc_ep_req(ep);
	if (!req)
		return -ENOMEM;

	req->complete = source_sink_complete;
	if (is_in)
		reinit_write_data(ep, req);
	else
		memset(req->buf, 0x55, req->length);

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		struct usb_composite_dev	*cdev;

		cdev = ss->function.config->cdev;
		ERROR(cdev, "start %s %s --> %d\n",
				is_in ? "IN" : "OUT",
				ep->name, status);
		free_ep_req(ep, req);
	}

	return status;
}

/* Quiesce both endpoints (helper shared by set_alt and disable). */
static void disable_source_sink(struct f_sourcesink *ss)
{
	struct usb_composite_dev	*cdev;

	cdev = ss->function.config->cdev;
	disable_endpoints(cdev, ss->in_ep, ss->out_ep);
	VDBG(cdev, "%s disabled\n", ss->function.name);
}

/*
 * Enable both endpoints with speed-appropriate descriptors and start
 * streaming on each.  On any failure the IN endpoint is torn down via
 * the shared "fail" path (note: OUT is disabled before jumping there
 * when its start fails).
 */
static int
enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss)
{
	int					result = 0;
	const struct usb_endpoint_descriptor	*src, *sink;
	struct usb_ep				*ep;

	src = ep_choose(cdev->gadget, &hs_source_desc, &fs_source_desc);
	sink = ep_choose(cdev->gadget, &hs_sink_desc, &fs_sink_desc);

	/* one endpoint writes (sources) zeroes IN (to the host) */
	ep = ss->in_ep;
	result = usb_ep_enable(ep, src);
	if (result < 0)
		return result;
	ep->driver_data = ss;

	result = source_sink_start_ep(ss, true);
	if (result < 0) {
fail:
		ep = ss->in_ep;
		usb_ep_disable(ep);
		ep->driver_data = NULL;
		return result;
	}

	/* one endpoint reads (sinks) anything OUT (from the host) */
	ep = ss->out_ep;
	result = usb_ep_enable(ep, sink);
	if (result < 0)
		goto fail;
	ep->driver_data = ss;

	result = source_sink_start_ep(ss, false);
	if (result < 0) {
		usb_ep_disable(ep);
		ep->driver_data = NULL;
		goto fail;
	}

	DBG(cdev, "%s enabled\n", ss->function.name);
	return result;
}

/*
 * set_alt callback: this function has a single altsetting (zero), so a
 * non-NULL driver_data on the IN endpoint means "already running" and
 * triggers a restart.
 */
static int sourcesink_set_alt(struct usb_function *f,
		unsigned intf, unsigned alt)
{
	struct f_sourcesink	*ss = func_to_ss(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	/* we know alt is zero */
	if (ss->in_ep->driver_data)
		disable_source_sink(ss);
	return enable_source_sink(cdev, ss);
}

/* disable callback: stop streaming on both endpoints. */
static void sourcesink_disable(struct usb_function *f)
{
	struct f_sourcesink	*ss = func_to_ss(f);

	disable_source_sink(ss);
}

/*-------------------------------------------------------------------------*/

/*
 * Allocate a source/sink function instance and add it to configuration
 * @c.  Ownership of the allocation passes to the composite framework on
 * success (freed later by sourcesink_unbind); it is freed here on failure.
 */
static int __init sourcesink_bind_config(struct usb_configuration *c)
{
	struct f_sourcesink	*ss;
	int			status;

	ss = kzalloc(sizeof *ss, GFP_KERNEL);
	if (!ss)
		return -ENOMEM;

	ss->function.name = "source/sink";
	ss->function.descriptors = fs_source_sink_descs;
	ss->function.bind = sourcesink_bind;
	ss->function.unbind = sourcesink_unbind;
	ss->function.set_alt = sourcesink_set_alt;
	ss->function.disable = sourcesink_disable;

	status = usb_add_function(c, &ss->function);
	if (status)
		kfree(ss);
	return status;
}

/*
 * Handle the two vendor-specific control test requests (0x5b control
 * WRITE, 0x5c control READ); everything else is left to the composite
 * core.  Returns the queued length, or a negative value to stall ep0.
 */
static int sourcesink_setup(struct usb_configuration *c,
		const struct usb_ctrlrequest *ctrl)
{
	struct usb_request	*req = c->cdev->req;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	/* composite driver infrastructure handles everything except
	 * the two control test requests.
	 */
	switch (ctrl->bRequest) {

	/*
	 * These are the same vendor-specific requests supported by
	 * Intel's USB 2.0 compliance test devices.  We exceed that
	 * device spec by allowing multiple-packet requests.
	 *
	 * NOTE:  the Control-OUT data stays in req->buf ... better
	 * would be copying it into a scratch buffer, so that other
	 * requests may safely intervene.
	 */
	case 0x5b:	/* control WRITE test -- fill the buffer */
		if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR))
			goto unknown;
		if (w_value || w_index)
			break;
		/* just read that many bytes into the buffer */
		if (w_length > req->length)
			break;
		value = w_length;
		break;
	case 0x5c:	/* control READ test -- return the buffer */
		if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR))
			goto unknown;
		if (w_value || w_index)
			break;
		/* expect those bytes are still in the buffer; send back */
		if (w_length > req->length)
			break;
		value = w_length;
		break;

	default:
unknown:
		VDBG(c->cdev,
			"unknown control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		VDBG(c->cdev, "source/sink req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = value;
		value = usb_ep_queue(c->cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			/* NOTE(review): "source/sinkc" typo is in the
			 * original log string; left as-is here. */
			ERROR(c->cdev, "source/sinkc response, err %d\n",
					value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}

static struct usb_configuration sourcesink_driver = {
	.label		= "source/sink",
	.strings	= sourcesink_strings,
	.setup		= sourcesink_setup,
	.bConfigurationValue = 3,
	.bmAttributes	= USB_CONFIG_ATT_SELFPOWER,
	/* .iConfiguration = DYNAMIC */
};

/**
 * sourcesink_add - add a source/sink testing configuration to a device
 * @cdev: the device to support the configuration
 * @autoresume: when true, advertise remote wakeup for autoresume testing
 */
int __init sourcesink_add(struct usb_composite_dev *cdev, bool autoresume)
{
	int id;

	/* allocate string ID(s) */
	id = usb_string_id(cdev);
	if (id < 0)
		return id;
	strings_sourcesink[0].id = id;

	/* the same string id labels the interface and the configuration */
	source_sink_intf.iInterface = id;
	sourcesink_driver.iConfiguration = id;

	/* support autoresume for remote wakeup testing */
	if (autoresume)
		sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;

	/* support OTG systems */
	if (gadget_is_otg(cdev->gadget)) {
		sourcesink_driver.descriptors = otg_desc;
		sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	return usb_add_config(cdev, &sourcesink_driver,
			sourcesink_bind_config);
}
gpl-2.0
hiikezoe/android_kernel_huawei_204hw
drivers/hid/hid-pl.c
3617
6011
/*
 *  Force feedback support for PantherLord/GreenAsia based devices
 *
 *  The devices are distributed under various names and the same USB device ID
 *  can be used in both adapters and actual game controllers.
 *
 *  0810:0001 "Twin USB Joystick"
 *   - tested with PantherLord USB/PS2 2in1 Adapter
 *   - contains two reports, one for each port (HID_QUIRK_MULTI_INPUT)
 *
 *  0e8f:0003 "GreenAsia Inc. USB Joystick "
 *   - tested with König Gaming gamepad
 *
 *  0e8f:0003 "GASIA USB Gamepad"
 *   - another version of the König gamepad
 *
 *  Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* #define DEBUG */

/* local debug helper; enabled via DEBUG / dynamic pr_debug */
#define debug(format, arg...) pr_debug("hid-plff: " format "\n" , ## arg)

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/hid.h>

#include "hid-ids.h"

#ifdef CONFIG_PANTHERLORD_FF
#include "usbhid/usbhid.h"

/*
 * Per-input-device FF state: the output report used for rumble plus
 * pointers directly into that report's value slots for the strong and
 * weak motor magnitudes.
 */
struct plff_device {
	struct hid_report *report;
	s32 *strong;
	s32 *weak;
};

/*
 * ff-memless play callback: scale the 16-bit rumble magnitudes down to
 * the device's 7-bit range and submit the output report.
 */
static int hid_plff_play(struct input_dev *dev, void *data,
			 struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct plff_device *plff = data;
	int left, right;

	left = effect->u.rumble.strong_magnitude;
	right = effect->u.rumble.weak_magnitude;
	debug("called with 0x%04x 0x%04x", left, right);

	/* scale 0..0xffff down to 0..0x7f */
	left = left * 0x7f / 0xffff;
	right = right * 0x7f / 0xffff;

	*plff->strong = left;
	*plff->weak = right;
	debug("running with 0x%02x 0x%02x", left, right);
	usbhid_submit_report(hid, plff->report, USB_DIR_OUT);

	return 0;
}

/*
 * Probe-time FF setup: walk the device's input devices in lockstep with
 * its output reports (one report per physical port), locate the strong
 * and weak magnitude slots in each report, and register a memless FF
 * handler per input device.  Returns 0 on success or a negative errno.
 */
static int plff_init(struct hid_device *hid)
{
	struct plff_device *plff;
	struct hid_report *report;
	struct hid_input *hidinput;
	struct list_head *report_list =
			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct list_head *report_ptr = report_list;
	struct input_dev *dev;
	int error;
	s32 *strong;
	s32 *weak;

	/* The device contains one output report per physical device, all
	   containing 1 field, which contains 4 ff00.0002 usages and 4
	   16bit absolute values.

	   The input reports also contain a field which contains
	   8 ff00.0001 usages and 8 boolean values. Their meaning is
	   currently unknown.

	   A version of the 0e8f:0003 exists that has all the values in
	   separate fields and misses the extra input field, thus resembling
	   Zeroplus (hid-zpff) devices.
	*/

	if (list_empty(report_list)) {
		hid_err(hid, "no output reports found\n");
		return -ENODEV;
	}

	list_for_each_entry(hidinput, &hid->inputs, list) {

		/* advance to the output report paired with this input */
		report_ptr = report_ptr->next;

		if (report_ptr == report_list) {
			hid_err(hid, "required output report is missing\n");
			return -ENODEV;
		}

		report = list_entry(report_ptr, struct hid_report, list);
		if (report->maxfield < 1) {
			hid_err(hid, "no fields in the report\n");
			return -ENODEV;
		}

		if (report->field[0]->report_count >= 4) {
			/* classic layout: one field, magnitudes at [2]/[3] */
			report->field[0]->value[0] = 0x00;
			report->field[0]->value[1] = 0x00;
			strong = &report->field[0]->value[2];
			weak = &report->field[0]->value[3];
			debug("detected single-field device");
		} else if (report->maxfield >= 4 &&
			   report->field[0]->maxusage == 1 &&
			   report->field[0]->usage[0].hid ==
						(HID_UP_LED | 0x43)) {
			/* zpff-like variant: one value per field */
			report->field[0]->value[0] = 0x00;
			report->field[1]->value[0] = 0x00;
			strong = &report->field[2]->value[0];
			weak = &report->field[3]->value[0];
			debug("detected 4-field device");
		} else {
			hid_err(hid, "not enough fields or values\n");
			return -ENODEV;
		}

		plff = kzalloc(sizeof(struct plff_device), GFP_KERNEL);
		if (!plff)
			return -ENOMEM;

		dev = hidinput->input;

		set_bit(FF_RUMBLE, dev->ffbit);

		/* ff core takes ownership of plff on success */
		error = input_ff_create_memless(dev, plff, hid_plff_play);
		if (error) {
			kfree(plff);
			return error;
		}

		plff->report = report;
		plff->strong = strong;
		plff->weak = weak;

		/* stop any rumble left running by firmware/bootloader */
		*strong = 0x00;
		*weak = 0x00;
		usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
	}

	hid_info(hid, "Force feedback for PantherLord/GreenAsia devices by Anssi Hannula <anssi.hannula@gmail.com>\n");

	return 0;
}
#else
/* FF support compiled out: report success without doing anything */
static inline int plff_init(struct hid_device *hid)
{
	return 0;
}
#endif

/*
 * Driver probe: apply the multi-input quirk for the dual adapters,
 * start the HID core without its generic FF handling, then attach our
 * own FF support (best-effort: plff_init failure is not fatal).
 */
static int pl_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	if (id->driver_data)
		hdev->quirks |= HID_QUIRK_MULTI_INPUT;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err;
	}

	plff_init(hdev);

	return 0;
err:
	return ret;
}

static const struct hid_device_id pl_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON,
			 USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR),
		.driver_data = 1 }, /* Twin USB Joystick */
	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON,
			 USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR),
		.driver_data = 1 }, /* Twin USB Joystick */
	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003), },
	{ }
};
MODULE_DEVICE_TABLE(hid, pl_devices);

static struct hid_driver pl_driver = {
	.name = "pantherlord",
	.id_table = pl_devices,
	.probe = pl_probe,
};

static int __init pl_init(void)
{
	return hid_register_driver(&pl_driver);
}

static void __exit pl_exit(void)
{
	hid_unregister_driver(&pl_driver);
}

module_init(pl_init);
module_exit(pl_exit);
MODULE_LICENSE("GPL");
gpl-2.0
onejay09/OLD----kernel_HTC_msm7x30_KK
fs/udf/udftime.c
4641
5989
/* Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Paul Eggert (eggert@twinsun.com). The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with the GNU C Library; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* * dgb 10/02/98: ripped this from glibc source to help convert timestamps * to unix time * 10/04/98: added new table-based lookup after seeing how ugly * the gnu code is * blf 09/27/99: ripped out all the old code and inserted new table from * John Brockmeyer (without leap second corrections) * rewrote udf_stamp_to_time and fixed timezone accounting in * udf_time_to_stamp. */ /* * We don't take into account leap seconds. This may be correct or incorrect. * For more NIST information (especially dealing with leap seconds), see: * http://www.boulder.nist.gov/timefreq/pubs/bulletin/leapsecond.htm */ #include <linux/types.h> #include <linux/kernel.h> #include "udfdecl.h" #define EPOCH_YEAR 1970 #ifndef __isleap /* Nonzero if YEAR is a leap year (every 4 years, except every 100th isn't, and every 400th is). */ #define __isleap(year) \ ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) #endif /* How many days come before each month (0-12). */ static const unsigned short int __mon_yday[2][13] = { /* Normal years. 
*/ {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}, /* Leap years. */ {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366} }; #define MAX_YEAR_SECONDS 69 #define SPD 0x15180 /*3600*24 */ #define SPY(y, l, s) (SPD * (365 * y + l) + s) static time_t year_seconds[MAX_YEAR_SECONDS] = { /*1970*/ SPY(0, 0, 0), SPY(1, 0, 0), SPY(2, 0, 0), SPY(3, 1, 0), /*1974*/ SPY(4, 1, 0), SPY(5, 1, 0), SPY(6, 1, 0), SPY(7, 2, 0), /*1978*/ SPY(8, 2, 0), SPY(9, 2, 0), SPY(10, 2, 0), SPY(11, 3, 0), /*1982*/ SPY(12, 3, 0), SPY(13, 3, 0), SPY(14, 3, 0), SPY(15, 4, 0), /*1986*/ SPY(16, 4, 0), SPY(17, 4, 0), SPY(18, 4, 0), SPY(19, 5, 0), /*1990*/ SPY(20, 5, 0), SPY(21, 5, 0), SPY(22, 5, 0), SPY(23, 6, 0), /*1994*/ SPY(24, 6, 0), SPY(25, 6, 0), SPY(26, 6, 0), SPY(27, 7, 0), /*1998*/ SPY(28, 7, 0), SPY(29, 7, 0), SPY(30, 7, 0), SPY(31, 8, 0), /*2002*/ SPY(32, 8, 0), SPY(33, 8, 0), SPY(34, 8, 0), SPY(35, 9, 0), /*2006*/ SPY(36, 9, 0), SPY(37, 9, 0), SPY(38, 9, 0), SPY(39, 10, 0), /*2010*/ SPY(40, 10, 0), SPY(41, 10, 0), SPY(42, 10, 0), SPY(43, 11, 0), /*2014*/ SPY(44, 11, 0), SPY(45, 11, 0), SPY(46, 11, 0), SPY(47, 12, 0), /*2018*/ SPY(48, 12, 0), SPY(49, 12, 0), SPY(50, 12, 0), SPY(51, 13, 0), /*2022*/ SPY(52, 13, 0), SPY(53, 13, 0), SPY(54, 13, 0), SPY(55, 14, 0), /*2026*/ SPY(56, 14, 0), SPY(57, 14, 0), SPY(58, 14, 0), SPY(59, 15, 0), /*2030*/ SPY(60, 15, 0), SPY(61, 15, 0), SPY(62, 15, 0), SPY(63, 16, 0), /*2034*/ SPY(64, 16, 0), SPY(65, 16, 0), SPY(66, 16, 0), SPY(67, 17, 0), /*2038*/ SPY(68, 17, 0) }; extern struct timezone sys_tz; #define SECS_PER_HOUR (60 * 60) #define SECS_PER_DAY (SECS_PER_HOUR * 24) struct timespec * udf_disk_stamp_to_time(struct timespec *dest, struct timestamp src) { int yday; u16 typeAndTimezone = le16_to_cpu(src.typeAndTimezone); u16 year = le16_to_cpu(src.year); uint8_t type = typeAndTimezone >> 12; int16_t offset; if (type == 1) { offset = typeAndTimezone << 4; /* sign extent offset */ offset = (offset >> 4); if (offset == -2047) /* 
unspecified offset */ offset = 0; } else offset = 0; if ((year < EPOCH_YEAR) || (year >= EPOCH_YEAR + MAX_YEAR_SECONDS)) { return NULL; } dest->tv_sec = year_seconds[year - EPOCH_YEAR]; dest->tv_sec -= offset * 60; yday = ((__mon_yday[__isleap(year)][src.month - 1]) + src.day - 1); dest->tv_sec += (((yday * 24) + src.hour) * 60 + src.minute) * 60 + src.second; dest->tv_nsec = 1000 * (src.centiseconds * 10000 + src.hundredsOfMicroseconds * 100 + src.microseconds); return dest; } struct timestamp * udf_time_to_disk_stamp(struct timestamp *dest, struct timespec ts) { long int days, rem, y; const unsigned short int *ip; int16_t offset; offset = -sys_tz.tz_minuteswest; if (!dest) return NULL; dest->typeAndTimezone = cpu_to_le16(0x1000 | (offset & 0x0FFF)); ts.tv_sec += offset * 60; days = ts.tv_sec / SECS_PER_DAY; rem = ts.tv_sec % SECS_PER_DAY; dest->hour = rem / SECS_PER_HOUR; rem %= SECS_PER_HOUR; dest->minute = rem / 60; dest->second = rem % 60; y = 1970; #define DIV(a, b) ((a) / (b) - ((a) % (b) < 0)) #define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400)) while (days < 0 || days >= (__isleap(y) ? 366 : 365)) { long int yg = y + days / 365 - (days % 365 < 0); /* Adjust DAYS and Y to match the guessed year. */ days -= ((yg - y) * 365 + LEAPS_THRU_END_OF(yg - 1) - LEAPS_THRU_END_OF(y - 1)); y = yg; } dest->year = cpu_to_le16(y); ip = __mon_yday[__isleap(y)]; for (y = 11; days < (long int)ip[y]; --y) continue; days -= ip[y]; dest->month = y + 1; dest->day = days + 1; dest->centiseconds = ts.tv_nsec / 10000000; dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000) / 100; dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 - dest->hundredsOfMicroseconds * 100); return dest; } /* EOF */
gpl-2.0
deafnote/android_kernel_huawei_u88251
sound/isa/sb/sb16_csp.c
4641
33481
/* * Copyright (c) 1999 by Uros Bizjak <uros@kss-loka.si> * Takashi Iwai <tiwai@suse.de> * * SB16ASP/AWE32 CSP control * * CSP microcode loader: * alsa-tools/sb16_csp/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> #include <sound/sb16_csp.h> #include <sound/initval.h> MODULE_AUTHOR("Uros Bizjak <uros@kss-loka.si>"); MODULE_DESCRIPTION("ALSA driver for SB16 Creative Signal Processor"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("sb16/mulaw_main.csp"); MODULE_FIRMWARE("sb16/alaw_main.csp"); MODULE_FIRMWARE("sb16/ima_adpcm_init.csp"); MODULE_FIRMWARE("sb16/ima_adpcm_playback.csp"); MODULE_FIRMWARE("sb16/ima_adpcm_capture.csp"); #ifdef SNDRV_LITTLE_ENDIAN #define CSP_HDR_VALUE(a,b,c,d) ((a) | ((b)<<8) | ((c)<<16) | ((d)<<24)) #else #define CSP_HDR_VALUE(a,b,c,d) ((d) | ((c)<<8) | ((b)<<16) | ((a)<<24)) #endif #define RIFF_HEADER CSP_HDR_VALUE('R', 'I', 'F', 'F') #define CSP__HEADER CSP_HDR_VALUE('C', 'S', 'P', ' ') #define LIST_HEADER CSP_HDR_VALUE('L', 'I', 'S', 'T') #define FUNC_HEADER CSP_HDR_VALUE('f', 'u', 'n', 'c') #define CODE_HEADER CSP_HDR_VALUE('c', 'o', 'd', 'e') #define INIT_HEADER CSP_HDR_VALUE('i', 'n', 'i', 't') #define MAIN_HEADER CSP_HDR_VALUE('m', 'a', 'i', 'n') /* 
* RIFF data format */ struct riff_header { __u32 name; __u32 len; }; struct desc_header { struct riff_header info; __u16 func_nr; __u16 VOC_type; __u16 flags_play_rec; __u16 flags_16bit_8bit; __u16 flags_stereo_mono; __u16 flags_rates; }; /* * prototypes */ static void snd_sb_csp_free(struct snd_hwdep *hw); static int snd_sb_csp_open(struct snd_hwdep * hw, struct file *file); static int snd_sb_csp_ioctl(struct snd_hwdep * hw, struct file *file, unsigned int cmd, unsigned long arg); static int snd_sb_csp_release(struct snd_hwdep * hw, struct file *file); static int csp_detect(struct snd_sb *chip, int *version); static int set_codec_parameter(struct snd_sb *chip, unsigned char par, unsigned char val); static int set_register(struct snd_sb *chip, unsigned char reg, unsigned char val); static int read_register(struct snd_sb *chip, unsigned char reg); static int set_mode_register(struct snd_sb *chip, unsigned char mode); static int get_version(struct snd_sb *chip); static int snd_sb_csp_riff_load(struct snd_sb_csp * p, struct snd_sb_csp_microcode __user * code); static int snd_sb_csp_unload(struct snd_sb_csp * p); static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags); static int snd_sb_csp_autoload(struct snd_sb_csp * p, int pcm_sfmt, int play_rec_mode); static int snd_sb_csp_check_version(struct snd_sb_csp * p); static int snd_sb_csp_use(struct snd_sb_csp * p); static int snd_sb_csp_unuse(struct snd_sb_csp * p); static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channels); static int snd_sb_csp_stop(struct snd_sb_csp * p); static int snd_sb_csp_pause(struct snd_sb_csp * p); static int snd_sb_csp_restart(struct snd_sb_csp * p); static int snd_sb_qsound_build(struct snd_sb_csp * p); static void snd_sb_qsound_destroy(struct snd_sb_csp * p); static int snd_sb_csp_qsound_transfer(struct snd_sb_csp * p); static int init_proc_entry(struct snd_sb_csp * p, int device); static void info_read(struct 
snd_info_entry *entry, struct snd_info_buffer *buffer); /* * Detect CSP chip and create a new instance */ int snd_sb_csp_new(struct snd_sb *chip, int device, struct snd_hwdep ** rhwdep) { struct snd_sb_csp *p; int uninitialized_var(version); int err; struct snd_hwdep *hw; if (rhwdep) *rhwdep = NULL; if (csp_detect(chip, &version)) return -ENODEV; if ((err = snd_hwdep_new(chip->card, "SB16-CSP", device, &hw)) < 0) return err; if ((p = kzalloc(sizeof(*p), GFP_KERNEL)) == NULL) { snd_device_free(chip->card, hw); return -ENOMEM; } p->chip = chip; p->version = version; /* CSP operators */ p->ops.csp_use = snd_sb_csp_use; p->ops.csp_unuse = snd_sb_csp_unuse; p->ops.csp_autoload = snd_sb_csp_autoload; p->ops.csp_start = snd_sb_csp_start; p->ops.csp_stop = snd_sb_csp_stop; p->ops.csp_qsound_transfer = snd_sb_csp_qsound_transfer; mutex_init(&p->access_mutex); sprintf(hw->name, "CSP v%d.%d", (version >> 4), (version & 0x0f)); hw->iface = SNDRV_HWDEP_IFACE_SB16CSP; hw->private_data = p; hw->private_free = snd_sb_csp_free; /* operators - only write/ioctl */ hw->ops.open = snd_sb_csp_open; hw->ops.ioctl = snd_sb_csp_ioctl; hw->ops.release = snd_sb_csp_release; /* create a proc entry */ init_proc_entry(p, device); if (rhwdep) *rhwdep = hw; return 0; } /* * free_private for hwdep instance */ static void snd_sb_csp_free(struct snd_hwdep *hwdep) { int i; struct snd_sb_csp *p = hwdep->private_data; if (p) { if (p->running & SNDRV_SB_CSP_ST_RUNNING) snd_sb_csp_stop(p); for (i = 0; i < ARRAY_SIZE(p->csp_programs); ++i) release_firmware(p->csp_programs[i]); kfree(p); } } /* ------------------------------ */ /* * open the device exclusively */ static int snd_sb_csp_open(struct snd_hwdep * hw, struct file *file) { struct snd_sb_csp *p = hw->private_data; return (snd_sb_csp_use(p)); } /* * ioctl for hwdep device: */ static int snd_sb_csp_ioctl(struct snd_hwdep * hw, struct file *file, unsigned int cmd, unsigned long arg) { struct snd_sb_csp *p = hw->private_data; struct snd_sb_csp_info 
info; struct snd_sb_csp_start start_info; int err; if (snd_BUG_ON(!p)) return -EINVAL; if (snd_sb_csp_check_version(p)) return -ENODEV; switch (cmd) { /* get information */ case SNDRV_SB_CSP_IOCTL_INFO: *info.codec_name = *p->codec_name; info.func_nr = p->func_nr; info.acc_format = p->acc_format; info.acc_channels = p->acc_channels; info.acc_width = p->acc_width; info.acc_rates = p->acc_rates; info.csp_mode = p->mode; info.run_channels = p->run_channels; info.run_width = p->run_width; info.version = p->version; info.state = p->running; if (copy_to_user((void __user *)arg, &info, sizeof(info))) err = -EFAULT; else err = 0; break; /* load CSP microcode */ case SNDRV_SB_CSP_IOCTL_LOAD_CODE: err = (p->running & SNDRV_SB_CSP_ST_RUNNING ? -EBUSY : snd_sb_csp_riff_load(p, (struct snd_sb_csp_microcode __user *) arg)); break; case SNDRV_SB_CSP_IOCTL_UNLOAD_CODE: err = (p->running & SNDRV_SB_CSP_ST_RUNNING ? -EBUSY : snd_sb_csp_unload(p)); break; /* change CSP running state */ case SNDRV_SB_CSP_IOCTL_START: if (copy_from_user(&start_info, (void __user *) arg, sizeof(start_info))) err = -EFAULT; else err = snd_sb_csp_start(p, start_info.sample_width, start_info.channels); break; case SNDRV_SB_CSP_IOCTL_STOP: err = snd_sb_csp_stop(p); break; case SNDRV_SB_CSP_IOCTL_PAUSE: err = snd_sb_csp_pause(p); break; case SNDRV_SB_CSP_IOCTL_RESTART: err = snd_sb_csp_restart(p); break; default: err = -ENOTTY; break; } return err; } /* * close the device */ static int snd_sb_csp_release(struct snd_hwdep * hw, struct file *file) { struct snd_sb_csp *p = hw->private_data; return (snd_sb_csp_unuse(p)); } /* ------------------------------ */ /* * acquire device */ static int snd_sb_csp_use(struct snd_sb_csp * p) { mutex_lock(&p->access_mutex); if (p->used) { mutex_unlock(&p->access_mutex); return -EAGAIN; } p->used++; mutex_unlock(&p->access_mutex); return 0; } /* * release device */ static int snd_sb_csp_unuse(struct snd_sb_csp * p) { mutex_lock(&p->access_mutex); p->used--; 
mutex_unlock(&p->access_mutex); return 0; } /* * load microcode via ioctl: * code is user-space pointer */ static int snd_sb_csp_riff_load(struct snd_sb_csp * p, struct snd_sb_csp_microcode __user * mcode) { struct snd_sb_csp_mc_header info; unsigned char __user *data_ptr; unsigned char __user *data_end; unsigned short func_nr = 0; struct riff_header file_h, item_h, code_h; __u32 item_type; struct desc_header funcdesc_h; unsigned long flags; int err; if (copy_from_user(&info, mcode, sizeof(info))) return -EFAULT; data_ptr = mcode->data; if (copy_from_user(&file_h, data_ptr, sizeof(file_h))) return -EFAULT; if ((file_h.name != RIFF_HEADER) || (le32_to_cpu(file_h.len) >= SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE - sizeof(file_h))) { snd_printd("%s: Invalid RIFF header\n", __func__); return -EINVAL; } data_ptr += sizeof(file_h); data_end = data_ptr + le32_to_cpu(file_h.len); if (copy_from_user(&item_type, data_ptr, sizeof(item_type))) return -EFAULT; if (item_type != CSP__HEADER) { snd_printd("%s: Invalid RIFF file type\n", __func__); return -EINVAL; } data_ptr += sizeof (item_type); for (; data_ptr < data_end; data_ptr += le32_to_cpu(item_h.len)) { if (copy_from_user(&item_h, data_ptr, sizeof(item_h))) return -EFAULT; data_ptr += sizeof(item_h); if (item_h.name != LIST_HEADER) continue; if (copy_from_user(&item_type, data_ptr, sizeof(item_type))) return -EFAULT; switch (item_type) { case FUNC_HEADER: if (copy_from_user(&funcdesc_h, data_ptr + sizeof(item_type), sizeof(funcdesc_h))) return -EFAULT; func_nr = le16_to_cpu(funcdesc_h.func_nr); break; case CODE_HEADER: if (func_nr != info.func_req) break; /* not required function, try next */ data_ptr += sizeof(item_type); /* destroy QSound mixer element */ if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) { snd_sb_qsound_destroy(p); } /* Clear all flags */ p->running = 0; p->mode = 0; /* load microcode blocks */ for (;;) { if (data_ptr >= data_end) return -EINVAL; if (copy_from_user(&code_h, data_ptr, sizeof(code_h))) return -EFAULT; 
/* init microcode blocks */ if (code_h.name != INIT_HEADER) break; data_ptr += sizeof(code_h); err = snd_sb_csp_load_user(p, data_ptr, le32_to_cpu(code_h.len), SNDRV_SB_CSP_LOAD_INITBLOCK); if (err) return err; data_ptr += le32_to_cpu(code_h.len); } /* main microcode block */ if (copy_from_user(&code_h, data_ptr, sizeof(code_h))) return -EFAULT; if (code_h.name != MAIN_HEADER) { snd_printd("%s: Missing 'main' microcode\n", __func__); return -EINVAL; } data_ptr += sizeof(code_h); err = snd_sb_csp_load_user(p, data_ptr, le32_to_cpu(code_h.len), 0); if (err) return err; /* fill in codec header */ strlcpy(p->codec_name, info.codec_name, sizeof(p->codec_name)); p->func_nr = func_nr; p->mode = le16_to_cpu(funcdesc_h.flags_play_rec); switch (le16_to_cpu(funcdesc_h.VOC_type)) { case 0x0001: /* QSound decoder */ if (le16_to_cpu(funcdesc_h.flags_play_rec) == SNDRV_SB_CSP_MODE_DSP_WRITE) { if (snd_sb_qsound_build(p) == 0) /* set QSound flag and clear all other mode flags */ p->mode = SNDRV_SB_CSP_MODE_QSOUND; } p->acc_format = 0; break; case 0x0006: /* A Law codec */ p->acc_format = SNDRV_PCM_FMTBIT_A_LAW; break; case 0x0007: /* Mu Law codec */ p->acc_format = SNDRV_PCM_FMTBIT_MU_LAW; break; case 0x0011: /* what Creative thinks is IMA ADPCM codec */ case 0x0200: /* Creative ADPCM codec */ p->acc_format = SNDRV_PCM_FMTBIT_IMA_ADPCM; break; case 201: /* Text 2 Speech decoder */ /* TODO: Text2Speech handling routines */ p->acc_format = 0; break; case 0x0202: /* Fast Speech 8 codec */ case 0x0203: /* Fast Speech 10 codec */ p->acc_format = SNDRV_PCM_FMTBIT_SPECIAL; break; default: /* other codecs are unsupported */ p->acc_format = p->acc_width = p->acc_rates = 0; p->mode = 0; snd_printd("%s: Unsupported CSP codec type: 0x%04x\n", __func__, le16_to_cpu(funcdesc_h.VOC_type)); return -EINVAL; } p->acc_channels = le16_to_cpu(funcdesc_h.flags_stereo_mono); p->acc_width = le16_to_cpu(funcdesc_h.flags_16bit_8bit); p->acc_rates = le16_to_cpu(funcdesc_h.flags_rates); /* Decouple CSP from 
IRQ and DMAREQ lines */ spin_lock_irqsave(&p->chip->reg_lock, flags); set_mode_register(p->chip, 0xfc); set_mode_register(p->chip, 0x00); spin_unlock_irqrestore(&p->chip->reg_lock, flags); /* finished loading successfully */ p->running = SNDRV_SB_CSP_ST_LOADED; /* set LOADED flag */ return 0; } } snd_printd("%s: Function #%d not found\n", __func__, info.func_req); return -EINVAL; } /* * unload CSP microcode */ static int snd_sb_csp_unload(struct snd_sb_csp * p) { if (p->running & SNDRV_SB_CSP_ST_RUNNING) return -EBUSY; if (!(p->running & SNDRV_SB_CSP_ST_LOADED)) return -ENXIO; /* clear supported formats */ p->acc_format = 0; p->acc_channels = p->acc_width = p->acc_rates = 0; /* destroy QSound mixer element */ if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) { snd_sb_qsound_destroy(p); } /* clear all flags */ p->running = 0; p->mode = 0; return 0; } /* * send command sequence to DSP */ static inline int command_seq(struct snd_sb *chip, const unsigned char *seq, int size) { int i; for (i = 0; i < size; i++) { if (!snd_sbdsp_command(chip, seq[i])) return -EIO; } return 0; } /* * set CSP codec parameter */ static int set_codec_parameter(struct snd_sb *chip, unsigned char par, unsigned char val) { unsigned char dsp_cmd[3]; dsp_cmd[0] = 0x05; /* CSP set codec parameter */ dsp_cmd[1] = val; /* Parameter value */ dsp_cmd[2] = par; /* Parameter */ command_seq(chip, dsp_cmd, 3); snd_sbdsp_command(chip, 0x03); /* DSP read? 
*/ if (snd_sbdsp_get_byte(chip) != par) return -EIO; return 0; } /* * set CSP register */ static int set_register(struct snd_sb *chip, unsigned char reg, unsigned char val) { unsigned char dsp_cmd[3]; dsp_cmd[0] = 0x0e; /* CSP set register */ dsp_cmd[1] = reg; /* CSP Register */ dsp_cmd[2] = val; /* value */ return command_seq(chip, dsp_cmd, 3); } /* * read CSP register * return < 0 -> error */ static int read_register(struct snd_sb *chip, unsigned char reg) { unsigned char dsp_cmd[2]; dsp_cmd[0] = 0x0f; /* CSP read register */ dsp_cmd[1] = reg; /* CSP Register */ command_seq(chip, dsp_cmd, 2); return snd_sbdsp_get_byte(chip); /* Read DSP value */ } /* * set CSP mode register */ static int set_mode_register(struct snd_sb *chip, unsigned char mode) { unsigned char dsp_cmd[2]; dsp_cmd[0] = 0x04; /* CSP set mode register */ dsp_cmd[1] = mode; /* mode */ return command_seq(chip, dsp_cmd, 2); } /* * Detect CSP * return 0 if CSP exists. */ static int csp_detect(struct snd_sb *chip, int *version) { unsigned char csp_test1, csp_test2; unsigned long flags; int result = -ENODEV; spin_lock_irqsave(&chip->reg_lock, flags); set_codec_parameter(chip, 0x00, 0x00); set_mode_register(chip, 0xfc); /* 0xfc = ?? */ csp_test1 = read_register(chip, 0x83); set_register(chip, 0x83, ~csp_test1); csp_test2 = read_register(chip, 0x83); if (csp_test2 != (csp_test1 ^ 0xff)) goto __fail; set_register(chip, 0x83, csp_test1); csp_test2 = read_register(chip, 0x83); if (csp_test2 != csp_test1) goto __fail; set_mode_register(chip, 0x00); /* 0x00 = ? */ *version = get_version(chip); snd_sbdsp_reset(chip); /* reset DSP after getversion! */ if (*version >= 0x10 && *version <= 0x1f) result = 0; /* valid version id */ __fail: spin_unlock_irqrestore(&chip->reg_lock, flags); return result; } /* * get CSP version number */ static int get_version(struct snd_sb *chip) { unsigned char dsp_cmd[2]; dsp_cmd[0] = 0x08; /* SB_DSP_!something! */ dsp_cmd[1] = 0x03; /* get chip version id? 
*/ command_seq(chip, dsp_cmd, 2); return (snd_sbdsp_get_byte(chip)); } /* * check if the CSP version is valid */ static int snd_sb_csp_check_version(struct snd_sb_csp * p) { if (p->version < 0x10 || p->version > 0x1f) { snd_printd("%s: Invalid CSP version: 0x%x\n", __func__, p->version); return 1; } return 0; } /* * download microcode to CSP (microcode should have one "main" block). */ static int snd_sb_csp_load(struct snd_sb_csp * p, const unsigned char *buf, int size, int load_flags) { int status, i; int err; int result = -EIO; unsigned long flags; spin_lock_irqsave(&p->chip->reg_lock, flags); snd_sbdsp_command(p->chip, 0x01); /* CSP download command */ if (snd_sbdsp_get_byte(p->chip)) { snd_printd("%s: Download command failed\n", __func__); goto __fail; } /* Send CSP low byte (size - 1) */ snd_sbdsp_command(p->chip, (unsigned char)(size - 1)); /* Send high byte */ snd_sbdsp_command(p->chip, (unsigned char)((size - 1) >> 8)); /* send microcode sequence */ /* load from kernel space */ while (size--) { if (!snd_sbdsp_command(p->chip, *buf++)) goto __fail; } if (snd_sbdsp_get_byte(p->chip)) goto __fail; if (load_flags & SNDRV_SB_CSP_LOAD_INITBLOCK) { i = 0; /* some codecs (FastSpeech) take some time to initialize */ while (1) { snd_sbdsp_command(p->chip, 0x03); status = snd_sbdsp_get_byte(p->chip); if (status == 0x55 || ++i >= 10) break; udelay (10); } if (status != 0x55) { snd_printd("%s: Microcode initialization failed\n", __func__); goto __fail; } } else { /* * Read mixer register SB_DSP4_DMASETUP after loading 'main' code. * Start CSP chip if no 16bit DMA channel is set - some kind * of autorun or perhaps a bugfix? */ spin_lock(&p->chip->mixer_lock); status = snd_sbmixer_read(p->chip, SB_DSP4_DMASETUP); spin_unlock(&p->chip->mixer_lock); if (!(status & (SB_DMASETUP_DMA7 | SB_DMASETUP_DMA6 | SB_DMASETUP_DMA5))) { err = (set_codec_parameter(p->chip, 0xaa, 0x00) || set_codec_parameter(p->chip, 0xff, 0x00)); snd_sbdsp_reset(p->chip); /* really! 
*/ if (err) goto __fail; set_mode_register(p->chip, 0xc0); /* c0 = STOP */ set_mode_register(p->chip, 0x70); /* 70 = RUN */ } } result = 0; __fail: spin_unlock_irqrestore(&p->chip->reg_lock, flags); return result; } static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags) { int err; unsigned char *kbuf; kbuf = memdup_user(buf, size); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); err = snd_sb_csp_load(p, kbuf, size, load_flags); kfree(kbuf); return err; } static int snd_sb_csp_firmware_load(struct snd_sb_csp *p, int index, int flags) { static const char *const names[] = { "sb16/mulaw_main.csp", "sb16/alaw_main.csp", "sb16/ima_adpcm_init.csp", "sb16/ima_adpcm_playback.csp", "sb16/ima_adpcm_capture.csp", }; const struct firmware *program; BUILD_BUG_ON(ARRAY_SIZE(names) != CSP_PROGRAM_COUNT); program = p->csp_programs[index]; if (!program) { int err = request_firmware(&program, names[index], p->chip->card->dev); if (err < 0) return err; p->csp_programs[index] = program; } return snd_sb_csp_load(p, program->data, program->size, flags); } /* * autoload hardware codec if necessary * return 0 if CSP is loaded and ready to run (p->running != 0) */ static int snd_sb_csp_autoload(struct snd_sb_csp * p, int pcm_sfmt, int play_rec_mode) { unsigned long flags; int err = 0; /* if CSP is running or manually loaded then exit */ if (p->running & (SNDRV_SB_CSP_ST_RUNNING | SNDRV_SB_CSP_ST_LOADED)) return -EBUSY; /* autoload microcode only if requested hardware codec is not already loaded */ if (((1 << pcm_sfmt) & p->acc_format) && (play_rec_mode & p->mode)) { p->running = SNDRV_SB_CSP_ST_AUTO; } else { switch (pcm_sfmt) { case SNDRV_PCM_FORMAT_MU_LAW: err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_MULAW, 0); p->acc_format = SNDRV_PCM_FMTBIT_MU_LAW; p->mode = SNDRV_SB_CSP_MODE_DSP_READ | SNDRV_SB_CSP_MODE_DSP_WRITE; break; case SNDRV_PCM_FORMAT_A_LAW: err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_ALAW, 0); p->acc_format = 
SNDRV_PCM_FMTBIT_A_LAW; p->mode = SNDRV_SB_CSP_MODE_DSP_READ | SNDRV_SB_CSP_MODE_DSP_WRITE; break; case SNDRV_PCM_FORMAT_IMA_ADPCM: err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_ADPCM_INIT, SNDRV_SB_CSP_LOAD_INITBLOCK); if (err) break; if (play_rec_mode == SNDRV_SB_CSP_MODE_DSP_WRITE) { err = snd_sb_csp_firmware_load (p, CSP_PROGRAM_ADPCM_PLAYBACK, 0); p->mode = SNDRV_SB_CSP_MODE_DSP_WRITE; } else { err = snd_sb_csp_firmware_load (p, CSP_PROGRAM_ADPCM_CAPTURE, 0); p->mode = SNDRV_SB_CSP_MODE_DSP_READ; } p->acc_format = SNDRV_PCM_FMTBIT_IMA_ADPCM; break; default: /* Decouple CSP from IRQ and DMAREQ lines */ if (p->running & SNDRV_SB_CSP_ST_AUTO) { spin_lock_irqsave(&p->chip->reg_lock, flags); set_mode_register(p->chip, 0xfc); set_mode_register(p->chip, 0x00); spin_unlock_irqrestore(&p->chip->reg_lock, flags); p->running = 0; /* clear autoloaded flag */ } return -EINVAL; } if (err) { p->acc_format = 0; p->acc_channels = p->acc_width = p->acc_rates = 0; p->running = 0; /* clear autoloaded flag */ p->mode = 0; return (err); } else { p->running = SNDRV_SB_CSP_ST_AUTO; /* set autoloaded flag */ p->acc_width = SNDRV_SB_CSP_SAMPLE_16BIT; /* only 16 bit data */ p->acc_channels = SNDRV_SB_CSP_MONO | SNDRV_SB_CSP_STEREO; p->acc_rates = SNDRV_SB_CSP_RATE_ALL; /* HW codecs accept all rates */ } } return (p->running & SNDRV_SB_CSP_ST_AUTO) ? 
0 : -ENXIO; } /* * start CSP */ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channels) { unsigned char s_type; /* sample type */ unsigned char mixL, mixR; int result = -EIO; unsigned long flags; if (!(p->running & (SNDRV_SB_CSP_ST_LOADED | SNDRV_SB_CSP_ST_AUTO))) { snd_printd("%s: Microcode not loaded\n", __func__); return -ENXIO; } if (p->running & SNDRV_SB_CSP_ST_RUNNING) { snd_printd("%s: CSP already running\n", __func__); return -EBUSY; } if (!(sample_width & p->acc_width)) { snd_printd("%s: Unsupported PCM sample width\n", __func__); return -EINVAL; } if (!(channels & p->acc_channels)) { snd_printd("%s: Invalid number of channels\n", __func__); return -EINVAL; } /* Mute PCM volume */ spin_lock_irqsave(&p->chip->mixer_lock, flags); mixL = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV); mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); spin_lock(&p->chip->reg_lock); set_mode_register(p->chip, 0xc0); /* c0 = STOP */ set_mode_register(p->chip, 0x70); /* 70 = RUN */ s_type = 0x00; if (channels == SNDRV_SB_CSP_MONO) s_type = 0x11; /* 000n 000n (n = 1 if mono) */ if (sample_width == SNDRV_SB_CSP_SAMPLE_8BIT) s_type |= 0x22; /* 00dX 00dX (d = 1 if 8 bit samples) */ if (set_codec_parameter(p->chip, 0x81, s_type)) { snd_printd("%s: Set sample type command failed\n", __func__); goto __fail; } if (set_codec_parameter(p->chip, 0x80, 0x00)) { snd_printd("%s: Codec start command failed\n", __func__); goto __fail; } p->run_width = sample_width; p->run_channels = channels; p->running |= SNDRV_SB_CSP_ST_RUNNING; if (p->mode & SNDRV_SB_CSP_MODE_QSOUND) { set_codec_parameter(p->chip, 0xe0, 0x01); /* enable QSound decoder */ set_codec_parameter(p->chip, 0x00, 0xff); set_codec_parameter(p->chip, 0x01, 0xff); p->running |= SNDRV_SB_CSP_ST_QSOUND; /* set QSound startup value */ snd_sb_csp_qsound_transfer(p); } result = 0; __fail: 
spin_unlock(&p->chip->reg_lock); /* restore PCM volume */ snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); spin_unlock_irqrestore(&p->chip->mixer_lock, flags); return result; } /* * stop CSP */ static int snd_sb_csp_stop(struct snd_sb_csp * p) { int result; unsigned char mixL, mixR; unsigned long flags; if (!(p->running & SNDRV_SB_CSP_ST_RUNNING)) return 0; /* Mute PCM volume */ spin_lock_irqsave(&p->chip->mixer_lock, flags); mixL = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV); mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); spin_lock(&p->chip->reg_lock); if (p->running & SNDRV_SB_CSP_ST_QSOUND) { set_codec_parameter(p->chip, 0xe0, 0x01); /* disable QSound decoder */ set_codec_parameter(p->chip, 0x00, 0x00); set_codec_parameter(p->chip, 0x01, 0x00); p->running &= ~SNDRV_SB_CSP_ST_QSOUND; } result = set_mode_register(p->chip, 0xc0); /* c0 = STOP */ spin_unlock(&p->chip->reg_lock); /* restore PCM volume */ snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); spin_unlock_irqrestore(&p->chip->mixer_lock, flags); if (!(result)) p->running &= ~(SNDRV_SB_CSP_ST_PAUSED | SNDRV_SB_CSP_ST_RUNNING); return result; } /* * pause CSP codec and hold DMA transfer */ static int snd_sb_csp_pause(struct snd_sb_csp * p) { int result; unsigned long flags; if (!(p->running & SNDRV_SB_CSP_ST_RUNNING)) return -EBUSY; spin_lock_irqsave(&p->chip->reg_lock, flags); result = set_codec_parameter(p->chip, 0x80, 0xff); spin_unlock_irqrestore(&p->chip->reg_lock, flags); if (!(result)) p->running |= SNDRV_SB_CSP_ST_PAUSED; return result; } /* * restart CSP codec and resume DMA transfer */ static int snd_sb_csp_restart(struct snd_sb_csp * p) { int result; unsigned long flags; if (!(p->running & SNDRV_SB_CSP_ST_PAUSED)) return -EBUSY; 
spin_lock_irqsave(&p->chip->reg_lock, flags); result = set_codec_parameter(p->chip, 0x80, 0x00); spin_unlock_irqrestore(&p->chip->reg_lock, flags); if (!(result)) p->running &= ~SNDRV_SB_CSP_ST_PAUSED; return result; } /* ------------------------------ */ /* * QSound mixer control for PCM */ #define snd_sb_qsound_switch_info snd_ctl_boolean_mono_info static int snd_sb_qsound_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = p->q_enabled ? 1 : 0; return 0; } static int snd_sb_qsound_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol); unsigned long flags; int change; unsigned char nval; nval = ucontrol->value.integer.value[0] & 0x01; spin_lock_irqsave(&p->q_lock, flags); change = p->q_enabled != nval; p->q_enabled = nval; spin_unlock_irqrestore(&p->q_lock, flags); return change; } static int snd_sb_qsound_space_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = SNDRV_SB_CSP_QSOUND_MAX_RIGHT; return 0; } static int snd_sb_qsound_space_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol); unsigned long flags; spin_lock_irqsave(&p->q_lock, flags); ucontrol->value.integer.value[0] = p->qpos_left; ucontrol->value.integer.value[1] = p->qpos_right; spin_unlock_irqrestore(&p->q_lock, flags); return 0; } static int snd_sb_qsound_space_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol); unsigned long flags; int change; unsigned char nval1, nval2; nval1 = ucontrol->value.integer.value[0]; if (nval1 > SNDRV_SB_CSP_QSOUND_MAX_RIGHT) nval1 = SNDRV_SB_CSP_QSOUND_MAX_RIGHT; nval2 = 
ucontrol->value.integer.value[1]; if (nval2 > SNDRV_SB_CSP_QSOUND_MAX_RIGHT) nval2 = SNDRV_SB_CSP_QSOUND_MAX_RIGHT; spin_lock_irqsave(&p->q_lock, flags); change = p->qpos_left != nval1 || p->qpos_right != nval2; p->qpos_left = nval1; p->qpos_right = nval2; p->qpos_changed = change; spin_unlock_irqrestore(&p->q_lock, flags); return change; } static struct snd_kcontrol_new snd_sb_qsound_switch = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "3D Control - Switch", .info = snd_sb_qsound_switch_info, .get = snd_sb_qsound_switch_get, .put = snd_sb_qsound_switch_put }; static struct snd_kcontrol_new snd_sb_qsound_space = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "3D Control - Space", .info = snd_sb_qsound_space_info, .get = snd_sb_qsound_space_get, .put = snd_sb_qsound_space_put }; static int snd_sb_qsound_build(struct snd_sb_csp * p) { struct snd_card *card; int err; if (snd_BUG_ON(!p)) return -EINVAL; card = p->chip->card; p->qpos_left = p->qpos_right = SNDRV_SB_CSP_QSOUND_MAX_RIGHT / 2; p->qpos_changed = 0; spin_lock_init(&p->q_lock); if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) goto __error; if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) goto __error; return 0; __error: snd_sb_qsound_destroy(p); return err; } static void snd_sb_qsound_destroy(struct snd_sb_csp * p) { struct snd_card *card; unsigned long flags; if (snd_BUG_ON(!p)) return; card = p->chip->card; down_write(&card->controls_rwsem); if (p->qsound_switch) snd_ctl_remove(card, p->qsound_switch); if (p->qsound_space) snd_ctl_remove(card, p->qsound_space); up_write(&card->controls_rwsem); /* cancel pending transfer of QSound parameters */ spin_lock_irqsave (&p->q_lock, flags); p->qpos_changed = 0; spin_unlock_irqrestore (&p->q_lock, flags); } /* * Transfer qsound parameters to CSP, * function should be called from interrupt routine */ static int snd_sb_csp_qsound_transfer(struct snd_sb_csp * p) { int err = 
-ENXIO; spin_lock(&p->q_lock); if (p->running & SNDRV_SB_CSP_ST_QSOUND) { set_codec_parameter(p->chip, 0xe0, 0x01); /* left channel */ set_codec_parameter(p->chip, 0x00, p->qpos_left); set_codec_parameter(p->chip, 0x02, 0x00); /* right channel */ set_codec_parameter(p->chip, 0x00, p->qpos_right); set_codec_parameter(p->chip, 0x03, 0x00); err = 0; } p->qpos_changed = 0; spin_unlock(&p->q_lock); return err; } /* ------------------------------ */ /* * proc interface */ static int init_proc_entry(struct snd_sb_csp * p, int device) { char name[16]; struct snd_info_entry *entry; sprintf(name, "cspD%d", device); if (! snd_card_proc_new(p->chip->card, name, &entry)) snd_info_set_text_ops(entry, p, info_read); return 0; } static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_sb_csp *p = entry->private_data; snd_iprintf(buffer, "Creative Signal Processor [v%d.%d]\n", (p->version >> 4), (p->version & 0x0f)); snd_iprintf(buffer, "State: %cx%c%c%c\n", ((p->running & SNDRV_SB_CSP_ST_QSOUND) ? 'Q' : '-'), ((p->running & SNDRV_SB_CSP_ST_PAUSED) ? 'P' : '-'), ((p->running & SNDRV_SB_CSP_ST_RUNNING) ? 'R' : '-'), ((p->running & SNDRV_SB_CSP_ST_LOADED) ? 'L' : '-')); if (p->running & SNDRV_SB_CSP_ST_LOADED) { snd_iprintf(buffer, "Codec: %s [func #%d]\n", p->codec_name, p->func_nr); snd_iprintf(buffer, "Sample rates: "); if (p->acc_rates == SNDRV_SB_CSP_RATE_ALL) { snd_iprintf(buffer, "All\n"); } else { snd_iprintf(buffer, "%s%s%s%s\n", ((p->acc_rates & SNDRV_SB_CSP_RATE_8000) ? "8000Hz " : ""), ((p->acc_rates & SNDRV_SB_CSP_RATE_11025) ? "11025Hz " : ""), ((p->acc_rates & SNDRV_SB_CSP_RATE_22050) ? "22050Hz " : ""), ((p->acc_rates & SNDRV_SB_CSP_RATE_44100) ? "44100Hz" : "")); } if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) { snd_iprintf(buffer, "QSound decoder %sabled\n", p->q_enabled ? "en" : "dis"); } else { snd_iprintf(buffer, "PCM format ID: 0x%x (%s/%s) [%s/%s] [%s/%s]\n", p->acc_format, ((p->acc_width & SNDRV_SB_CSP_SAMPLE_16BIT) ? 
"16bit" : "-"), ((p->acc_width & SNDRV_SB_CSP_SAMPLE_8BIT) ? "8bit" : "-"), ((p->acc_channels & SNDRV_SB_CSP_MONO) ? "mono" : "-"), ((p->acc_channels & SNDRV_SB_CSP_STEREO) ? "stereo" : "-"), ((p->mode & SNDRV_SB_CSP_MODE_DSP_WRITE) ? "playback" : "-"), ((p->mode & SNDRV_SB_CSP_MODE_DSP_READ) ? "capture" : "-")); } } if (p->running & SNDRV_SB_CSP_ST_AUTO) { snd_iprintf(buffer, "Autoloaded Mu-Law, A-Law or Ima-ADPCM hardware codec\n"); } if (p->running & SNDRV_SB_CSP_ST_RUNNING) { snd_iprintf(buffer, "Processing %dbit %s PCM samples\n", ((p->run_width & SNDRV_SB_CSP_SAMPLE_16BIT) ? 16 : 8), ((p->run_channels & SNDRV_SB_CSP_MONO) ? "mono" : "stereo")); } if (p->running & SNDRV_SB_CSP_ST_QSOUND) { snd_iprintf(buffer, "Qsound position: left = 0x%x, right = 0x%x\n", p->qpos_left, p->qpos_right); } } /* */ EXPORT_SYMBOL(snd_sb_csp_new); /* * INIT part */ static int __init alsa_sb_csp_init(void) { return 0; } static void __exit alsa_sb_csp_exit(void) { } module_init(alsa_sb_csp_init) module_exit(alsa_sb_csp_exit)
gpl-2.0
TripNRaVeR/tripndroid-m7-unleashed-3.4
net/tipc/ref.c
4897
8530
/* * net/tipc/ref.c: TIPC object registry code * * Copyright (c) 1991-2006, Ericsson AB * Copyright (c) 2004-2007, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "core.h" #include "ref.h" /** * struct reference - TIPC object reference entry * @object: pointer to object associated with reference entry * @lock: spinlock controlling access to object * @ref: reference value for object (combines instance & array index info) */ struct reference { void *object; spinlock_t lock; u32 ref; }; /** * struct tipc_ref_table - table of TIPC object reference entries * @entries: pointer to array of reference entries * @capacity: array index of first unusable entry * @init_point: array index of first uninitialized entry * @first_free: array index of first unused object reference entry * @last_free: array index of last unused object reference entry * @index_mask: bitmask for array index portion of reference values * @start_mask: initial value for instance value portion of reference values */ struct ref_table { struct reference *entries; u32 capacity; u32 init_point; u32 first_free; u32 last_free; u32 index_mask; u32 start_mask; }; /* * Object reference table consists of 2**N entries. * * State Object ptr Reference * ----- ---------- --------- * In use non-NULL XXXX|own index * (XXXX changes each time entry is acquired) * Free NULL YYYY|next free index * (YYYY is one more than last used XXXX) * Uninitialized NULL 0 * * Entry 0 is not used; this allows index 0 to denote the end of the free list. * * Note that a reference value of 0 does not necessarily indicate that an * entry is uninitialized, since the last entry in the free list could also * have a reference value of 0 (although this is unlikely). 
*/ static struct ref_table tipc_ref_table; static DEFINE_RWLOCK(ref_table_lock); /** * tipc_ref_table_init - create reference table for objects */ int tipc_ref_table_init(u32 requested_size, u32 start) { struct reference *table; u32 actual_size; /* account for unused entry, then round up size to a power of 2 */ requested_size++; for (actual_size = 16; actual_size < requested_size; actual_size <<= 1) /* do nothing */ ; /* allocate table & mark all entries as uninitialized */ table = vzalloc(actual_size * sizeof(struct reference)); if (table == NULL) return -ENOMEM; tipc_ref_table.entries = table; tipc_ref_table.capacity = requested_size; tipc_ref_table.init_point = 1; tipc_ref_table.first_free = 0; tipc_ref_table.last_free = 0; tipc_ref_table.index_mask = actual_size - 1; tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask; return 0; } /** * tipc_ref_table_stop - destroy reference table for objects */ void tipc_ref_table_stop(void) { if (!tipc_ref_table.entries) return; vfree(tipc_ref_table.entries); tipc_ref_table.entries = NULL; } /** * tipc_ref_acquire - create reference to an object * * Register an object pointer in reference table and lock the object. * Returns a unique reference value that is used from then on to retrieve the * object pointer, or to determine that the object has been deregistered. * * Note: The object is returned in the locked state so that the caller can * register a partially initialized object, without running the risk that * the object will be accessed before initialization is complete. 
*/ u32 tipc_ref_acquire(void *object, spinlock_t **lock) { u32 index; u32 index_mask; u32 next_plus_upper; u32 ref; struct reference *entry = NULL; if (!object) { err("Attempt to acquire reference to non-existent object\n"); return 0; } if (!tipc_ref_table.entries) { err("Reference table not found during acquisition attempt\n"); return 0; } /* take a free entry, if available; otherwise initialize a new entry */ write_lock_bh(&ref_table_lock); if (tipc_ref_table.first_free) { index = tipc_ref_table.first_free; entry = &(tipc_ref_table.entries[index]); index_mask = tipc_ref_table.index_mask; next_plus_upper = entry->ref; tipc_ref_table.first_free = next_plus_upper & index_mask; ref = (next_plus_upper & ~index_mask) + index; } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { index = tipc_ref_table.init_point++; entry = &(tipc_ref_table.entries[index]); spin_lock_init(&entry->lock); ref = tipc_ref_table.start_mask + index; } else { ref = 0; } write_unlock_bh(&ref_table_lock); /* * Grab the lock so no one else can modify this entry * While we assign its ref value & object pointer */ if (entry) { spin_lock_bh(&entry->lock); entry->ref = ref; entry->object = object; *lock = &entry->lock; /* * keep it locked, the caller is responsible * for unlocking this when they're done with it */ } return ref; } /** * tipc_ref_discard - invalidate references to an object * * Disallow future references to an object and free up the entry for re-use. 
* Note: The entry's spin_lock may still be busy after discard */ void tipc_ref_discard(u32 ref) { struct reference *entry; u32 index; u32 index_mask; if (!tipc_ref_table.entries) { err("Reference table not found during discard attempt\n"); return; } index_mask = tipc_ref_table.index_mask; index = ref & index_mask; entry = &(tipc_ref_table.entries[index]); write_lock_bh(&ref_table_lock); if (!entry->object) { err("Attempt to discard reference to non-existent object\n"); goto exit; } if (entry->ref != ref) { err("Attempt to discard non-existent reference\n"); goto exit; } /* * mark entry as unused; increment instance part of entry's reference * to invalidate any subsequent references */ entry->object = NULL; entry->ref = (ref & ~index_mask) + (index_mask + 1); /* append entry to free entry list */ if (tipc_ref_table.first_free == 0) tipc_ref_table.first_free = index; else tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index; tipc_ref_table.last_free = index; exit: write_unlock_bh(&ref_table_lock); } /** * tipc_ref_lock - lock referenced object and return pointer to it */ void *tipc_ref_lock(u32 ref) { if (likely(tipc_ref_table.entries)) { struct reference *entry; entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask]; if (likely(entry->ref != 0)) { spin_lock_bh(&entry->lock); if (likely((entry->ref == ref) && (entry->object))) return entry->object; spin_unlock_bh(&entry->lock); } } return NULL; } /** * tipc_ref_deref - return pointer referenced object (without locking it) */ void *tipc_ref_deref(u32 ref) { if (likely(tipc_ref_table.entries)) { struct reference *entry; entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask]; if (likely(entry->ref == ref)) return entry->object; } return NULL; }
gpl-2.0
V-KING/g3_kernel
drivers/infiniband/hw/ipath/ipath_file_ops.c
5153
73545
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/pci.h> #include <linux/poll.h> #include <linux/cdev.h> #include <linux/swap.h> #include <linux/export.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/cpu.h> #include <asm/pgtable.h> #include "ipath_kernel.h" #include "ipath_common.h" #include "ipath_user_sdma.h" static int ipath_open(struct inode *, struct file *); static int ipath_close(struct inode *, struct file *); static ssize_t ipath_write(struct file *, const char __user *, size_t, loff_t *); static ssize_t ipath_writev(struct kiocb *, const struct iovec *, unsigned long , loff_t); static unsigned int ipath_poll(struct file *, struct poll_table_struct *); static int ipath_mmap(struct file *, struct vm_area_struct *); static const struct file_operations ipath_file_ops = { .owner = THIS_MODULE, .write = ipath_write, .aio_write = ipath_writev, .open = ipath_open, .release = ipath_close, .poll = ipath_poll, .mmap = ipath_mmap, .llseek = noop_llseek, }; /* * Convert kernel virtual addresses to physical addresses so they don't * potentially conflict with the chip addresses used as mmap offsets. * It doesn't really matter what mmap offset we use as long as we can * interpret it correctly. 
*/ static u64 cvt_kvaddr(void *p) { struct page *page; u64 paddr = 0; page = vmalloc_to_page(p); if (page) paddr = page_to_pfn(page) << PAGE_SHIFT; return paddr; } static int ipath_get_base_info(struct file *fp, void __user *ubase, size_t ubase_size) { struct ipath_portdata *pd = port_fp(fp); int ret = 0; struct ipath_base_info *kinfo = NULL; struct ipath_devdata *dd = pd->port_dd; unsigned subport_cnt; int shared, master; size_t sz; subport_cnt = pd->port_subport_cnt; if (!subport_cnt) { shared = 0; master = 0; subport_cnt = 1; } else { shared = 1; master = !subport_fp(fp); } sz = sizeof(*kinfo); /* If port sharing is not requested, allow the old size structure */ if (!shared) sz -= 7 * sizeof(u64); if (ubase_size < sz) { ipath_cdbg(PROC, "Base size %zu, need %zu (version mismatch?)\n", ubase_size, sz); ret = -EINVAL; goto bail; } kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL); if (kinfo == NULL) { ret = -ENOMEM; goto bail; } ret = dd->ipath_f_get_base_info(pd, kinfo); if (ret < 0) goto bail; kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt; kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize; kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt; kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize; /* * have to mmap whole thing */ kinfo->spi_rcv_egrbuftotlen = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk; kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen / pd->port_rcvegrbuf_chunks; kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt; if (master) kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt; /* * for this use, may be ipath_cfgports summed over all chips that * are are configured and present */ kinfo->spi_nports = dd->ipath_cfgports; /* unit (chip/board) our port is on */ kinfo->spi_unit = dd->ipath_unit; /* for now, only a single page */ kinfo->spi_tid_maxsize = PAGE_SIZE; /* * Doing this per port, and based on the skip value, etc. 
This has * to be the actual buffer size, since the protocol code treats it * as an array. * * These have to be set to user addresses in the user code via mmap. * These values are used on return to user code for the mmap target * addresses only. For 32 bit, same 44 bit address problem, so use * the physical address, not virtual. Before 2.6.11, using the * page_address() macro worked, but in 2.6.11, even that returns the * full 64 bit address (upper bits all 1's). So far, using the * physical addresses (or chip offsets, for chip mapping) works, but * no doubt some future kernel release will change that, and we'll be * on to yet another method of dealing with this. */ kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys; kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys; kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys; kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys; kinfo->spi_status = (u64) kinfo->spi_pioavailaddr + (void *) dd->ipath_statusp - (void *) dd->ipath_pioavailregs_dma; if (!shared) { kinfo->spi_piocnt = pd->port_piocnt; kinfo->spi_piobufbase = (u64) pd->port_piobufs; kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; } else if (master) { kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) + (pd->port_piocnt % subport_cnt); /* Master's PIO buffers are after all the slave's */ kinfo->spi_piobufbase = (u64) pd->port_piobufs + dd->ipath_palign * (pd->port_piocnt - kinfo->spi_piocnt); } else { unsigned slave = subport_fp(fp) - 1; kinfo->spi_piocnt = pd->port_piocnt / subport_cnt; kinfo->spi_piobufbase = (u64) pd->port_piobufs + dd->ipath_palign * kinfo->spi_piocnt * slave; } if (shared) { kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs; kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base; kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr; kinfo->__spi_uregbase = 
cvt_kvaddr(pd->subport_uregbase + PAGE_SIZE * subport_fp(fp)); kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base + pd->port_rcvhdrq_size * subport_fp(fp)); kinfo->spi_rcvhdr_tailaddr = 0; kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf + pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size * subport_fp(fp)); kinfo->spi_subport_uregbase = cvt_kvaddr(pd->subport_uregbase); kinfo->spi_subport_rcvegrbuf = cvt_kvaddr(pd->subport_rcvegrbuf); kinfo->spi_subport_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base); ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n", kinfo->spi_port, kinfo->spi_runtime_flags, (unsigned long long) kinfo->spi_subport_uregbase, (unsigned long long) kinfo->spi_subport_rcvegrbuf, (unsigned long long) kinfo->spi_subport_rcvhdr_base); } /* * All user buffers are 2KB buffers. If we ever support * giving 4KB buffers to user processes, this will need some * work. */ kinfo->spi_pioindex = (kinfo->spi_piobufbase - (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign; kinfo->spi_pioalign = dd->ipath_palign; kinfo->spi_qpair = IPATH_KD_QP; /* * user mode PIO buffers are always 2KB, even when 4KB can * be received, and sent via the kernel; this is ibmaxlen * for 2K MTU. */ kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32); kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ kinfo->spi_port = pd->port_port; kinfo->spi_subport = subport_fp(fp); kinfo->spi_sw_version = IPATH_KERN_SWVERSION; kinfo->spi_hw_version = dd->ipath_revision; if (master) { kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER; } sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo); if (copy_to_user(ubase, kinfo, sz)) ret = -EFAULT; bail: kfree(kinfo); return ret; } /** * ipath_tid_update - update a port TID * @pd: the port * @fp: the ipath device file * @ti: the TID information * * The new implementation as of Oct 2004 is that the driver assigns * the tid and returns it to the caller. 
To make it easier to * catch bugs, and to reduce search time, we keep a cursor for * each port, walking the shadow tid array to find one that's not * in use. * * For now, if we can't allocate the full list, we fail, although * in the long run, we'll allocate as many as we can, and the * caller will deal with that by trying the remaining pages later. * That means that when we fail, we have to mark the tids as not in * use again, in our shadow copy. * * It's up to the caller to free the tids when they are done. * We'll unlock the pages as they free them. * * Also, right now we are locking one page at a time, but since * the intended use of this routine is for a single group of * virtually contiguous pages, that should change to improve * performance. */ static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp, const struct ipath_tid_info *ti) { int ret = 0, ntids; u32 tid, porttid, cnt, i, tidcnt, tidoff; u16 *tidlist; struct ipath_devdata *dd = pd->port_dd; u64 physaddr; unsigned long vaddr; u64 __iomem *tidbase; unsigned long tidmap[8]; struct page **pagep = NULL; unsigned subport = subport_fp(fp); if (!dd->ipath_pageshadow) { ret = -ENOMEM; goto done; } cnt = ti->tidcnt; if (!cnt) { ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n", (unsigned long long) ti->tidlist); /* * Should we treat as success? 
likely a bug */ ret = -EFAULT; goto done; } porttid = pd->port_port * dd->ipath_rcvtidcnt; if (!pd->port_subport_cnt) { tidcnt = dd->ipath_rcvtidcnt; tid = pd->port_tidcursor; tidoff = 0; } else if (!subport) { tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + (dd->ipath_rcvtidcnt % pd->port_subport_cnt); tidoff = dd->ipath_rcvtidcnt - tidcnt; porttid += tidoff; tid = tidcursor_fp(fp); } else { tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; tidoff = tidcnt * (subport - 1); porttid += tidoff; tid = tidcursor_fp(fp); } if (cnt > tidcnt) { /* make sure it all fits in port_tid_pg_list */ dev_info(&dd->pcidev->dev, "Process tried to allocate %u " "TIDs, only trying max (%u)\n", cnt, tidcnt); cnt = tidcnt; } pagep = &((struct page **) pd->port_tid_pg_list)[tidoff]; tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff]; memset(tidmap, 0, sizeof(tidmap)); /* before decrement; chip actual # */ ntids = tidcnt; tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) + dd->ipath_rcvtidbase + porttid * sizeof(*tidbase)); ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n", pd->port_port, cnt, tid, tidbase); /* virtual address of first page in transfer */ vaddr = ti->tidvaddr; if (!access_ok(VERIFY_WRITE, (void __user *) vaddr, cnt * PAGE_SIZE)) { ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n", (void *)vaddr, cnt); ret = -EFAULT; goto done; } ret = ipath_get_user_pages(vaddr, cnt, pagep); if (ret) { if (ret == -EBUSY) { ipath_dbg("Failed to lock addr %p, %u pages " "(already locked)\n", (void *) vaddr, cnt); /* * for now, continue, and see what happens but with * the new implementation, this should never happen, * unless perhaps the user has mpin'ed the pages * themselves (something we need to test) */ ret = 0; } else { dev_info(&dd->pcidev->dev, "Failed to lock addr %p, %u pages: " "errno %d\n", (void *) vaddr, cnt, -ret); goto done; } } for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { for (; ntids--; tid++) { if (tid == tidcnt) tid = 0; if 
(!dd->ipath_pageshadow[porttid + tid]) break; } if (ntids < 0) { /* * oops, wrapped all the way through their TIDs, * and didn't have enough free; see comments at * start of routine */ ipath_dbg("Not enough free TIDs for %u pages " "(index %d), failing\n", cnt, i); i--; /* last tidlist[i] not filled in */ ret = -ENOMEM; break; } tidlist[i] = tid + tidoff; ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, " "vaddr %lx\n", i, tid + tidoff, vaddr); /* we "know" system pages and TID pages are same size */ dd->ipath_pageshadow[porttid + tid] = pagep[i]; dd->ipath_physshadow[porttid + tid] = ipath_map_page( dd->pcidev, pagep[i], 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); /* * don't need atomic or it's overhead */ __set_bit(tid, tidmap); physaddr = dd->ipath_physshadow[porttid + tid]; ipath_stats.sps_pagelocks++; ipath_cdbg(VERBOSE, "TID %u, vaddr %lx, physaddr %llx pgp %p\n", tid, vaddr, (unsigned long long) physaddr, pagep[i]); dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED, physaddr); /* * don't check this tid in ipath_portshadow, since we * just filled it in; start with the next one. */ tid++; } if (ret) { u32 limit; cleanup: /* jump here if copy out of updated info failed... 
*/ ipath_dbg("After failure (ret=%d), undo %d of %d entries\n", -ret, i, cnt); /* same code that's in ipath_free_tid() */ limit = sizeof(tidmap) * BITS_PER_BYTE; if (limit > tidcnt) /* just in case size changes in future */ limit = tidcnt; tid = find_first_bit((const unsigned long *)tidmap, limit); for (; tid < limit; tid++) { if (!test_bit(tid, tidmap)) continue; if (dd->ipath_pageshadow[porttid + tid]) { ipath_cdbg(VERBOSE, "Freeing TID %u\n", tid); dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED, dd->ipath_tidinvalid); pci_unmap_page(dd->pcidev, dd->ipath_physshadow[porttid + tid], PAGE_SIZE, PCI_DMA_FROMDEVICE); dd->ipath_pageshadow[porttid + tid] = NULL; ipath_stats.sps_pageunlocks++; } } ipath_release_user_pages(pagep, cnt); } else { /* * Copy the updated array, with ipath_tid's filled in, back * to user. Since we did the copy in already, this "should * never fail" If it does, we have to clean up... */ if (copy_to_user((void __user *) (unsigned long) ti->tidlist, tidlist, cnt * sizeof(*tidlist))) { ret = -EFAULT; goto cleanup; } if (copy_to_user((void __user *) (unsigned long) ti->tidmap, tidmap, sizeof tidmap)) { ret = -EFAULT; goto cleanup; } if (tid == tidcnt) tid = 0; if (!pd->port_subport_cnt) pd->port_tidcursor = tid; else tidcursor_fp(fp) = tid; } done: if (ret) ipath_dbg("Failed to map %u TID pages, failing with %d\n", ti->tidcnt, -ret); return ret; } /** * ipath_tid_free - free a port TID * @pd: the port * @subport: the subport * @ti: the TID info * * right now we are unlocking one page at a time, but since * the intended use of this routine is for a single group of * virtually contiguous pages, that should change to improve * performance. We check that the TID is in range for this port * but otherwise don't check validity; if user has an error and * frees the wrong tid, it's only their own data that can thereby * be corrupted. 
We do check that the TID was in use, for sanity * We always use our idea of the saved address, not the address that * they pass in to us. */ static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport, const struct ipath_tid_info *ti) { int ret = 0; u32 tid, porttid, cnt, limit, tidcnt; struct ipath_devdata *dd = pd->port_dd; u64 __iomem *tidbase; unsigned long tidmap[8]; if (!dd->ipath_pageshadow) { ret = -ENOMEM; goto done; } if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, sizeof tidmap)) { ret = -EFAULT; goto done; } porttid = pd->port_port * dd->ipath_rcvtidcnt; if (!pd->port_subport_cnt) tidcnt = dd->ipath_rcvtidcnt; else if (!subport) { tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) + (dd->ipath_rcvtidcnt % pd->port_subport_cnt); porttid += dd->ipath_rcvtidcnt - tidcnt; } else { tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt; porttid += tidcnt * (subport - 1); } tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) + dd->ipath_rcvtidbase + porttid * sizeof(*tidbase)); limit = sizeof(tidmap) * BITS_PER_BYTE; if (limit > tidcnt) /* just in case size changes in future */ limit = tidcnt; tid = find_first_bit(tidmap, limit); ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) " "set is %d, porttid %u\n", pd->port_port, ti->tidcnt, limit, tid, porttid); for (cnt = 0; tid < limit; tid++) { /* * small optimization; if we detect a run of 3 or so without * any set, use find_first_bit again. That's mainly to * accelerate the case where we wrapped, so we have some at * the beginning, and some at the end, and a big gap * in the middle. 
*/ if (!test_bit(tid, tidmap)) continue; cnt++; if (dd->ipath_pageshadow[porttid + tid]) { struct page *p; p = dd->ipath_pageshadow[porttid + tid]; dd->ipath_pageshadow[porttid + tid] = NULL; ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n", pid_nr(pd->port_pid), tid); dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED, dd->ipath_tidinvalid); pci_unmap_page(dd->pcidev, dd->ipath_physshadow[porttid + tid], PAGE_SIZE, PCI_DMA_FROMDEVICE); ipath_release_user_pages(&p, 1); ipath_stats.sps_pageunlocks++; } else ipath_dbg("Unused tid %u, ignoring\n", tid); } if (cnt != ti->tidcnt) ipath_dbg("passed in tidcnt %d, only %d bits set in map\n", ti->tidcnt, cnt); done: if (ret) ipath_dbg("Failed to unmap %u TID pages, failing with %d\n", ti->tidcnt, -ret); return ret; } /** * ipath_set_part_key - set a partition key * @pd: the port * @key: the key * * We can have up to 4 active at a time (other than the default, which is * always allowed). This is somewhat tricky, since multiple ports may set * the same key, so we reference count them, and clean up at exit. All 4 * partition keys are packed into a single infinipath register. It's an * error for a process to set the same pkey multiple times. We provide no * mechanism to de-allocate a pkey at this time, we may eventually need to * do that. I've used the atomic operations, and no locking, and only make * a single pass through what's available. This should be more than * adequate for some time. I'll think about spinlocks or the like if and as * it's necessary. 
*/ static int ipath_set_part_key(struct ipath_portdata *pd, u16 key) { struct ipath_devdata *dd = pd->port_dd; int i, any = 0, pidx = -1; u16 lkey = key & 0x7FFF; int ret; if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) { /* nothing to do; this key always valid */ ret = 0; goto bail; } ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys " "%hx:%x %hx:%x %hx:%x %hx:%x\n", pd->port_port, key, dd->ipath_pkeys[0], atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1], atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2], atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3], atomic_read(&dd->ipath_pkeyrefs[3])); if (!lkey) { ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n", pd->port_port); ret = -EINVAL; goto bail; } /* * Set the full membership bit, because it has to be * set in the register or the packet, and it seems * cleaner to set in the register than to force all * callers to set it. (see bug 4331) */ key |= 0x8000; for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { if (!pd->port_pkeys[i] && pidx == -1) pidx = i; if (pd->port_pkeys[i] == key) { ipath_cdbg(VERBOSE, "p%u tries to set same pkey " "(%x) more than once\n", pd->port_port, key); ret = -EEXIST; goto bail; } } if (pidx == -1) { ipath_dbg("All pkeys for port %u already in use, " "can't set %x\n", pd->port_port, key); ret = -EBUSY; goto bail; } for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { if (!dd->ipath_pkeys[i]) { any++; continue; } if (dd->ipath_pkeys[i] == key) { atomic_t *pkrefs = &dd->ipath_pkeyrefs[i]; if (atomic_inc_return(pkrefs) > 1) { pd->port_pkeys[pidx] = key; ipath_cdbg(VERBOSE, "p%u set key %x " "matches #%d, count now %d\n", pd->port_port, key, i, atomic_read(pkrefs)); ret = 0; goto bail; } else { /* * lost race, decrement count, catch below */ atomic_dec(pkrefs); ipath_cdbg(VERBOSE, "Lost race, count was " "0, after dec, it's %d\n", atomic_read(pkrefs)); any++; } } if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) { /* * It makes no sense to have both the limited and 
* full membership PKEY set at the same time since * the unlimited one will disable the limited one. */ ret = -EEXIST; goto bail; } } if (!any) { ipath_dbg("port %u, all pkeys already in use, " "can't set %x\n", pd->port_port, key); ret = -EBUSY; goto bail; } for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { if (!dd->ipath_pkeys[i] && atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) { u64 pkey; /* for ipathstats, etc. */ ipath_stats.sps_pkeys[i] = lkey; pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key; pkey = (u64) dd->ipath_pkeys[0] | ((u64) dd->ipath_pkeys[1] << 16) | ((u64) dd->ipath_pkeys[2] << 32) | ((u64) dd->ipath_pkeys[3] << 48); ipath_cdbg(PROC, "p%u set key %x in #%d, " "portidx %d, new pkey reg %llx\n", pd->port_port, key, i, pidx, (unsigned long long) pkey); ipath_write_kreg( dd, dd->ipath_kregs->kr_partitionkey, pkey); ret = 0; goto bail; } } ipath_dbg("port %u, all pkeys already in use 2nd pass, " "can't set %x\n", pd->port_port, key); ret = -EBUSY; bail: return ret; } /** * ipath_manage_rcvq - manage a port's receive queue * @pd: the port * @subport: the subport * @start_stop: action to carry out * * start_stop == 0 disables receive on the port, for use in queue * overflow conditions. start_stop==1 re-enables, to be used to * re-init the software copy of the head register */ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport, int start_stop) { struct ipath_devdata *dd = pd->port_dd; ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n", start_stop ? "en" : "dis", dd->ipath_unit, pd->port_port, subport); if (subport) goto bail; /* atomically clear receive enable port. */ if (start_stop) { /* * On enable, force in-memory copy of the tail register to * 0, so that protocol code doesn't have to worry about * whether or not the chip has yet updated the in-memory * copy or not on return from the system call. The chip * always resets it's tail register back to 0 on a * transition from disabled to enabled. 
This could cause a * problem if software was broken, and did the enable w/o * the disable, but eventually the in-memory copy will be * updated and correct itself, even in the face of software * bugs. */ if (pd->port_rcvhdrtail_kvaddr) ipath_clear_rcvhdrtail(pd); set_bit(dd->ipath_r_portenable_shift + pd->port_port, &dd->ipath_rcvctrl); } else clear_bit(dd->ipath_r_portenable_shift + pd->port_port, &dd->ipath_rcvctrl); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); /* now be sure chip saw it before we return */ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); if (start_stop) { /* * And try to be sure that tail reg update has happened too. * This should in theory interlock with the RXE changes to * the tail register. Don't assign it to the tail register * in memory copy, since we could overwrite an update by the * chip if we did. */ ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); } /* always; new head should be equal to new tail; see above */ bail: return 0; } static void ipath_clean_part_key(struct ipath_portdata *pd, struct ipath_devdata *dd) { int i, j, pchanged = 0; u64 oldpkey; /* for debugging only */ oldpkey = (u64) dd->ipath_pkeys[0] | ((u64) dd->ipath_pkeys[1] << 16) | ((u64) dd->ipath_pkeys[2] << 32) | ((u64) dd->ipath_pkeys[3] << 48); for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { if (!pd->port_pkeys[i]) continue; ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i, pd->port_pkeys[i]); for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) { /* check for match independent of the global bit */ if ((dd->ipath_pkeys[j] & 0x7fff) != (pd->port_pkeys[i] & 0x7fff)) continue; if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) { ipath_cdbg(VERBOSE, "p%u clear key " "%x matches #%d\n", pd->port_port, pd->port_pkeys[i], j); ipath_stats.sps_pkeys[j] = dd->ipath_pkeys[j] = 0; pchanged++; } else ipath_cdbg( VERBOSE, "p%u key %x matches #%d, " "but ref still %d\n", pd->port_port, pd->port_pkeys[i], j, atomic_read(&dd->ipath_pkeyrefs[j])); 
break; } pd->port_pkeys[i] = 0; } if (pchanged) { u64 pkey = (u64) dd->ipath_pkeys[0] | ((u64) dd->ipath_pkeys[1] << 16) | ((u64) dd->ipath_pkeys[2] << 32) | ((u64) dd->ipath_pkeys[3] << 48); ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, " "new pkey reg %llx\n", pd->port_port, (unsigned long long) oldpkey, (unsigned long long) pkey); ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, pkey); } } /* * Initialize the port data with the receive buffer sizes * so this can be done while the master port is locked. * Otherwise, there is a race with a slave opening the port * and seeing these fields uninitialized. */ static void init_user_egr_sizes(struct ipath_portdata *pd) { struct ipath_devdata *dd = pd->port_dd; unsigned egrperchunk, egrcnt, size; /* * to avoid wasting a lot of memory, we allocate 32KB chunks of * physically contiguous memory, advance through it until used up * and then allocate more. Of course, we need memory to store those * extra pointers, now. Started out with 256KB, but under heavy * memory pressure (creating large files and then copying them over * NFS while doing lots of MPI jobs), we hit some allocation * failures, even though we can sleep... (2.6.10) Still get * failures at 64K. 32K is the lowest we can go without wasting * additional memory. */ size = 0x8000; egrperchunk = size / dd->ipath_rcvegrbufsize; egrcnt = dd->ipath_rcvegrcnt; pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk; pd->port_rcvegrbufs_perchunk = egrperchunk; pd->port_rcvegrbuf_size = size; } /** * ipath_create_user_egr - allocate eager TID buffers * @pd: the port to allocate TID buffers for * * This routine is now quite different for user and kernel, because * the kernel uses skb's, for the accelerated network performance * This is the user port version * * Allocate the eager TID buffers and program them into infinipath * They are no longer completely contiguous, we do multiple allocation * calls. 
*/ static int ipath_create_user_egr(struct ipath_portdata *pd) { struct ipath_devdata *dd = pd->port_dd; unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; size_t size; int ret; gfp_t gfp_flags; /* * GFP_USER, but without GFP_FS, so buffer cache can be * coalesced (we hope); otherwise, even at order 4, * heavy filesystem activity makes these fail, and we can * use compound pages. */ gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; egrcnt = dd->ipath_rcvegrcnt; /* TID number offset for this port */ egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt; egrsize = dd->ipath_rcvegrbufsize; ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid " "offset %x, egrsize %u\n", egrcnt, egroff, egrsize); chunk = pd->port_rcvegrbuf_chunks; egrperchunk = pd->port_rcvegrbufs_perchunk; size = pd->port_rcvegrbuf_size; pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]), GFP_KERNEL); if (!pd->port_rcvegrbuf) { ret = -ENOMEM; goto bail; } pd->port_rcvegrbuf_phys = kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]), GFP_KERNEL); if (!pd->port_rcvegrbuf_phys) { ret = -ENOMEM; goto bail_rcvegrbuf; } for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) { pd->port_rcvegrbuf[e] = dma_alloc_coherent( &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e], gfp_flags); if (!pd->port_rcvegrbuf[e]) { ret = -ENOMEM; goto bail_rcvegrbuf_phys; } } pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0]; for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) { dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk]; unsigned i; for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { dd->ipath_f_put_tid(dd, e + egroff + (u64 __iomem *) ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase), RCVHQ_RCV_TYPE_EAGER, pa); pa += egrsize; } cond_resched(); /* don't hog the cpu */ } ret = 0; goto bail; bail_rcvegrbuf_phys: for (e = 0; e < pd->port_rcvegrbuf_chunks && pd->port_rcvegrbuf[e]; e++) { dma_free_coherent(&dd->pcidev->dev, size, pd->port_rcvegrbuf[e], 
pd->port_rcvegrbuf_phys[e]); } kfree(pd->port_rcvegrbuf_phys); pd->port_rcvegrbuf_phys = NULL; bail_rcvegrbuf: kfree(pd->port_rcvegrbuf); pd->port_rcvegrbuf = NULL; bail: return ret; } /* common code for the mappings on dma_alloc_coherent mem */ static int ipath_mmap_mem(struct vm_area_struct *vma, struct ipath_portdata *pd, unsigned len, int write_ok, void *kvaddr, char *what) { struct ipath_devdata *dd = pd->port_dd; unsigned long pfn; int ret; if ((vma->vm_end - vma->vm_start) > len) { dev_info(&dd->pcidev->dev, "FAIL on %s: len %lx > %x\n", what, vma->vm_end - vma->vm_start, len); ret = -EFAULT; goto bail; } if (!write_ok) { if (vma->vm_flags & VM_WRITE) { dev_info(&dd->pcidev->dev, "%s must be mapped readonly\n", what); ret = -EPERM; goto bail; } /* don't allow them to later change with mprotect */ vma->vm_flags &= ~VM_MAYWRITE; } pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT; ret = remap_pfn_range(vma, vma->vm_start, pfn, len, vma->vm_page_prot); if (ret) dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x " "bytes r%c failed: %d\n", what, pd->port_port, pfn, len, write_ok?'w':'o', ret); else ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes " "r%c\n", what, pd->port_port, pfn, len, write_ok?'w':'o'); bail: return ret; } static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd, u64 ureg) { unsigned long phys; int ret; /* * This is real hardware, so use io_remap. This is the mechanism * for the user process to update the head registers for their port * in the chip. 
*/ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen " "%lx > PAGE\n", vma->vm_end - vma->vm_start); ret = -EFAULT; } else { phys = dd->ipath_physaddr + ureg; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); } return ret; } static int mmap_piobufs(struct vm_area_struct *vma, struct ipath_devdata *dd, struct ipath_portdata *pd, unsigned piobufs, unsigned piocnt) { unsigned long phys; int ret; /* * When we map the PIO buffers in the chip, we want to map them as * writeonly, no read possible. This prevents access to previous * process data, and catches users who might try to read the i/o * space due to a bug. */ if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) { dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: " "reqlen %lx > PAGE\n", vma->vm_end - vma->vm_start); ret = -EINVAL; goto bail; } phys = dd->ipath_physaddr + piobufs; #if defined(__powerpc__) /* There isn't a generic way to specify writethrough mappings */ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU; pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED; #endif /* * don't allow them to later change to readable with mprotect (for when * not initially mapped readable, as is normally the case) */ vma->vm_flags &= ~VM_MAYREAD; vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); bail: return ret; } static int mmap_rcvegrbufs(struct vm_area_struct *vma, struct ipath_portdata *pd) { struct ipath_devdata *dd = pd->port_dd; unsigned long start, size; size_t total_size, i; unsigned long pfn; int ret; size = pd->port_rcvegrbuf_size; total_size = pd->port_rcvegrbuf_chunks * size; if ((vma->vm_end - vma->vm_start) > total_size) { 
dev_info(&dd->pcidev->dev, "FAIL on egr bufs: " "reqlen %lx > actual %lx\n", vma->vm_end - vma->vm_start, (unsigned long) total_size); ret = -EINVAL; goto bail; } if (vma->vm_flags & VM_WRITE) { dev_info(&dd->pcidev->dev, "Can't map eager buffers as " "writable (flags=%lx)\n", vma->vm_flags); ret = -EPERM; goto bail; } /* don't allow them to later change to writeable with mprotect */ vma->vm_flags &= ~VM_MAYWRITE; start = vma->vm_start; for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) { pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT; ret = remap_pfn_range(vma, start, pfn, size, vma->vm_page_prot); if (ret < 0) goto bail; } ret = 0; bail: return ret; } /* * ipath_file_vma_fault - handle a VMA page fault. */ static int ipath_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page; page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT)); if (!page) return VM_FAULT_SIGBUS; get_page(page); vmf->page = page; return 0; } static const struct vm_operations_struct ipath_file_vm_ops = { .fault = ipath_file_vma_fault, }; static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, struct ipath_portdata *pd, unsigned subport) { unsigned long len; struct ipath_devdata *dd; void *addr; size_t size; int ret = 0; /* If the port is not shared, all addresses should be physical */ if (!pd->port_subport_cnt) goto bail; dd = pd->port_dd; size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size; /* * Each process has all the subport uregbase, rcvhdrq, and * rcvegrbufs mmapped - as an array for all the processes, * and also separately for this process. 
*/ if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) { addr = pd->subport_uregbase; size = PAGE_SIZE * pd->port_subport_cnt; } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) { addr = pd->subport_rcvhdr_base; size = pd->port_rcvhdrq_size * pd->port_subport_cnt; } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) { addr = pd->subport_rcvegrbuf; size *= pd->port_subport_cnt; } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase + PAGE_SIZE * subport)) { addr = pd->subport_uregbase + PAGE_SIZE * subport; size = PAGE_SIZE; } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base + pd->port_rcvhdrq_size * subport)) { addr = pd->subport_rcvhdr_base + pd->port_rcvhdrq_size * subport; size = pd->port_rcvhdrq_size; } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf + size * subport)) { addr = pd->subport_rcvegrbuf + size * subport; /* rcvegrbufs are read-only on the slave */ if (vma->vm_flags & VM_WRITE) { dev_info(&dd->pcidev->dev, "Can't map eager buffers as " "writable (flags=%lx)\n", vma->vm_flags); ret = -EPERM; goto bail; } /* * Don't allow permission to later change to writeable * with mprotect. */ vma->vm_flags &= ~VM_MAYWRITE; } else { goto bail; } len = vma->vm_end - vma->vm_start; if (len > size) { ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size); ret = -EINVAL; goto bail; } vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; vma->vm_ops = &ipath_file_vm_ops; vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; ret = 1; bail: return ret; } /** * ipath_mmap - mmap various structures into user space * @fp: the file pointer * @vma: the VM area * * We use this to have a shared buffer between the kernel and the user code * for the rcvhdr queue, egr buffers, and the per-port user regs and pio * buffers in the chip. We have the open and close entries so we can bump * the ref count and keep the driver from being unloaded while still mapped. 
*/ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) { struct ipath_portdata *pd; struct ipath_devdata *dd; u64 pgaddr, ureg; unsigned piobufs, piocnt; int ret; pd = port_fp(fp); if (!pd) { ret = -EINVAL; goto bail; } dd = pd->port_dd; /* * This is the ipath_do_user_init() code, mapping the shared buffers * into the user process. The address referred to by vm_pgoff is the * file offset passed via mmap(). For shared ports, this is the * kernel vmalloc() address of the pages to share with the master. * For non-shared or master ports, this is a physical address. * We only do one mmap for each space mapped. */ pgaddr = vma->vm_pgoff << PAGE_SHIFT; /* * Check for 0 in case one of the allocations failed, but user * called mmap anyway. */ if (!pgaddr) { ret = -EINVAL; goto bail; } ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n", (unsigned long long) pgaddr, vma->vm_start, vma->vm_end - vma->vm_start, dd->ipath_unit, pd->port_port, subport_fp(fp)); /* * Physical addresses must fit in 40 bits for our hardware. * Check for kernel virtual addresses first, anything else must * match a HW or memory address. 
*/ ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp)); if (ret) { if (ret > 0) ret = 0; goto bail; } ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port; if (!pd->port_subport_cnt) { /* port is not shared */ piocnt = pd->port_piocnt; piobufs = pd->port_piobufs; } else if (!subport_fp(fp)) { /* caller is the master */ piocnt = (pd->port_piocnt / pd->port_subport_cnt) + (pd->port_piocnt % pd->port_subport_cnt); piobufs = pd->port_piobufs + dd->ipath_palign * (pd->port_piocnt - piocnt); } else { unsigned slave = subport_fp(fp) - 1; /* caller is a slave */ piocnt = pd->port_piocnt / pd->port_subport_cnt; piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave; } if (pgaddr == ureg) ret = mmap_ureg(vma, dd, ureg); else if (pgaddr == piobufs) ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt); else if (pgaddr == dd->ipath_pioavailregs_phys) /* in-memory copy of pioavail registers */ ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, (void *) dd->ipath_pioavailregs_dma, "pioavail registers"); else if (pgaddr == pd->port_rcvegr_phys) ret = mmap_rcvegrbufs(vma, pd); else if (pgaddr == (u64) pd->port_rcvhdrq_phys) /* * The rcvhdrq itself; readonly except on HT (so have * to allow writable mapping), multiple pages, contiguous * from an i/o perspective. 
*/ ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1, pd->port_rcvhdrq, "rcvhdrq"); else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys) /* in-memory copy of rcvhdrq tail register */ ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0, pd->port_rcvhdrtail_kvaddr, "rcvhdrq tail"); else ret = -EINVAL; vma->vm_private_data = NULL; if (ret < 0) dev_info(&dd->pcidev->dev, "Failure %d on off %llx len %lx\n", -ret, (unsigned long long)pgaddr, vma->vm_end - vma->vm_start); bail: return ret; } static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd) { unsigned pollflag = 0; if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) && pd->port_hdrqfull != pd->port_hdrqfull_poll) { pollflag |= POLLIN | POLLRDNORM; pd->port_hdrqfull_poll = pd->port_hdrqfull; } return pollflag; } static unsigned int ipath_poll_urgent(struct ipath_portdata *pd, struct file *fp, struct poll_table_struct *pt) { unsigned pollflag = 0; struct ipath_devdata *dd; dd = pd->port_dd; /* variable access in ipath_poll_hdrqfull() needs this */ rmb(); pollflag = ipath_poll_hdrqfull(pd); if (pd->port_urgent != pd->port_urgent_poll) { pollflag |= POLLIN | POLLRDNORM; pd->port_urgent_poll = pd->port_urgent; } if (!pollflag) { /* this saves a spin_lock/unlock in interrupt handler... */ set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag); /* flush waiting flag so don't miss an event... 
*/ wmb(); poll_wait(fp, &pd->port_wait, pt); } return pollflag; } static unsigned int ipath_poll_next(struct ipath_portdata *pd, struct file *fp, struct poll_table_struct *pt) { u32 head; u32 tail; unsigned pollflag = 0; struct ipath_devdata *dd; dd = pd->port_dd; /* variable access in ipath_poll_hdrqfull() needs this */ rmb(); pollflag = ipath_poll_hdrqfull(pd); head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port); if (pd->port_rcvhdrtail_kvaddr) tail = ipath_get_rcvhdrtail(pd); else tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); if (head != tail) pollflag |= POLLIN | POLLRDNORM; else { /* this saves a spin_lock/unlock in interrupt handler */ set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); /* flush waiting flag so we don't miss an event */ wmb(); set_bit(pd->port_port + dd->ipath_r_intravail_shift, &dd->ipath_rcvctrl); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */ ipath_write_ureg(dd, ur_rcvhdrhead, dd->ipath_rhdrhead_intr_off | head, pd->port_port); poll_wait(fp, &pd->port_wait, pt); } return pollflag; } static unsigned int ipath_poll(struct file *fp, struct poll_table_struct *pt) { struct ipath_portdata *pd; unsigned pollflag; pd = port_fp(fp); if (!pd) pollflag = 0; else if (pd->poll_type & IPATH_POLL_TYPE_URGENT) pollflag = ipath_poll_urgent(pd, fp, pt); else pollflag = ipath_poll_next(pd, fp, pt); return pollflag; } static int ipath_supports_subports(int user_swmajor, int user_swminor) { /* no subport implementation prior to software version 1.3 */ return (user_swmajor > 1) || (user_swminor >= 3); } static int ipath_compatible_subports(int user_swmajor, int user_swminor) { /* this code is written long-hand for clarity */ if (IPATH_USER_SWMAJOR != user_swmajor) { /* no promise of compatibility if major mismatch */ return 0; } if (IPATH_USER_SWMAJOR == 1) { switch (IPATH_USER_SWMINOR) { case 0: case 1: case 2: /* no subport implementation so cannot be compatible 
*/ return 0; case 3: /* 3 is only compatible with itself */ return user_swminor == 3; default: /* >= 4 are compatible (or are expected to be) */ return user_swminor >= 4; } } /* make no promises yet for future major versions */ return 0; } static int init_subports(struct ipath_devdata *dd, struct ipath_portdata *pd, const struct ipath_user_info *uinfo) { int ret = 0; unsigned num_subports; size_t size; /* * If the user is requesting zero subports, * skip the subport allocation. */ if (uinfo->spu_subport_cnt <= 0) goto bail; /* Self-consistency check for ipath_compatible_subports() */ if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) && !ipath_compatible_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR)) { dev_info(&dd->pcidev->dev, "Inconsistent ipath_compatible_subports()\n"); goto bail; } /* Check for subport compatibility */ if (!ipath_compatible_subports(uinfo->spu_userversion >> 16, uinfo->spu_userversion & 0xffff)) { dev_info(&dd->pcidev->dev, "Mismatched user version (%d.%d) and driver " "version (%d.%d) while port sharing. Ensure " "that driver and library are from the same " "release.\n", (int) (uinfo->spu_userversion >> 16), (int) (uinfo->spu_userversion & 0xffff), IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR); goto bail; } if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) { ret = -EINVAL; goto bail; } num_subports = uinfo->spu_subport_cnt; pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports); if (!pd->subport_uregbase) { ret = -ENOMEM; goto bail; } /* Note: pd->port_rcvhdrq_size isn't initialized yet. 
*/ size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize * sizeof(u32), PAGE_SIZE) * num_subports; pd->subport_rcvhdr_base = vzalloc(size); if (!pd->subport_rcvhdr_base) { ret = -ENOMEM; goto bail_ureg; } pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size * num_subports); if (!pd->subport_rcvegrbuf) { ret = -ENOMEM; goto bail_rhdr; } pd->port_subport_cnt = uinfo->spu_subport_cnt; pd->port_subport_id = uinfo->spu_subport_id; pd->active_slaves = 1; set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); goto bail; bail_rhdr: vfree(pd->subport_rcvhdr_base); bail_ureg: vfree(pd->subport_uregbase); pd->subport_uregbase = NULL; bail: return ret; } static int try_alloc_port(struct ipath_devdata *dd, int port, struct file *fp, const struct ipath_user_info *uinfo) { struct ipath_portdata *pd; int ret; if (!(pd = dd->ipath_pd[port])) { void *ptmp; pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL); /* * Allocate memory for use in ipath_tid_update() just once * at open, not per call. Reduces cost of expected send * setup. 
*/ ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) + dd->ipath_rcvtidcnt * sizeof(struct page **), GFP_KERNEL); if (!pd || !ptmp) { ipath_dev_err(dd, "Unable to allocate portdata " "memory, failing open\n"); ret = -ENOMEM; kfree(pd); kfree(ptmp); goto bail; } dd->ipath_pd[port] = pd; dd->ipath_pd[port]->port_port = port; dd->ipath_pd[port]->port_dd = dd; dd->ipath_pd[port]->port_tid_pg_list = ptmp; init_waitqueue_head(&dd->ipath_pd[port]->port_wait); } if (!pd->port_cnt) { pd->userversion = uinfo->spu_userversion; init_user_egr_sizes(pd); if ((ret = init_subports(dd, pd, uinfo)) != 0) goto bail; ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n", current->comm, current->pid, dd->ipath_unit, port); pd->port_cnt = 1; port_fp(fp) = pd; pd->port_pid = get_pid(task_pid(current)); strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); ipath_stats.sps_ports++; ret = 0; } else ret = -EBUSY; bail: return ret; } static inline int usable(struct ipath_devdata *dd) { return dd && (dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase && dd->ipath_lid && !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED | IPATH_LINKUNK)); } static int find_free_port(int unit, struct file *fp, const struct ipath_user_info *uinfo) { struct ipath_devdata *dd = ipath_lookup(unit); int ret, i; if (!dd) { ret = -ENODEV; goto bail; } if (!usable(dd)) { ret = -ENETDOWN; goto bail; } for (i = 1; i < dd->ipath_cfgports; i++) { ret = try_alloc_port(dd, i, fp, uinfo); if (ret != -EBUSY) goto bail; } ret = -EBUSY; bail: return ret; } static int find_best_unit(struct file *fp, const struct ipath_user_info *uinfo) { int ret = 0, i, prefunit = -1, devmax; int maxofallports, npresent, nup; int ndev; devmax = ipath_count_units(&npresent, &nup, &maxofallports); /* * This code is present to allow a knowledgeable person to * specify the layout of processes to processors before opening * this driver, and then we'll assign the process to the "closest" * InfiniPath chip to that processor (we assume 
reasonable connectivity, * for now). This code assumes that if affinity has been set * before this point, that at most one cpu is set; for now this * is reasonable. I check for both cpumask_empty() and cpumask_full(), * in case some kernel variant sets none of the bits when no * affinity is set. 2.6.11 and 12 kernels have all present * cpus set. Some day we'll have to fix it up further to handle * a cpu subset. This algorithm fails for two HT chips connected * in tunnel fashion. Eventually this needs real topology * information. There may be some issues with dual core numbering * as well. This needs more work prior to release. */ if (!cpumask_empty(tsk_cpus_allowed(current)) && !cpumask_full(tsk_cpus_allowed(current))) { int ncpus = num_online_cpus(), curcpu = -1, nset = 0; get_online_cpus(); for_each_online_cpu(i) if (cpumask_test_cpu(i, tsk_cpus_allowed(current))) { ipath_cdbg(PROC, "%s[%u] affinity set for " "cpu %d/%d\n", current->comm, current->pid, i, ncpus); curcpu = i; nset++; } put_online_cpus(); if (curcpu != -1 && nset != ncpus) { if (npresent) { prefunit = curcpu / (ncpus / npresent); ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, " "%d cpus/chip, select unit %d\n", current->comm, current->pid, npresent, ncpus, ncpus / npresent, prefunit); } } } /* * user ports start at 1, kernel port is 0 * For now, we do round-robin access across all chips */ if (prefunit != -1) devmax = prefunit + 1; recheck: for (i = 1; i < maxofallports; i++) { for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax; ndev++) { struct ipath_devdata *dd = ipath_lookup(ndev); if (!usable(dd)) continue; /* can't use this unit */ if (i >= dd->ipath_cfgports) /* * Maxed out on users of this unit. Try * next. 
*/ continue; ret = try_alloc_port(dd, i, fp, uinfo); if (!ret) goto done; } } if (npresent) { if (nup == 0) { ret = -ENETDOWN; ipath_dbg("No ports available (none initialized " "and ready)\n"); } else { if (prefunit > 0) { /* if started above 0, retry from 0 */ ipath_cdbg(PROC, "%s[%u] no ports on prefunit " "%d, clear and re-check\n", current->comm, current->pid, prefunit); devmax = ipath_count_units(NULL, NULL, NULL); prefunit = -1; goto recheck; } ret = -EBUSY; ipath_dbg("No ports available\n"); } } else { ret = -ENXIO; ipath_dbg("No boards found\n"); } done: return ret; } static int find_shared_port(struct file *fp, const struct ipath_user_info *uinfo) { int devmax, ndev, i; int ret = 0; devmax = ipath_count_units(NULL, NULL, NULL); for (ndev = 0; ndev < devmax; ndev++) { struct ipath_devdata *dd = ipath_lookup(ndev); if (!usable(dd)) continue; for (i = 1; i < dd->ipath_cfgports; i++) { struct ipath_portdata *pd = dd->ipath_pd[i]; /* Skip ports which are not yet open */ if (!pd || !pd->port_cnt) continue; /* Skip port if it doesn't match the requested one */ if (pd->port_subport_id != uinfo->spu_subport_id) continue; /* Verify the sharing process matches the master */ if (pd->port_subport_cnt != uinfo->spu_subport_cnt || pd->userversion != uinfo->spu_userversion || pd->port_cnt >= pd->port_subport_cnt) { ret = -EINVAL; goto done; } port_fp(fp) = pd; subport_fp(fp) = pd->port_cnt++; pd->port_subpid[subport_fp(fp)] = get_pid(task_pid(current)); tidcursor_fp(fp) = 0; pd->active_slaves |= 1 << subport_fp(fp); ipath_cdbg(PROC, "%s[%u] %u sharing %s[%u] unit:port %u:%u\n", current->comm, current->pid, subport_fp(fp), pd->port_comm, pid_nr(pd->port_pid), dd->ipath_unit, pd->port_port); ret = 1; goto done; } } done: return ret; } static int ipath_open(struct inode *in, struct file *fp) { /* The real work is performed later in ipath_assign_port() */ fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL); return fp->private_data ? 
0 : -ENOMEM; } /* Get port early, so can set affinity prior to memory allocation */ static int ipath_assign_port(struct file *fp, const struct ipath_user_info *uinfo) { int ret; int i_minor; unsigned swmajor, swminor; /* Check to be sure we haven't already initialized this file */ if (port_fp(fp)) { ret = -EINVAL; goto done; } /* for now, if major version is different, bail */ swmajor = uinfo->spu_userversion >> 16; if (swmajor != IPATH_USER_SWMAJOR) { ipath_dbg("User major version %d not same as driver " "major %d\n", uinfo->spu_userversion >> 16, IPATH_USER_SWMAJOR); ret = -ENODEV; goto done; } swminor = uinfo->spu_userversion & 0xffff; if (swminor != IPATH_USER_SWMINOR) ipath_dbg("User minor version %d not same as driver " "minor %d\n", swminor, IPATH_USER_SWMINOR); mutex_lock(&ipath_mutex); if (ipath_compatible_subports(swmajor, swminor) && uinfo->spu_subport_cnt && (ret = find_shared_port(fp, uinfo))) { if (ret > 0) ret = 0; goto done_chk_sdma; } i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE; ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n", (long)fp->f_path.dentry->d_inode->i_rdev, i_minor); if (i_minor) ret = find_free_port(i_minor - 1, fp, uinfo); else ret = find_best_unit(fp, uinfo); done_chk_sdma: if (!ret) { struct ipath_filedata *fd = fp->private_data; const struct ipath_portdata *pd = fd->pd; const struct ipath_devdata *dd = pd->port_dd; fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev, dd->ipath_unit, pd->port_port, fd->subport); if (!fd->pq) ret = -ENOMEM; } mutex_unlock(&ipath_mutex); done: return ret; } static int ipath_do_user_init(struct file *fp, const struct ipath_user_info *uinfo) { int ret; struct ipath_portdata *pd = port_fp(fp); struct ipath_devdata *dd; u32 head32; /* Subports don't need to initialize anything since master did it. 
*/ if (subport_fp(fp)) { ret = wait_event_interruptible(pd->port_wait, !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag)); goto done; } dd = pd->port_dd; if (uinfo->spu_rcvhdrsize) { ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize); if (ret) goto done; } /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */ /* some ports may get extra buffers, calculate that here */ if (pd->port_port <= dd->ipath_ports_extrabuf) pd->port_piocnt = dd->ipath_pbufsport + 1; else pd->port_piocnt = dd->ipath_pbufsport; /* for right now, kernel piobufs are at end, so port 1 is at 0 */ if (pd->port_port <= dd->ipath_ports_extrabuf) pd->port_pio_base = (dd->ipath_pbufsport + 1) * (pd->port_port - 1); else pd->port_pio_base = dd->ipath_ports_extrabuf + dd->ipath_pbufsport * (pd->port_port - 1); pd->port_piobufs = dd->ipath_piobufbase + pd->port_pio_base * dd->ipath_palign; ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u," " first pio %u\n", pd->port_port, pd->port_piobufs, pd->port_piocnt, pd->port_pio_base); ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0); /* * Now allocate the rcvhdr Q and eager TIDs; skip the TID * array for time being. If pd->port_port > chip-supported, * we need to do extra stuff here to handle by handling overflow * through port 0, someday */ ret = ipath_create_rcvhdrq(dd, pd); if (!ret) ret = ipath_create_user_egr(pd); if (ret) goto done; /* * set the eager head register for this port to the current values * of the tail pointers, since we don't know if they were * updated on last use of the port. */ head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port); ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port); pd->port_lastrcvhdrqtail = -1; ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n", pd->port_port, head32); pd->port_tidcursor = 0; /* start at beginning after open */ /* initialize poll variables... 
*/ pd->port_urgent = 0; pd->port_urgent_poll = 0; pd->port_hdrqfull_poll = pd->port_hdrqfull; /* * Now enable the port for receive. * For chips that are set to DMA the tail register to memory * when they change (and when the update bit transitions from * 0 to 1. So for those chips, we turn it off and then back on. * This will (very briefly) affect any other open ports, but the * duration is very short, and therefore isn't an issue. We * explicitly set the in-memory tail copy to 0 beforehand, so we * don't have to wait to be sure the DMA update has happened * (chip resets head/tail to 0 on transition to enable). */ set_bit(dd->ipath_r_portenable_shift + pd->port_port, &dd->ipath_rcvctrl); if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) { if (pd->port_rcvhdrtail_kvaddr) ipath_clear_rcvhdrtail(pd); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl & ~(1ULL << dd->ipath_r_tailupd_shift)); } ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); /* Notify any waiting slaves */ if (pd->port_subport_cnt) { clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag); wake_up(&pd->port_wait); } done: return ret; } /** * unlock_exptid - unlock any expected TID entries port still had in use * @pd: port * * We don't actually update the chip here, because we do a bulk update * below, using ipath_f_clear_tids. 
*/ static void unlock_expected_tids(struct ipath_portdata *pd) { struct ipath_devdata *dd = pd->port_dd; int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt; int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt; ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n", pd->port_port); for (i = port_tidbase; i < maxtid; i++) { struct page *ps = dd->ipath_pageshadow[i]; if (!ps) continue; dd->ipath_pageshadow[i] = NULL; pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i], PAGE_SIZE, PCI_DMA_FROMDEVICE); ipath_release_user_pages_on_close(&ps, 1); cnt++; ipath_stats.sps_pageunlocks++; } if (cnt) ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n", pd->port_port, cnt); if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks) ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n", (unsigned long long) ipath_stats.sps_pagelocks, (unsigned long long) ipath_stats.sps_pageunlocks); } static int ipath_close(struct inode *in, struct file *fp) { int ret = 0; struct ipath_filedata *fd; struct ipath_portdata *pd; struct ipath_devdata *dd; unsigned long flags; unsigned port; struct pid *pid; ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", (long)in->i_rdev, fp->private_data); mutex_lock(&ipath_mutex); fd = fp->private_data; fp->private_data = NULL; pd = fd->pd; if (!pd) { mutex_unlock(&ipath_mutex); goto bail; } dd = pd->port_dd; /* drain user sdma queue */ ipath_user_sdma_queue_drain(dd, fd->pq); ipath_user_sdma_queue_destroy(fd->pq); if (--pd->port_cnt) { /* * XXX If the master closes the port before the slave(s), * revoke the mmap for the eager receive queue so * the slave(s) don't wait for receive data forever. 
*/ pd->active_slaves &= ~(1 << fd->subport); put_pid(pd->port_subpid[fd->subport]); pd->port_subpid[fd->subport] = NULL; mutex_unlock(&ipath_mutex); goto bail; } /* early; no interrupt users after this */ spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); port = pd->port_port; dd->ipath_pd[port] = NULL; pid = pd->port_pid; pd->port_pid = NULL; spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); if (pd->port_rcvwait_to || pd->port_piowait_to || pd->port_rcvnowait || pd->port_pionowait) { ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; " "%u rcv %u, pio already\n", pd->port_port, pd->port_rcvwait_to, pd->port_piowait_to, pd->port_rcvnowait, pd->port_pionowait); pd->port_rcvwait_to = pd->port_piowait_to = pd->port_rcvnowait = pd->port_pionowait = 0; } if (pd->port_flag) { ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n", pd->port_port, pd->port_flag); pd->port_flag = 0; } if (dd->ipath_kregbase) { /* atomically clear receive enable port and intr avail. */ clear_bit(dd->ipath_r_portenable_shift + port, &dd->ipath_rcvctrl); clear_bit(pd->port_port + dd->ipath_r_intravail_shift, &dd->ipath_rcvctrl); ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); /* and read back from chip to be sure that nothing * else is in flight when we do the rest */ (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); /* clean up the pkeys for this port user */ ipath_clean_part_key(pd, dd); /* * be paranoid, and never write 0's to these, just use an * unused part of the port 0 tail page. Of course, * rcvhdraddr points to a large chunk of memory, so this * could still trash things, but at least it won't trash * page 0, and by disabling the port, it should stop "soon", * even if a packet or two is in already in flight after we * disabled the port. 
*/ ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr, port, dd->ipath_dummy_hdrq_phys); ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr, pd->port_port, dd->ipath_dummy_hdrq_phys); ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt); ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 1); dd->ipath_f_clear_tids(dd, pd->port_port); if (dd->ipath_pageshadow) unlock_expected_tids(pd); ipath_stats.sps_ports--; ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", pd->port_comm, pid_nr(pid), dd->ipath_unit, port); } put_pid(pid); mutex_unlock(&ipath_mutex); ipath_free_pddata(dd, pd); /* after releasing the mutex */ bail: kfree(fd); return ret; } static int ipath_port_info(struct ipath_portdata *pd, u16 subport, struct ipath_port_info __user *uinfo) { struct ipath_port_info info; int nup; int ret; size_t sz; (void) ipath_count_units(NULL, &nup, NULL); info.num_active = nup; info.unit = pd->port_dd->ipath_unit; info.port = pd->port_port; info.subport = subport; /* Don't return new fields if old library opened the port. */ if (ipath_supports_subports(pd->userversion >> 16, pd->userversion & 0xffff)) { /* Number of user ports available for this device. 
*/ info.num_ports = pd->port_dd->ipath_cfgports - 1; info.num_subports = pd->port_subport_cnt; sz = sizeof(info); } else sz = sizeof(info) - 2 * sizeof(u16); if (copy_to_user(uinfo, &info, sz)) { ret = -EFAULT; goto bail; } ret = 0; bail: return ret; } static int ipath_get_slave_info(struct ipath_portdata *pd, void __user *slave_mask_addr) { int ret = 0; if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32))) ret = -EFAULT; return ret; } static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq, u32 __user *inflightp) { const u32 val = ipath_user_sdma_inflight_counter(pq); if (put_user(val, inflightp)) return -EFAULT; return 0; } static int ipath_sdma_get_complete(struct ipath_devdata *dd, struct ipath_user_sdma_queue *pq, u32 __user *completep) { u32 val; int err; err = ipath_user_sdma_make_progress(dd, pq); if (err < 0) return err; val = ipath_user_sdma_complete_counter(pq); if (put_user(val, completep)) return -EFAULT; return 0; } static ssize_t ipath_write(struct file *fp, const char __user *data, size_t count, loff_t *off) { const struct ipath_cmd __user *ucmd; struct ipath_portdata *pd; const void __user *src; size_t consumed, copy; struct ipath_cmd cmd; ssize_t ret = 0; void *dest; if (count < sizeof(cmd.type)) { ret = -EINVAL; goto bail; } ucmd = (const struct ipath_cmd __user *) data; if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) { ret = -EFAULT; goto bail; } consumed = sizeof(cmd.type); switch (cmd.type) { case IPATH_CMD_ASSIGN_PORT: case __IPATH_CMD_USER_INIT: case IPATH_CMD_USER_INIT: copy = sizeof(cmd.cmd.user_info); dest = &cmd.cmd.user_info; src = &ucmd->cmd.user_info; break; case IPATH_CMD_RECV_CTRL: copy = sizeof(cmd.cmd.recv_ctrl); dest = &cmd.cmd.recv_ctrl; src = &ucmd->cmd.recv_ctrl; break; case IPATH_CMD_PORT_INFO: copy = sizeof(cmd.cmd.port_info); dest = &cmd.cmd.port_info; src = &ucmd->cmd.port_info; break; case IPATH_CMD_TID_UPDATE: case IPATH_CMD_TID_FREE: copy = sizeof(cmd.cmd.tid_info); dest = 
&cmd.cmd.tid_info; src = &ucmd->cmd.tid_info; break; case IPATH_CMD_SET_PART_KEY: copy = sizeof(cmd.cmd.part_key); dest = &cmd.cmd.part_key; src = &ucmd->cmd.part_key; break; case __IPATH_CMD_SLAVE_INFO: copy = sizeof(cmd.cmd.slave_mask_addr); dest = &cmd.cmd.slave_mask_addr; src = &ucmd->cmd.slave_mask_addr; break; case IPATH_CMD_PIOAVAILUPD: // force an update of PIOAvail reg copy = 0; src = NULL; dest = NULL; break; case IPATH_CMD_POLL_TYPE: copy = sizeof(cmd.cmd.poll_type); dest = &cmd.cmd.poll_type; src = &ucmd->cmd.poll_type; break; case IPATH_CMD_ARMLAUNCH_CTRL: copy = sizeof(cmd.cmd.armlaunch_ctrl); dest = &cmd.cmd.armlaunch_ctrl; src = &ucmd->cmd.armlaunch_ctrl; break; case IPATH_CMD_SDMA_INFLIGHT: copy = sizeof(cmd.cmd.sdma_inflight); dest = &cmd.cmd.sdma_inflight; src = &ucmd->cmd.sdma_inflight; break; case IPATH_CMD_SDMA_COMPLETE: copy = sizeof(cmd.cmd.sdma_complete); dest = &cmd.cmd.sdma_complete; src = &ucmd->cmd.sdma_complete; break; default: ret = -EINVAL; goto bail; } if (copy) { if ((count - consumed) < copy) { ret = -EINVAL; goto bail; } if (copy_from_user(dest, src, copy)) { ret = -EFAULT; goto bail; } consumed += copy; } pd = port_fp(fp); if (!pd && cmd.type != __IPATH_CMD_USER_INIT && cmd.type != IPATH_CMD_ASSIGN_PORT) { ret = -EINVAL; goto bail; } switch (cmd.type) { case IPATH_CMD_ASSIGN_PORT: ret = ipath_assign_port(fp, &cmd.cmd.user_info); if (ret) goto bail; break; case __IPATH_CMD_USER_INIT: /* backwards compatibility, get port first */ ret = ipath_assign_port(fp, &cmd.cmd.user_info); if (ret) goto bail; /* and fall through to current version. 
*/ case IPATH_CMD_USER_INIT: ret = ipath_do_user_init(fp, &cmd.cmd.user_info); if (ret) goto bail; ret = ipath_get_base_info( fp, (void __user *) (unsigned long) cmd.cmd.user_info.spu_base_info, cmd.cmd.user_info.spu_base_info_size); break; case IPATH_CMD_RECV_CTRL: ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl); break; case IPATH_CMD_PORT_INFO: ret = ipath_port_info(pd, subport_fp(fp), (struct ipath_port_info __user *) (unsigned long) cmd.cmd.port_info); break; case IPATH_CMD_TID_UPDATE: ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info); break; case IPATH_CMD_TID_FREE: ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info); break; case IPATH_CMD_SET_PART_KEY: ret = ipath_set_part_key(pd, cmd.cmd.part_key); break; case __IPATH_CMD_SLAVE_INFO: ret = ipath_get_slave_info(pd, (void __user *) (unsigned long) cmd.cmd.slave_mask_addr); break; case IPATH_CMD_PIOAVAILUPD: ipath_force_pio_avail_update(pd->port_dd); break; case IPATH_CMD_POLL_TYPE: pd->poll_type = cmd.cmd.poll_type; break; case IPATH_CMD_ARMLAUNCH_CTRL: if (cmd.cmd.armlaunch_ctrl) ipath_enable_armlaunch(pd->port_dd); else ipath_disable_armlaunch(pd->port_dd); break; case IPATH_CMD_SDMA_INFLIGHT: ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp), (u32 __user *) (unsigned long) cmd.cmd.sdma_inflight); break; case IPATH_CMD_SDMA_COMPLETE: ret = ipath_sdma_get_complete(pd->port_dd, user_sdma_queue_fp(fp), (u32 __user *) (unsigned long) cmd.cmd.sdma_complete); break; } if (ret >= 0) ret = consumed; bail: return ret; } static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov, unsigned long dim, loff_t off) { struct file *filp = iocb->ki_filp; struct ipath_filedata *fp = filp->private_data; struct ipath_portdata *pd = port_fp(filp); struct ipath_user_sdma_queue *pq = fp->pq; if (!dim) return -EINVAL; return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim); } static struct class *ipath_class; static int init_cdev(int minor, char *name, const struct file_operations *fops, 
struct cdev **cdevp, struct device **devp) { const dev_t dev = MKDEV(IPATH_MAJOR, minor); struct cdev *cdev = NULL; struct device *device = NULL; int ret; cdev = cdev_alloc(); if (!cdev) { printk(KERN_ERR IPATH_DRV_NAME ": Could not allocate cdev for minor %d, %s\n", minor, name); ret = -ENOMEM; goto done; } cdev->owner = THIS_MODULE; cdev->ops = fops; kobject_set_name(&cdev->kobj, name); ret = cdev_add(cdev, dev, 1); if (ret < 0) { printk(KERN_ERR IPATH_DRV_NAME ": Could not add cdev for minor %d, %s (err %d)\n", minor, name, -ret); goto err_cdev; } device = device_create(ipath_class, NULL, dev, NULL, name); if (IS_ERR(device)) { ret = PTR_ERR(device); printk(KERN_ERR IPATH_DRV_NAME ": Could not create " "device for minor %d, %s (err %d)\n", minor, name, -ret); goto err_cdev; } goto done; err_cdev: cdev_del(cdev); cdev = NULL; done: if (ret >= 0) { *cdevp = cdev; *devp = device; } else { *cdevp = NULL; *devp = NULL; } return ret; } int ipath_cdev_init(int minor, char *name, const struct file_operations *fops, struct cdev **cdevp, struct device **devp) { return init_cdev(minor, name, fops, cdevp, devp); } static void cleanup_cdev(struct cdev **cdevp, struct device **devp) { struct device *dev = *devp; if (dev) { device_unregister(dev); *devp = NULL; } if (*cdevp) { cdev_del(*cdevp); *cdevp = NULL; } } void ipath_cdev_cleanup(struct cdev **cdevp, struct device **devp) { cleanup_cdev(cdevp, devp); } static struct cdev *wildcard_cdev; static struct device *wildcard_dev; static const dev_t dev = MKDEV(IPATH_MAJOR, 0); static int user_init(void) { int ret; ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME); if (ret < 0) { printk(KERN_ERR IPATH_DRV_NAME ": Could not register " "chrdev region (err %d)\n", -ret); goto done; } ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME); if (IS_ERR(ipath_class)) { ret = PTR_ERR(ipath_class); printk(KERN_ERR IPATH_DRV_NAME ": Could not create " "device class (err %d)\n", -ret); goto bail; } goto done; bail: 
unregister_chrdev_region(dev, IPATH_NMINORS); done: return ret; } static void user_cleanup(void) { if (ipath_class) { class_destroy(ipath_class); ipath_class = NULL; } unregister_chrdev_region(dev, IPATH_NMINORS); } static atomic_t user_count = ATOMIC_INIT(0); static atomic_t user_setup = ATOMIC_INIT(0); int ipath_user_add(struct ipath_devdata *dd) { char name[10]; int ret; if (atomic_inc_return(&user_count) == 1) { ret = user_init(); if (ret < 0) { ipath_dev_err(dd, "Unable to set up user support: " "error %d\n", -ret); goto bail; } ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev, &wildcard_dev); if (ret < 0) { ipath_dev_err(dd, "Could not create wildcard " "minor: error %d\n", -ret); goto bail_user; } atomic_set(&user_setup, 1); } snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit); ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops, &dd->user_cdev, &dd->user_dev); if (ret < 0) ipath_dev_err(dd, "Could not create user minor %d, %s\n", dd->ipath_unit + 1, name); goto bail; bail_user: user_cleanup(); bail: return ret; } void ipath_user_remove(struct ipath_devdata *dd) { cleanup_cdev(&dd->user_cdev, &dd->user_dev); if (atomic_dec_return(&user_count) == 0) { if (atomic_read(&user_setup) == 0) goto bail; cleanup_cdev(&wildcard_cdev, &wildcard_dev); user_cleanup(); atomic_set(&user_setup, 0); } bail: return; }
gpl-2.0
jetonbacaj/SomeKernel_G920P_PB6
arch/s390/kernel/perf_event.c
6689
3016
/* * Performance event support for s390x * * Copyright IBM Corp. 2012 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2 only) * as published by the Free Software Foundation. */ #define KMSG_COMPONENT "perf" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/percpu.h> #include <linux/export.h> #include <asm/irq.h> #include <asm/cpu_mf.h> #include <asm/lowcore.h> #include <asm/processor.h> const char *perf_pmu_name(void) { if (cpum_cf_avail() || cpum_sf_avail()) return "CPU-measurement facilities (CPUMF)"; return "pmu"; } EXPORT_SYMBOL(perf_pmu_name); int perf_num_counters(void) { int num = 0; if (cpum_cf_avail()) num += PERF_CPUM_CF_MAX_CTR; return num; } EXPORT_SYMBOL(perf_num_counters); void perf_event_print_debug(void) { struct cpumf_ctr_info cf_info; unsigned long flags; int cpu; if (!cpum_cf_avail()) return; local_irq_save(flags); cpu = smp_processor_id(); memset(&cf_info, 0, sizeof(cf_info)); if (!qctri(&cf_info)) { pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n", cpu, cf_info.cfvn, cf_info.csvn, cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl); print_hex_dump_bytes("CPUMF Query: ", DUMP_PREFIX_OFFSET, &cf_info, sizeof(cf_info)); } local_irq_restore(flags); } /* See also arch/s390/kernel/traps.c */ static unsigned long __store_trace(struct perf_callchain_entry *entry, unsigned long sp, unsigned long low, unsigned long high) { struct stack_frame *sf; struct pt_regs *regs; while (1) { sp = sp & PSW_ADDR_INSN; if (sp < low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN); /* Follow the backchain. 
*/ while (1) { low = sp; sp = sf->back_chain & PSW_ADDR_INSN; if (!sp) break; if (sp <= low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN); } /* Zero backchain detected, check for interrupt frame. */ sp = (unsigned long) (sf + 1); if (sp <= low || sp > high - sizeof(*regs)) return sp; regs = (struct pt_regs *) sp; perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN); low = sp; sp = regs->gprs[15]; } } void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long head; struct stack_frame *head_sf; if (user_mode(regs)) return; head = regs->gprs[15]; head_sf = (struct stack_frame *) head; if (!head_sf || !head_sf->back_chain) return; head = head_sf->back_chain; head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE, S390_lowcore.async_stack); __store_trace(entry, head, S390_lowcore.thread_info, S390_lowcore.thread_info + THREAD_SIZE); }
gpl-2.0
Altaf-Mahdi/i9505
lib/smp_processor_id.c
7201
1122
/*
 * lib/smp_processor_id.c
 *
 * DEBUG_PREEMPT variant of smp_processor_id().
 */
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>

/*
 * Return the current CPU id, warning (once per ratelimit window) when
 * called from a context where the result could be stale: preemptible
 * code that is not pinned to a single CPU.
 *
 * The function is 'notrace' and uses only the *_notrace preempt
 * helpers below, since it is itself called from tracing and
 * scheduling paths and must not recurse into them.
 */
notrace unsigned int debug_smp_processor_id(void)
{
	unsigned long preempt_count = preempt_count();
	int this_cpu = raw_smp_processor_id();

	/* Non-preemptible context: the CPU cannot change under us. */
	if (likely(preempt_count))
		goto out;

	/* Hard IRQs off also pins us to this CPU. */
	if (irqs_disabled())
		goto out;

	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
		goto out;

	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state != SYSTEM_RUNNING)
		goto out;

	/*
	 * Avoid recursion:
	 */
	preempt_disable_notrace();

	if (!printk_ratelimit())
		goto out_enable;

	/*
	 * preempt_count() - 1 undoes the preempt_disable_notrace() above
	 * so the report shows the caller's original preempt count.
	 */
	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
			"code: %s/%d\n",
		preempt_count() - 1, current->comm, current->pid);
	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

out_enable:
	preempt_enable_no_resched_notrace();
out:
	return this_cpu;
}

EXPORT_SYMBOL(debug_smp_processor_id);
gpl-2.0
championswimmer/android_kernel_sony_seagull
drivers/pci/pcie/aer/aerdrv.c
8225
12263
/* * drivers/pci/pcie/aer/aerdrv.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * This file implements the AER root port service driver. The driver will * register an irq handler. When root port triggers an AER interrupt, the irq * handler will collect root port status and schedule a work. * * Copyright (C) 2006 Intel Corp. * Tom Long Nguyen (tom.l.nguyen@intel.com) * Zhang Yanmin (yanmin.zhang@intel.com) * */ #include <linux/module.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pm.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/pcieport_if.h> #include <linux/slab.h> #include "aerdrv.h" #include "../../pci.h" /* * Version Information */ #define DRIVER_VERSION "v1.0" #define DRIVER_AUTHOR "tom.l.nguyen@intel.com" #define DRIVER_DESC "Root Port Advanced Error Reporting Driver" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static int __devinit aer_probe(struct pcie_device *dev); static void aer_remove(struct pcie_device *dev); static pci_ers_result_t aer_error_detected(struct pci_dev *dev, enum pci_channel_state error); static void aer_error_resume(struct pci_dev *dev); static pci_ers_result_t aer_root_reset(struct pci_dev *dev); static struct pci_error_handlers aer_error_handlers = { .error_detected = aer_error_detected, .resume = aer_error_resume, }; static struct pcie_port_service_driver aerdriver = { .name = "aer", .port_type = PCI_EXP_TYPE_ROOT_PORT, .service = PCIE_PORT_SERVICE_AER, .probe = aer_probe, .remove = aer_remove, .err_handler = &aer_error_handlers, .reset_link = aer_root_reset, }; static int pcie_aer_disable; void pci_no_aer(void) { pcie_aer_disable = 1; /* has priority over 'forceload' */ } bool pci_aer_available(void) { return 
!pcie_aer_disable && pci_msi_enabled(); } static int set_device_error_reporting(struct pci_dev *dev, void *data) { bool enable = *((bool *)data); if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) || (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) || (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) { if (enable) pci_enable_pcie_error_reporting(dev); else pci_disable_pcie_error_reporting(dev); } if (enable) pcie_set_ecrc_checking(dev); return 0; } /** * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports. * @dev: pointer to root port's pci_dev data structure * @enable: true = enable error reporting, false = disable error reporting. */ static void set_downstream_devices_error_reporting(struct pci_dev *dev, bool enable) { set_device_error_reporting(dev, &enable); if (!dev->subordinate) return; pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); } /** * aer_enable_rootport - enable Root Port's interrupts when receiving messages * @rpc: pointer to a Root Port data structure * * Invoked when PCIe bus loads AER service driver. 
*/ static void aer_enable_rootport(struct aer_rpc *rpc) { struct pci_dev *pdev = rpc->rpd->port; int pos, aer_pos; u16 reg16; u32 reg32; pos = pci_pcie_cap(pdev); /* Clear PCIe Capability's Device Status */ pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16); pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); /* Disable system error generation in response to error messages */ pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16); reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); /* Clear error status */ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32); pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32); pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32); pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32); pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32); pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32); /* * Enable error reporting for the root port device and downstream port * devices. */ set_downstream_devices_error_reporting(pdev, true); /* Enable Root Port's interrupt in response to error messages */ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32); reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32); } /** * aer_disable_rootport - disable Root Port's interrupts when receiving messages * @rpc: pointer to a Root Port data structure * * Invoked when PCIe bus unloads AER service driver. */ static void aer_disable_rootport(struct aer_rpc *rpc) { struct pci_dev *pdev = rpc->rpd->port; u32 reg32; int pos; /* * Disable error reporting for the root port device and downstream port * devices. 
*/ set_downstream_devices_error_reporting(pdev, false); pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); /* Disable Root's interrupt in response to error messages */ pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32); reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32); /* Clear Root's error status reg */ pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32); pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32); } /** * aer_irq - Root Port's ISR * @irq: IRQ assigned to Root Port * @context: pointer to Root Port data structure * * Invoked when Root Port detects AER messages. */ irqreturn_t aer_irq(int irq, void *context) { unsigned int status, id; struct pcie_device *pdev = (struct pcie_device *)context; struct aer_rpc *rpc = get_service_data(pdev); int next_prod_idx; unsigned long flags; int pos; pos = pci_find_ext_capability(pdev->port, PCI_EXT_CAP_ID_ERR); /* * Must lock access to Root Error Status Reg, Root Error ID Reg, * and Root error producer/consumer index */ spin_lock_irqsave(&rpc->e_lock, flags); /* Read error status */ pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status); if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) { spin_unlock_irqrestore(&rpc->e_lock, flags); return IRQ_NONE; } /* Read error source and clear error status */ pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id); pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status); /* Store error source for later DPC handler */ next_prod_idx = rpc->prod_idx + 1; if (next_prod_idx == AER_ERROR_SOURCES_MAX) next_prod_idx = 0; if (next_prod_idx == rpc->cons_idx) { /* * Error Storm Condition - possibly the same error occurred. * Drop the error. 
*/ spin_unlock_irqrestore(&rpc->e_lock, flags); return IRQ_HANDLED; } rpc->e_sources[rpc->prod_idx].status = status; rpc->e_sources[rpc->prod_idx].id = id; rpc->prod_idx = next_prod_idx; spin_unlock_irqrestore(&rpc->e_lock, flags); /* Invoke DPC handler */ schedule_work(&rpc->dpc_handler); return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(aer_irq); /** * aer_alloc_rpc - allocate Root Port data structure * @dev: pointer to the pcie_dev data structure * * Invoked when Root Port's AER service is loaded. */ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) { struct aer_rpc *rpc; rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL); if (!rpc) return NULL; /* Initialize Root lock access, e_lock, to Root Error Status Reg */ spin_lock_init(&rpc->e_lock); rpc->rpd = dev; INIT_WORK(&rpc->dpc_handler, aer_isr); mutex_init(&rpc->rpc_mutex); init_waitqueue_head(&rpc->wait_release); /* Use PCIe bus function to store rpc into PCIe device */ set_service_data(dev, rpc); return rpc; } /** * aer_remove - clean up resources * @dev: pointer to the pcie_dev data structure * * Invoked when PCI Express bus unloads or AER probe fails. */ static void aer_remove(struct pcie_device *dev) { struct aer_rpc *rpc = get_service_data(dev); if (rpc) { /* If register interrupt service, it must be free. */ if (rpc->isr) free_irq(dev->irq, dev); wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); aer_disable_rootport(rpc); kfree(rpc); set_service_data(dev, NULL); } } /** * aer_probe - initialize resources * @dev: pointer to the pcie_dev data structure * @id: pointer to the service id data structure * * Invoked when PCI Express bus loads AER service driver. 
*/ static int __devinit aer_probe(struct pcie_device *dev) { int status; struct aer_rpc *rpc; struct device *device = &dev->device; /* Init */ status = aer_init(dev); if (status) return status; /* Alloc rpc data structure */ rpc = aer_alloc_rpc(dev); if (!rpc) { dev_printk(KERN_DEBUG, device, "alloc rpc failed\n"); aer_remove(dev); return -ENOMEM; } /* Request IRQ ISR */ status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev); if (status) { dev_printk(KERN_DEBUG, device, "request IRQ failed\n"); aer_remove(dev); return status; } rpc->isr = 1; aer_enable_rootport(rpc); return status; } /** * aer_root_reset - reset link on Root Port * @dev: pointer to Root Port's pci_dev data structure * * Invoked by Port Bus driver when performing link reset at Root Port. */ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) { u32 reg32; int pos; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); /* Disable Root's interrupt in response to error messages */ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32); reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); aer_do_secondary_bus_reset(dev); dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n"); /* Clear Root Error Status */ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32); pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32); /* Enable Root Port's interrupt in response to error messages */ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32); reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); return PCI_ERS_RESULT_RECOVERED; } /** * aer_error_detected - update severity status * @dev: pointer to Root Port's pci_dev data structure * @error: error severity being notified by port bus * * Invoked by Port Bus driver during error recovery. 
*/ static pci_ers_result_t aer_error_detected(struct pci_dev *dev, enum pci_channel_state error) { /* Root Port has no impact. Always recovers. */ return PCI_ERS_RESULT_CAN_RECOVER; } /** * aer_error_resume - clean up corresponding error status bits * @dev: pointer to Root Port's pci_dev data structure * * Invoked by Port Bus driver during nonfatal recovery. */ static void aer_error_resume(struct pci_dev *dev) { int pos; u32 status, mask; u16 reg16; /* Clean up Root device status */ pos = pci_pcie_cap(dev); pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &reg16); pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); /* Clean AER Root Error Status */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); if (dev->error_state == pci_channel_io_normal) status &= ~mask; /* Clear corresponding nonfatal bits */ else status &= mask; /* Clear corresponding fatal bits */ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); } /** * aer_service_init - register AER root service driver * * Invoked when AER root service driver is loaded. */ static int __init aer_service_init(void) { if (!pci_aer_available() || aer_acpi_firmware_first()) return -ENXIO; return pcie_port_service_register(&aerdriver); } /** * aer_service_exit - unregister AER root service driver * * Invoked when AER root service driver is unloaded. */ static void __exit aer_service_exit(void) { pcie_port_service_unregister(&aerdriver); } module_init(aer_service_init); module_exit(aer_service_exit);
gpl-2.0
zymphad/leanKernel-angler
arch/parisc/lib/bitops.c
8737
1819
/* * bitops.c: atomic operations which got too long to be inlined all over * the place. * * Copyright 1999 Philipp Rumpf (prumpf@tux.org) * Copyright 2000 Grant Grundler (grundler@cup.hp.com) */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/atomic.h> #ifdef CONFIG_SMP arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED }; #endif #ifdef CONFIG_64BIT unsigned long __xchg64(unsigned long x, unsigned long *ptr) { unsigned long temp, flags; _atomic_spin_lock_irqsave(ptr, flags); temp = *ptr; *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return temp; } #endif unsigned long __xchg32(int x, int *ptr) { unsigned long flags; long temp; _atomic_spin_lock_irqsave(ptr, flags); temp = (long) *ptr; /* XXX - sign extension wanted? */ *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)temp; } unsigned long __xchg8(char x, char *ptr) { unsigned long flags; long temp; _atomic_spin_lock_irqsave(ptr, flags); temp = (long) *ptr; /* XXX - sign extension wanted? */ *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)temp; } #ifdef CONFIG_64BIT unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new) { unsigned long flags; unsigned long prev; _atomic_spin_lock_irqsave(ptr, flags); if ((prev = *ptr) == old) *ptr = new; _atomic_spin_unlock_irqrestore(ptr, flags); return prev; } #endif unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new) { unsigned long flags; unsigned int prev; _atomic_spin_lock_irqsave(ptr, flags); if ((prev = *ptr) == old) *ptr = new; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; }
gpl-2.0
nocoast/android_kernel_lge_g2
arch/score/kernel/signal.c
8993
10110
/* * arch/score/kernel/signal.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Chen Liqin <liqin.chen@sunplusct.com> * Lennox Wu <lennox.wu@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/signal.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/syscalls.h> #include <asm/ucontext.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) struct rt_sigframe { u32 rs_ass[4]; /* argument save space */ u32 rs_code[2]; /* signal trampoline */ struct siginfo rs_info; struct ucontext rs_uc; }; static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; unsigned long reg; reg = regs->cp0_epc; err |= __put_user(reg, &sc->sc_pc); err |= __put_user(regs->cp0_psr, &sc->sc_psr); err |= __put_user(regs->cp0_condition, &sc->sc_condition); #define save_gp_reg(i) { \ reg = regs->regs[i]; \ err |= __put_user(reg, &sc->sc_regs[i]); \ } while (0) save_gp_reg(0); save_gp_reg(1); save_gp_reg(2); save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6); save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10); save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14); save_gp_reg(15); 
save_gp_reg(16); save_gp_reg(17); save_gp_reg(18); save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22); save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26); save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); #undef save_gp_reg reg = regs->ceh; err |= __put_user(reg, &sc->sc_mdceh); reg = regs->cel; err |= __put_user(reg, &sc->sc_mdcel); err |= __put_user(regs->cp0_ecr, &sc->sc_ecr); err |= __put_user(regs->cp0_ema, &sc->sc_ema); return err; } static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; u32 reg; err |= __get_user(regs->cp0_epc, &sc->sc_pc); err |= __get_user(regs->cp0_condition, &sc->sc_condition); err |= __get_user(reg, &sc->sc_mdceh); regs->ceh = (int) reg; err |= __get_user(reg, &sc->sc_mdcel); regs->cel = (int) reg; err |= __get_user(reg, &sc->sc_psr); regs->cp0_psr = (int) reg; err |= __get_user(reg, &sc->sc_ecr); regs->cp0_ecr = (int) reg; err |= __get_user(reg, &sc->sc_ema); regs->cp0_ema = (int) reg; #define restore_gp_reg(i) do { \ err |= __get_user(reg, &sc->sc_regs[i]); \ regs->regs[i] = reg; \ } while (0) restore_gp_reg(0); restore_gp_reg(1); restore_gp_reg(2); restore_gp_reg(3); restore_gp_reg(4); restore_gp_reg(5); restore_gp_reg(6); restore_gp_reg(7); restore_gp_reg(8); restore_gp_reg(9); restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12); restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15); restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18); restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21); restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24); restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27); restore_gp_reg(28); restore_gp_reg(29); #undef restore_gp_reg return err; } /* * Determine which stack to use.. 
*/ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long sp; /* Default to using normal stack */ sp = regs->regs[0]; sp -= 32; /* This is the X/Open sanctioned signal stack switching. */ if ((ka->sa.sa_flags & SA_ONSTACK) && (!on_sig_stack(sp))) sp = current->sas_ss_sp + current->sas_ss_size; return (void __user*)((sp - frame_size) & ~7); } asmlinkage long score_sigaltstack(struct pt_regs *regs) { const stack_t __user *uss = (const stack_t __user *) regs->regs[4]; stack_t __user *uoss = (stack_t __user *) regs->regs[5]; unsigned long usp = regs->regs[0]; return do_sigaltstack(uss, uoss, usp); } asmlinkage long score_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; sigset_t set; stack_t st; int sig; frame = (struct rt_sigframe __user *) regs->regs[0]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); if (sig < 0) goto badframe; else if (sig) force_sig(sig, current); if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) goto badframe; /* It is more difficult to avoid calling this function than to call it and ignore errors. */ do_sigaltstack((stack_t __user *)&st, NULL, regs->regs[0]); __asm__ __volatile__( "mv\tr0, %0\n\t" "la\tr8, syscall_exit\n\t" "br\tr8\n\t" : : "r" (regs) : "r8"); badframe: force_sig(SIGSEGV, current); return 0; } static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) { struct rt_sigframe __user *frame; int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; /* * Set up the return code ... 
* * li v0, __NR_rt_sigreturn * syscall */ err |= __put_user(0x87788000 + __NR_rt_sigreturn*2, frame->rs_code + 0); err |= __put_user(0x80008002, frame->rs_code + 1); flush_cache_sigtramp((unsigned long) frame->rs_code); err |= copy_siginfo_to_user(&frame->rs_info, info); err |= __put_user(0, &frame->rs_uc.uc_flags); err |= __put_user(NULL, &frame->rs_uc.uc_link); err |= __put_user((void __user *)current->sas_ss_sp, &frame->rs_uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->regs[0]), &frame->rs_uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->rs_uc.uc_stack.ss_size); err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; regs->regs[0] = (unsigned long) frame; regs->regs[3] = (unsigned long) frame->rs_code; regs->regs[4] = signr; regs->regs[5] = (unsigned long) &frame->rs_info; regs->regs[6] = (unsigned long) &frame->rs_uc; regs->regs[29] = (unsigned long) ka->sa.sa_handler; regs->cp0_epc = (unsigned long) ka->sa.sa_handler; return 0; give_sigsegv: if (signr == SIGSEGV) ka->sa.sa_handler = SIG_DFL; force_sig(SIGSEGV, current); return -EFAULT; } static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) { int ret; if (regs->is_syscall) { switch (regs->regs[4]) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: regs->regs[4] = EINTR; break; case ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { regs->regs[4] = EINTR; break; } case ERESTARTNOINTR: regs->regs[4] = regs->orig_r4; regs->regs[7] = regs->orig_r7; regs->cp0_epc -= 8; } regs->is_syscall = 0; } /* * Set up the stack frame */ ret = setup_rt_frame(ka, regs, sig, oldset, info); spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&current->blocked, sig); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); 
return ret; } static void do_signal(struct pt_regs *regs) { struct k_sigaction ka; sigset_t *oldset; siginfo_t info; int signr; /* * We want the common case to go fast, which is why we may in certain * cases get here from kernel mode. Just return without doing anything * if so. */ if (!user_mode(regs)) return; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Actually deliver the signal. */ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TIF_RESTORE_SIGMASK flag. */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); } return; } if (regs->is_syscall) { if (regs->regs[4] == ERESTARTNOHAND || regs->regs[4] == ERESTARTSYS || regs->regs[4] == ERESTARTNOINTR) { regs->regs[4] = regs->orig_r4; regs->regs[7] = regs->orig_r7; regs->cp0_epc -= 8; } if (regs->regs[4] == ERESTART_RESTARTBLOCK) { regs->regs[27] = __NR_restart_syscall; regs->regs[4] = regs->orig_r4; regs->regs[7] = regs->orig_r7; regs->cp0_epc -= 8; } regs->is_syscall = 0; /* Don't deal with this again. */ } /* * If there's no signal to deliver, we just put the saved sigmask * back */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } } /* * notification of userspace execution resumption * - triggered by the TIF_WORK_MASK flags */ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { /* deal with pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) do_signal(regs); }
gpl-2.0
NovaFusion/twrp_kernel
arch/score/kernel/signal.c
8993
10110
/* * arch/score/kernel/signal.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Chen Liqin <liqin.chen@sunplusct.com> * Lennox Wu <lennox.wu@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/signal.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/syscalls.h> #include <asm/ucontext.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) struct rt_sigframe { u32 rs_ass[4]; /* argument save space */ u32 rs_code[2]; /* signal trampoline */ struct siginfo rs_info; struct ucontext rs_uc; }; static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; unsigned long reg; reg = regs->cp0_epc; err |= __put_user(reg, &sc->sc_pc); err |= __put_user(regs->cp0_psr, &sc->sc_psr); err |= __put_user(regs->cp0_condition, &sc->sc_condition); #define save_gp_reg(i) { \ reg = regs->regs[i]; \ err |= __put_user(reg, &sc->sc_regs[i]); \ } while (0) save_gp_reg(0); save_gp_reg(1); save_gp_reg(2); save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6); save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10); save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14); save_gp_reg(15); 
save_gp_reg(16); save_gp_reg(17); save_gp_reg(18); save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22); save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26); save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); #undef save_gp_reg reg = regs->ceh; err |= __put_user(reg, &sc->sc_mdceh); reg = regs->cel; err |= __put_user(reg, &sc->sc_mdcel); err |= __put_user(regs->cp0_ecr, &sc->sc_ecr); err |= __put_user(regs->cp0_ema, &sc->sc_ema); return err; } static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; u32 reg; err |= __get_user(regs->cp0_epc, &sc->sc_pc); err |= __get_user(regs->cp0_condition, &sc->sc_condition); err |= __get_user(reg, &sc->sc_mdceh); regs->ceh = (int) reg; err |= __get_user(reg, &sc->sc_mdcel); regs->cel = (int) reg; err |= __get_user(reg, &sc->sc_psr); regs->cp0_psr = (int) reg; err |= __get_user(reg, &sc->sc_ecr); regs->cp0_ecr = (int) reg; err |= __get_user(reg, &sc->sc_ema); regs->cp0_ema = (int) reg; #define restore_gp_reg(i) do { \ err |= __get_user(reg, &sc->sc_regs[i]); \ regs->regs[i] = reg; \ } while (0) restore_gp_reg(0); restore_gp_reg(1); restore_gp_reg(2); restore_gp_reg(3); restore_gp_reg(4); restore_gp_reg(5); restore_gp_reg(6); restore_gp_reg(7); restore_gp_reg(8); restore_gp_reg(9); restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12); restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15); restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18); restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21); restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24); restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27); restore_gp_reg(28); restore_gp_reg(29); #undef restore_gp_reg return err; } /* * Determine which stack to use.. 
*/ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long sp; /* Default to using normal stack */ sp = regs->regs[0]; sp -= 32; /* This is the X/Open sanctioned signal stack switching. */ if ((ka->sa.sa_flags & SA_ONSTACK) && (!on_sig_stack(sp))) sp = current->sas_ss_sp + current->sas_ss_size; return (void __user*)((sp - frame_size) & ~7); } asmlinkage long score_sigaltstack(struct pt_regs *regs) { const stack_t __user *uss = (const stack_t __user *) regs->regs[4]; stack_t __user *uoss = (stack_t __user *) regs->regs[5]; unsigned long usp = regs->regs[0]; return do_sigaltstack(uss, uoss, usp); } asmlinkage long score_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; sigset_t set; stack_t st; int sig; frame = (struct rt_sigframe __user *) regs->regs[0]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); if (sig < 0) goto badframe; else if (sig) force_sig(sig, current); if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) goto badframe; /* It is more difficult to avoid calling this function than to call it and ignore errors. */ do_sigaltstack((stack_t __user *)&st, NULL, regs->regs[0]); __asm__ __volatile__( "mv\tr0, %0\n\t" "la\tr8, syscall_exit\n\t" "br\tr8\n\t" : : "r" (regs) : "r8"); badframe: force_sig(SIGSEGV, current); return 0; } static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) { struct rt_sigframe __user *frame; int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; /* * Set up the return code ... 
* * li v0, __NR_rt_sigreturn * syscall */ err |= __put_user(0x87788000 + __NR_rt_sigreturn*2, frame->rs_code + 0); err |= __put_user(0x80008002, frame->rs_code + 1); flush_cache_sigtramp((unsigned long) frame->rs_code); err |= copy_siginfo_to_user(&frame->rs_info, info); err |= __put_user(0, &frame->rs_uc.uc_flags); err |= __put_user(NULL, &frame->rs_uc.uc_link); err |= __put_user((void __user *)current->sas_ss_sp, &frame->rs_uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->regs[0]), &frame->rs_uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->rs_uc.uc_stack.ss_size); err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; regs->regs[0] = (unsigned long) frame; regs->regs[3] = (unsigned long) frame->rs_code; regs->regs[4] = signr; regs->regs[5] = (unsigned long) &frame->rs_info; regs->regs[6] = (unsigned long) &frame->rs_uc; regs->regs[29] = (unsigned long) ka->sa.sa_handler; regs->cp0_epc = (unsigned long) ka->sa.sa_handler; return 0; give_sigsegv: if (signr == SIGSEGV) ka->sa.sa_handler = SIG_DFL; force_sig(SIGSEGV, current); return -EFAULT; } static int handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) { int ret; if (regs->is_syscall) { switch (regs->regs[4]) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: regs->regs[4] = EINTR; break; case ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { regs->regs[4] = EINTR; break; } case ERESTARTNOINTR: regs->regs[4] = regs->orig_r4; regs->regs[7] = regs->orig_r7; regs->cp0_epc -= 8; } regs->is_syscall = 0; } /* * Set up the stack frame */ ret = setup_rt_frame(ka, regs, sig, oldset, info); spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&current->blocked, sig); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); 
return ret; } static void do_signal(struct pt_regs *regs) { struct k_sigaction ka; sigset_t *oldset; siginfo_t info; int signr; /* * We want the common case to go fast, which is why we may in certain * cases get here from kernel mode. Just return without doing anything * if so. */ if (!user_mode(regs)) return; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Actually deliver the signal. */ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TIF_RESTORE_SIGMASK flag. */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); } return; } if (regs->is_syscall) { if (regs->regs[4] == ERESTARTNOHAND || regs->regs[4] == ERESTARTSYS || regs->regs[4] == ERESTARTNOINTR) { regs->regs[4] = regs->orig_r4; regs->regs[7] = regs->orig_r7; regs->cp0_epc -= 8; } if (regs->regs[4] == ERESTART_RESTARTBLOCK) { regs->regs[27] = __NR_restart_syscall; regs->regs[4] = regs->orig_r4; regs->regs[7] = regs->orig_r7; regs->cp0_epc -= 8; } regs->is_syscall = 0; /* Don't deal with this again. */ } /* * If there's no signal to deliver, we just put the saved sigmask * back */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } } /* * notification of userspace execution resumption * - triggered by the TIF_WORK_MASK flags */ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { /* deal with pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) do_signal(regs); }
gpl-2.0
xboxfanj/android_kernel_oneplus_msm8974
arch/sh/mm/cache-sh7705.c
12065
4964
/* * arch/sh/mm/cache-sh7705.c * * Copyright (C) 1999, 2000 Niibe Yutaka * Copyright (C) 2004 Alex Song * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * */ #include <linux/init.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/threads.h> #include <asm/addrspace.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/cache.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> /* * The 32KB cache on the SH7705 suffers from the same synonym problem * as SH4 CPUs */ static inline void cache_wback_all(void) { unsigned long ways, waysize, addrstart; ways = current_cpu_data.dcache.ways; waysize = current_cpu_data.dcache.sets; waysize <<= current_cpu_data.dcache.entry_shift; addrstart = CACHE_OC_ADDRESS_ARRAY; do { unsigned long addr; for (addr = addrstart; addr < addrstart + waysize; addr += current_cpu_data.dcache.linesz) { unsigned long data; int v = SH_CACHE_UPDATED | SH_CACHE_VALID; data = __raw_readl(addr); if ((data & v) == v) __raw_writel(data & ~v, addr); } addrstart += current_cpu_data.dcache.way_incr; } while (--ways); } /* * Write back the range of D-cache, and purge the I-cache. * * Called from kernel/module.c:sys_init_module and routine for a.out format. */ static void sh7705_flush_icache_range(void *args) { struct flusher_data *data = args; unsigned long start, end; start = data->addr1; end = data->addr2; __flush_wback_region((void *)start, end - start); } /* * Writeback&Invalidate the D-cache of the page */ static void __flush_dcache_page(unsigned long phys) { unsigned long ways, waysize, addrstart; unsigned long flags; phys |= SH_CACHE_VALID; /* * Here, phys is the physical address of the page. 
We check all the * tags in the cache for those with the same page number as this page * (by masking off the lowest 2 bits of the 19-bit tag; these bits are * derived from the offset within in the 4k page). Matching valid * entries are invalidated. * * Since 2 bits of the cache index are derived from the virtual page * number, knowing this would reduce the number of cache entries to be * searched by a factor of 4. However this function exists to deal with * potential cache aliasing, therefore the optimisation is probably not * possible. */ local_irq_save(flags); jump_to_uncached(); ways = current_cpu_data.dcache.ways; waysize = current_cpu_data.dcache.sets; waysize <<= current_cpu_data.dcache.entry_shift; addrstart = CACHE_OC_ADDRESS_ARRAY; do { unsigned long addr; for (addr = addrstart; addr < addrstart + waysize; addr += current_cpu_data.dcache.linesz) { unsigned long data; data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID); if (data == phys) { data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED); __raw_writel(data, addr); } } addrstart += current_cpu_data.dcache.way_incr; } while (--ways); back_to_cached(); local_irq_restore(flags); } /* * Write back & invalidate the D-cache of the page. * (To avoid "alias" issues) */ static void sh7705_flush_dcache_page(void *arg) { struct page *page = arg; struct address_space *mapping = page_mapping(page); if (mapping && !mapping_mapped(mapping)) clear_bit(PG_dcache_clean, &page->flags); else __flush_dcache_page(__pa(page_address(page))); } static void sh7705_flush_cache_all(void *args) { unsigned long flags; local_irq_save(flags); jump_to_uncached(); cache_wback_all(); back_to_cached(); local_irq_restore(flags); } /* * Write back and invalidate I/D-caches for the page. 
* * ADDRESS: Virtual Address (U0 address) */ static void sh7705_flush_cache_page(void *args) { struct flusher_data *data = args; unsigned long pfn = data->addr2; __flush_dcache_page(pfn << PAGE_SHIFT); } /* * This is called when a page-cache page is about to be mapped into a * user process' address space. It offers an opportunity for a * port to ensure d-cache/i-cache coherency if necessary. * * Not entirely sure why this is necessary on SH3 with 32K cache but * without it we get occasional "Memory fault" when loading a program. */ static void sh7705_flush_icache_page(void *page) { __flush_purge_region(page_address(page), PAGE_SIZE); } void __init sh7705_cache_init(void) { local_flush_icache_range = sh7705_flush_icache_range; local_flush_dcache_page = sh7705_flush_dcache_page; local_flush_cache_all = sh7705_flush_cache_all; local_flush_cache_mm = sh7705_flush_cache_all; local_flush_cache_dup_mm = sh7705_flush_cache_all; local_flush_cache_range = sh7705_flush_cache_all; local_flush_cache_page = sh7705_flush_cache_page; local_flush_icache_page = sh7705_flush_icache_page; }
gpl-2.0
JCROM-Android/jcrom_kernel_omap
arch/sh/mm/cache-sh7705.c
12065
4964
/* * arch/sh/mm/cache-sh7705.c * * Copyright (C) 1999, 2000 Niibe Yutaka * Copyright (C) 2004 Alex Song * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * */ #include <linux/init.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/threads.h> #include <asm/addrspace.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/cache.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> /* * The 32KB cache on the SH7705 suffers from the same synonym problem * as SH4 CPUs */ static inline void cache_wback_all(void) { unsigned long ways, waysize, addrstart; ways = current_cpu_data.dcache.ways; waysize = current_cpu_data.dcache.sets; waysize <<= current_cpu_data.dcache.entry_shift; addrstart = CACHE_OC_ADDRESS_ARRAY; do { unsigned long addr; for (addr = addrstart; addr < addrstart + waysize; addr += current_cpu_data.dcache.linesz) { unsigned long data; int v = SH_CACHE_UPDATED | SH_CACHE_VALID; data = __raw_readl(addr); if ((data & v) == v) __raw_writel(data & ~v, addr); } addrstart += current_cpu_data.dcache.way_incr; } while (--ways); } /* * Write back the range of D-cache, and purge the I-cache. * * Called from kernel/module.c:sys_init_module and routine for a.out format. */ static void sh7705_flush_icache_range(void *args) { struct flusher_data *data = args; unsigned long start, end; start = data->addr1; end = data->addr2; __flush_wback_region((void *)start, end - start); } /* * Writeback&Invalidate the D-cache of the page */ static void __flush_dcache_page(unsigned long phys) { unsigned long ways, waysize, addrstart; unsigned long flags; phys |= SH_CACHE_VALID; /* * Here, phys is the physical address of the page. 
We check all the * tags in the cache for those with the same page number as this page * (by masking off the lowest 2 bits of the 19-bit tag; these bits are * derived from the offset within in the 4k page). Matching valid * entries are invalidated. * * Since 2 bits of the cache index are derived from the virtual page * number, knowing this would reduce the number of cache entries to be * searched by a factor of 4. However this function exists to deal with * potential cache aliasing, therefore the optimisation is probably not * possible. */ local_irq_save(flags); jump_to_uncached(); ways = current_cpu_data.dcache.ways; waysize = current_cpu_data.dcache.sets; waysize <<= current_cpu_data.dcache.entry_shift; addrstart = CACHE_OC_ADDRESS_ARRAY; do { unsigned long addr; for (addr = addrstart; addr < addrstart + waysize; addr += current_cpu_data.dcache.linesz) { unsigned long data; data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID); if (data == phys) { data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED); __raw_writel(data, addr); } } addrstart += current_cpu_data.dcache.way_incr; } while (--ways); back_to_cached(); local_irq_restore(flags); } /* * Write back & invalidate the D-cache of the page. * (To avoid "alias" issues) */ static void sh7705_flush_dcache_page(void *arg) { struct page *page = arg; struct address_space *mapping = page_mapping(page); if (mapping && !mapping_mapped(mapping)) clear_bit(PG_dcache_clean, &page->flags); else __flush_dcache_page(__pa(page_address(page))); } static void sh7705_flush_cache_all(void *args) { unsigned long flags; local_irq_save(flags); jump_to_uncached(); cache_wback_all(); back_to_cached(); local_irq_restore(flags); } /* * Write back and invalidate I/D-caches for the page. 
* * ADDRESS: Virtual Address (U0 address) */ static void sh7705_flush_cache_page(void *args) { struct flusher_data *data = args; unsigned long pfn = data->addr2; __flush_dcache_page(pfn << PAGE_SHIFT); } /* * This is called when a page-cache page is about to be mapped into a * user process' address space. It offers an opportunity for a * port to ensure d-cache/i-cache coherency if necessary. * * Not entirely sure why this is necessary on SH3 with 32K cache but * without it we get occasional "Memory fault" when loading a program. */ static void sh7705_flush_icache_page(void *page) { __flush_purge_region(page_address(page), PAGE_SIZE); } void __init sh7705_cache_init(void) { local_flush_icache_range = sh7705_flush_icache_range; local_flush_dcache_page = sh7705_flush_dcache_page; local_flush_cache_all = sh7705_flush_cache_all; local_flush_cache_mm = sh7705_flush_cache_all; local_flush_cache_dup_mm = sh7705_flush_cache_all; local_flush_cache_range = sh7705_flush_cache_all; local_flush_cache_page = sh7705_flush_cache_page; local_flush_icache_page = sh7705_flush_icache_page; }
gpl-2.0
chrnueve/udooImaxdi
kernel_oficial_source/fs/partitions/sgi.c
13089
2274
/* * fs/partitions/sgi.c * * Code extracted from drivers/block/genhd.c */ #include "check.h" #include "sgi.h" struct sgi_disklabel { __be32 magic_mushroom; /* Big fat spliff... */ __be16 root_part_num; /* Root partition number */ __be16 swap_part_num; /* Swap partition number */ s8 boot_file[16]; /* Name of boot file for ARCS */ u8 _unused0[48]; /* Device parameter useless crapola.. */ struct sgi_volume { s8 name[8]; /* Name of volume */ __be32 block_num; /* Logical block number */ __be32 num_bytes; /* How big, in bytes */ } volume[15]; struct sgi_partition { __be32 num_blocks; /* Size in logical blocks */ __be32 first_block; /* First logical block */ __be32 type; /* Type of this partition */ } partitions[16]; __be32 csum; /* Disk label checksum */ __be32 _unused1; /* Padding */ }; int sgi_partition(struct parsed_partitions *state) { int i, csum; __be32 magic; int slot = 1; unsigned int start, blocks; __be32 *ui, cs; Sector sect; struct sgi_disklabel *label; struct sgi_partition *p; char b[BDEVNAME_SIZE]; label = read_part_sector(state, 0, &sect); if (!label) return -1; p = &label->partitions[0]; magic = label->magic_mushroom; if(be32_to_cpu(magic) != SGI_LABEL_MAGIC) { /*printk("Dev %s SGI disklabel: bad magic %08x\n", bdevname(bdev, b), be32_to_cpu(magic));*/ put_dev_sector(sect); return 0; } ui = ((__be32 *) (label + 1)) - 1; for(csum = 0; ui >= ((__be32 *) label);) { cs = *ui--; csum += be32_to_cpu(cs); } if(csum) { printk(KERN_WARNING "Dev %s SGI disklabel: csum bad, label corrupted\n", bdevname(state->bdev, b)); put_dev_sector(sect); return 0; } /* All SGI disk labels have 16 partitions, disks under Linux only * have 15 minor's. Luckily there are always a few zero length * partitions which we don't care about so we never overflow the * current_minor. 
*/ for(i = 0; i < 16; i++, p++) { blocks = be32_to_cpu(p->num_blocks); start = be32_to_cpu(p->first_block); if (blocks) { put_partition(state, slot, start, blocks); if (be32_to_cpu(p->type) == LINUX_RAID_PARTITION) state->parts[slot].flags = ADDPART_FLAG_RAID; } slot++; } strlcat(state->pp_buf, "\n", PAGE_SIZE); put_dev_sector(sect); return 1; }
gpl-2.0
CyanogenMod/lge-kernel-gproj
arch/mips/pci/fixup-rbtx4938.c
13857
1221
/* * Toshiba rbtx4938 pci routines * Copyright (C) 2000-2001 Toshiba Corporation * * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the * terms of the GNU General Public License version 2. This program is * licensed "as is" without any warranty of any kind, whether express * or implied. * * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com) */ #include <linux/types.h> #include <asm/txx9/pci.h> #include <asm/txx9/rbtx4938.h> int __init rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq = tx4938_pcic1_map_irq(dev, slot); if (irq >= 0) return irq; irq = pin; /* IRQ rotation */ irq--; /* 0-3 */ if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(23)) { /* PCI CardSlot (IDSEL=A23) */ /* PCIA => PCIA (IDSEL=A23) */ irq = (irq + 0 + slot) % 4; } else { /* PCI Backplane */ if (txx9_pci_option & TXX9_PCI_OPT_PICMG) irq = (irq + 33 - slot) % 4; else irq = (irq + 3 + slot) % 4; } irq++; /* 1-4 */ switch (irq) { case 1: irq = RBTX4938_IRQ_IOC_PCIA; break; case 2: irq = RBTX4938_IRQ_IOC_PCIB; break; case 3: irq = RBTX4938_IRQ_IOC_PCIC; break; case 4: irq = RBTX4938_IRQ_IOC_PCID; break; } return irq; }
gpl-2.0
ShieldKteam/shield_osprey
drivers/devfreq/devfreq_spdm.c
34
8061
/* *Copyright (c) 2014, The Linux Foundation. All rights reserved. * *This program is free software; you can redistribute it and/or modify *it under the terms of the GNU General Public License version 2 and *only version 2 as published by the Free Software Foundation. * *This program is distributed in the hope that it will be useful, *but WITHOUT ANY WARRANTY; without even the implied warranty of *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/device.h> #include <linux/devfreq.h> #include <linux/init.h> #include <linux/gfp.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/msm-bus.h> #include <linux/of.h> #include <linux/platform_device.h> #include <soc/qcom/hvc.h> #include "governor.h" #include "devfreq_spdm.h" #define DEVFREQ_SPDM_DEFAULT_WINDOW_MS 100 static int change_bw(struct device *dev, unsigned long *freq, u32 flags) { struct spdm_data *data = 0; int i; int next_idx; int ret = 0; struct hvc_desc desc = { { 0 } }; int hvc_status = 0; if (!dev || !freq) return -EINVAL; data = dev_get_drvdata(dev); if (!data) return -EINVAL; if (data->devfreq->previous_freq == *freq) goto update_thresholds; next_idx = data->cur_idx + 1; next_idx = next_idx % 2; for (i = 0; i < data->pdata->usecase[next_idx].num_paths; i++) data->pdata->usecase[next_idx].vectors[i].ab = (*freq) << 6; data->cur_idx = next_idx; ret = msm_bus_scale_client_update_request(data->bus_scale_client_id, data->cur_idx); update_thresholds: desc.arg[0] = SPDM_CMD_ENABLE; desc.arg[1] = data->spdm_client; desc.arg[2] = clk_get_rate(data->cci_clk); hvc_status = hvc(HVC_FN_SIP(SPDM_HYP_FNID), &desc); if (hvc_status) pr_err("HVC command %u failed with error %u", (int)desc.arg[0], hvc_status); return ret; } static int get_cur_bw(struct device *dev, unsigned long *freq) { struct spdm_data *data = 0; if (!dev || !freq) return -EINVAL; data = dev_get_drvdata(dev); if 
(!data) return -EINVAL; *freq = data->pdata->usecase[data->cur_idx].vectors[0].ab >> 6; return 0; } static int get_dev_status(struct device *dev, struct devfreq_dev_status *status) { struct spdm_data *data = 0; int ret; if (!dev || !status) return -EINVAL; data = dev_get_drvdata(dev); if (!data) return -EINVAL; /* determine if we want to go up or down based on the notification */ if (data->action == SPDM_UP) status->busy_time = 255; else status->busy_time = 0; status->total_time = 255; ret = get_cur_bw(dev, &status->current_frequency); if (ret) return ret; return 0; } static int populate_config_data(struct spdm_data *data, struct platform_device *pdev) { int ret = -EINVAL; struct device_node *node = pdev->dev.of_node; struct property *prop = 0; ret = of_property_read_u32(node, "qcom,max-vote", &data->config_data.max_vote); if (ret) return ret; ret = of_property_read_u32(node, "qcom,bw-upstep", &data->config_data.upstep); if (ret) return ret; ret = of_property_read_u32(node, "qcom,bw-dwnstep", &data->config_data.downstep); if (ret) return ret; ret = of_property_read_u32(node, "qcom,alpha-up", &data->config_data.aup); if (ret) return ret; ret = of_property_read_u32(node, "qcom,alpha-down", &data->config_data.adown); if (ret) return ret; ret = of_property_read_u32(node, "qcom,bucket-size", &data->config_data.bucket_size); if (ret) return ret; ret = of_property_read_u32_array(node, "qcom,pl-freqs", data->config_data.pl_freqs, SPDM_PL_COUNT - 1); if (ret) return ret; ret = of_property_read_u32_array(node, "qcom,reject-rate", data->config_data.reject_rate, SPDM_PL_COUNT * 2); if (ret) return ret; ret = of_property_read_u32_array(node, "qcom,response-time-us", data->config_data.response_time_us, SPDM_PL_COUNT * 2); if (ret) return ret; ret = of_property_read_u32_array(node, "qcom,cci-response-time-us", data->config_data.cci_response_time_us, SPDM_PL_COUNT * 2); if (ret) return ret; ret = of_property_read_u32(node, "qcom,max-cci-freq", &data->config_data.max_cci_freq); if 
(ret) return ret; ret = of_property_read_u32(node, "qcom,up-step-multp", &data->config_data.up_step_multp); if (ret) return ret; prop = of_find_property(node, "qcom,ports", 0); if (!prop) return -EINVAL; data->config_data.num_ports = prop->length / sizeof(u32); data->config_data.ports = devm_kzalloc(&pdev->dev, prop->length, GFP_KERNEL); if (!data->config_data.ports) return -ENOMEM; ret = of_property_read_u32_array(node, "qcom,ports", data->config_data.ports, data->config_data.num_ports); if (ret) { devm_kfree(&pdev->dev, data->config_data.ports); data->config_data.ports = NULL; return ret; } return 0; } static int populate_spdm_data(struct spdm_data *data, struct platform_device *pdev) { int ret = -EINVAL; struct device_node *node = pdev->dev.of_node; ret = populate_config_data(data, pdev); if (ret) return ret; ret = of_property_read_u32(node, "qcom,spdm-client", &data->spdm_client); if (ret) goto no_client; ret = of_property_read_u32(node, "qcom,spdm-interval", &data->window); if (ret) data->window = DEVFREQ_SPDM_DEFAULT_WINDOW_MS; data->pdata = msm_bus_cl_get_pdata(pdev); if (!data->pdata) { ret = -EINVAL; goto no_pdata; } return 0; no_client: no_pdata: devm_kfree(&pdev->dev, data->config_data.ports); data->config_data.ports = NULL; return ret; } static int probe(struct platform_device *pdev) { struct spdm_data *data = 0; int ret = -EINVAL; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->action = SPDM_DOWN; platform_set_drvdata(pdev, data); ret = populate_spdm_data(data, pdev); if (ret) goto bad_of; data->bus_scale_client_id = msm_bus_scale_register_client(data->pdata); if (!data->bus_scale_client_id) { ret = -EINVAL; goto no_bus_scaling; } data->cci_clk = clk_get(&pdev->dev, "cci_clk"); if (IS_ERR(data->cci_clk)) { ret = PTR_ERR(data->cci_clk); goto no_clock; } data->profile = devm_kzalloc(&pdev->dev, sizeof(*(data->profile)), GFP_KERNEL); if (!data->profile) { ret = -ENOMEM; goto no_profile; } data->profile->target 
= change_bw; data->profile->get_dev_status = get_dev_status; data->profile->get_cur_freq = get_cur_bw; data->profile->polling_ms = data->window; data->devfreq = devfreq_add_device(&pdev->dev, data->profile, "spdm_bw_hyp", data); if (IS_ERR(data->devfreq)) { ret = PTR_ERR(data->devfreq); goto no_spdm_device; } spdm_init_debugfs(&pdev->dev); return 0; no_spdm_device: devm_kfree(&pdev->dev, data->profile); no_profile: no_clock: msm_bus_scale_unregister_client(data->bus_scale_client_id); no_bus_scaling: devm_kfree(&pdev->dev, data->config_data.ports); bad_of: devm_kfree(&pdev->dev, data); platform_set_drvdata(pdev, NULL); return ret; } static int remove(struct platform_device *pdev) { struct spdm_data *data = 0; data = platform_get_drvdata(pdev); spdm_remove_debugfs(data); if (data->devfreq) devfreq_remove_device(data->devfreq); if (data->profile) devm_kfree(&pdev->dev, data->profile); if (data->bus_scale_client_id) msm_bus_scale_unregister_client(data->bus_scale_client_id); if (data->config_data.ports) devm_kfree(&pdev->dev, data->config_data.ports); devm_kfree(&pdev->dev, data); platform_set_drvdata(pdev, NULL); return 0; } static const struct of_device_id devfreq_spdm_match[] = { {.compatible = "qcom,devfreq_spdm"}, {} }; static struct platform_driver devfreq_spdm_drvr = { .driver = { .name = "devfreq_spdm", .owner = THIS_MODULE, .of_match_table = devfreq_spdm_match, }, .probe = probe, .remove = remove, }; static int __init devfreq_spdm_init(void) { return platform_driver_register(&devfreq_spdm_drvr); } module_init(devfreq_spdm_init); MODULE_LICENSE("GPL v2");
gpl-2.0
Evervolv/android_kernel_htc_msm8974
arch/frv/kernel/debug-stub.c
34
4243
/* debug-stub.c: debug-mode stub * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/serial_reg.h> #include <linux/start_kernel.h> #include <asm/serial-regs.h> #include <asm/timer-regs.h> #include <asm/irc-regs.h> #include <asm/gdb-stub.h> #include "gdb-io.h" #define __UART0(X) (*(volatile uint8_t *)(UART0_BASE + (UART_##X))) #define LSR_WAIT_FOR0(STATE) \ do { \ } while (!(__UART0(LSR) & UART_LSR_##STATE)) #define FLOWCTL_QUERY0(LINE) ({ __UART0(MSR) & UART_MSR_##LINE; }) #define FLOWCTL_CLEAR0(LINE) do { __UART0(MCR) &= ~UART_MCR_##LINE; } while (0) #define FLOWCTL_SET0(LINE) do { __UART0(MCR) |= UART_MCR_##LINE; } while (0) #define FLOWCTL_WAIT_FOR0(LINE) \ do { \ gdbstub_do_rx(); \ } while(!FLOWCTL_QUERY(LINE)) struct frv_debug_status __debug_status; static void __init debug_stub_init(void); asmlinkage void debug_stub(void) { unsigned long hsr0; int type = 0; static u8 inited = 0; if (!inited) { debug_stub_init(); type = -1; inited = 1; } hsr0 = __get_HSR(0); if (hsr0 & HSR0_ETMD) __set_HSR(0, hsr0 & ~HSR0_ETMD); __debug_status.dcr &= ~DCR_SE; if (__debug_frame->pc == (unsigned long) __break_hijack_kernel_event_breaks_here) { *__debug_frame = *__frame; __frame = __debug_frame->next_frame; __debug_status.brr = (__debug_frame->tbr & TBR_TT) << 12; __debug_status.brr |= BRR_EB; } if (__debug_frame->pc == (unsigned long) __debug_bug_trap + 4) { __debug_frame->pc = __debug_frame->lr; type = __debug_frame->gr8; } #ifdef CONFIG_GDBSTUB gdbstub(type); #endif if (hsr0 & HSR0_ETMD) __set_HSR(0, __get_HSR(0) | HSR0_ETMD); } 
static void __init debug_stub_init(void) { __set_IRR(6, 0xff000000); __set_IITMR(1, 0x20000000); asm volatile(" movgs gr0,ibar0 \n" " movgs gr0,ibar1 \n" " movgs gr0,ibar2 \n" " movgs gr0,ibar3 \n" " movgs gr0,dbar0 \n" " movgs gr0,dbmr00 \n" " movgs gr0,dbmr01 \n" " movgs gr0,dbdr00 \n" " movgs gr0,dbdr01 \n" " movgs gr0,dbar1 \n" " movgs gr0,dbmr10 \n" " movgs gr0,dbmr11 \n" " movgs gr0,dbdr10 \n" " movgs gr0,dbdr11 \n" ); if (__debug_frame->pc == (unsigned long) __debug_stub_init_break) __debug_frame->pc = (unsigned long) start_kernel; __debug_status.dcr = DCR_EBE; #ifdef CONFIG_GDBSTUB gdbstub_init(); #endif __clr_MASK_all(); __clr_MASK(15); __clr_RC(15); } void debug_stub_exit(int status) { #ifdef CONFIG_GDBSTUB gdbstub_exit(status); #endif } void debug_to_serial(const char *p, int n) { char ch; for (; n > 0; n--) { ch = *p++; FLOWCTL_SET0(DTR); LSR_WAIT_FOR0(THRE); if (ch == 0x0a) { __UART0(TX) = 0x0d; mb(); LSR_WAIT_FOR0(THRE); } __UART0(TX) = ch; mb(); FLOWCTL_CLEAR0(DTR); } } void debug_to_serial2(const char *fmt, ...) { va_list va; char buf[64]; int n; va_start(va, fmt); n = vsprintf(buf, fmt, va); va_end(va); debug_to_serial(buf, n); } void __init console_set_baud(unsigned baud) { unsigned value, high, low; u8 lcr; value = __serial_clock_speed_HZ / 16 / baud; high = __serial_clock_speed_HZ / 16 / value; low = __serial_clock_speed_HZ / 16 / (value + 1); if (low + (high - low) / 2 > baud) value++; lcr = __UART0(LCR); __UART0(LCR) |= UART_LCR_DLAB; mb(); __UART0(DLL) = value & 0xff; __UART0(DLM) = (value >> 8) & 0xff; mb(); __UART0(LCR) = lcr; mb(); } int __init console_get_baud(void) { unsigned value; u8 lcr; lcr = __UART0(LCR); __UART0(LCR) |= UART_LCR_DLAB; mb(); value = __UART0(DLM) << 8; value |= __UART0(DLL); __UART0(LCR) = lcr; mb(); return value; } #ifndef CONFIG_NO_KERNEL_MSG void __debug_bug_printk(const char *file, unsigned line) { printk("kernel BUG at %s:%d!\n", file, line); } #endif
gpl-2.0
xboxfanj/android_kernel_htc_msm8974
drivers/mmc/host/mmc_spi.c
34
26445
/* * mmc_spi.c - Access SD/MMC cards through SPI master controllers * * (C) Copyright 2005, Intec Automation, * Mike Lavender (mike@steroidmicros) * (C) Copyright 2006-2007, David Brownell * (C) Copyright 2007, Axis Communications, * Hans-Peter Nilsson (hp@axis.com) * (C) Copyright 2007, ATRON electronic GmbH, * Jan Nikitenko <jan.nikitenko@gmail.com> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/sched.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/dma-mapping.h> #include <linux/crc7.h> #include <linux/crc-itu-t.h> #include <linux/scatterlist.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/spi/spi.h> #include <linux/spi/mmc_spi.h> #include <asm/unaligned.h> /* Response tokens used to ack each block written: */ #define SPI_MMC_RESPONSE_CODE(x) ((x) & 0x1f) #define SPI_RESPONSE_ACCEPTED ((2 << 1)|1) #define SPI_RESPONSE_CRC_ERR ((5 << 1)|1) #define SPI_RESPONSE_WRITE_ERR ((6 << 1)|1) #define SPI_TOKEN_SINGLE 0xfe #define SPI_TOKEN_MULTI_WRITE 0xfc #define SPI_TOKEN_STOP_TRAN 0xfd #define MMC_SPI_BLOCKSIZE 512 #define r1b_timeout (HZ * 3) #define MMC_SPI_BLOCKSATONCE 128 struct scratch { u8 status[29]; u8 data_token; __be16 crc_val; }; struct mmc_spi_host { struct mmc_host *mmc; struct spi_device *spi; unsigned char power_mode; u16 powerup_msecs; struct mmc_spi_platform_data *pdata; struct spi_transfer token, t, crc, early_status; struct spi_message m; struct spi_transfer status; struct spi_message readback; struct device *dma_dev; struct scratch *data; dma_addr_t data_dma; void *ones; dma_addr_t ones_dma; }; static inline int mmc_cs_off(struct mmc_spi_host *host) { return spi_setup(host->spi); } static int mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len) { int status; if (len > sizeof(*host->data)) { WARN_ON(1); return -EIO; } host->status.len = len; if (host->dma_dev) dma_sync_single_for_device(host->dma_dev, host->data_dma, sizeof(*host->data), DMA_FROM_DEVICE); status = spi_sync_locked(host->spi, &host->readback); if (host->dma_dev) dma_sync_single_for_cpu(host->dma_dev, host->data_dma, sizeof(*host->data), DMA_FROM_DEVICE); return status; } static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout, unsigned n, u8 byte) { u8 *cp = host->data->status; unsigned long start = jiffies; while (1) { int status; unsigned i; 
status = mmc_spi_readbytes(host, n); if (status < 0) return status; for (i = 0; i < n; i++) { if (cp[i] != byte) return cp[i]; } if (time_is_before_jiffies(start + timeout)) break; if (time_is_before_jiffies(start+1)) schedule(); } return -ETIMEDOUT; } static inline int mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout) { return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0); } static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout) { return mmc_spi_skip(host, timeout, 1, 0xff); } static char *maptype(struct mmc_command *cmd) { switch (mmc_spi_resp_type(cmd)) { case MMC_RSP_SPI_R1: return "R1"; case MMC_RSP_SPI_R1B: return "R1B"; case MMC_RSP_SPI_R2: return "R2/R5"; case MMC_RSP_SPI_R3: return "R3/R4/R7"; default: return "?"; } } static int mmc_spi_response_get(struct mmc_spi_host *host, struct mmc_command *cmd, int cs_on) { u8 *cp = host->data->status; u8 *end = cp + host->t.len; int value = 0; int bitshift; u8 leftover = 0; unsigned short rotator; int i; char tag[32]; snprintf(tag, sizeof(tag), " ... 
CMD%d response SPI_%s", cmd->opcode, maptype(cmd)); cp += 8; while (cp < end && *cp == 0xff) cp++; if (cp == end) { cp = host->data->status; end = cp+1; for (i = 2; i < 16; i++) { value = mmc_spi_readbytes(host, 1); if (value < 0) goto done; if (*cp != 0xff) goto checkstatus; } value = -ETIMEDOUT; goto done; } checkstatus: bitshift = 0; if (*cp & 0x80) { rotator = *cp++ << 8; if (cp == end) { value = mmc_spi_readbytes(host, 1); if (value < 0) goto done; cp = host->data->status; end = cp+1; } rotator |= *cp++; while (rotator & 0x8000) { bitshift++; rotator <<= 1; } cmd->resp[0] = rotator >> 8; leftover = rotator; } else { cmd->resp[0] = *cp++; } cmd->error = 0; if (cmd->resp[0] != 0) { if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS) & cmd->resp[0]) value = -EFAULT; else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0]) value = -ENOSYS; else if (R1_SPI_COM_CRC & cmd->resp[0]) value = -EILSEQ; else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET) & cmd->resp[0]) value = -EIO; } switch (mmc_spi_resp_type(cmd)) { case MMC_RSP_SPI_R1B: while (cp < end && *cp == 0) cp++; if (cp == end) mmc_spi_wait_unbusy(host, r1b_timeout); break; case MMC_RSP_SPI_R2: if (cp == end) { value = mmc_spi_readbytes(host, 1); if (value < 0) goto done; cp = host->data->status; end = cp+1; } if (bitshift) { rotator = leftover << 8; rotator |= *cp << bitshift; cmd->resp[0] |= (rotator & 0xFF00); } else { cmd->resp[0] |= *cp << 8; } break; case MMC_RSP_SPI_R3: rotator = leftover << 8; cmd->resp[1] = 0; for (i = 0; i < 4; i++) { cmd->resp[1] <<= 8; if (cp == end) { value = mmc_spi_readbytes(host, 1); if (value < 0) goto done; cp = host->data->status; end = cp+1; } if (bitshift) { rotator |= *cp++ << bitshift; cmd->resp[1] |= (rotator >> 8); rotator <<= 8; } else { cmd->resp[1] |= *cp++; } } break; case MMC_RSP_SPI_R1: break; default: dev_dbg(&host->spi->dev, "bad response type %04x\n", mmc_spi_resp_type(cmd)); if (value >= 0) value = -EINVAL; goto done; } if (value < 0) dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n", 
tag, cmd->resp[0], cmd->resp[1]); if (value >= 0 && cs_on) return value; done: if (value < 0) cmd->error = value; mmc_cs_off(host); return value; } static int mmc_spi_command_send(struct mmc_spi_host *host, struct mmc_request *mrq, struct mmc_command *cmd, int cs_on) { struct scratch *data = host->data; u8 *cp = data->status; u32 arg = cmd->arg; int status; struct spi_transfer *t; memset(cp++, 0xff, sizeof(data->status)); *cp++ = 0x40 | cmd->opcode; *cp++ = (u8)(arg >> 24); *cp++ = (u8)(arg >> 16); *cp++ = (u8)(arg >> 8); *cp++ = (u8)arg; *cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01; if (cs_on && (mrq->data->flags & MMC_DATA_READ)) { cp += 2; } else { cp += 10; if (cmd->flags & MMC_RSP_SPI_S2) cp++; else if (cmd->flags & MMC_RSP_SPI_B4) cp += 4; else if (cmd->flags & MMC_RSP_BUSY) cp = data->status + sizeof(data->status); } dev_dbg(&host->spi->dev, " mmc_spi: CMD%d, resp %s\n", cmd->opcode, maptype(cmd)); spi_message_init(&host->m); t = &host->t; memset(t, 0, sizeof(*t)); t->tx_buf = t->rx_buf = data->status; t->tx_dma = t->rx_dma = host->data_dma; t->len = cp - data->status; t->cs_change = 1; spi_message_add_tail(t, &host->m); if (host->dma_dev) { host->m.is_dma_mapped = 1; dma_sync_single_for_device(host->dma_dev, host->data_dma, sizeof(*host->data), DMA_BIDIRECTIONAL); } status = spi_sync_locked(host->spi, &host->m); if (host->dma_dev) dma_sync_single_for_cpu(host->dma_dev, host->data_dma, sizeof(*host->data), DMA_BIDIRECTIONAL); if (status < 0) { dev_dbg(&host->spi->dev, " ... 
write returned %d\n", status); cmd->error = status; return status; } return mmc_spi_response_get(host, cmd, cs_on); } static void mmc_spi_setup_data_message( struct mmc_spi_host *host, int multiple, enum dma_data_direction direction) { struct spi_transfer *t; struct scratch *scratch = host->data; dma_addr_t dma = host->data_dma; spi_message_init(&host->m); if (dma) host->m.is_dma_mapped = 1; if (direction == DMA_TO_DEVICE) { t = &host->token; memset(t, 0, sizeof(*t)); t->len = 1; if (multiple) scratch->data_token = SPI_TOKEN_MULTI_WRITE; else scratch->data_token = SPI_TOKEN_SINGLE; t->tx_buf = &scratch->data_token; if (dma) t->tx_dma = dma + offsetof(struct scratch, data_token); spi_message_add_tail(t, &host->m); } t = &host->t; memset(t, 0, sizeof(*t)); t->tx_buf = host->ones; t->tx_dma = host->ones_dma; /* length and actual buffer info are written later */ spi_message_add_tail(t, &host->m); t = &host->crc; memset(t, 0, sizeof(*t)); t->len = 2; if (direction == DMA_TO_DEVICE) { /* the actual CRC may get written later */ t->tx_buf = &scratch->crc_val; if (dma) t->tx_dma = dma + offsetof(struct scratch, crc_val); } else { t->tx_buf = host->ones; t->tx_dma = host->ones_dma; t->rx_buf = &scratch->crc_val; if (dma) t->rx_dma = dma + offsetof(struct scratch, crc_val); } spi_message_add_tail(t, &host->m); if (multiple || direction == DMA_TO_DEVICE) { t = &host->early_status; memset(t, 0, sizeof(*t)); t->len = (direction == DMA_TO_DEVICE) ? 
sizeof(scratch->status) : 1; t->tx_buf = host->ones; t->tx_dma = host->ones_dma; t->rx_buf = scratch->status; if (dma) t->rx_dma = dma + offsetof(struct scratch, status); t->cs_change = 1; spi_message_add_tail(t, &host->m); } } static int mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, unsigned long timeout) { struct spi_device *spi = host->spi; int status, i; struct scratch *scratch = host->data; u32 pattern; if (host->mmc->use_spi_crc) scratch->crc_val = cpu_to_be16( crc_itu_t(0, t->tx_buf, t->len)); if (host->dma_dev) dma_sync_single_for_device(host->dma_dev, host->data_dma, sizeof(*scratch), DMA_BIDIRECTIONAL); status = spi_sync_locked(spi, &host->m); if (status != 0) { dev_dbg(&spi->dev, "write error (%d)\n", status); return status; } if (host->dma_dev) dma_sync_single_for_cpu(host->dma_dev, host->data_dma, sizeof(*scratch), DMA_BIDIRECTIONAL); pattern = scratch->status[0] << 24; pattern |= scratch->status[1] << 16; pattern |= scratch->status[2] << 8; pattern |= scratch->status[3]; pattern |= 0xE0000000; while (pattern & 0x80000000) pattern <<= 1; pattern >>= 27; switch (pattern) { case SPI_RESPONSE_ACCEPTED: status = 0; break; case SPI_RESPONSE_CRC_ERR: status = -EILSEQ; break; case SPI_RESPONSE_WRITE_ERR: status = -EIO; break; default: status = -EPROTO; break; } if (status != 0) { dev_dbg(&spi->dev, "write error %02x (%d)\n", scratch->status[0], status); return status; } t->tx_buf += t->len; if (host->dma_dev) t->tx_dma += t->len; for (i = 4; i < sizeof(scratch->status); i++) { if (scratch->status[i] & 0x01) return 0; } return mmc_spi_wait_unbusy(host, timeout); } static int mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, unsigned long timeout) { struct spi_device *spi = host->spi; int status; struct scratch *scratch = host->data; unsigned int bitshift; u8 leftover; status = mmc_spi_readbytes(host, 1); if (status < 0) return status; status = scratch->status[0]; if (status == 0xff || status == 0) status = 
mmc_spi_readtoken(host, timeout); if (status < 0) { dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status); return status; } bitshift = 7; while (status & 0x80) { status <<= 1; bitshift--; } leftover = status << 1; if (host->dma_dev) { dma_sync_single_for_device(host->dma_dev, host->data_dma, sizeof(*scratch), DMA_BIDIRECTIONAL); dma_sync_single_for_device(host->dma_dev, t->rx_dma, t->len, DMA_FROM_DEVICE); } status = spi_sync_locked(spi, &host->m); if (host->dma_dev) { dma_sync_single_for_cpu(host->dma_dev, host->data_dma, sizeof(*scratch), DMA_BIDIRECTIONAL); dma_sync_single_for_cpu(host->dma_dev, t->rx_dma, t->len, DMA_FROM_DEVICE); } if (bitshift) { u8 *cp = t->rx_buf; unsigned int len; unsigned int bitright = 8 - bitshift; u8 temp; for (len = t->len; len; len--) { temp = *cp; *cp++ = leftover | (temp >> bitshift); leftover = temp << bitright; } cp = (u8 *) &scratch->crc_val; temp = *cp; *cp++ = leftover | (temp >> bitshift); leftover = temp << bitright; temp = *cp; *cp = leftover | (temp >> bitshift); } if (host->mmc->use_spi_crc) { u16 crc = crc_itu_t(0, t->rx_buf, t->len); be16_to_cpus(&scratch->crc_val); if (scratch->crc_val != crc) { dev_dbg(&spi->dev, "read - crc error: crc_val=0x%04x, " "computed=0x%04x len=%d\n", scratch->crc_val, crc, t->len); return -EILSEQ; } } t->rx_buf += t->len; if (host->dma_dev) t->rx_dma += t->len; return 0; } static void mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, struct mmc_data *data, u32 blk_size) { struct spi_device *spi = host->spi; struct device *dma_dev = host->dma_dev; struct spi_transfer *t; enum dma_data_direction direction; struct scatterlist *sg; unsigned n_sg; int multiple = (data->blocks > 1); u32 clock_rate; unsigned long timeout; if (data->flags & MMC_DATA_READ) direction = DMA_FROM_DEVICE; else direction = DMA_TO_DEVICE; mmc_spi_setup_data_message(host, multiple, direction); t = &host->t; if (t->speed_hz) clock_rate = t->speed_hz; else clock_rate = spi->max_speed_hz; timeout = 
data->timeout_ns + data->timeout_clks * 1000000 / clock_rate; timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1; for (sg = data->sg, n_sg = data->sg_len; n_sg; n_sg--, sg++) { int status = 0; dma_addr_t dma_addr = 0; void *kmap_addr; unsigned length = sg->length; enum dma_data_direction dir = direction; if (dma_dev) { if ((sg->offset != 0 || length != PAGE_SIZE) && dir == DMA_FROM_DEVICE) dir = DMA_BIDIRECTIONAL; dma_addr = dma_map_page(dma_dev, sg_page(sg), 0, PAGE_SIZE, dir); if (direction == DMA_TO_DEVICE) t->tx_dma = dma_addr + sg->offset; else t->rx_dma = dma_addr + sg->offset; } kmap_addr = kmap(sg_page(sg)); if (direction == DMA_TO_DEVICE) t->tx_buf = kmap_addr + sg->offset; else t->rx_buf = kmap_addr + sg->offset; while (length) { t->len = min(length, blk_size); dev_dbg(&host->spi->dev, " mmc_spi: %s block, %d bytes\n", (direction == DMA_TO_DEVICE) ? "write" : "read", t->len); if (direction == DMA_TO_DEVICE) status = mmc_spi_writeblock(host, t, timeout); else status = mmc_spi_readblock(host, t, timeout); if (status < 0) break; data->bytes_xfered += t->len; length -= t->len; if (!multiple) break; } if (direction == DMA_FROM_DEVICE) flush_kernel_dcache_page(sg_page(sg)); kunmap(sg_page(sg)); if (dma_dev) dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir); if (status < 0) { data->error = status; dev_dbg(&spi->dev, "%s status %d\n", (direction == DMA_TO_DEVICE) ? 
"write" : "read", status); break; } } if (direction == DMA_TO_DEVICE && multiple) { struct scratch *scratch = host->data; int tmp; const unsigned statlen = sizeof(scratch->status); dev_dbg(&spi->dev, " mmc_spi: STOP_TRAN\n"); INIT_LIST_HEAD(&host->m.transfers); list_add(&host->early_status.transfer_list, &host->m.transfers); memset(scratch->status, 0xff, statlen); scratch->status[0] = SPI_TOKEN_STOP_TRAN; host->early_status.tx_buf = host->early_status.rx_buf; host->early_status.tx_dma = host->early_status.rx_dma; host->early_status.len = statlen; if (host->dma_dev) dma_sync_single_for_device(host->dma_dev, host->data_dma, sizeof(*scratch), DMA_BIDIRECTIONAL); tmp = spi_sync_locked(spi, &host->m); if (host->dma_dev) dma_sync_single_for_cpu(host->dma_dev, host->data_dma, sizeof(*scratch), DMA_BIDIRECTIONAL); if (tmp < 0) { if (!data->error) data->error = tmp; return; } for (tmp = 2; tmp < statlen; tmp++) { if (scratch->status[tmp] != 0) return; } tmp = mmc_spi_wait_unbusy(host, timeout); if (tmp < 0 && !data->error) data->error = tmp; } } static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct mmc_spi_host *host = mmc_priv(mmc); int status = -EINVAL; int crc_retry = 5; struct mmc_command stop; #ifdef DEBUG { struct mmc_command *cmd; int invalid = 0; cmd = mrq->cmd; if (!mmc_spi_resp_type(cmd)) { dev_dbg(&host->spi->dev, "bogus command\n"); cmd->error = -EINVAL; invalid = 1; } cmd = mrq->stop; if (cmd && !mmc_spi_resp_type(cmd)) { dev_dbg(&host->spi->dev, "bogus STOP command\n"); cmd->error = -EINVAL; invalid = 1; } if (invalid) { dump_stack(); mmc_request_done(host->mmc, mrq); return; } } #endif spi_bus_lock(host->spi->master); crc_recover: status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL); if (status == 0 && mrq->data) { mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz); if (mrq->data->error == -EILSEQ && crc_retry) { stop.opcode = MMC_STOP_TRANSMISSION; stop.arg = 0; stop.flags = MMC_RSP_SPI_R1B | 
MMC_RSP_R1B | MMC_CMD_AC; status = mmc_spi_command_send(host, mrq, &stop, 0); crc_retry--; mrq->data->error = 0; goto crc_recover; } if (mrq->stop) status = mmc_spi_command_send(host, mrq, mrq->stop, 0); else mmc_cs_off(host); } spi_bus_unlock(host->spi->master); mmc_request_done(host->mmc, mrq); } static void mmc_spi_initsequence(struct mmc_spi_host *host) { mmc_spi_wait_unbusy(host, r1b_timeout); mmc_spi_readbytes(host, 10); host->spi->mode |= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { dev_warn(&host->spi->dev, "can't change chip-select polarity\n"); host->spi->mode &= ~SPI_CS_HIGH; } else { mmc_spi_readbytes(host, 18); host->spi->mode &= ~SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { dev_err(&host->spi->dev, "can't restore chip-select polarity\n"); } } } static char *mmc_powerstring(u8 power_mode) { switch (power_mode) { case MMC_POWER_OFF: return "off"; case MMC_POWER_UP: return "up"; case MMC_POWER_ON: return "on"; } return "?"; } static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mmc_spi_host *host = mmc_priv(mmc); if (host->power_mode != ios->power_mode) { int canpower; canpower = host->pdata && host->pdata->setpower; dev_dbg(&host->spi->dev, "mmc_spi: power %s (%d)%s\n", mmc_powerstring(ios->power_mode), ios->vdd, canpower ? 
", can switch" : ""); if (canpower) { switch (ios->power_mode) { case MMC_POWER_OFF: case MMC_POWER_UP: host->pdata->setpower(&host->spi->dev, ios->vdd); if (ios->power_mode == MMC_POWER_UP) msleep(host->powerup_msecs); } } if (ios->power_mode == MMC_POWER_ON) mmc_spi_initsequence(host); if (canpower && ios->power_mode == MMC_POWER_OFF) { int mres; u8 nullbyte = 0; host->spi->mode &= ~(SPI_CPOL|SPI_CPHA); mres = spi_setup(host->spi); if (mres < 0) dev_dbg(&host->spi->dev, "switch to SPI mode 0 failed\n"); if (spi_write(host->spi, &nullbyte, 1) < 0) dev_dbg(&host->spi->dev, "put spi signals to low failed\n"); /* * Now clock should be low due to spi mode 0; * MOSI should be low because of written 0x00; * chipselect should be low (it is active low) * power supply is off, so now MMC is off too! * * FIXME no, chipselect can be high since the * device is inactive and SPI_CS_HIGH is clear... */ msleep(10); if (mres == 0) { host->spi->mode |= (SPI_CPOL|SPI_CPHA); mres = spi_setup(host->spi); if (mres < 0) dev_dbg(&host->spi->dev, "switch back to SPI mode 3" " failed\n"); } } host->power_mode = ios->power_mode; } if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) { int status; host->spi->max_speed_hz = ios->clock; status = spi_setup(host->spi); dev_dbg(&host->spi->dev, "mmc_spi: clock to %d Hz, %d\n", host->spi->max_speed_hz, status); } } static int mmc_spi_get_ro(struct mmc_host *mmc) { struct mmc_spi_host *host = mmc_priv(mmc); if (host->pdata && host->pdata->get_ro) return !!host->pdata->get_ro(mmc->parent); return -ENOSYS; } static int mmc_spi_get_cd(struct mmc_host *mmc) { struct mmc_spi_host *host = mmc_priv(mmc); if (host->pdata && host->pdata->get_cd) return !!host->pdata->get_cd(mmc->parent); return -ENOSYS; } static const struct mmc_host_ops mmc_spi_ops = { .request = mmc_spi_request, .set_ios = mmc_spi_set_ios, .get_ro = mmc_spi_get_ro, .get_cd = mmc_spi_get_cd, }; static irqreturn_t mmc_spi_detect_irq(int irq, void *mmc) { struct mmc_spi_host *host = 
mmc_priv(mmc); u16 delay_msec = max(host->pdata->detect_delay, (u16)100); mmc_detect_change(mmc, msecs_to_jiffies(delay_msec)); return IRQ_HANDLED; } static int mmc_spi_probe(struct spi_device *spi) { void *ones; struct mmc_host *mmc; struct mmc_spi_host *host; int status; if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) return -EINVAL; if (spi->mode != SPI_MODE_3) spi->mode = SPI_MODE_0; spi->bits_per_word = 8; status = spi_setup(spi); if (status < 0) { dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n", spi->mode, spi->max_speed_hz / 1000, status); return status; } status = -ENOMEM; ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL); if (!ones) goto nomem; memset(ones, 0xff, MMC_SPI_BLOCKSIZE); mmc = mmc_alloc_host(sizeof(*host), &spi->dev); if (!mmc) goto nomem; mmc->ops = &mmc_spi_ops; mmc->max_blk_size = MMC_SPI_BLOCKSIZE; mmc->max_segs = MMC_SPI_BLOCKSATONCE; mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE; mmc->max_blk_count = MMC_SPI_BLOCKSATONCE; mmc->caps = MMC_CAP_SPI; mmc->f_min = 400000; mmc->f_max = spi->max_speed_hz; host = mmc_priv(mmc); host->mmc = mmc; host->spi = spi; host->ones = ones; host->pdata = mmc_spi_get_pdata(spi); if (host->pdata) mmc->ocr_avail = host->pdata->ocr_mask; if (!mmc->ocr_avail) { dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n"); mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; } if (host->pdata && host->pdata->setpower) { host->powerup_msecs = host->pdata->powerup_msecs; if (!host->powerup_msecs || host->powerup_msecs > 250) host->powerup_msecs = 250; } dev_set_drvdata(&spi->dev, mmc); host->data = kmalloc(sizeof(*host->data), GFP_KERNEL); if (!host->data) goto fail_nobuf1; if (spi->master->dev.parent->dma_mask) { struct device *dev = spi->master->dev.parent; host->dma_dev = dev; host->ones_dma = dma_map_single(dev, ones, MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data), DMA_BIDIRECTIONAL); dma_sync_single_for_cpu(host->dma_dev, host->data_dma, 
sizeof(*host->data), DMA_BIDIRECTIONAL); } spi_message_init(&host->readback); host->readback.is_dma_mapped = (host->dma_dev != NULL); spi_message_add_tail(&host->status, &host->readback); host->status.tx_buf = host->ones; host->status.tx_dma = host->ones_dma; host->status.rx_buf = &host->data->status; host->status.rx_dma = host->data_dma + offsetof(struct scratch, status); host->status.cs_change = 1; if (host->pdata && host->pdata->init) { status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc); if (status != 0) goto fail_glue_init; } if (host->pdata) mmc->caps |= host->pdata->caps; status = mmc_add_host(mmc); if (status != 0) goto fail_add_host; dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", dev_name(&mmc->class_dev), host->dma_dev ? "" : ", no DMA", (host->pdata && host->pdata->get_ro) ? "" : ", no WP", (host->pdata && host->pdata->setpower) ? "" : ", no poweroff", (mmc->caps & MMC_CAP_NEEDS_POLL) ? ", cd polling" : ""); return 0; fail_add_host: mmc_remove_host (mmc); fail_glue_init: if (host->dma_dev) dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data), DMA_BIDIRECTIONAL); kfree(host->data); fail_nobuf1: mmc_free_host(mmc); mmc_spi_put_pdata(spi); dev_set_drvdata(&spi->dev, NULL); nomem: kfree(ones); return status; } static int __devexit mmc_spi_remove(struct spi_device *spi) { struct mmc_host *mmc = dev_get_drvdata(&spi->dev); struct mmc_spi_host *host; if (mmc) { host = mmc_priv(mmc); if (host->pdata && host->pdata->exit) host->pdata->exit(&spi->dev, mmc); mmc_remove_host(mmc); if (host->dma_dev) { dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data), DMA_BIDIRECTIONAL); } kfree(host->data); kfree(host->ones); spi->max_speed_hz = mmc->f_max; mmc_free_host(mmc); mmc_spi_put_pdata(spi); dev_set_drvdata(&spi->dev, NULL); } return 0; } static struct of_device_id mmc_spi_of_match_table[] __devinitdata = { { .compatible = "mmc-spi-slot", }, {}, 
}; static struct spi_driver mmc_spi_driver = { .driver = { .name = "mmc_spi", .owner = THIS_MODULE, .of_match_table = mmc_spi_of_match_table, }, .probe = mmc_spi_probe, .remove = __devexit_p(mmc_spi_remove), }; static int __init mmc_spi_init(void) { return spi_register_driver(&mmc_spi_driver); } module_init(mmc_spi_init); static void __exit mmc_spi_exit(void) { spi_unregister_driver(&mmc_spi_driver); } module_exit(mmc_spi_exit); MODULE_AUTHOR("Mike Lavender, David Brownell, " "Hans-Peter Nilsson, Jan Nikitenko"); MODULE_DESCRIPTION("SPI SD/MMC host driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:mmc_spi");
gpl-2.0
UO-CAES/paparazzi
sw/airborne/test/ahrs/run_ahrs_on_synth_ivy.c
34
2331
#include <glib.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <Ivy/ivy.h> #include <Ivy/ivyglibloop.h> #include "test/ahrs/ahrs_on_synth.h" #include "subsystems/imu.h" #include "subsystems/ahrs.h" #include "math/pprz_algebra_float.h" #include "math/pprz_algebra_int.h" #include "test/pprz_algebra_print.h" gboolean timeout_callback(gpointer data) { for (int i = 0; i < 20; i++) { aos_compute_state(); aos_compute_sensors(); #ifndef DISABLE_PROPAGATE ahrs_propagate(aos.dt); #endif #ifndef DISABLE_ACCEL_UPDATE ahrs_update_accel(); #endif #ifndef DISABLE_MAG_UPDATE if (!(i % 5)) { ahrs_update_mag(); } #endif } #if AHRS_TYPE == AHRS_TYPE_ICE || AHRS_TYPE == AHRS_TYPE_ICQ EULERS_FLOAT_OF_BFP(ahrs_float.ltp_to_imu_euler, ahrs.ltp_to_imu_euler); #endif #if AHRS_TYPE == AHRS_TYPE_ICQ IvySendMsg("183 AHRS_GYRO_BIAS_INT %d %d %d", ahrs_impl.gyro_bias.p, ahrs_impl.gyro_bias.q, ahrs_impl.gyro_bias.r); #endif #if AHRS_TYPE == AHRS_TYPE_FLQ || AHRS_TYPE == AHRS_TYPE_FCR2 struct Int32Rates bias_i; RATES_BFP_OF_REAL(bias_i, ahrs_impl.gyro_bias); IvySendMsg("183 AHRS_GYRO_BIAS_INT %d %d %d", bias_i.p, bias_i.q, bias_i.r); #endif IvySendMsg("183 AHRS_EULER %f %f %f", ahrs_float.ltp_to_imu_euler.phi, ahrs_float.ltp_to_imu_euler.theta, ahrs_float.ltp_to_imu_euler.psi); IvySendMsg("183 NPS_RATE_ATTITUDE %f %f %f %f %f %f", DegOfRad(aos.imu_rates.p), DegOfRad(aos.imu_rates.q), DegOfRad(aos.imu_rates.r), DegOfRad(aos.ltp_to_imu_euler.phi), DegOfRad(aos.ltp_to_imu_euler.theta), DegOfRad(aos.ltp_to_imu_euler.psi)); IvySendMsg("183 NPS_GYRO_BIAS %f %f %f", DegOfRad(aos.gyro_bias.p), DegOfRad(aos.gyro_bias.q), DegOfRad(aos.gyro_bias.r)); return TRUE; } int main(int argc, char **argv) { printf("hello\n"); g_timeout_add(1000 / 25, timeout_callback, NULL); GMainLoop *ml = g_main_loop_new(NULL, FALSE); IvyInit("test_ahrs", "test_ahrs READY", NULL, NULL, NULL, NULL); IvyStart("127.255.255.255"); imu_init(); ahrs_init(); aos_init(); g_main_loop_run(ml); return 
0; }
gpl-2.0
invisiblek/android_kernel_htc_m8
arch/arm/mach-tegra/board-trimslice.c
34
4470
/* * arch/arm/mach-tegra/board-trimslice.c * * Copyright (C) 2011 CompuLab, Ltd. * Author: Mike Rapoport <mike@compulab.co.il> * * Based on board-harmony.c * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <asm/hardware/gic.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/setup.h> #include <mach/iomap.h> #include <mach/sdhci.h> #include "board.h" #include "clock.h" #include "devices.h" #include "gpio-names.h" #include "board-trimslice.h" static struct plat_serial8250_port debug_uart_platform_data[] = { { .membase = IO_ADDRESS(TEGRA_UARTA_BASE), .mapbase = TEGRA_UARTA_BASE, .irq = INT_UARTA, .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE, .type = PORT_TEGRA, .iotype = UPIO_MEM, .regshift = 2, .uartclk = 216000000, }, { .flags = 0 } }; static struct platform_device debug_uart = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = debug_uart_platform_data, }, }; static struct tegra_sdhci_platform_data sdhci_pdata1 = { .cd_gpio = -1, .wp_gpio = -1, .power_gpio = -1, }; static struct tegra_sdhci_platform_data sdhci_pdata4 = { .cd_gpio = TRIMSLICE_GPIO_SD4_CD, .wp_gpio = TRIMSLICE_GPIO_SD4_WP, .power_gpio = -1, }; static struct platform_device trimslice_audio_device = { .name = "tegra-snd-trimslice", .id = 0, }; static struct platform_device *trimslice_devices[] __initdata = { &debug_uart, 
&tegra_sdhci_device1, &tegra_sdhci_device4, &tegra_i2s_device1, &tegra_das_device, &tegra_pcm_device, &trimslice_audio_device, }; static struct i2c_board_info trimslice_i2c3_board_info[] = { { I2C_BOARD_INFO("tlv320aic23", 0x1a), }, { I2C_BOARD_INFO("em3027", 0x56), }, }; static void trimslice_i2c_init(void) { platform_device_register(&tegra_i2c_device1); platform_device_register(&tegra_i2c_device2); platform_device_register(&tegra_i2c_device3); i2c_register_board_info(2, trimslice_i2c3_board_info, ARRAY_SIZE(trimslice_i2c3_board_info)); } static void trimslice_usb_init(void) { int err; platform_device_register(&tegra_ehci3_device); platform_device_register(&tegra_ehci2_device); err = gpio_request_one(TRIMSLICE_GPIO_USB1_MODE, GPIOF_OUT_INIT_HIGH, "usb1mode"); if (err) { pr_err("TrimSlice: failed to obtain USB1 mode gpio: %d\n", err); return; } platform_device_register(&tegra_ehci1_device); } static void __init tegra_trimslice_fixup(struct tag *tags, char **cmdline, struct meminfo *mi) { mi->nr_banks = 2; mi->bank[0].start = PHYS_OFFSET; mi->bank[0].size = 448 * SZ_1M; mi->bank[1].start = SZ_512M; mi->bank[1].size = SZ_512M; } static __initdata struct tegra_clk_init_table trimslice_clk_init_table[] = { { "uarta", "pll_p", 216000000, true }, { "pll_a", "pll_p_out1", 56448000, true }, { "pll_a_out0", "pll_a", 11289600, true }, { "cdev1", NULL, 0, true }, { "i2s1", "pll_a_out0", 11289600, false}, { NULL, NULL, 0, 0}, }; static int __init tegra_trimslice_pci_init(void) { if (!machine_is_trimslice()) return 0; return tegra_pcie_init(true, true); } subsys_initcall(tegra_trimslice_pci_init); static void __init tegra_trimslice_init(void) { tegra_clk_init_from_table(trimslice_clk_init_table); trimslice_pinmux_init(); tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1; tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4; platform_add_devices(trimslice_devices, ARRAY_SIZE(trimslice_devices)); trimslice_i2c_init(); trimslice_usb_init(); } MACHINE_START(TRIMSLICE, 
"trimslice") .atag_offset = 0x100, .fixup = tegra_trimslice_fixup, .map_io = tegra_map_common_io, .init_early = tegra20_init_early, .init_irq = tegra_init_irq, .handle_irq = gic_handle_irq, .timer = &tegra_timer, .init_machine = tegra_trimslice_init, .restart = tegra_assert_system_reset, MACHINE_END
gpl-2.0
flar2/m8-GPE
arch/mips/math-emu/dp_div.c
34
4078
/* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" ieee754dp ieee754dp_div(ieee754dp x, ieee754dp y) { COMPXDP; COMPYDP; EXPLODEXDP; EXPLODEYDP; CLEARCX; FLUSHXDP; FLUSHYDP; switch (CLPAIR(xc, yc)) { case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_nanxcpt(ieee754dp_indef(), "div", x, y); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return y; case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case 
CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): return x; case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_xcpt(ieee754dp_indef(), "div", x, y); case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): return ieee754dp_zero(xs ^ ys); case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): return ieee754dp_inf(xs ^ ys); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_xcpt(ieee754dp_indef(), "div", x, y); case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): SETCX(IEEE754_ZERO_DIVIDE); return ieee754dp_xcpt(ieee754dp_inf(xs ^ ys), "div", x, y); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): return ieee754dp_zero(xs == ys ? 0 : 1); case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): DPDNORMY; break; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): DPDNORMX; break; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): break; } assert(xm & DP_HIDDEN_BIT); assert(ym & DP_HIDDEN_BIT); xm <<= 3; ym <<= 3; { u64 rm = 0; int re = xe - ye; u64 bm; for (bm = DP_MBIT(DP_MBITS + 2); bm; bm >>= 1) { if (xm >= ym) { xm -= ym; rm |= bm; if (xm == 0) break; } xm <<= 1; } rm <<= 1; if (xm) rm |= 1; assert(rm); while ((rm >> (DP_MBITS + 3)) == 0) { rm <<= 1; re--; } DPNORMRET2(xs == ys ? 0 : 1, re, rm, "div", x, y); } }
gpl-2.0
maxrdlf95/htc_m8_maxkernel
fs/btrfs/export.c
34
7836
#include <linux/fs.h> #include <linux/types.h> #include "ctree.h" #include "disk-io.h" #include "btrfs_inode.h" #include "print-tree.h" #include "export.h" #include "compat.h" #define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, \ parent_objectid) / 4) #define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, \ parent_root_objectid) / 4) #define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4) static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, int connectable) { struct btrfs_fid *fid = (struct btrfs_fid *)fh; struct inode *inode = dentry->d_inode; int len = *max_len; int type; if (connectable && (len < BTRFS_FID_SIZE_CONNECTABLE)) { *max_len = BTRFS_FID_SIZE_CONNECTABLE; return 255; } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) { *max_len = BTRFS_FID_SIZE_NON_CONNECTABLE; return 255; } len = BTRFS_FID_SIZE_NON_CONNECTABLE; type = FILEID_BTRFS_WITHOUT_PARENT; fid->objectid = btrfs_ino(inode); fid->root_objectid = BTRFS_I(inode)->root->objectid; fid->gen = inode->i_generation; if (connectable && !S_ISDIR(inode->i_mode)) { struct inode *parent; u64 parent_root_id; spin_lock(&dentry->d_lock); parent = dentry->d_parent->d_inode; fid->parent_objectid = BTRFS_I(parent)->location.objectid; fid->parent_gen = parent->i_generation; parent_root_id = BTRFS_I(parent)->root->objectid; spin_unlock(&dentry->d_lock); if (parent_root_id != fid->root_objectid) { fid->parent_root_objectid = parent_root_id; len = BTRFS_FID_SIZE_CONNECTABLE_ROOT; type = FILEID_BTRFS_WITH_PARENT_ROOT; } else { len = BTRFS_FID_SIZE_CONNECTABLE; type = FILEID_BTRFS_WITH_PARENT; } } *max_len = len; return type; } static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, u64 root_objectid, u32 generation, int check_generation) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *root; struct inode *inode; struct btrfs_key key; int index; int err = 0; if (objectid < BTRFS_FIRST_FREE_OBJECTID) return ERR_PTR(-ESTALE); 
key.objectid = root_objectid; btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); key.offset = (u64)-1; index = srcu_read_lock(&fs_info->subvol_srcu); root = btrfs_read_fs_root_no_name(fs_info, &key); if (IS_ERR(root)) { err = PTR_ERR(root); goto fail; } if (btrfs_root_refs(&root->root_item) == 0) { err = -ENOENT; goto fail; } key.objectid = objectid; btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); key.offset = 0; inode = btrfs_iget(sb, &key, root, NULL); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto fail; } srcu_read_unlock(&fs_info->subvol_srcu, index); if (check_generation && generation != inode->i_generation) { iput(inode); return ERR_PTR(-ESTALE); } return d_obtain_alias(inode); fail: srcu_read_unlock(&fs_info->subvol_srcu, index); return ERR_PTR(err); } static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh, int fh_len, int fh_type) { struct btrfs_fid *fid = (struct btrfs_fid *) fh; u64 objectid, root_objectid; u32 generation; if (fh_type == FILEID_BTRFS_WITH_PARENT) { if (fh_len != BTRFS_FID_SIZE_CONNECTABLE) return NULL; root_objectid = fid->root_objectid; } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) { if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) return NULL; root_objectid = fid->parent_root_objectid; } else return NULL; objectid = fid->parent_objectid; generation = fid->parent_gen; return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1); } static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, int fh_len, int fh_type) { struct btrfs_fid *fid = (struct btrfs_fid *) fh; u64 objectid, root_objectid; u32 generation; if ((fh_type != FILEID_BTRFS_WITH_PARENT || fh_len != BTRFS_FID_SIZE_CONNECTABLE) && (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT || fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) && (fh_type != FILEID_BTRFS_WITHOUT_PARENT || fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE)) return NULL; objectid = fid->objectid; root_objectid = fid->root_objectid; generation = fid->gen; return 
btrfs_get_dentry(sb, objectid, root_objectid, generation, 1); } static struct dentry *btrfs_get_parent(struct dentry *child) { struct inode *dir = child->d_inode; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_root_ref *ref; struct btrfs_key key; struct btrfs_key found_key; int ret; path = btrfs_alloc_path(); if (!path) return ERR_PTR(-ENOMEM); if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = (u64)-1; root = root->fs_info->tree_root; } else { key.objectid = btrfs_ino(dir); key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; } ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto fail; BUG_ON(ret == 0); if (path->slots[0] == 0) { ret = -ENOENT; goto fail; } path->slots[0]--; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != key.objectid || found_key.type != key.type) { ret = -ENOENT; goto fail; } if (found_key.type == BTRFS_ROOT_BACKREF_KEY) { ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); key.objectid = btrfs_root_ref_dirid(leaf, ref); } else { key.objectid = found_key.offset; } btrfs_free_path(path); if (found_key.type == BTRFS_ROOT_BACKREF_KEY) { return btrfs_get_dentry(root->fs_info->sb, key.objectid, found_key.offset, 0, 0); } key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); fail: btrfs_free_path(path); return ERR_PTR(ret); } static int btrfs_get_name(struct dentry *parent, char *name, struct dentry *child) { struct inode *inode = child->d_inode; struct inode *dir = parent->d_inode; struct btrfs_path *path; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_inode_ref *iref; struct btrfs_root_ref *rref; struct extent_buffer *leaf; unsigned long name_ptr; struct btrfs_key key; int name_len; int ret; u64 ino; if (!dir || !inode) return 
-EINVAL; if (!S_ISDIR(dir->i_mode)) return -EINVAL; ino = btrfs_ino(inode); path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->leave_spinning = 1; if (ino == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = BTRFS_I(inode)->root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = (u64)-1; root = root->fs_info->tree_root; } else { key.objectid = ino; key.offset = btrfs_ino(dir); key.type = BTRFS_INODE_REF_KEY; } ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) { btrfs_free_path(path); return ret; } else if (ret > 0) { if (ino == BTRFS_FIRST_FREE_OBJECTID) { path->slots[0]--; } else { btrfs_free_path(path); return -ENOENT; } } leaf = path->nodes[0]; if (ino == BTRFS_FIRST_FREE_OBJECTID) { rref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); name_ptr = (unsigned long)(rref + 1); name_len = btrfs_root_ref_name_len(leaf, rref); } else { iref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_ref); name_ptr = (unsigned long)(iref + 1); name_len = btrfs_inode_ref_name_len(leaf, iref); } read_extent_buffer(leaf, name, name_ptr, name_len); btrfs_free_path(path); name[name_len] = '\0'; return 0; } const struct export_operations btrfs_export_ops = { .encode_fh = btrfs_encode_fh, .fh_to_dentry = btrfs_fh_to_dentry, .fh_to_parent = btrfs_fh_to_parent, .get_parent = btrfs_get_parent, .get_name = btrfs_get_name, };
gpl-2.0
h8rift/android_kernel_htc_m8att
drivers/mtd/ubi/gluebi.c
34
9727
/* * Copyright (c) International Business Machines Corp., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Author: Artem Bityutskiy (Битюцкий Артём), Joern Engel */ #include <linux/err.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/math64.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/mtd/ubi.h> #include <linux/mtd/mtd.h> #include "ubi-media.h" #define err_msg(fmt, ...) 
\ printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \ current->pid, __func__, ##__VA_ARGS__) struct gluebi_device { struct mtd_info mtd; int refcnt; struct ubi_volume_desc *desc; int ubi_num; int vol_id; struct list_head list; }; static LIST_HEAD(gluebi_devices); static DEFINE_MUTEX(devices_mutex); static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id) { struct gluebi_device *gluebi; list_for_each_entry(gluebi, &gluebi_devices, list) if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id) return gluebi; return NULL; } static int gluebi_get_device(struct mtd_info *mtd) { struct gluebi_device *gluebi; int ubi_mode = UBI_READONLY; if (!try_module_get(THIS_MODULE)) return -ENODEV; if (mtd->flags & MTD_WRITEABLE) ubi_mode = UBI_READWRITE; gluebi = container_of(mtd, struct gluebi_device, mtd); mutex_lock(&devices_mutex); if (gluebi->refcnt > 0) { gluebi->refcnt += 1; mutex_unlock(&devices_mutex); return 0; } gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id, ubi_mode); if (IS_ERR(gluebi->desc)) { mutex_unlock(&devices_mutex); module_put(THIS_MODULE); return PTR_ERR(gluebi->desc); } gluebi->refcnt += 1; mutex_unlock(&devices_mutex); return 0; } static void gluebi_put_device(struct mtd_info *mtd) { struct gluebi_device *gluebi; gluebi = container_of(mtd, struct gluebi_device, mtd); mutex_lock(&devices_mutex); gluebi->refcnt -= 1; if (gluebi->refcnt == 0) ubi_close_volume(gluebi->desc); module_put(THIS_MODULE); mutex_unlock(&devices_mutex); } static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, unsigned char *buf) { int err = 0, lnum, offs, total_read; struct gluebi_device *gluebi; gluebi = container_of(mtd, struct gluebi_device, mtd); lnum = div_u64_rem(from, mtd->erasesize, &offs); total_read = len; while (total_read) { size_t to_read = mtd->erasesize - offs; if (to_read > total_read) to_read = total_read; err = ubi_read(gluebi->desc, lnum, buf, offs, to_read); if (err) break; lnum += 1; offs = 0; 
total_read -= to_read; buf += to_read; } *retlen = len - total_read; return err; } /** * gluebi_write - write operation of emulated MTD devices. * @mtd: MTD device description object * @to: absolute offset where to write * @len: how many bytes to write * @retlen: count of written bytes is returned here * @buf: buffer with data to write * * This function returns zero in case of success and a negative error code in * case of failure. */ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { int err = 0, lnum, offs, total_written; struct gluebi_device *gluebi; gluebi = container_of(mtd, struct gluebi_device, mtd); lnum = div_u64_rem(to, mtd->erasesize, &offs); if (len % mtd->writesize || offs % mtd->writesize) return -EINVAL; total_written = len; while (total_written) { size_t to_write = mtd->erasesize - offs; if (to_write > total_written) to_write = total_written; err = ubi_write(gluebi->desc, lnum, buf, offs, to_write); if (err) break; lnum += 1; offs = 0; total_written -= to_write; buf += to_write; } *retlen = len - total_written; return err; } static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) { int err, i, lnum, count; struct gluebi_device *gluebi; if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) return -EINVAL; lnum = mtd_div_by_eb(instr->addr, mtd); count = mtd_div_by_eb(instr->len, mtd); gluebi = container_of(mtd, struct gluebi_device, mtd); for (i = 0; i < count - 1; i++) { err = ubi_leb_unmap(gluebi->desc, lnum + i); if (err) goto out_err; } err = ubi_leb_erase(gluebi->desc, lnum + i); if (err) goto out_err; instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); return 0; out_err: instr->state = MTD_ERASE_FAILED; instr->fail_addr = (long long)lnum * mtd->erasesize; return err; } static int gluebi_create(struct ubi_device_info *di, struct ubi_volume_info *vi) { struct gluebi_device *gluebi, *g; struct mtd_info *mtd; gluebi = kzalloc(sizeof(struct gluebi_device), 
GFP_KERNEL); if (!gluebi) return -ENOMEM; mtd = &gluebi->mtd; mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL); if (!mtd->name) { kfree(gluebi); return -ENOMEM; } gluebi->vol_id = vi->vol_id; gluebi->ubi_num = vi->ubi_num; mtd->type = MTD_UBIVOLUME; if (!di->ro_mode) mtd->flags = MTD_WRITEABLE; mtd->owner = THIS_MODULE; mtd->writesize = di->min_io_size; mtd->erasesize = vi->usable_leb_size; mtd->_read = gluebi_read; mtd->_write = gluebi_write; mtd->_erase = gluebi_erase; mtd->_get_device = gluebi_get_device; mtd->_put_device = gluebi_put_device; if (vi->vol_type == UBI_DYNAMIC_VOLUME) mtd->size = (unsigned long long)vi->usable_leb_size * vi->size; else mtd->size = vi->used_bytes; mutex_lock(&devices_mutex); g = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (g) err_msg("gluebi MTD device %d form UBI device %d volume %d " "already exists", g->mtd.index, vi->ubi_num, vi->vol_id); mutex_unlock(&devices_mutex); if (mtd_device_register(mtd, NULL, 0)) { err_msg("cannot add MTD device"); kfree(mtd->name); kfree(gluebi); return -ENFILE; } mutex_lock(&devices_mutex); list_add_tail(&gluebi->list, &gluebi_devices); mutex_unlock(&devices_mutex); return 0; } static int gluebi_remove(struct ubi_volume_info *vi) { int err = 0; struct mtd_info *mtd; struct gluebi_device *gluebi; mutex_lock(&devices_mutex); gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (!gluebi) { err_msg("got remove notification for unknown UBI device %d " "volume %d", vi->ubi_num, vi->vol_id); err = -ENOENT; } else if (gluebi->refcnt) err = -EBUSY; else list_del(&gluebi->list); mutex_unlock(&devices_mutex); if (err) return err; mtd = &gluebi->mtd; err = mtd_device_unregister(mtd); if (err) { err_msg("cannot remove fake MTD device %d, UBI device %d, " "volume %d, error %d", mtd->index, gluebi->ubi_num, gluebi->vol_id, err); mutex_lock(&devices_mutex); list_add_tail(&gluebi->list, &gluebi_devices); mutex_unlock(&devices_mutex); return err; } kfree(mtd->name); kfree(gluebi); return 0; } static 
int gluebi_updated(struct ubi_volume_info *vi) { struct gluebi_device *gluebi; mutex_lock(&devices_mutex); gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (!gluebi) { mutex_unlock(&devices_mutex); err_msg("got update notification for unknown UBI device %d " "volume %d", vi->ubi_num, vi->vol_id); return -ENOENT; } if (vi->vol_type == UBI_STATIC_VOLUME) gluebi->mtd.size = vi->used_bytes; mutex_unlock(&devices_mutex); return 0; } static int gluebi_resized(struct ubi_volume_info *vi) { struct gluebi_device *gluebi; mutex_lock(&devices_mutex); gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); if (!gluebi) { mutex_unlock(&devices_mutex); err_msg("got update notification for unknown UBI device %d " "volume %d", vi->ubi_num, vi->vol_id); return -ENOENT; } gluebi->mtd.size = vi->used_bytes; mutex_unlock(&devices_mutex); return 0; } static int gluebi_notify(struct notifier_block *nb, unsigned long l, void *ns_ptr) { struct ubi_notification *nt = ns_ptr; switch (l) { case UBI_VOLUME_ADDED: gluebi_create(&nt->di, &nt->vi); break; case UBI_VOLUME_REMOVED: gluebi_remove(&nt->vi); break; case UBI_VOLUME_RESIZED: gluebi_resized(&nt->vi); break; case UBI_VOLUME_UPDATED: gluebi_updated(&nt->vi); break; default: break; } return NOTIFY_OK; } static struct notifier_block gluebi_notifier = { .notifier_call = gluebi_notify, }; static int __init ubi_gluebi_init(void) { return ubi_register_volume_notifier(&gluebi_notifier, 0); } static void __exit ubi_gluebi_exit(void) { struct gluebi_device *gluebi, *g; list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) { int err; struct mtd_info *mtd = &gluebi->mtd; err = mtd_device_unregister(mtd); if (err) err_msg("error %d while removing gluebi MTD device %d, " "UBI device %d, volume %d - ignoring", err, mtd->index, gluebi->ubi_num, gluebi->vol_id); kfree(mtd->name); kfree(gluebi); } ubi_unregister_volume_notifier(&gluebi_notifier); } module_init(ubi_gluebi_init); module_exit(ubi_gluebi_exit); MODULE_DESCRIPTION("MTD emulation 
layer over UBI volumes"); MODULE_AUTHOR("Artem Bityutskiy, Joern Engel"); MODULE_LICENSE("GPL");
gpl-2.0
Blackburn29/PsycoKernel
arch/ia64/sn/pci/tioca_provider.c
34
13140
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved. */ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/bitmap.h> #include <linux/slab.h> #include <linux/export.h> #include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> #include <asm/sn/io.h> #include <asm/sn/pcidev.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/tioca_provider.h> u32 tioca_gart_found; EXPORT_SYMBOL(tioca_gart_found); LIST_HEAD(tioca_list); EXPORT_SYMBOL(tioca_list); static int tioca_gart_init(struct tioca_kernel *); static int tioca_gart_init(struct tioca_kernel *tioca_kern) { u64 ap_reg; u64 offset; struct page *tmp; struct tioca_common *tioca_common; struct tioca __iomem *ca_base; tioca_common = tioca_kern->ca_common; ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base; if (list_empty(tioca_kern->ca_devices)) return 0; ap_reg = 0; switch (CA_APERATURE_SIZE >> 20) { case 4: ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); break; case 8: ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); break; case 16: ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); break; case 32: ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); break; case 64: ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); break; case 128: ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); break; case 256: ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); break; case 512: ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); break; case 1024: ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); break; case 2048: ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); break; case 4096: ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); break; default: printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE " "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE); return -1; } if (PAGE_SIZE >= 16384) { tioca_kern->ca_ap_pagesize = 16384; ap_reg |= CA_GART_PAGE_SIZE; } else { 
tioca_kern->ca_ap_pagesize = 4096; } tioca_kern->ca_ap_size = CA_APERATURE_SIZE; tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE; tioca_kern->ca_gart_entries = tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize; ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI); ap_reg |= tioca_kern->ca_ap_bus_base; tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64); tmp = alloc_pages_node(tioca_kern->ca_closest_node, GFP_KERNEL | __GFP_ZERO, get_order(tioca_kern->ca_gart_size)); if (!tmp) { printk(KERN_ERR "%s: Could not allocate " "%llu bytes (order %d) for GART\n", __func__, tioca_kern->ca_gart_size, get_order(tioca_kern->ca_gart_size)); return -ENOMEM; } tioca_kern->ca_gart = page_address(tmp); tioca_kern->ca_gart_coretalk_addr = PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart)); offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE; tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE; tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE; tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize; tioca_kern->ca_pcigart_base = tioca_kern->ca_gart_coretalk_addr + offset; tioca_kern->ca_pcigart = &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start]; tioca_kern->ca_pcigart_entries = tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize; tioca_kern->ca_pcigart_pagemap = kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL); if (!tioca_kern->ca_pcigart_pagemap) { free_pages((unsigned long)tioca_kern->ca_gart, get_order(tioca_kern->ca_gart_size)); return -1; } offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE; tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE; tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE; tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize; tioca_kern->ca_gfxgart_base = tioca_kern->ca_gart_coretalk_addr + offset; tioca_kern->ca_gfxgart = &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start]; tioca_kern->ca_gfxgart_entries = tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize; __sn_setq_relaxed(&ca_base->ca_control1, 
CA_AGPDMA_OP_ENB_COMBDELAY); __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM); __sn_setq_relaxed(&ca_base->ca_control2, (0x2ull << CA_GART_MEM_PARAM_SHFT)); tioca_kern->ca_gart_iscoherent = 1; __sn_clrq_relaxed(&ca_base->ca_control2, (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB)); writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias); writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias); __sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR); writeq(ap_reg, &ca_base->ca_gart_aperature); writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table); return 0; } void tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) { int cap_ptr; u32 reg; struct tioca __iomem *tioca_base; struct pci_dev *pdev; struct tioca_common *common; common = tioca_kern->ca_common; list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) continue; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); if (!cap_ptr) return; pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg); if (!(reg & PCI_AGP_STATUS_FW)) return; } list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) continue; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg); reg |= PCI_AGP_COMMAND_FW; pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg); } tioca_base = (struct tioca __iomem*)common->ca_common.bs_base; __sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE); } EXPORT_SYMBOL(tioca_fastwrite_enable); static u64 tioca_dma_d64(unsigned long paddr) { dma_addr_t bus_addr; bus_addr = PHYS_TO_TIODMA(paddr); BUG_ON(!bus_addr); BUG_ON(bus_addr >> 54); bus_addr |= (1UL << 60); return bus_addr; } static u64 tioca_dma_d48(struct pci_dev *pdev, u64 paddr) { struct tioca_common *tioca_common; struct tioca __iomem *ca_base; u64 ct_addr; dma_addr_t bus_addr; u32 node_upper; u64 agp_dma_extn; struct 
pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base; ct_addr = PHYS_TO_TIODMA(paddr); if (!ct_addr) return 0; bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL); node_upper = ct_addr >> 48; if (node_upper > 64) { printk(KERN_ERR "%s: coretalk addr 0x%p node id out " "of range\n", __func__, (void *)ct_addr); return 0; } agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn); if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { printk(KERN_ERR "%s: coretalk upper node (%u) " "mismatch with ca_agp_dma_addr_extn (%llu)\n", __func__, node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); return 0; } return bus_addr; } static dma_addr_t tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) { int ps, ps_shift, entry, entries, mapsize; u64 xio_addr, end_xio_addr; struct tioca_common *tioca_common; struct tioca_kernel *tioca_kern; dma_addr_t bus_addr = 0; struct tioca_dmamap *ca_dmamap; void *map; unsigned long flags; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; xio_addr = PHYS_TO_TIODMA(paddr); if (!xio_addr) return 0; spin_lock_irqsave(&tioca_kern->ca_lock, flags); ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC); if (!ca_dmamap) goto map_return; ps = tioca_kern->ca_ap_pagesize; ps_shift = ffs(ps) - 1; end_xio_addr = xio_addr + req_size - 1; entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1; map = tioca_kern->ca_pcigart_pagemap; mapsize = tioca_kern->ca_pcigart_entries; entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0); if (entry >= mapsize) { kfree(ca_dmamap); goto map_return; } bitmap_set(map, entry, entries); bus_addr = tioca_kern->ca_pciap_base + (entry * ps); ca_dmamap->cad_dma_addr = bus_addr; 
ca_dmamap->cad_gart_size = entries; ca_dmamap->cad_gart_entry = entry; list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps); if (xio_addr % ps) { tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); bus_addr += xio_addr & (ps - 1); xio_addr &= ~(ps - 1); xio_addr += ps; entry++; } while (xio_addr < end_xio_addr) { tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); xio_addr += ps; entry++; } tioca_tlbflush(tioca_kern); map_return: spin_unlock_irqrestore(&tioca_kern->ca_lock, flags); return bus_addr; } static void tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) { int i, entry; struct tioca_common *tioca_common; struct tioca_kernel *tioca_kern; struct tioca_dmamap *map; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); unsigned long flags; tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; if (bus_addr < tioca_kern->ca_pciap_base || bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size)) return; spin_lock_irqsave(&tioca_kern->ca_lock, flags); list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list) if (map->cad_dma_addr == bus_addr) break; BUG_ON(map == NULL); entry = map->cad_gart_entry; for (i = 0; i < map->cad_gart_size; i++, entry++) { clear_bit(entry, tioca_kern->ca_pcigart_pagemap); tioca_kern->ca_pcigart[entry] = 0; } tioca_tlbflush(tioca_kern); list_del(&map->cad_list); spin_unlock_irqrestore(&tioca_kern->ca_lock, flags); kfree(map); } static u64 tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) { u64 mapaddr; if (dma_flags & SN_DMA_MSI) return 0; if (pdev->dma_mask == ~0UL) mapaddr = tioca_dma_d64(paddr); else if (pdev->dma_mask == 0xffffffffffffUL) mapaddr = tioca_dma_d48(pdev, paddr); else mapaddr = 0; if (mapaddr == 0) mapaddr = tioca_dma_mapped(pdev, paddr, byte_count); return mapaddr; } static irqreturn_t tioca_error_intr_handler(int irq, void *arg) { struct 
tioca_common *soft = arg; struct ia64_sal_retval ret_stuff; u64 segment; u64 busnum; ret_stuff.status = 0; ret_stuff.v0 = 0; segment = soft->ca_common.bs_persist_segment; busnum = soft->ca_common.bs_persist_busnum; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT, segment, busnum, 0, 0, 0, 0, 0); return IRQ_HANDLED; } static void * tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) { struct tioca_common *tioca_common; struct tioca_kernel *tioca_kern; struct pci_bus *bus; if (is_shub1() && sn_sal_rev() < 0x0406) { printk (KERN_ERR "%s: SGI prom rev 4.06 or greater required " "for tioca support\n", __func__); return NULL; } tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common), GFP_KERNEL); if (!tioca_common) return NULL; tioca_common->ca_common.bs_base = (unsigned long) ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base), sizeof(struct tioca_common)); tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL); if (!tioca_kern) { kfree(tioca_common); return NULL; } tioca_kern->ca_common = tioca_common; spin_lock_init(&tioca_kern->ca_lock); INIT_LIST_HEAD(&tioca_kern->ca_dmamaps); tioca_kern->ca_closest_node = nasid_to_cnodeid(tioca_common->ca_closest_nasid); tioca_common->ca_kernel_private = (u64) tioca_kern; bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment, tioca_common->ca_common.bs_persist_busnum); BUG_ON(!bus); tioca_kern->ca_devices = &bus->devices; if (tioca_gart_init(tioca_kern) < 0) { kfree(tioca_kern); kfree(tioca_common); return NULL; } tioca_gart_found++; list_add(&tioca_kern->ca_list, &tioca_list); if (request_irq(SGI_TIOCA_ERROR, tioca_error_intr_handler, IRQF_SHARED, "TIOCA error", (void *)tioca_common)) printk(KERN_WARNING "%s: Unable to get irq %d. 
" "Error interrupts won't be routed for TIOCA bus %d\n", __func__, SGI_TIOCA_ERROR, (int)tioca_common->ca_common.bs_persist_busnum); irq_set_handler(SGI_TIOCA_ERROR, handle_level_irq); sn_set_err_irq_affinity(SGI_TIOCA_ERROR); controller->node = tioca_kern->ca_closest_node; return tioca_common; } static struct sn_pcibus_provider tioca_pci_interfaces = { .dma_map = tioca_dma_map, .dma_map_consistent = tioca_dma_map, .dma_unmap = tioca_dma_unmap, .bus_fixup = tioca_bus_fixup, .force_interrupt = NULL, .target_interrupt = NULL }; int tioca_init_provider(void) { sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces; return 0; }
gpl-2.0
jfdsmabalot/kernel_sense_m8
drivers/i2c/busses/i2c-simtec.c
34
3836
/* * Copyright (C) 2005 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * Simtec Generic I2C Controller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> struct simtec_i2c_data { struct resource *ioarea; void __iomem *reg; struct i2c_adapter adap; struct i2c_algo_bit_data bit; }; #define CMD_SET_SDA (1<<2) #define CMD_SET_SCL (1<<3) #define STATE_SDA (1<<0) #define STATE_SCL (1<<1) static void simtec_i2c_setsda(void *pw, int state) { struct simtec_i2c_data *pd = pw; writeb(CMD_SET_SDA | (state ? STATE_SDA : 0), pd->reg); } static void simtec_i2c_setscl(void *pw, int state) { struct simtec_i2c_data *pd = pw; writeb(CMD_SET_SCL | (state ? STATE_SCL : 0), pd->reg); } static int simtec_i2c_getsda(void *pw) { struct simtec_i2c_data *pd = pw; return readb(pd->reg) & STATE_SDA ? 1 : 0; } static int simtec_i2c_getscl(void *pw) { struct simtec_i2c_data *pd = pw; return readb(pd->reg) & STATE_SCL ? 
1 : 0; } static int simtec_i2c_probe(struct platform_device *dev) { struct simtec_i2c_data *pd; struct resource *res; int size; int ret; pd = kzalloc(sizeof(struct simtec_i2c_data), GFP_KERNEL); if (pd == NULL) { dev_err(&dev->dev, "cannot allocate private data\n"); return -ENOMEM; } platform_set_drvdata(dev, pd); res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&dev->dev, "cannot find IO resource\n"); ret = -ENOENT; goto err; } size = resource_size(res); pd->ioarea = request_mem_region(res->start, size, dev->name); if (pd->ioarea == NULL) { dev_err(&dev->dev, "cannot request IO\n"); ret = -ENXIO; goto err; } pd->reg = ioremap(res->start, size); if (pd->reg == NULL) { dev_err(&dev->dev, "cannot map IO\n"); ret = -ENXIO; goto err_res; } pd->adap.owner = THIS_MODULE; pd->adap.algo_data = &pd->bit; pd->adap.dev.parent = &dev->dev; strlcpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name)); pd->bit.data = pd; pd->bit.setsda = simtec_i2c_setsda; pd->bit.setscl = simtec_i2c_setscl; pd->bit.getsda = simtec_i2c_getsda; pd->bit.getscl = simtec_i2c_getscl; pd->bit.timeout = HZ; pd->bit.udelay = 20; ret = i2c_bit_add_bus(&pd->adap); if (ret) goto err_all; return 0; err_all: iounmap(pd->reg); err_res: release_resource(pd->ioarea); kfree(pd->ioarea); err: kfree(pd); return ret; } static int simtec_i2c_remove(struct platform_device *dev) { struct simtec_i2c_data *pd = platform_get_drvdata(dev); i2c_del_adapter(&pd->adap); iounmap(pd->reg); release_resource(pd->ioarea); kfree(pd->ioarea); kfree(pd); return 0; } static struct platform_driver simtec_i2c_driver = { .driver = { .name = "simtec-i2c", .owner = THIS_MODULE, }, .probe = simtec_i2c_probe, .remove = simtec_i2c_remove, }; module_platform_driver(simtec_i2c_driver); MODULE_DESCRIPTION("Simtec Generic I2C Bus driver"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:simtec-i2c");
gpl-2.0
giorgio130/KK_kernel
sound/soc/mxs/mxs-devb-spdif.c
34
2279
/* * ASoC driver for MXS Evk development board * * Copyright (C) 2008-2010 Freescale Semiconductor, Inc. * * based on stmp3780_devb_spdif.c * * Vladimir Barinov <vbarinov@embeddedalley.com> * * Copyright 2008 SigmaTel, Inc * Copyright 2008 Embedded Alley Solutions, Inc * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <asm/mach-types.h> #include <asm/dma.h> #include <mach/hardware.h> #include "mxs-spdif-dai.h" #include "../codecs/mxs_spdif.h" #include "mxs-pcm.h" /* mxs devb digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link mxs_devb_dai = { .name = "MXS SPDIF", .stream_name = "MXS SPDIF", .cpu_dai = &mxs_spdif_dai, .codec_dai = &mxs_spdif_codec_dai, }; /* mxs devb audio machine driver */ static struct snd_soc_card snd_soc_machine_mxs_devb = { .name = "mxs-evk", .platform = &mxs_soc_platform, .dai_link = &mxs_devb_dai, .num_links = 1, }; /* mxs devb audio subsystem */ static struct snd_soc_device mxs_devb_snd_devdata = { .card = &snd_soc_machine_mxs_devb, .codec_dev = &soc_spdif_codec_dev_mxs, }; static struct platform_device *mxs_devb_snd_device; static int __init mxs_devb_init(void) { int ret = 0; mxs_devb_snd_device = platform_device_alloc("soc-audio", 2); if (!mxs_devb_snd_device) return -ENOMEM; platform_set_drvdata(mxs_devb_snd_device, &mxs_devb_snd_devdata); mxs_devb_snd_devdata.dev = &mxs_devb_snd_device->dev; mxs_devb_snd_device->dev.platform_data = &mxs_devb_snd_devdata; ret = platform_device_add(mxs_devb_snd_device); if (ret) platform_device_put(mxs_devb_snd_device); return ret; } static void __exit mxs_devb_exit(void) { 
platform_device_unregister(mxs_devb_snd_device); } module_init(mxs_devb_init); module_exit(mxs_devb_exit); MODULE_AUTHOR("Vladimir Barinov"); MODULE_DESCRIPTION("MXS EVK development board ASoC driver"); MODULE_LICENSE("GPL");
gpl-2.0
GuneetAtwal/kernel_m8
drivers/gpu/drm/mga/mga_warp.c
34
4761
/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*- * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com * * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: * Gareth Hughes <gareth@valinux.com> */ #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/platform_device.h> #include <linux/module.h> #include "drmP.h" #include "drm.h" #include "mga_drm.h" #include "mga_drv.h" #define FIRMWARE_G200 "matrox/g200_warp.fw" #define FIRMWARE_G400 "matrox/g400_warp.fw" MODULE_FIRMWARE(FIRMWARE_G200); MODULE_FIRMWARE(FIRMWARE_G400); #define MGA_WARP_CODE_ALIGN 256 #define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN) int mga_warp_install_microcode(drm_mga_private_t *dev_priv) { unsigned char *vcbase = dev_priv->warp->handle; unsigned long pcbase = dev_priv->warp->offset; const char *firmware_name; struct platform_device *pdev; const struct firmware *fw = NULL; const struct ihex_binrec *rec; unsigned int size; int n_pipes, where; int rc = 0; switch (dev_priv->chipset) { case MGA_CARD_TYPE_G400: case MGA_CARD_TYPE_G550: firmware_name = FIRMWARE_G400; n_pipes = MGA_MAX_G400_PIPES; break; case MGA_CARD_TYPE_G200: firmware_name = FIRMWARE_G200; n_pipes = MGA_MAX_G200_PIPES; break; default: return -EINVAL; } pdev = platform_device_register_simple("mga_warp", 0, NULL, 0); if (IS_ERR(pdev)) { DRM_ERROR("mga: Failed to register microcode\n"); return PTR_ERR(pdev); } rc = request_ihex_firmware(&fw, firmware_name, &pdev->dev); platform_device_unregister(pdev); if (rc) { DRM_ERROR("mga: Failed to load microcode \"%s\"\n", firmware_name); return rc; } size = 0; where = 0; for (rec = (const struct ihex_binrec *)fw->data; rec; rec = ihex_next_binrec(rec)) { size += WARP_UCODE_SIZE(be16_to_cpu(rec->len)); where++; } if (where != n_pipes) { DRM_ERROR("mga: Invalid microcode \"%s\"\n", firmware_name); rc = -EINVAL; goto out; } size = PAGE_ALIGN(size); DRM_DEBUG("MGA ucode size = %d bytes\n", size); if (size > dev_priv->warp->size) { DRM_ERROR("microcode too large! 
(%u > %lu)\n", size, dev_priv->warp->size); rc = -ENOMEM; goto out; } memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); where = 0; for (rec = (const struct ihex_binrec *)fw->data; rec; rec = ihex_next_binrec(rec)) { unsigned int src_size, dst_size; DRM_DEBUG(" pcbase = 0x%08lx vcbase = %p\n", pcbase, vcbase); dev_priv->warp_pipe_phys[where] = pcbase; src_size = be16_to_cpu(rec->len); dst_size = WARP_UCODE_SIZE(src_size); memcpy(vcbase, rec->data, src_size); pcbase += dst_size; vcbase += dst_size; where++; } out: release_firmware(fw); return rc; } #define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE) int mga_warp_init(drm_mga_private_t *dev_priv) { u32 wmisc; switch (dev_priv->chipset) { case MGA_CARD_TYPE_G400: case MGA_CARD_TYPE_G550: MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND); MGA_WRITE(MGA_WGETMSB, 0x00000E00); MGA_WRITE(MGA_WVRTXSZ, 0x00001807); MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000); break; case MGA_CARD_TYPE_G200: MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND); MGA_WRITE(MGA_WGETMSB, 0x1606); MGA_WRITE(MGA_WVRTXSZ, 7); break; default: return -EINVAL; } MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE)); wmisc = MGA_READ(MGA_WMISC); if (wmisc != WMISC_EXPECTED) { DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", wmisc, WMISC_EXPECTED); return -EINVAL; } return 0; }
gpl-2.0
marcoxx626/M8_Kernel_Sense
drivers/staging/iio/dac/ad5504.c
34
9810
/* * AD5504, AD5501 High Voltage Digital to Analog Converter * * Copyright 2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/interrupt.h> #include <linux/fs.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/regulator/consumer.h> #include <linux/module.h> #include "../iio.h" #include "../sysfs.h" #include "../events.h" #include "dac.h" #include "ad5504.h" #define AD5504_CHANNEL(_chan) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .output = 1, \ .channel = (_chan), \ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \ .address = AD5504_ADDR_DAC(_chan), \ .scan_type = IIO_ST('u', 12, 16, 0), \ } static const struct iio_chan_spec ad5504_channels[] = { AD5504_CHANNEL(0), AD5504_CHANNEL(1), AD5504_CHANNEL(2), AD5504_CHANNEL(3), }; static int ad5504_spi_write(struct spi_device *spi, u8 addr, u16 val) { u16 tmp = cpu_to_be16(AD5504_CMD_WRITE | AD5504_ADDR(addr) | (val & AD5504_RES_MASK)); return spi_write(spi, (u8 *)&tmp, 2); } static int ad5504_spi_read(struct spi_device *spi, u8 addr) { u16 tmp = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr)); u16 val; int ret; struct spi_transfer t = { .tx_buf = &tmp, .rx_buf = &val, .len = 2, }; struct spi_message m; spi_message_init(&m); spi_message_add_tail(&t, &m); ret = spi_sync(spi, &m); if (ret < 0) return ret; return be16_to_cpu(val) & AD5504_RES_MASK; } static int ad5504_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long m) { struct ad5504_state *st = iio_priv(indio_dev); unsigned long scale_uv; int ret; switch (m) { case 0: ret = ad5504_spi_read(st->spi, chan->address); if (ret < 0) return ret; *val = ret; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits; *val = scale_uv / 1000; *val2 = (scale_uv % 1000) * 1000; return IIO_VAL_INT_PLUS_MICRO; } return -EINVAL; } static int ad5504_write_raw(struct iio_dev 
*indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct ad5504_state *st = iio_priv(indio_dev); int ret; switch (mask) { case 0: if (val >= (1 << chan->scan_type.realbits) || val < 0) return -EINVAL; return ad5504_spi_write(st->spi, chan->address, val); default: ret = -EINVAL; } return -EINVAL; } static ssize_t ad5504_read_powerdown_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad5504_state *st = iio_priv(indio_dev); const char mode[][14] = {"20kohm_to_gnd", "three_state"}; return sprintf(buf, "%s\n", mode[st->pwr_down_mode]); } static ssize_t ad5504_write_powerdown_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad5504_state *st = iio_priv(indio_dev); int ret; if (sysfs_streq(buf, "20kohm_to_gnd")) st->pwr_down_mode = AD5504_DAC_PWRDN_20K; else if (sysfs_streq(buf, "three_state")) st->pwr_down_mode = AD5504_DAC_PWRDN_3STATE; else ret = -EINVAL; return ret ? 
ret : len; } static ssize_t ad5504_read_dac_powerdown(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad5504_state *st = iio_priv(indio_dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); return sprintf(buf, "%d\n", !(st->pwr_down_mask & (1 << this_attr->address))); } static ssize_t ad5504_write_dac_powerdown(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { long readin; int ret; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad5504_state *st = iio_priv(indio_dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); ret = strict_strtol(buf, 10, &readin); if (ret) return ret; if (readin == 0) st->pwr_down_mask |= (1 << this_attr->address); else if (readin == 1) st->pwr_down_mask &= ~(1 << this_attr->address); else ret = -EINVAL; ret = ad5504_spi_write(st->spi, AD5504_ADDR_CTRL, AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) | AD5504_DAC_PWR(st->pwr_down_mask)); ad5504_spi_write(st->spi, AD5504_ADDR_NOOP, 0); return ret ? 
ret : len; } static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO | S_IWUSR, ad5504_read_powerdown_mode, ad5504_write_powerdown_mode, 0); static IIO_CONST_ATTR(out_voltage_powerdown_mode_available, "20kohm_to_gnd three_state"); #define IIO_DEV_ATTR_DAC_POWERDOWN(_num, _show, _store, _addr) \ IIO_DEVICE_ATTR(out_voltage##_num##_powerdown, \ S_IRUGO | S_IWUSR, _show, _store, _addr) static IIO_DEV_ATTR_DAC_POWERDOWN(0, ad5504_read_dac_powerdown, ad5504_write_dac_powerdown, 0); static IIO_DEV_ATTR_DAC_POWERDOWN(1, ad5504_read_dac_powerdown, ad5504_write_dac_powerdown, 1); static IIO_DEV_ATTR_DAC_POWERDOWN(2, ad5504_read_dac_powerdown, ad5504_write_dac_powerdown, 2); static IIO_DEV_ATTR_DAC_POWERDOWN(3, ad5504_read_dac_powerdown, ad5504_write_dac_powerdown, 3); static struct attribute *ad5504_attributes[] = { &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr, &iio_dev_attr_out_voltage1_powerdown.dev_attr.attr, &iio_dev_attr_out_voltage2_powerdown.dev_attr.attr, &iio_dev_attr_out_voltage3_powerdown.dev_attr.attr, &iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr, &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr, NULL, }; static const struct attribute_group ad5504_attribute_group = { .attrs = ad5504_attributes, }; static struct attribute *ad5501_attributes[] = { &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr, &iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr, &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr, NULL, }; static const struct attribute_group ad5501_attribute_group = { .attrs = ad5501_attributes, }; static IIO_CONST_ATTR(temp0_thresh_rising_value, "110000"); static IIO_CONST_ATTR(temp0_thresh_rising_en, "1"); static struct attribute *ad5504_ev_attributes[] = { &iio_const_attr_temp0_thresh_rising_value.dev_attr.attr, &iio_const_attr_temp0_thresh_rising_en.dev_attr.attr, NULL, }; static struct attribute_group ad5504_ev_attribute_group = { .attrs = ad5504_ev_attributes, .name = "events", }; static irqreturn_t 
ad5504_event_handler(int irq, void *private) { iio_push_event(private, IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), iio_get_time_ns()); return IRQ_HANDLED; } static const struct iio_info ad5504_info = { .write_raw = ad5504_write_raw, .read_raw = ad5504_read_raw, .attrs = &ad5504_attribute_group, .event_attrs = &ad5504_ev_attribute_group, .driver_module = THIS_MODULE, }; static const struct iio_info ad5501_info = { .write_raw = ad5504_write_raw, .read_raw = ad5504_read_raw, .attrs = &ad5501_attribute_group, .event_attrs = &ad5504_ev_attribute_group, .driver_module = THIS_MODULE, }; static int __devinit ad5504_probe(struct spi_device *spi) { struct ad5504_platform_data *pdata = spi->dev.platform_data; struct iio_dev *indio_dev; struct ad5504_state *st; struct regulator *reg; int ret, voltage_uv = 0; indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_ret; } reg = regulator_get(&spi->dev, "vcc"); if (!IS_ERR(reg)) { ret = regulator_enable(reg); if (ret) goto error_put_reg; voltage_uv = regulator_get_voltage(reg); } spi_set_drvdata(spi, indio_dev); st = iio_priv(indio_dev); if (voltage_uv) st->vref_mv = voltage_uv / 1000; else if (pdata) st->vref_mv = pdata->vref_mv; else dev_warn(&spi->dev, "reference voltage unspecified\n"); st->reg = reg; st->spi = spi; indio_dev->dev.parent = &spi->dev; indio_dev->name = spi_get_device_id(st->spi)->name; if (spi_get_device_id(st->spi)->driver_data == ID_AD5501) { indio_dev->info = &ad5501_info; indio_dev->num_channels = 1; } else { indio_dev->info = &ad5504_info; indio_dev->num_channels = 4; } indio_dev->channels = ad5504_channels; indio_dev->modes = INDIO_DIRECT_MODE; if (spi->irq) { ret = request_threaded_irq(spi->irq, NULL, &ad5504_event_handler, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, spi_get_device_id(st->spi)->name, indio_dev); if (ret) goto error_disable_reg; } ret = iio_device_register(indio_dev); if (ret) goto error_free_irq; return 0; error_free_irq: 
free_irq(spi->irq, indio_dev); error_disable_reg: if (!IS_ERR(reg)) regulator_disable(reg); error_put_reg: if (!IS_ERR(reg)) regulator_put(reg); iio_free_device(indio_dev); error_ret: return ret; } static int __devexit ad5504_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad5504_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); if (spi->irq) free_irq(spi->irq, indio_dev); if (!IS_ERR(st->reg)) { regulator_disable(st->reg); regulator_put(st->reg); } iio_free_device(indio_dev); return 0; } static const struct spi_device_id ad5504_id[] = { {"ad5504", ID_AD5504}, {"ad5501", ID_AD5501}, {} }; MODULE_DEVICE_TABLE(spi, ad5504_id); static struct spi_driver ad5504_driver = { .driver = { .name = "ad5504", .owner = THIS_MODULE, }, .probe = ad5504_probe, .remove = __devexit_p(ad5504_remove), .id_table = ad5504_id, }; module_spi_driver(ad5504_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Analog Devices AD5501/AD5501 DAC"); MODULE_LICENSE("GPL v2");
gpl-2.0
k5t4j5/kernel_htc_m8
arch/sparc/kernel/windows.c
34
2592
/* windows.c: Routines to deal with register window management * at the C-code level. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> #include <asm/uaccess.h> void flush_user_windows(void) { register int ctr asm("g5"); ctr = 0; __asm__ __volatile__( "\n1:\n\t" "ld [%%g6 + %2], %%g4\n\t" "orcc %%g0, %%g4, %%g0\n\t" "add %0, 1, %0\n\t" "bne 1b\n\t" " save %%sp, -64, %%sp\n" "2:\n\t" "subcc %0, 1, %0\n\t" "bne 2b\n\t" " restore %%g0, %%g0, %%g0\n" : "=&r" (ctr) : "0" (ctr), "i" ((const unsigned long)TI_UWINMASK) : "g4", "cc"); } static inline void shift_window_buffer(int first_win, int last_win, struct thread_info *tp) { int i; for(i = first_win; i < last_win; i++) { tp->rwbuf_stkptrs[i] = tp->rwbuf_stkptrs[i+1]; memcpy(&tp->reg_window[i], &tp->reg_window[i+1], sizeof(struct reg_window32)); } } void synchronize_user_stack(void) { struct thread_info *tp = current_thread_info(); int window; flush_user_windows(); if(!tp->w_saved) return; for(window = tp->w_saved - 1; window >= 0; window--) { unsigned long sp = tp->rwbuf_stkptrs[window]; if (copy_to_user((char __user *) sp, &tp->reg_window[window], sizeof(struct reg_window32))) continue; shift_window_buffer(window, tp->w_saved - 1, tp); tp->w_saved--; } } #if 0 static inline void copy_aligned_window(void *dest, const void *src) { __asm__ __volatile__("ldd [%1], %%g2\n\t" "ldd [%1 + 0x8], %%g4\n\t" "std %%g2, [%0]\n\t" "std %%g4, [%0 + 0x8]\n\t" "ldd [%1 + 0x10], %%g2\n\t" "ldd [%1 + 0x18], %%g4\n\t" "std %%g2, [%0 + 0x10]\n\t" "std %%g4, [%0 + 0x18]\n\t" "ldd [%1 + 0x20], %%g2\n\t" "ldd [%1 + 0x28], %%g4\n\t" "std %%g2, [%0 + 0x20]\n\t" "std %%g4, [%0 + 0x28]\n\t" "ldd [%1 + 0x30], %%g2\n\t" "ldd [%1 + 0x38], %%g4\n\t" "std %%g2, [%0 + 0x30]\n\t" "std %%g4, [%0 + 0x38]\n\t" : : "r" (dest), "r" (src) : "g2", "g3", "g4", "g5"); } #endif void try_to_clear_window_buffer(struct pt_regs 
*regs, int who) { struct thread_info *tp = current_thread_info(); int window; flush_user_windows(); for(window = 0; window < tp->w_saved; window++) { unsigned long sp = tp->rwbuf_stkptrs[window]; if ((sp & 7) || copy_to_user((char __user *) sp, &tp->reg_window[window], sizeof(struct reg_window32))) do_exit(SIGILL); } tp->w_saved = 0; }
gpl-2.0
jameshilliard/m8whl-3.4.0-g278eae8
drivers/media/dvb/frontends/sp8870.c
34
12641
/* Driver for Spase SP8870 demodulator Copyright (C) 1999 Juergen Peitz This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define SP8870_DEFAULT_FIRMWARE "dvb-fe-sp8870.fw" #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "sp8870.h" struct sp8870_state { struct i2c_adapter* i2c; const struct sp8870_config* config; struct dvb_frontend frontend; u8 initialised:1; }; static int debug; #define dprintk(args...) 
\ do { \ if (debug) printk(KERN_DEBUG "sp8870: " args); \ } while (0) #define SP8870_FIRMWARE_SIZE 16382 #define SP8870_FIRMWARE_OFFSET 0x0A static int sp8870_writereg (struct sp8870_state* state, u16 reg, u16 data) { u8 buf [] = { reg >> 8, reg & 0xff, data >> 8, data & 0xff }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 4 }; int err; if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { dprintk ("%s: writereg error (err == %i, reg == 0x%02x, data == 0x%02x)\n", __func__, err, reg, data); return -EREMOTEIO; } return 0; } static int sp8870_readreg (struct sp8870_state* state, u16 reg) { int ret; u8 b0 [] = { reg >> 8 , reg & 0xff }; u8 b1 [] = { 0, 0 }; struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 2 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 2 } }; ret = i2c_transfer (state->i2c, msg, 2); if (ret != 2) { dprintk("%s: readreg error (ret == %i)\n", __func__, ret); return -1; } return (b1[0] << 8 | b1[1]); } static int sp8870_firmware_upload (struct sp8870_state* state, const struct firmware *fw) { struct i2c_msg msg; const char *fw_buf = fw->data; int fw_pos; u8 tx_buf[255]; int tx_len; int err = 0; dprintk ("%s: ...\n", __func__); if (fw->size < SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET) return -EINVAL; sp8870_writereg(state, 0x0F00, 0x0000); sp8870_writereg(state, 0x8F08, ((SP8870_FIRMWARE_SIZE / 2) & 0xFFFF)); sp8870_writereg(state, 0x8F0A, ((SP8870_FIRMWARE_SIZE / 2) >> 16)); fw_pos = SP8870_FIRMWARE_OFFSET; while (fw_pos < SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET){ tx_len = (fw_pos <= SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET - 252) ? 
252 : SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET - fw_pos; tx_buf[0] = 0xCF; tx_buf[1] = 0x0A; memcpy(&tx_buf[2], fw_buf + fw_pos, tx_len); msg.addr = state->config->demod_address; msg.flags = 0; msg.buf = tx_buf; msg.len = tx_len + 2; if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { printk("%s: firmware upload failed!\n", __func__); printk ("%s: i2c error (err == %i)\n", __func__, err); return err; } fw_pos += tx_len; } dprintk ("%s: done!\n", __func__); return 0; }; static void sp8870_microcontroller_stop (struct sp8870_state* state) { sp8870_writereg(state, 0x0F08, 0x000); sp8870_writereg(state, 0x0F09, 0x000); sp8870_writereg(state, 0x0F00, 0x000); } static void sp8870_microcontroller_start (struct sp8870_state* state) { sp8870_writereg(state, 0x0F08, 0x000); sp8870_writereg(state, 0x0F09, 0x000); sp8870_writereg(state, 0x0F00, 0x001); sp8870_readreg(state, 0x0D01); } static int sp8870_read_data_valid_signal(struct sp8870_state* state) { return (sp8870_readreg(state, 0x0D02) > 0); } static int configure_reg0xc05 (struct dtv_frontend_properties *p, u16 *reg0xc05) { int known_parameters = 1; *reg0xc05 = 0x000; switch (p->modulation) { case QPSK: break; case QAM_16: *reg0xc05 |= (1 << 10); break; case QAM_64: *reg0xc05 |= (2 << 10); break; case QAM_AUTO: known_parameters = 0; break; default: return -EINVAL; }; switch (p->hierarchy) { case HIERARCHY_NONE: break; case HIERARCHY_1: *reg0xc05 |= (1 << 7); break; case HIERARCHY_2: *reg0xc05 |= (2 << 7); break; case HIERARCHY_4: *reg0xc05 |= (3 << 7); break; case HIERARCHY_AUTO: known_parameters = 0; break; default: return -EINVAL; }; switch (p->code_rate_HP) { case FEC_1_2: break; case FEC_2_3: *reg0xc05 |= (1 << 3); break; case FEC_3_4: *reg0xc05 |= (2 << 3); break; case FEC_5_6: *reg0xc05 |= (3 << 3); break; case FEC_7_8: *reg0xc05 |= (4 << 3); break; case FEC_AUTO: known_parameters = 0; break; default: return -EINVAL; }; if (known_parameters) *reg0xc05 |= (2 << 1); else *reg0xc05 |= (1 << 1); return 0; } 
static int sp8870_wake_up(struct sp8870_state* state) { return sp8870_writereg(state, 0xC18, 0x00D); } static int sp8870_set_frontend_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct sp8870_state* state = fe->demodulator_priv; int err; u16 reg0xc05; if ((err = configure_reg0xc05(p, &reg0xc05))) return err; sp8870_microcontroller_stop(state); if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } sp8870_writereg(state, 0x0319, 0x000A); sp8870_writereg(state, 0x031A, 0x0AAB); sp8870_writereg(state, 0x0309, 0x0400); sp8870_writereg(state, 0x030A, 0x0000); if (p->bandwidth_hz == 6000000) sp8870_writereg(state, 0x0311, 0x0002); else if (p->bandwidth_hz == 7000000) sp8870_writereg(state, 0x0311, 0x0001); else sp8870_writereg(state, 0x0311, 0x0000); if (p->transmission_mode == TRANSMISSION_MODE_2K) sp8870_writereg(state, 0x0338, 0x0000); else sp8870_writereg(state, 0x0338, 0x0001); sp8870_writereg(state, 0xc05, reg0xc05); sp8870_readreg(state, 0x200); sp8870_microcontroller_start(state); return 0; } static int sp8870_init (struct dvb_frontend* fe) { struct sp8870_state* state = fe->demodulator_priv; const struct firmware *fw = NULL; sp8870_wake_up(state); if (state->initialised) return 0; state->initialised = 1; dprintk ("%s\n", __func__); printk("sp8870: waiting for firmware upload (%s)...\n", SP8870_DEFAULT_FIRMWARE); if (state->config->request_firmware(fe, &fw, SP8870_DEFAULT_FIRMWARE)) { printk("sp8870: no firmware upload (timeout or file not found?)\n"); return -EIO; } if (sp8870_firmware_upload(state, fw)) { printk("sp8870: writing firmware to device failed\n"); release_firmware(fw); return -EIO; } release_firmware(fw); printk("sp8870: firmware upload complete\n"); sp8870_writereg(state, 0xc18, 0x00d); sp8870_microcontroller_stop(state); sp8870_writereg(state, 0x0301, 0x0003); sp8870_writereg(state, 0x0C13, 0x0001); 
sp8870_writereg(state, 0x0C14, 0x0001); sp8870_writereg(state, 0x0D00, 0x010); sp8870_writereg(state, 0x0D01, 0x000); return 0; } static int sp8870_read_status (struct dvb_frontend* fe, fe_status_t * fe_status) { struct sp8870_state* state = fe->demodulator_priv; int status; int signal; *fe_status = 0; status = sp8870_readreg (state, 0x0200); if (status < 0) return -EIO; signal = sp8870_readreg (state, 0x0303); if (signal < 0) return -EIO; if (signal > 0x0F) *fe_status |= FE_HAS_SIGNAL; if (status & 0x08) *fe_status |= FE_HAS_SYNC; if (status & 0x04) *fe_status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_VITERBI; return 0; } static int sp8870_read_ber (struct dvb_frontend* fe, u32 * ber) { struct sp8870_state* state = fe->demodulator_priv; int ret; u32 tmp; *ber = 0; ret = sp8870_readreg(state, 0xC08); if (ret < 0) return -EIO; tmp = ret & 0x3F; ret = sp8870_readreg(state, 0xC07); if (ret < 0) return -EIO; tmp = ret << 6; if (tmp >= 0x3FFF0) tmp = ~0; *ber = tmp; return 0; } static int sp8870_read_signal_strength(struct dvb_frontend* fe, u16 * signal) { struct sp8870_state* state = fe->demodulator_priv; int ret; u16 tmp; *signal = 0; ret = sp8870_readreg (state, 0x306); if (ret < 0) return -EIO; tmp = ret << 8; ret = sp8870_readreg (state, 0x303); if (ret < 0) return -EIO; tmp |= ret; if (tmp) *signal = 0xFFFF - tmp; return 0; } static int sp8870_read_uncorrected_blocks (struct dvb_frontend* fe, u32* ublocks) { struct sp8870_state* state = fe->demodulator_priv; int ret; *ublocks = 0; ret = sp8870_readreg(state, 0xC0C); if (ret < 0) return -EIO; if (ret == 0xFFFF) ret = ~0; *ublocks = ret; return 0; } #define MAXTRIALS 5 #define MAXCHECKS 100 static int lockups; static int switches; static int sp8870_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct sp8870_state* state = fe->demodulator_priv; int err = 0; int valid = 0; int trials = 0; int check_count = 0; dprintk("%s: frequency = %i\n", __func__, p->frequency); 
for (trials = 1; trials <= MAXTRIALS; trials++) { err = sp8870_set_frontend_parameters(fe); if (err) return err; for (check_count = 0; check_count < MAXCHECKS; check_count++) { valid = sp8870_read_data_valid_signal(state); if (valid) { dprintk("%s: delay = %i usec\n", __func__, check_count * 10); break; } udelay(10); } if (valid) break; } if (!valid) { printk("%s: firmware crash!!!!!!\n", __func__); return -EIO; } if (debug) { if (valid) { if (trials > 1) { printk("%s: firmware lockup!!!\n", __func__); printk("%s: recovered after %i trial(s))\n", __func__, trials - 1); lockups++; } } switches++; printk("%s: switches = %i lockups = %i\n", __func__, switches, lockups); } return 0; } static int sp8870_sleep(struct dvb_frontend* fe) { struct sp8870_state* state = fe->demodulator_priv; return sp8870_writereg(state, 0xC18, 0x000); } static int sp8870_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings) { fesettings->min_delay_ms = 350; fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static int sp8870_i2c_gate_ctrl(struct dvb_frontend* fe, int enable) { struct sp8870_state* state = fe->demodulator_priv; if (enable) { return sp8870_writereg(state, 0x206, 0x001); } else { return sp8870_writereg(state, 0x206, 0x000); } } static void sp8870_release(struct dvb_frontend* fe) { struct sp8870_state* state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops sp8870_ops; struct dvb_frontend* sp8870_attach(const struct sp8870_config* config, struct i2c_adapter* i2c) { struct sp8870_state* state = NULL; state = kzalloc(sizeof(struct sp8870_state), GFP_KERNEL); if (state == NULL) goto error; state->config = config; state->i2c = i2c; state->initialised = 0; if (sp8870_readreg(state, 0x0200) < 0) goto error; memcpy(&state->frontend.ops, &sp8870_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } static struct dvb_frontend_ops 
sp8870_ops = { .delsys = { SYS_DVBT }, .info = { .name = "Spase SP8870 DVB-T", .frequency_min = 470000000, .frequency_max = 860000000, .frequency_stepsize = 166666, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER }, .release = sp8870_release, .init = sp8870_init, .sleep = sp8870_sleep, .i2c_gate_ctrl = sp8870_i2c_gate_ctrl, .set_frontend = sp8870_set_frontend, .get_tune_settings = sp8870_get_tune_settings, .read_status = sp8870_read_status, .read_ber = sp8870_read_ber, .read_signal_strength = sp8870_read_signal_strength, .read_ucblocks = sp8870_read_uncorrected_blocks, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("Spase SP8870 DVB-T Demodulator driver"); MODULE_AUTHOR("Juergen Peitz"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(sp8870_attach);
gpl-2.0
BPI-SINOVOIP/BPI-Mainline-kernel
linux-5.4/drivers/net/wireless/cisco/airo_cs.c
290
6485
/*====================================================================== Aironet driver for 4500 and 4800 series cards This code is released under both the GPL version 2 and BSD licenses. Either license may be used. The respective licenses are found at the end of this file. This code was developed by Benjamin Reed <breed@users.sourceforge.net> including portions of which come from the Aironet PC4500 Developer's Reference Manual and used with permission. Copyright (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use code in the Developer's manual was granted for this driver by Aironet. In addition this module was derived from dummy_cs. The initial developer of dummy_cs is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. ======================================================================*/ #ifdef __IN_PCMCIA_PACKAGE__ #include <pcmcia/k_compat.h> #endif #include <linux/kernel.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/netdevice.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include <linux/io.h> #include "airo.h" /*====================================================================*/ MODULE_AUTHOR("Benjamin Reed"); MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet " "cards. 
This is the module that links the PCMCIA card " "with the airo module."); MODULE_LICENSE("Dual BSD/GPL"); MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards"); /*====================================================================*/ static int airo_config(struct pcmcia_device *link); static void airo_release(struct pcmcia_device *link); static void airo_detach(struct pcmcia_device *p_dev); struct local_info { struct net_device *eth_dev; }; static int airo_probe(struct pcmcia_device *p_dev) { struct local_info *local; dev_dbg(&p_dev->dev, "airo_attach()\n"); /* Allocate space for private device-specific data */ local = kzalloc(sizeof(*local), GFP_KERNEL); if (!local) return -ENOMEM; p_dev->priv = local; return airo_config(p_dev); } /* airo_attach */ static void airo_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "airo_detach\n"); airo_release(link); if (((struct local_info *)link->priv)->eth_dev) { stop_airo_card(((struct local_info *)link->priv)->eth_dev, 0); } ((struct local_info *)link->priv)->eth_dev = NULL; kfree(link->priv); } /* airo_detach */ static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) { if (p_dev->config_index == 0) return -EINVAL; return pcmcia_request_io(p_dev); } static int airo_config(struct pcmcia_device *link) { int ret; dev_dbg(&link->dev, "airo_config\n"); link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_SET_IO; ret = pcmcia_loop_config(link, airo_cs_config_check, NULL); if (ret) goto failed; if (!link->irq) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; ((struct local_info *)link->priv)->eth_dev = init_airo_card(link->irq, link->resource[0]->start, 1, &link->dev); if (!((struct local_info *)link->priv)->eth_dev) goto failed; return 0; failed: airo_release(link); return -ENODEV; } /* airo_config */ static void airo_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "airo_release\n"); pcmcia_disable_device(link); } 
static int airo_suspend(struct pcmcia_device *link) { struct local_info *local = link->priv; netif_device_detach(local->eth_dev); return 0; } static int airo_resume(struct pcmcia_device *link) { struct local_info *local = link->priv; if (link->open) { reset_airo_card(local->eth_dev); netif_device_attach(local->eth_dev); } return 0; } static const struct pcmcia_device_id airo_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x015f, 0x000a), PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0005), PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0007), PCMCIA_DEVICE_MANF_CARD(0x0105, 0x0007), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, airo_ids); static struct pcmcia_driver airo_driver = { .owner = THIS_MODULE, .name = "airo_cs", .probe = airo_probe, .remove = airo_detach, .id_table = airo_ids, .suspend = airo_suspend, .resume = airo_resume, }; module_pcmcia_driver(airo_driver); /* This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. In addition: Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
gpl-2.0
bigzz/linux-xfs
drivers/misc/mei/hw-me.c
290
22772
/* * * Intel Management Engine Interface (Intel MEI) Linux driver * Copyright (c) 2003-2012, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * */ #include <linux/pci.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include "mei_dev.h" #include "hbm.h" #include "hw-me.h" #include "hw-me-regs.h" #include "mei-trace.h" /** * mei_me_reg_read - Reads 32bit data from the mei device * * @hw: the me hardware structure * @offset: offset from which to read the data * * Return: register value (u32) */ static inline u32 mei_me_reg_read(const struct mei_me_hw *hw, unsigned long offset) { return ioread32(hw->mem_addr + offset); } /** * mei_me_reg_write - Writes 32bit data to the mei device * * @hw: the me hardware structure * @offset: offset from which to write the data * @value: register value to write (u32) */ static inline void mei_me_reg_write(const struct mei_me_hw *hw, unsigned long offset, u32 value) { iowrite32(value, hw->mem_addr + offset); } /** * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer * read window register * * @dev: the device structure * * Return: ME_CB_RW register value (u32) */ static inline u32 mei_me_mecbrw_read(const struct mei_device *dev) { return mei_me_reg_read(to_me_hw(dev), ME_CB_RW); } /** * mei_me_hcbww_write - write 32bit data to the host circular buffer * * @dev: the device structure * @data: 32bit data to be written to the host circular buffer */ static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data) { mei_me_reg_write(to_me_hw(dev), H_CB_WW, data); } /** * mei_me_mecsr_read - Reads 32bit data 
from the ME CSR * * @dev: the device structure * * Return: ME_CSR_HA register value (u32) */ static inline u32 mei_me_mecsr_read(const struct mei_device *dev) { u32 reg; reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA); trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg); return reg; } /** * mei_hcsr_read - Reads 32bit data from the host CSR * * @dev: the device structure * * Return: H_CSR register value (u32) */ static inline u32 mei_hcsr_read(const struct mei_device *dev) { u32 reg; reg = mei_me_reg_read(to_me_hw(dev), H_CSR); trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg); return reg; } /** * mei_hcsr_write - writes H_CSR register to the mei device * * @dev: the device structure * @reg: new register value */ static inline void mei_hcsr_write(struct mei_device *dev, u32 reg) { trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg); mei_me_reg_write(to_me_hw(dev), H_CSR, reg); } /** * mei_hcsr_set - writes H_CSR register to the mei device, * and ignores the H_IS bit for it is write-one-to-zero. 
* * @dev: the device structure * @reg: new register value */ static inline void mei_hcsr_set(struct mei_device *dev, u32 reg) { reg &= ~H_IS; mei_hcsr_write(dev, reg); } /** * mei_me_fw_status - read fw status register from pci config space * * @dev: mei device * @fw_status: fw status register values * * Return: 0 on success, error otherwise */ static int mei_me_fw_status(struct mei_device *dev, struct mei_fw_status *fw_status) { struct pci_dev *pdev = to_pci_dev(dev->dev); struct mei_me_hw *hw = to_me_hw(dev); const struct mei_fw_status *fw_src = &hw->cfg->fw_status; int ret; int i; if (!fw_status) return -EINVAL; fw_status->count = fw_src->count; for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { ret = pci_read_config_dword(pdev, fw_src->status[i], &fw_status->status[i]); if (ret) return ret; } return 0; } /** * mei_me_hw_config - configure hw dependent settings * * @dev: mei device */ static void mei_me_hw_config(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); u32 hcsr = mei_hcsr_read(dev); /* Doesn't change in runtime */ dev->hbuf_depth = (hcsr & H_CBD) >> 24; hw->pg_state = MEI_PG_OFF; } /** * mei_me_pg_state - translate internal pg state * to the mei power gating state * * @dev: mei device * * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise */ static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); return hw->pg_state; } /** * mei_me_intr_clear - clear and stop interrupts * * @dev: the device structure */ static void mei_me_intr_clear(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); if ((hcsr & H_IS) == H_IS) mei_hcsr_write(dev, hcsr); } /** * mei_me_intr_enable - enables mei device interrupts * * @dev: the device structure */ static void mei_me_intr_enable(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); hcsr |= H_IE; mei_hcsr_set(dev, hcsr); } /** * mei_me_intr_disable - disables mei device interrupts * * @dev: the device structure */ static 
void mei_me_intr_disable(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); hcsr &= ~H_IE; mei_hcsr_set(dev, hcsr); } /** * mei_me_hw_reset_release - release device from the reset * * @dev: the device structure */ static void mei_me_hw_reset_release(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); hcsr |= H_IG; hcsr &= ~H_RST; mei_hcsr_set(dev, hcsr); /* complete this write before we set host ready on another CPU */ mmiowb(); } /** * mei_me_hw_reset - resets fw via mei csr register. * * @dev: the device structure * @intr_enable: if interrupt should be enabled after reset. * * Return: always 0 */ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) { u32 hcsr = mei_hcsr_read(dev); /* H_RST may be found lit before reset is started, * for example if preceding reset flow hasn't completed. * In that case asserting H_RST will be ignored, therefore * we need to clean H_RST bit to start a successful reset sequence. */ if ((hcsr & H_RST) == H_RST) { dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); hcsr &= ~H_RST; mei_hcsr_set(dev, hcsr); hcsr = mei_hcsr_read(dev); } hcsr |= H_RST | H_IG | H_IS; if (intr_enable) hcsr |= H_IE; else hcsr &= ~H_IE; dev->recvd_hw_ready = false; mei_hcsr_write(dev, hcsr); /* * Host reads the H_CSR once to ensure that the * posted write to H_CSR completes. 
*/ hcsr = mei_hcsr_read(dev); if ((hcsr & H_RST) == 0) dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr); if ((hcsr & H_RDY) == H_RDY) dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr); if (intr_enable == false) mei_me_hw_reset_release(dev); return 0; } /** * mei_me_host_set_ready - enable device * * @dev: mei device */ static void mei_me_host_set_ready(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); hcsr |= H_IE | H_IG | H_RDY; mei_hcsr_set(dev, hcsr); } /** * mei_me_host_is_ready - check whether the host has turned ready * * @dev: mei device * Return: bool */ static bool mei_me_host_is_ready(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); return (hcsr & H_RDY) == H_RDY; } /** * mei_me_hw_is_ready - check whether the me(hw) has turned ready * * @dev: mei device * Return: bool */ static bool mei_me_hw_is_ready(struct mei_device *dev) { u32 mecsr = mei_me_mecsr_read(dev); return (mecsr & ME_RDY_HRA) == ME_RDY_HRA; } /** * mei_me_hw_ready_wait - wait until the me(hw) has turned ready * or timeout is reached * * @dev: mei device * Return: 0 on success, error otherwise */ static int mei_me_hw_ready_wait(struct mei_device *dev) { mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready, mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); mutex_lock(&dev->device_lock); if (!dev->recvd_hw_ready) { dev_err(dev->dev, "wait hw ready failed\n"); return -ETIME; } mei_me_hw_reset_release(dev); dev->recvd_hw_ready = false; return 0; } /** * mei_me_hw_start - hw start routine * * @dev: mei device * Return: 0 on success, error otherwise */ static int mei_me_hw_start(struct mei_device *dev) { int ret = mei_me_hw_ready_wait(dev); if (ret) return ret; dev_dbg(dev->dev, "hw is ready\n"); mei_me_host_set_ready(dev); return ret; } /** * mei_hbuf_filled_slots - gets number of device filled buffer slots * * @dev: the device structure * * Return: number of filled slots */ static unsigned char mei_hbuf_filled_slots(struct mei_device 
*dev) { u32 hcsr; char read_ptr, write_ptr; hcsr = mei_hcsr_read(dev); read_ptr = (char) ((hcsr & H_CBRP) >> 8); write_ptr = (char) ((hcsr & H_CBWP) >> 16); return (unsigned char) (write_ptr - read_ptr); } /** * mei_me_hbuf_is_empty - checks if host buffer is empty. * * @dev: the device structure * * Return: true if empty, false - otherwise. */ static bool mei_me_hbuf_is_empty(struct mei_device *dev) { return mei_hbuf_filled_slots(dev) == 0; } /** * mei_me_hbuf_empty_slots - counts write empty slots. * * @dev: the device structure * * Return: -EOVERFLOW if overflow, otherwise empty slots count */ static int mei_me_hbuf_empty_slots(struct mei_device *dev) { unsigned char filled_slots, empty_slots; filled_slots = mei_hbuf_filled_slots(dev); empty_slots = dev->hbuf_depth - filled_slots; /* check for overflow */ if (filled_slots > dev->hbuf_depth) return -EOVERFLOW; return empty_slots; } /** * mei_me_hbuf_max_len - returns size of hw buffer. * * @dev: the device structure * * Return: size of hw buffer in bytes */ static size_t mei_me_hbuf_max_len(const struct mei_device *dev) { return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr); } /** * mei_me_write_message - writes a message to mei device. 
* * @dev: the device structure * @header: mei HECI header of message * @buf: message payload will be written * * Return: -EIO if write has failed */ static int mei_me_write_message(struct mei_device *dev, struct mei_msg_hdr *header, unsigned char *buf) { unsigned long rem; unsigned long length = header->length; u32 *reg_buf = (u32 *)buf; u32 hcsr; u32 dw_cnt; int i; int empty_slots; dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); empty_slots = mei_hbuf_empty_slots(dev); dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots); dw_cnt = mei_data2slots(length); if (empty_slots < 0 || dw_cnt > empty_slots) return -EMSGSIZE; mei_me_hcbww_write(dev, *((u32 *) header)); for (i = 0; i < length / 4; i++) mei_me_hcbww_write(dev, reg_buf[i]); rem = length & 0x3; if (rem > 0) { u32 reg = 0; memcpy(&reg, &buf[length - rem], rem); mei_me_hcbww_write(dev, reg); } hcsr = mei_hcsr_read(dev) | H_IG; mei_hcsr_set(dev, hcsr); if (!mei_me_hw_is_ready(dev)) return -EIO; return 0; } /** * mei_me_count_full_read_slots - counts read full slots. * * @dev: the device structure * * Return: -EOVERFLOW if overflow, otherwise filled slots count */ static int mei_me_count_full_read_slots(struct mei_device *dev) { u32 me_csr; char read_ptr, write_ptr; unsigned char buffer_depth, filled_slots; me_csr = mei_me_mecsr_read(dev); buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24); read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8); write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16); filled_slots = (unsigned char) (write_ptr - read_ptr); /* check for overflow */ if (filled_slots > buffer_depth) return -EOVERFLOW; dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots); return (int)filled_slots; } /** * mei_me_read_slots - reads a message from mei device. 
* * @dev: the device structure * @buffer: message buffer will be written * @buffer_length: message size will be read * * Return: always 0 */ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, unsigned long buffer_length) { u32 *reg_buf = (u32 *)buffer; u32 hcsr; for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32)) *reg_buf++ = mei_me_mecbrw_read(dev); if (buffer_length > 0) { u32 reg = mei_me_mecbrw_read(dev); memcpy(reg_buf, &reg, buffer_length); } hcsr = mei_hcsr_read(dev) | H_IG; mei_hcsr_set(dev, hcsr); return 0; } /** * mei_me_pg_set - write pg enter register * * @dev: the device structure */ static void mei_me_pg_set(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); u32 reg; reg = mei_me_reg_read(hw, H_HPG_CSR); trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); reg |= H_HPG_CSR_PGI; trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); mei_me_reg_write(hw, H_HPG_CSR, reg); } /** * mei_me_pg_unset - write pg exit register * * @dev: the device structure */ static void mei_me_pg_unset(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); u32 reg; reg = mei_me_reg_read(hw, H_HPG_CSR); trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); reg |= H_HPG_CSR_PGIHEXR; trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg); mei_me_reg_write(hw, H_HPG_CSR, reg); } /** * mei_me_pg_enter_sync - perform pg entry procedure * * @dev: the device structure * * Return: 0 on success an error code otherwise */ int mei_me_pg_enter_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; dev->pg_event = MEI_PG_EVENT_WAIT; ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD); if (ret) return ret; mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); mutex_lock(&dev->device_lock); if (dev->pg_event == 
MEI_PG_EVENT_RECEIVED) { mei_me_pg_set(dev); ret = 0; } else { ret = -ETIME; } dev->pg_event = MEI_PG_EVENT_IDLE; hw->pg_state = MEI_PG_ON; return ret; } /** * mei_me_pg_exit_sync - perform pg exit procedure * * @dev: the device structure * * Return: 0 on success an error code otherwise */ int mei_me_pg_exit_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; if (dev->pg_event == MEI_PG_EVENT_RECEIVED) goto reply; dev->pg_event = MEI_PG_EVENT_WAIT; mei_me_pg_unset(dev); mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); mutex_lock(&dev->device_lock); reply: if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { ret = -ETIME; goto out; } dev->pg_event = MEI_PG_EVENT_INTR_WAIT; ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD); if (ret) return ret; mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout); mutex_lock(&dev->device_lock); if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED) ret = 0; else ret = -ETIME; out: dev->pg_event = MEI_PG_EVENT_IDLE; hw->pg_state = MEI_PG_OFF; return ret; } /** * mei_me_pg_in_transition - is device now in pg transition * * @dev: the device structure * * Return: true if in pg transition, false otherwise */ static bool mei_me_pg_in_transition(struct mei_device *dev) { return dev->pg_event >= MEI_PG_EVENT_WAIT && dev->pg_event <= MEI_PG_EVENT_INTR_WAIT; } /** * mei_me_pg_is_enabled - detect if PG is supported by HW * * @dev: the device structure * * Return: true is pg supported, false otherwise */ static bool mei_me_pg_is_enabled(struct mei_device *dev) { u32 reg = mei_me_mecsr_read(dev); if ((reg & ME_PGIC_HRA) == 0) goto notsupported; if (!dev->hbm_f_pg_supported) goto notsupported; return true; notsupported: dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n", !!(reg & ME_PGIC_HRA), 
dev->version.major_version, dev->version.minor_version, HBM_MAJOR_VERSION_PGI, HBM_MINOR_VERSION_PGI); return false; } /** * mei_me_pg_intr - perform pg processing in interrupt thread handler * * @dev: the device structure */ static void mei_me_pg_intr(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT) return; dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED; hw->pg_state = MEI_PG_OFF; if (waitqueue_active(&dev->wait_pg)) wake_up(&dev->wait_pg); } /** * mei_me_irq_quick_handler - The ISR of the MEI device * * @irq: The irq number * @dev_id: pointer to the device structure * * Return: irqreturn_t */ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) { struct mei_device *dev = (struct mei_device *) dev_id; u32 hcsr = mei_hcsr_read(dev); if ((hcsr & H_IS) != H_IS) return IRQ_NONE; /* clear H_IS bit in H_CSR */ mei_hcsr_write(dev, hcsr); return IRQ_WAKE_THREAD; } /** * mei_me_irq_thread_handler - function called after ISR to handle the interrupt * processing. 
* * @irq: The irq number * @dev_id: pointer to the device structure * * Return: irqreturn_t * */ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) { struct mei_device *dev = (struct mei_device *) dev_id; struct mei_cl_cb complete_list; s32 slots; int rets = 0; dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n"); /* initialize our complete list */ mutex_lock(&dev->device_lock); mei_io_list_init(&complete_list); /* Ack the interrupt here * In case of MSI we don't go through the quick handler */ if (pci_dev_msi_enabled(to_pci_dev(dev->dev))) mei_clear_interrupts(dev); /* check if ME wants a reset */ if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { dev_warn(dev->dev, "FW not ready: resetting.\n"); schedule_work(&dev->reset_work); goto end; } mei_me_pg_intr(dev); /* check if we need to start the dev */ if (!mei_host_is_ready(dev)) { if (mei_hw_is_ready(dev)) { dev_dbg(dev->dev, "we need to start the dev.\n"); dev->recvd_hw_ready = true; wake_up(&dev->wait_hw_ready); } else { dev_dbg(dev->dev, "Spurious Interrupt\n"); } goto end; } /* check slots available for reading */ slots = mei_count_full_read_slots(dev); while (slots > 0) { dev_dbg(dev->dev, "slots to read = %08x\n", slots); rets = mei_irq_read_handler(dev, &complete_list, &slots); /* There is a race between ME write and interrupt delivery: * Not all data is always available immediately after the * interrupt, so try to read again on the next interrupt. 
*/ if (rets == -ENODATA) break; if (rets && dev->dev_state != MEI_DEV_RESETTING) { dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n", rets); schedule_work(&dev->reset_work); goto end; } } dev->hbuf_is_ready = mei_hbuf_is_ready(dev); /* * During PG handshake only allowed write is the replay to the * PG exit message, so block calling write function * if the pg event is in PG handshake */ if (dev->pg_event != MEI_PG_EVENT_WAIT && dev->pg_event != MEI_PG_EVENT_RECEIVED) { rets = mei_irq_write_handler(dev, &complete_list); dev->hbuf_is_ready = mei_hbuf_is_ready(dev); } mei_irq_compl_handler(dev, &complete_list); end: dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); mutex_unlock(&dev->device_lock); return IRQ_HANDLED; } static const struct mei_hw_ops mei_me_hw_ops = { .fw_status = mei_me_fw_status, .pg_state = mei_me_pg_state, .host_is_ready = mei_me_host_is_ready, .hw_is_ready = mei_me_hw_is_ready, .hw_reset = mei_me_hw_reset, .hw_config = mei_me_hw_config, .hw_start = mei_me_hw_start, .pg_in_transition = mei_me_pg_in_transition, .pg_is_enabled = mei_me_pg_is_enabled, .intr_clear = mei_me_intr_clear, .intr_enable = mei_me_intr_enable, .intr_disable = mei_me_intr_disable, .hbuf_free_slots = mei_me_hbuf_empty_slots, .hbuf_is_ready = mei_me_hbuf_is_empty, .hbuf_max_len = mei_me_hbuf_max_len, .write = mei_me_write_message, .rdbuf_full_slots = mei_me_count_full_read_slots, .read_hdr = mei_me_mecbrw_read, .read = mei_me_read_slots }; static bool mei_me_fw_type_nm(struct pci_dev *pdev) { u32 reg; pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg); /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ return (reg & 0x600) == 0x200; } #define MEI_CFG_FW_NM \ .quirk_probe = mei_me_fw_type_nm static bool mei_me_fw_type_sps(struct pci_dev *pdev) { u32 reg; /* Read ME FW Status check for SPS Firmware */ pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg); /* if bits [19:16] = 15, running SPS Firmware */ return (reg & 0xf0000) == 0xf0000; } #define MEI_CFG_FW_SPS \ 
.quirk_probe = mei_me_fw_type_sps #define MEI_CFG_LEGACY_HFS \ .fw_status.count = 0 #define MEI_CFG_ICH_HFS \ .fw_status.count = 1, \ .fw_status.status[0] = PCI_CFG_HFS_1 #define MEI_CFG_PCH_HFS \ .fw_status.count = 2, \ .fw_status.status[0] = PCI_CFG_HFS_1, \ .fw_status.status[1] = PCI_CFG_HFS_2 #define MEI_CFG_PCH8_HFS \ .fw_status.count = 6, \ .fw_status.status[0] = PCI_CFG_HFS_1, \ .fw_status.status[1] = PCI_CFG_HFS_2, \ .fw_status.status[2] = PCI_CFG_HFS_3, \ .fw_status.status[3] = PCI_CFG_HFS_4, \ .fw_status.status[4] = PCI_CFG_HFS_5, \ .fw_status.status[5] = PCI_CFG_HFS_6 /* ICH Legacy devices */ const struct mei_cfg mei_me_legacy_cfg = { MEI_CFG_LEGACY_HFS, }; /* ICH devices */ const struct mei_cfg mei_me_ich_cfg = { MEI_CFG_ICH_HFS, }; /* PCH devices */ const struct mei_cfg mei_me_pch_cfg = { MEI_CFG_PCH_HFS, }; /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */ const struct mei_cfg mei_me_pch_cpt_pbg_cfg = { MEI_CFG_PCH_HFS, MEI_CFG_FW_NM, }; /* PCH8 Lynx Point and newer devices */ const struct mei_cfg mei_me_pch8_cfg = { MEI_CFG_PCH8_HFS, }; /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */ const struct mei_cfg mei_me_pch8_sps_cfg = { MEI_CFG_PCH8_HFS, MEI_CFG_FW_SPS, }; /** * mei_me_dev_init - allocates and initializes the mei device structure * * @pdev: The pci device structure * @cfg: per device generation config * * Return: The mei_device_device pointer on success, NULL on failure. */ struct mei_device *mei_me_dev_init(struct pci_dev *pdev, const struct mei_cfg *cfg) { struct mei_device *dev; struct mei_me_hw *hw; dev = kzalloc(sizeof(struct mei_device) + sizeof(struct mei_me_hw), GFP_KERNEL); if (!dev) return NULL; hw = to_me_hw(dev); mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); hw->cfg = cfg; return dev; }
gpl-2.0
gpillusion/YP-GB70_Illusion_kernel
drivers/base/dma-coherent.c
802
4534
/* * Coherent per-device memory handling. * Borrowed from i386 */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/dma-mapping.h> struct dma_coherent_mem { void *virt_base; u32 device_base; int size; int flags; unsigned long *bitmap; }; int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dma_addr_t device_addr, size_t size, int flags) { void __iomem *mem_base = NULL; int pages = size >> PAGE_SHIFT; int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) goto out; if (!size) goto out; if (dev->dma_mem) goto out; /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ mem_base = ioremap(bus_addr, size); if (!mem_base) goto out; dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); if (!dev->dma_mem) goto out; dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); if (!dev->dma_mem->bitmap) goto free1_out; dev->dma_mem->virt_base = mem_base; dev->dma_mem->device_base = device_addr; dev->dma_mem->size = pages; dev->dma_mem->flags = flags; if (flags & DMA_MEMORY_MAP) return DMA_MEMORY_MAP; return DMA_MEMORY_IO; free1_out: kfree(dev->dma_mem); out: if (mem_base) iounmap(mem_base); return 0; } EXPORT_SYMBOL(dma_declare_coherent_memory); void dma_release_declared_memory(struct device *dev) { struct dma_coherent_mem *mem = dev->dma_mem; if (!mem) return; dev->dma_mem = NULL; iounmap(mem->virt_base); kfree(mem->bitmap); kfree(mem); } EXPORT_SYMBOL(dma_release_declared_memory); void *dma_mark_declared_memory_occupied(struct device *dev, dma_addr_t device_addr, size_t size) { struct dma_coherent_mem *mem = dev->dma_mem; int pos, err; size += device_addr & ~PAGE_MASK; if (!mem) return ERR_PTR(-EINVAL); pos = (device_addr - mem->device_base) >> PAGE_SHIFT; err = bitmap_allocate_region(mem->bitmap, pos, get_order(size)); if (err != 0) return ERR_PTR(err); return mem->virt_base + (pos << PAGE_SHIFT); } EXPORT_SYMBOL(dma_mark_declared_memory_occupied); /** * 
dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area * * @dev: device from which we allocate memory * @size: size of requested memory area * @dma_handle: This will be filled with the correct dma handle * @ret: This pointer will be filled with the virtual address * to allocated area. * * This function should be only called from per-arch dma_alloc_coherent() * to support allocation from per-device coherent memory pools. * * Returns 0 if dma_alloc_coherent should continue with allocating from * generic memory areas, or !0 if dma_alloc_coherent should return @ret. */ int dma_alloc_from_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle, void **ret) { struct dma_coherent_mem *mem; int order = get_order(size); int pageno; if (!dev) return 0; mem = dev->dma_mem; if (!mem) return 0; *ret = NULL; if (unlikely(size > (mem->size << PAGE_SHIFT))) goto err; pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); if (unlikely(pageno < 0)) goto err; /* * Memory was found in the per-device area. */ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); *ret = mem->virt_base + (pageno << PAGE_SHIFT); memset(*ret, 0, size); return 1; err: /* * In the case where the allocation can not be satisfied from the * per-device area, try to fall back to generic memory if the * constraints allow it. */ return mem->flags & DMA_MEMORY_EXCLUSIVE; } EXPORT_SYMBOL(dma_alloc_from_coherent); /** * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool * @dev: device from which the memory was allocated * @order: the order of pages allocated * @vaddr: virtual address of allocated pages * * This checks whether the memory was allocated from the per-device * coherent memory pool and if so, releases that memory. * * Returns 1 if we correctly released the memory, or 0 if * dma_release_coherent() should proceed with releasing memory from * generic pools. 
*/ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) { struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; bitmap_release_region(mem->bitmap, page, order); return 1; } return 0; } EXPORT_SYMBOL(dma_release_from_coherent);
gpl-2.0
oschmidt/kernel_I8160P
drivers/staging/dream/qdsp5/adsp_driver.c
1058
15282
/* arch/arm/mach-msm/qdsp5/adsp_driver.c * * Copyright (C) 2008 Google, Inc. * Author: Iliyan Malchev <ibm@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/cdev.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "adsp.h" #include <linux/msm_adsp.h> #include <linux/android_pmem.h> struct adsp_pmem_region { struct hlist_node list; void *vaddr; unsigned long paddr; unsigned long kvaddr; unsigned long len; struct file *file; }; struct adsp_device { struct msm_adsp_module *module; spinlock_t event_queue_lock; wait_queue_head_t event_wait; struct list_head event_queue; int abort; const char *name; struct device *device; struct cdev cdev; }; static struct adsp_device *inode_to_device(struct inode *inode); #define __CONTAINS(r, v, l) ({ \ typeof(r) __r = r; \ typeof(v) __v = v; \ typeof(v) __e = __v + l; \ int res = __v >= __r->vaddr && \ __e <= __r->vaddr + __r->len; \ res; \ }) #define CONTAINS(r1, r2) ({ \ typeof(r2) __r2 = r2; \ __CONTAINS(r1, __r2->vaddr, __r2->len); \ }) #define IN_RANGE(r, v) ({ \ typeof(r) __r = r; \ typeof(v) __vv = v; \ int res = ((__vv >= __r->vaddr) && \ (__vv < (__r->vaddr + __r->len))); \ res; \ }) #define OVERLAPS(r1, r2) ({ \ typeof(r1) __r1 = r1; \ typeof(r2) __r2 = r2; \ typeof(__r2->vaddr) __v = __r2->vaddr; \ typeof(__v) __e = __v + __r2->len - 1; \ int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \ res; \ }) static int adsp_pmem_check(struct msm_adsp_module *module, void *vaddr, unsigned 
long len) { struct adsp_pmem_region *region_elt; struct hlist_node *node; struct adsp_pmem_region t = { .vaddr = vaddr, .len = len }; hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) { if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || OVERLAPS(region_elt, &t)) { printk(KERN_ERR "adsp: module %s:" " region (vaddr %p len %ld)" " clashes with registered region" " (vaddr %p paddr %p len %ld)\n", module->name, vaddr, len, region_elt->vaddr, (void *)region_elt->paddr, region_elt->len); return -EINVAL; } } return 0; } static int adsp_pmem_add(struct msm_adsp_module *module, struct adsp_pmem_info *info) { unsigned long paddr, kvaddr, len; struct file *file; struct adsp_pmem_region *region; int rc = -EINVAL; mutex_lock(&module->pmem_regions_lock); region = kmalloc(sizeof(*region), GFP_KERNEL); if (!region) { rc = -ENOMEM; goto end; } INIT_HLIST_NODE(&region->list); if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) { kfree(region); goto end; } rc = adsp_pmem_check(module, info->vaddr, len); if (rc < 0) { put_pmem_file(file); kfree(region); goto end; } region->vaddr = info->vaddr; region->paddr = paddr; region->kvaddr = kvaddr; region->len = len; region->file = file; hlist_add_head(&region->list, &module->pmem_regions); end: mutex_unlock(&module->pmem_regions_lock); return rc; } static int adsp_pmem_lookup_vaddr(struct msm_adsp_module *module, void **addr, unsigned long len, struct adsp_pmem_region **region) { struct hlist_node *node; void *vaddr = *addr; struct adsp_pmem_region *region_elt; int match_count = 0; *region = NULL; /* returns physical address or zero */ hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) { if (vaddr >= region_elt->vaddr && vaddr < region_elt->vaddr + region_elt->len && vaddr + len <= region_elt->vaddr + region_elt->len) { /* offset since we could pass vaddr inside a registerd * pmem buffer */ match_count++; if (!*region) *region = region_elt; } } if (match_count > 1) { printk(KERN_ERR "adsp: 
module %s: " "multiple hits for vaddr %p, len %ld\n", module->name, vaddr, len); hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) { if (vaddr >= region_elt->vaddr && vaddr < region_elt->vaddr + region_elt->len && vaddr + len <= region_elt->vaddr + region_elt->len) printk(KERN_ERR "\t%p, %ld --> %p\n", region_elt->vaddr, region_elt->len, (void *)region_elt->paddr); } } return *region ? 0 : -1; } int adsp_pmem_fixup_kvaddr(struct msm_adsp_module *module, void **addr, unsigned long *kvaddr, unsigned long len) { struct adsp_pmem_region *region; void *vaddr = *addr; unsigned long *paddr = (unsigned long *)addr; int ret; ret = adsp_pmem_lookup_vaddr(module, addr, len, &region); if (ret) { printk(KERN_ERR "adsp: not patching %s (paddr & kvaddr)," " lookup (%p, %ld) failed\n", module->name, vaddr, len); return ret; } *paddr = region->paddr + (vaddr - region->vaddr); *kvaddr = region->kvaddr + (vaddr - region->vaddr); return 0; } int adsp_pmem_fixup(struct msm_adsp_module *module, void **addr, unsigned long len) { struct adsp_pmem_region *region; void *vaddr = *addr; unsigned long *paddr = (unsigned long *)addr; int ret; ret = adsp_pmem_lookup_vaddr(module, addr, len, &region); if (ret) { printk(KERN_ERR "adsp: not patching %s, lookup (%p, %ld) failed\n", module->name, vaddr, len); return ret; } *paddr = region->paddr + (vaddr - region->vaddr); return 0; } static int adsp_verify_cmd(struct msm_adsp_module *module, unsigned int queue_id, void *cmd_data, size_t cmd_size) { /* call the per module verifier */ if (module->verify_cmd) return module->verify_cmd(module, queue_id, cmd_data, cmd_size); else printk(KERN_INFO "adsp: no packet verifying function " "for task %s\n", module->name); return 0; } static long adsp_write_cmd(struct adsp_device *adev, void __user *arg) { struct adsp_command_t cmd; unsigned char buf[256]; void *cmd_data; long rc; if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd))) return -EFAULT; if (cmd.len > 256) { cmd_data = 
kmalloc(cmd.len, GFP_USER); if (!cmd_data) return -ENOMEM; } else { cmd_data = buf; } if (copy_from_user(cmd_data, (void __user *)(cmd.data), cmd.len)) { rc = -EFAULT; goto end; } mutex_lock(&adev->module->pmem_regions_lock); if (adsp_verify_cmd(adev->module, cmd.queue, cmd_data, cmd.len)) { printk(KERN_ERR "module %s: verify failed.\n", adev->module->name); rc = -EINVAL; goto end; } rc = msm_adsp_write(adev->module, cmd.queue, cmd_data, cmd.len); end: mutex_unlock(&adev->module->pmem_regions_lock); if (cmd.len > 256) kfree(cmd_data); return rc; } static int adsp_events_pending(struct adsp_device *adev) { unsigned long flags; int yes; spin_lock_irqsave(&adev->event_queue_lock, flags); yes = !list_empty(&adev->event_queue); spin_unlock_irqrestore(&adev->event_queue_lock, flags); return yes || adev->abort; } static int adsp_pmem_lookup_paddr(struct msm_adsp_module *module, void **addr, struct adsp_pmem_region **region) { struct hlist_node *node; unsigned long paddr = (unsigned long)(*addr); struct adsp_pmem_region *region_elt; hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) { if (paddr >= region_elt->paddr && paddr < region_elt->paddr + region_elt->len) { *region = region_elt; return 0; } } return -1; } int adsp_pmem_paddr_fixup(struct msm_adsp_module *module, void **addr) { struct adsp_pmem_region *region; unsigned long paddr = (unsigned long)(*addr); unsigned long *vaddr = (unsigned long *)addr; int ret; ret = adsp_pmem_lookup_paddr(module, addr, &region); if (ret) { printk(KERN_ERR "adsp: not patching %s, paddr %p lookup failed\n", module->name, vaddr); return ret; } *vaddr = (unsigned long)region->vaddr + (paddr - region->paddr); return 0; } static int adsp_patch_event(struct msm_adsp_module *module, struct adsp_event *event) { /* call the per-module msg verifier */ if (module->patch_event) return module->patch_event(module, event); return 0; } static long adsp_get_event(struct adsp_device *adev, void __user *arg) { unsigned long flags; struct 
adsp_event *data = NULL; struct adsp_event_t evt; int timeout; long rc = 0; if (copy_from_user(&evt, arg, sizeof(struct adsp_event_t))) return -EFAULT; timeout = (int)evt.timeout_ms; if (timeout > 0) { rc = wait_event_interruptible_timeout( adev->event_wait, adsp_events_pending(adev), msecs_to_jiffies(timeout)); if (rc == 0) return -ETIMEDOUT; } else { rc = wait_event_interruptible( adev->event_wait, adsp_events_pending(adev)); } if (rc < 0) return rc; if (adev->abort) return -ENODEV; spin_lock_irqsave(&adev->event_queue_lock, flags); if (!list_empty(&adev->event_queue)) { data = list_first_entry(&adev->event_queue, struct adsp_event, list); list_del(&data->list); } spin_unlock_irqrestore(&adev->event_queue_lock, flags); if (!data) return -EAGAIN; /* DSP messages are type 0; they may contain physical addresses */ if (data->type == 0) adsp_patch_event(adev->module, data); /* map adsp_event --> adsp_event_t */ if (evt.len < data->size) { rc = -ETOOSMALL; goto end; } if (data->msg_id != EVENT_MSG_ID) { if (copy_to_user((void *)(evt.data), data->data.msg16, data->size)) { rc = -EFAULT; goto end; } } else { if (copy_to_user((void *)(evt.data), data->data.msg32, data->size)) { rc = -EFAULT; goto end; } } evt.type = data->type; /* 0 --> from aDSP, 1 --> from ARM9 */ evt.msg_id = data->msg_id; evt.flags = data->is16; evt.len = data->size; if (copy_to_user(arg, &evt, sizeof(evt))) rc = -EFAULT; end: kfree(data); return rc; } static long adsp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct adsp_device *adev = filp->private_data; switch (cmd) { case ADSP_IOCTL_ENABLE: return msm_adsp_enable(adev->module); case ADSP_IOCTL_DISABLE: return msm_adsp_disable(adev->module); case ADSP_IOCTL_DISABLE_EVENT_RSP: return 0; case ADSP_IOCTL_DISABLE_ACK: pr_err("adsp: ADSP_IOCTL_DISABLE_ACK is not implemented.\n"); break; case ADSP_IOCTL_WRITE_COMMAND: return adsp_write_cmd(adev, (void __user *) arg); case ADSP_IOCTL_GET_EVENT: return adsp_get_event(adev, (void 
__user *) arg); case ADSP_IOCTL_SET_CLKRATE: { #if CONFIG_MSM_AMSS_VERSION==6350 unsigned long clk_rate; if (copy_from_user(&clk_rate, (void *) arg, sizeof(clk_rate))) return -EFAULT; return adsp_set_clkrate(adev->module, clk_rate); #endif } case ADSP_IOCTL_REGISTER_PMEM: { struct adsp_pmem_info info; if (copy_from_user(&info, (void *) arg, sizeof(info))) return -EFAULT; return adsp_pmem_add(adev->module, &info); } case ADSP_IOCTL_ABORT_EVENT_READ: adev->abort = 1; wake_up(&adev->event_wait); break; default: break; } return -EINVAL; } static int adsp_release(struct inode *inode, struct file *filp) { struct adsp_device *adev = filp->private_data; struct msm_adsp_module *module = adev->module; struct hlist_node *node, *tmp; struct adsp_pmem_region *region; pr_info("adsp_release() '%s'\n", adev->name); /* clear module before putting it to avoid race with open() */ adev->module = NULL; mutex_lock(&module->pmem_regions_lock); hlist_for_each_safe(node, tmp, &module->pmem_regions) { region = hlist_entry(node, struct adsp_pmem_region, list); hlist_del(node); put_pmem_file(region->file); kfree(region); } mutex_unlock(&module->pmem_regions_lock); BUG_ON(!hlist_empty(&module->pmem_regions)); msm_adsp_put(module); return 0; } static void adsp_event(void *driver_data, unsigned id, size_t len, void (*getevent)(void *ptr, size_t len)) { struct adsp_device *adev = driver_data; struct adsp_event *event; unsigned long flags; if (len > ADSP_EVENT_MAX_SIZE) { pr_err("adsp_event: event too large (%d bytes)\n", len); return; } event = kmalloc(sizeof(*event), GFP_ATOMIC); if (!event) { pr_err("adsp_event: cannot allocate buffer\n"); return; } if (id != EVENT_MSG_ID) { event->type = 0; event->is16 = 0; event->msg_id = id; event->size = len; getevent(event->data.msg16, len); } else { event->type = 1; event->is16 = 1; event->msg_id = id; event->size = len; getevent(event->data.msg32, len); } spin_lock_irqsave(&adev->event_queue_lock, flags); list_add_tail(&event->list, &adev->event_queue); 
spin_unlock_irqrestore(&adev->event_queue_lock, flags); wake_up(&adev->event_wait); } static struct msm_adsp_ops adsp_ops = { .event = adsp_event, }; static int adsp_open(struct inode *inode, struct file *filp) { struct adsp_device *adev; int rc; rc = nonseekable_open(inode, filp); if (rc < 0) return rc; adev = inode_to_device(inode); if (!adev) return -ENODEV; pr_info("adsp_open() name = '%s'\n", adev->name); rc = msm_adsp_get(adev->name, &adev->module, &adsp_ops, adev); if (rc) return rc; pr_info("adsp_open() module '%s' adev %p\n", adev->name, adev); filp->private_data = adev; adev->abort = 0; INIT_HLIST_HEAD(&adev->module->pmem_regions); mutex_init(&adev->module->pmem_regions_lock); return 0; } static unsigned adsp_device_count; static struct adsp_device *adsp_devices; static struct adsp_device *inode_to_device(struct inode *inode) { unsigned n = MINOR(inode->i_rdev); if (n < adsp_device_count) { if (adsp_devices[n].device) return adsp_devices + n; } return NULL; } static dev_t adsp_devno; static struct class *adsp_class; static struct file_operations adsp_fops = { .owner = THIS_MODULE, .open = adsp_open, .unlocked_ioctl = adsp_ioctl, .release = adsp_release, }; static void adsp_create(struct adsp_device *adev, const char *name, struct device *parent, dev_t devt) { struct device *dev; int rc; dev = device_create(adsp_class, parent, devt, "%s", name); if (IS_ERR(dev)) return; init_waitqueue_head(&adev->event_wait); INIT_LIST_HEAD(&adev->event_queue); spin_lock_init(&adev->event_queue_lock); cdev_init(&adev->cdev, &adsp_fops); adev->cdev.owner = THIS_MODULE; rc = cdev_add(&adev->cdev, devt, 1); if (rc < 0) { device_destroy(adsp_class, devt); } else { adev->device = dev; adev->name = name; } } void msm_adsp_publish_cdevs(struct msm_adsp_module *modules, unsigned n) { int rc; adsp_devices = kzalloc(sizeof(struct adsp_device) * n, GFP_KERNEL); if (!adsp_devices) return; adsp_class = class_create(THIS_MODULE, "adsp"); if (IS_ERR(adsp_class)) goto fail_create_class; 
rc = alloc_chrdev_region(&adsp_devno, 0, n, "adsp"); if (rc < 0) goto fail_alloc_region; adsp_device_count = n; for (n = 0; n < adsp_device_count; n++) { adsp_create(adsp_devices + n, modules[n].name, &modules[n].pdev.dev, MKDEV(MAJOR(adsp_devno), n)); } return; fail_alloc_region: class_unregister(adsp_class); fail_create_class: kfree(adsp_devices); }
gpl-2.0
riversource/Galaxy-Note-Kernel
net/netfilter/ipvs/ip_vs_proto_udp.c
1314
12805
/* * ip_vs_proto_udp.c: UDP load balancing support for IPVS * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * Julian Anastasov <ja@ssi.bg> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/udp.h> #include <net/ip_vs.h> #include <net/ip.h> #include <net/ip6_checksum.h> static struct ip_vs_conn * udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, const struct ip_vs_iphdr *iph, unsigned int proto_off, int inverse) { struct ip_vs_conn *cp; __be16 _ports[2], *pptr; pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); if (pptr == NULL) return NULL; if (likely(!inverse)) { cp = ip_vs_conn_in_get(af, iph->protocol, &iph->saddr, pptr[0], &iph->daddr, pptr[1]); } else { cp = ip_vs_conn_in_get(af, iph->protocol, &iph->daddr, pptr[1], &iph->saddr, pptr[0]); } return cp; } static struct ip_vs_conn * udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp, const struct ip_vs_iphdr *iph, unsigned int proto_off, int inverse) { struct ip_vs_conn *cp; __be16 _ports[2], *pptr; pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports); if (pptr == NULL) return NULL; if (likely(!inverse)) { cp = ip_vs_conn_out_get(af, iph->protocol, &iph->saddr, pptr[0], &iph->daddr, pptr[1]); } else { cp = ip_vs_conn_out_get(af, iph->protocol, &iph->daddr, pptr[1], &iph->saddr, pptr[0]); } return cp; } static int udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, int *verdict, struct ip_vs_conn **cpp) { struct ip_vs_service *svc; struct udphdr _udph, *uh; struct 
ip_vs_iphdr iph; ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph); if (uh == NULL) { *verdict = NF_DROP; return 0; } svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr, uh->dest); if (svc) { if (ip_vs_todrop()) { /* * It seems that we are very loaded. * We have to drop this packet :( */ ip_vs_service_put(svc); *verdict = NF_DROP; return 0; } /* * Let the virtual server select a real server for the * incoming connection, and create a connection entry. */ *cpp = ip_vs_schedule(svc, skb); if (!*cpp) { *verdict = ip_vs_leave(svc, skb, pp); return 0; } ip_vs_service_put(svc); } return 1; } static inline void udp_fast_csum_update(int af, struct udphdr *uhdr, const union nf_inet_addr *oldip, const union nf_inet_addr *newip, __be16 oldport, __be16 newport) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) uhdr->check = csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, ip_vs_check_diff2(oldport, newport, ~csum_unfold(uhdr->check)))); else #endif uhdr->check = csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, ip_vs_check_diff2(oldport, newport, ~csum_unfold(uhdr->check)))); if (!uhdr->check) uhdr->check = CSUM_MANGLED_0; } static inline void udp_partial_csum_update(int af, struct udphdr *uhdr, const union nf_inet_addr *oldip, const union nf_inet_addr *newip, __be16 oldlen, __be16 newlen) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) uhdr->check = csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, ip_vs_check_diff2(oldlen, newlen, ~csum_unfold(uhdr->check)))); else #endif uhdr->check = csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, ip_vs_check_diff2(oldlen, newlen, ~csum_unfold(uhdr->check)))); } static int udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp) { struct udphdr *udph; unsigned int udphoff; int oldlen; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) udphoff = sizeof(struct ipv6hdr); else #endif udphoff = ip_hdrlen(skb); oldlen = skb->len - 
udphoff; /* csum_check requires unshared skb */ if (!skb_make_writable(skb, udphoff+sizeof(*udph))) return 0; if (unlikely(cp->app != NULL)) { /* Some checks before mangling */ if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) return 0; /* * Call application helper if needed */ if (!ip_vs_app_pkt_out(cp, skb)) return 0; } udph = (void *)skb_network_header(skb) + udphoff; udph->source = cp->vport; /* * Adjust UDP checksums */ if (skb->ip_summed == CHECKSUM_PARTIAL) { udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, htons(oldlen), htons(skb->len - udphoff)); } else if (!cp->app && (udph->check != 0)) { /* Only port and addr are changed, do fast csum update */ udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, cp->dport, cp->vport); if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; } else { /* full checksum calculation */ udph->check = 0; skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) udph->check = csum_ipv6_magic(&cp->vaddr.in6, &cp->caddr.in6, skb->len - udphoff, cp->protocol, skb->csum); else #endif udph->check = csum_tcpudp_magic(cp->vaddr.ip, cp->caddr.ip, skb->len - udphoff, cp->protocol, skb->csum); if (udph->check == 0) udph->check = CSUM_MANGLED_0; IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", pp->name, udph->check, (char*)&(udph->check) - (char*)udph); } return 1; } static int udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp) { struct udphdr *udph; unsigned int udphoff; int oldlen; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) udphoff = sizeof(struct ipv6hdr); else #endif udphoff = ip_hdrlen(skb); oldlen = skb->len - udphoff; /* csum_check requires unshared skb */ if (!skb_make_writable(skb, udphoff+sizeof(*udph))) return 0; if (unlikely(cp->app != NULL)) { /* Some checks before mangling */ if (pp->csum_check && !pp->csum_check(cp->af, skb, pp)) return 0; /* * Attempt ip_vs_app call. 
* It will fix ip_vs_conn */ if (!ip_vs_app_pkt_in(cp, skb)) return 0; } udph = (void *)skb_network_header(skb) + udphoff; udph->dest = cp->dport; /* * Adjust UDP checksums */ if (skb->ip_summed == CHECKSUM_PARTIAL) { udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr, htons(oldlen), htons(skb->len - udphoff)); } else if (!cp->app && (udph->check != 0)) { /* Only port and addr are changed, do fast csum update */ udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr, cp->vport, cp->dport); if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; } else { /* full checksum calculation */ udph->check = 0; skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) udph->check = csum_ipv6_magic(&cp->caddr.in6, &cp->daddr.in6, skb->len - udphoff, cp->protocol, skb->csum); else #endif udph->check = csum_tcpudp_magic(cp->caddr.ip, cp->daddr.ip, skb->len - udphoff, cp->protocol, skb->csum); if (udph->check == 0) udph->check = CSUM_MANGLED_0; skb->ip_summed = CHECKSUM_UNNECESSARY; } return 1; } static int udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) { struct udphdr _udph, *uh; unsigned int udphoff; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) udphoff = sizeof(struct ipv6hdr); else #endif udphoff = ip_hdrlen(skb); uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph); if (uh == NULL) return 0; if (uh->check != 0) { switch (skb->ip_summed) { case CHECKSUM_NONE: skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); case CHECKSUM_COMPLETE: #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len - udphoff, ipv6_hdr(skb)->nexthdr, skb->csum)) { IP_VS_DBG_RL_PKT(0, pp, skb, 0, "Failed checksum for"); return 0; } } else #endif if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->len - udphoff, ip_hdr(skb)->protocol, skb->csum)) { IP_VS_DBG_RL_PKT(0, pp, skb, 0, "Failed checksum 
for"); return 0; } break; default: /* No need to checksum. */ break; } } return 1; } /* * Note: the caller guarantees that only one of register_app, * unregister_app or app_conn_bind is called each time. */ #define UDP_APP_TAB_BITS 4 #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS) #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1) static struct list_head udp_apps[UDP_APP_TAB_SIZE]; static DEFINE_SPINLOCK(udp_app_lock); static inline __u16 udp_app_hashkey(__be16 port) { return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port) & UDP_APP_TAB_MASK; } static int udp_register_app(struct ip_vs_app *inc) { struct ip_vs_app *i; __u16 hash; __be16 port = inc->port; int ret = 0; hash = udp_app_hashkey(port); spin_lock_bh(&udp_app_lock); list_for_each_entry(i, &udp_apps[hash], p_list) { if (i->port == port) { ret = -EEXIST; goto out; } } list_add(&inc->p_list, &udp_apps[hash]); atomic_inc(&ip_vs_protocol_udp.appcnt); out: spin_unlock_bh(&udp_app_lock); return ret; } static void udp_unregister_app(struct ip_vs_app *inc) { spin_lock_bh(&udp_app_lock); atomic_dec(&ip_vs_protocol_udp.appcnt); list_del(&inc->p_list); spin_unlock_bh(&udp_app_lock); } static int udp_app_conn_bind(struct ip_vs_conn *cp) { int hash; struct ip_vs_app *inc; int result = 0; /* Default binding: bind app only for NAT */ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) return 0; /* Lookup application incarnations and bind the right one */ hash = udp_app_hashkey(cp->vport); spin_lock(&udp_app_lock); list_for_each_entry(inc, &udp_apps[hash], p_list) { if (inc->port == cp->vport) { if (unlikely(!ip_vs_app_inc_get(inc))) break; spin_unlock(&udp_app_lock); IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" "%s:%u to app %s on port %u\n", __func__, IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), inc->name, ntohs(inc->port)); cp->app = inc; if (inc->init_conn) result = inc->init_conn(inc, cp); goto out; } } spin_unlock(&udp_app_lock); out: return 
result; } static int udp_timeouts[IP_VS_UDP_S_LAST+1] = { [IP_VS_UDP_S_NORMAL] = 5*60*HZ, [IP_VS_UDP_S_LAST] = 2*HZ, }; static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = { [IP_VS_UDP_S_NORMAL] = "UDP", [IP_VS_UDP_S_LAST] = "BUG!", }; static int udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to) { return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_UDP_S_LAST, udp_state_name_table, sname, to); } static const char * udp_state_name(int state) { if (state >= IP_VS_UDP_S_LAST) return "ERR!"; return udp_state_name_table[state] ? udp_state_name_table[state] : "?"; } static int udp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_protocol *pp) { cp->timeout = pp->timeout_table[IP_VS_UDP_S_NORMAL]; return 1; } static void udp_init(struct ip_vs_protocol *pp) { IP_VS_INIT_HASH_TABLE(udp_apps); pp->timeout_table = udp_timeouts; } static void udp_exit(struct ip_vs_protocol *pp) { } struct ip_vs_protocol ip_vs_protocol_udp = { .name = "UDP", .protocol = IPPROTO_UDP, .num_states = IP_VS_UDP_S_LAST, .dont_defrag = 0, .init = udp_init, .exit = udp_exit, .conn_schedule = udp_conn_schedule, .conn_in_get = udp_conn_in_get, .conn_out_get = udp_conn_out_get, .snat_handler = udp_snat_handler, .dnat_handler = udp_dnat_handler, .csum_check = udp_csum_check, .state_transition = udp_state_transition, .state_name = udp_state_name, .register_app = udp_register_app, .unregister_app = udp_unregister_app, .app_conn_bind = udp_app_conn_bind, .debug_packet = ip_vs_tcpudp_debug_packet, .timeout_change = NULL, .set_state_timeout = udp_set_state_timeout, };
gpl-2.0
DJSteve/StreakKernel
arch/avr32/kernel/avr32_ksyms.c
1826
1843
/*
 * Export AVR32-specific functions for loadable modules.
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>

#include <asm/checksum.h>
#include <asm/uaccess.h>

/*
 * GCC functions
 *
 * 64-bit shift helpers that GCC emits calls to for long long shifts on
 * AVR32; they are implemented in assembly elsewhere in the arch tree, so
 * declare them here before exporting.
 */
extern unsigned long long __avr32_lsl64(unsigned long long u, unsigned long b);
extern unsigned long long __avr32_lsr64(unsigned long long u, unsigned long b);
extern unsigned long long __avr32_asr64(unsigned long long u, unsigned long b);
EXPORT_SYMBOL(__avr32_lsl64);
EXPORT_SYMBOL(__avr32_lsr64);
EXPORT_SYMBOL(__avr32_asr64);

/*
 * String functions
 */
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);

EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);

/*
 * Userspace access stuff.
 */
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(strnlen_user);

/* Checksumming helpers used by networking code in modules. */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);

/* Delay loops (lib/delay.S) */
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__const_udelay);

/* Bit operations (lib/findbit.S) */
EXPORT_SYMBOL(find_first_zero_bit);
EXPORT_SYMBOL(find_next_zero_bit);
EXPORT_SYMBOL(find_first_bit);
EXPORT_SYMBOL(find_next_bit);
EXPORT_SYMBOL(generic_find_next_le_bit);
EXPORT_SYMBOL(generic_find_next_zero_le_bit);

/* I/O primitives (lib/io-*.S) */
EXPORT_SYMBOL(__raw_readsb);
EXPORT_SYMBOL(__raw_readsw);
EXPORT_SYMBOL(__raw_readsl);
EXPORT_SYMBOL(__raw_writesb);
EXPORT_SYMBOL(__raw_writesw);
EXPORT_SYMBOL(__raw_writesl);
gpl-2.0
Hardslog/android_kernel_asus_ze551kl
drivers/media/i2c/saa7115.c
2082
51012
/* saa711x - Philips SAA711x video decoder driver * This driver can work with saa7111, saa7111a, saa7113, saa7114, * saa7115 and saa7118. * * Based on saa7114 driver by Maxim Yevtyushkin, which is based on * the saa7111 driver by Dave Perks. * * Copyright (C) 1998 Dave Perks <dperks@ibm.net> * Copyright (C) 2002 Maxim Yevtyushkin <max@linuxmedialabs.com> * * Slight changes for video timing and attachment output by * Wolfgang Scherr <scherr@net4you.net> * * Moved over to the linux >= 2.4.x i2c protocol (1/1/2003) * by Ronald Bultje <rbultje@ronald.bitfreak.net> * * Added saa7115 support by Kevin Thayer <nufan_wfk at yahoo.com> * (2/17/2003) * * VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl> * * Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> * SAA7111, SAA7113 and SAA7118 support * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ #include "saa711x_regs.h" #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-chip-ident.h> #include <media/saa7115.h> #include <asm/div64.h> #define VRES_60HZ (480+16) MODULE_DESCRIPTION("Philips SAA7111/SAA7113/SAA7114/SAA7115/SAA7118 video decoder driver"); MODULE_AUTHOR( "Maxim Yevtyushkin, Kevin Thayer, Chris Kennedy, " "Hans Verkuil, Mauro Carvalho Chehab"); MODULE_LICENSE("GPL"); static bool debug; module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); struct saa711x_state { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; struct { /* chroma gain control cluster */ struct v4l2_ctrl *agc; struct v4l2_ctrl *gain; }; v4l2_std_id std; int input; int output; int enable; int radio; int width; int height; u32 ident; u32 audclk_freq; u32 crystal_freq; bool ucgc; u8 cgcdiv; bool apll; bool double_asclk; }; static inline struct saa711x_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct saa711x_state, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct saa711x_state, hdl)->sd; } /* ----------------------------------------------------------------------- */ static inline int saa711x_write(struct v4l2_subdev *sd, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_write_byte_data(client, reg, value); } /* Sanity routine to check if a register is present */ static int saa711x_has_reg(const int id, const u8 reg) { if (id == V4L2_IDENT_SAA7111) return reg < 0x20 && reg != 0x01 && reg != 0x0f && (reg < 0x13 || reg > 0x19) && reg != 0x1d && reg != 0x1e; if (id == V4L2_IDENT_SAA7111A) return reg < 0x20 && reg != 0x01 && reg != 0x0f && reg != 0x14 && reg != 0x18 && reg != 0x19 && reg != 0x1d && reg != 0x1e; /* common for saa7113/4/5/8 */ if (unlikely((reg >= 0x3b && reg 
<= 0x3f) || reg == 0x5c || reg == 0x5f || reg == 0xa3 || reg == 0xa7 || reg == 0xab || reg == 0xaf || (reg >= 0xb5 && reg <= 0xb7) || reg == 0xd3 || reg == 0xd7 || reg == 0xdb || reg == 0xdf || (reg >= 0xe5 && reg <= 0xe7) || reg == 0x82 || (reg >= 0x89 && reg <= 0x8e))) return 0; switch (id) { case V4L2_IDENT_SAA7113: return reg != 0x14 && (reg < 0x18 || reg > 0x1e) && (reg < 0x20 || reg > 0x3f) && reg != 0x5d && reg < 0x63; case V4L2_IDENT_SAA7114: return (reg < 0x1a || reg > 0x1e) && (reg < 0x20 || reg > 0x2f) && (reg < 0x63 || reg > 0x7f) && reg != 0x33 && reg != 0x37 && reg != 0x81 && reg < 0xf0; case V4L2_IDENT_SAA7115: return (reg < 0x20 || reg > 0x2f) && reg != 0x65 && (reg < 0xfc || reg > 0xfe); case V4L2_IDENT_SAA7118: return (reg < 0x1a || reg > 0x1d) && (reg < 0x20 || reg > 0x22) && (reg < 0x26 || reg > 0x28) && reg != 0x33 && reg != 0x37 && (reg < 0x63 || reg > 0x7f) && reg != 0x81 && reg < 0xf0; } return 1; } static int saa711x_writeregs(struct v4l2_subdev *sd, const unsigned char *regs) { struct saa711x_state *state = to_state(sd); unsigned char reg, data; while (*regs != 0x00) { reg = *(regs++); data = *(regs++); /* According with datasheets, reserved regs should be filled with 0 - seems better not to touch on they */ if (saa711x_has_reg(state->ident, reg)) { if (saa711x_write(sd, reg, data) < 0) return -1; } else { v4l2_dbg(1, debug, sd, "tried to access reserved reg 0x%02x\n", reg); } } return 0; } static inline int saa711x_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } /* ----------------------------------------------------------------------- */ /* SAA7111 initialization table */ static const unsigned char saa7111_init[] = { R_01_INC_DELAY, 0x00, /* reserved */ /*front end */ R_02_INPUT_CNTL_1, 0xd0, /* FUSE=3, GUDL=2, MODE=0 */ R_03_INPUT_CNTL_2, 0x23, /* HLNRS=0, VBSL=1, WPOFF=0, HOLDG=0, * GAFIX=0, GAI1=256, GAI2=256 */ R_04_INPUT_CNTL_3, 0x00, /* 
GAI1=256 */ R_05_INPUT_CNTL_4, 0x00, /* GAI2=256 */ /* decoder */ R_06_H_SYNC_START, 0xf3, /* HSB at 13(50Hz) / 17(60Hz) * pixels after end of last line */ R_07_H_SYNC_STOP, 0xe8, /* HSS seems to be needed to * work with NTSC, too */ R_08_SYNC_CNTL, 0xc8, /* AUFD=1, FSEL=1, EXFIL=0, * VTRC=1, HPLL=0, VNOI=0 */ R_09_LUMA_CNTL, 0x01, /* BYPS=0, PREF=0, BPSS=0, * VBLB=0, UPTCV=0, APER=1 */ R_0A_LUMA_BRIGHT_CNTL, 0x80, R_0B_LUMA_CONTRAST_CNTL, 0x47, /* 0b - CONT=1.109 */ R_0C_CHROMA_SAT_CNTL, 0x40, R_0D_CHROMA_HUE_CNTL, 0x00, R_0E_CHROMA_CNTL_1, 0x01, /* 0e - CDTO=0, CSTD=0, DCCF=0, * FCTC=0, CHBW=1 */ R_0F_CHROMA_GAIN_CNTL, 0x00, /* reserved */ R_10_CHROMA_CNTL_2, 0x48, /* 10 - OFTS=1, HDEL=0, VRLN=1, YDEL=0 */ R_11_MODE_DELAY_CNTL, 0x1c, /* 11 - GPSW=0, CM99=0, FECO=0, COMPO=1, * OEYC=1, OEHV=1, VIPB=0, COLO=0 */ R_12_RT_SIGNAL_CNTL, 0x00, /* 12 - output control 2 */ R_13_RT_X_PORT_OUT_CNTL, 0x00, /* 13 - output control 3 */ R_14_ANAL_ADC_COMPAT_CNTL, 0x00, R_15_VGATE_START_FID_CHG, 0x00, R_16_VGATE_STOP, 0x00, R_17_MISC_VGATE_CONF_AND_MSB, 0x00, 0x00, 0x00 }; /* SAA7113 init codes */ static const unsigned char saa7113_init[] = { R_01_INC_DELAY, 0x08, R_02_INPUT_CNTL_1, 0xc2, R_03_INPUT_CNTL_2, 0x30, R_04_INPUT_CNTL_3, 0x00, R_05_INPUT_CNTL_4, 0x00, R_06_H_SYNC_START, 0x89, R_07_H_SYNC_STOP, 0x0d, R_08_SYNC_CNTL, 0x88, R_09_LUMA_CNTL, 0x01, R_0A_LUMA_BRIGHT_CNTL, 0x80, R_0B_LUMA_CONTRAST_CNTL, 0x47, R_0C_CHROMA_SAT_CNTL, 0x40, R_0D_CHROMA_HUE_CNTL, 0x00, R_0E_CHROMA_CNTL_1, 0x01, R_0F_CHROMA_GAIN_CNTL, 0x2a, R_10_CHROMA_CNTL_2, 0x08, R_11_MODE_DELAY_CNTL, 0x0c, R_12_RT_SIGNAL_CNTL, 0x07, R_13_RT_X_PORT_OUT_CNTL, 0x00, R_14_ANAL_ADC_COMPAT_CNTL, 0x00, R_15_VGATE_START_FID_CHG, 0x00, R_16_VGATE_STOP, 0x00, R_17_MISC_VGATE_CONF_AND_MSB, 0x00, 0x00, 0x00 }; /* If a value differs from the Hauppauge driver values, then the comment starts with 'was 0xXX' to denote the Hauppauge value. Otherwise the value is identical to what the Hauppauge driver sets. 
*/

/* SAA7114 and SAA7115 initialization table (register/value pairs,
 * terminated by a 0x00/0x00 entry) */
static const unsigned char saa7115_init_auto_input[] = {
		/* Front-End Part */
	R_01_INC_DELAY, 0x48,			/* white peak control disabled */
	R_03_INPUT_CNTL_2, 0x20,		/* was 0x30. 0x20: long vertical blanking */
	R_04_INPUT_CNTL_3, 0x90,		/* analog gain set to 0 */
	R_05_INPUT_CNTL_4, 0x90,		/* analog gain set to 0 */
		/* Decoder Part */
	R_06_H_SYNC_START, 0xeb,		/* horiz sync begin = -21 */
	R_07_H_SYNC_STOP, 0xe0,			/* horiz sync stop = -17 */
	R_09_LUMA_CNTL, 0x53,			/* 0x53, was 0x56 for 60hz. luminance control */
	R_0A_LUMA_BRIGHT_CNTL, 0x80,		/* was 0x88. decoder brightness, 0x80 is itu standard */
	R_0B_LUMA_CONTRAST_CNTL, 0x44,		/* was 0x48. decoder contrast, 0x44 is itu standard */
	R_0C_CHROMA_SAT_CNTL, 0x40,		/* was 0x47. decoder saturation, 0x40 is itu standard */
	R_0D_CHROMA_HUE_CNTL, 0x00,
	R_0F_CHROMA_GAIN_CNTL, 0x00,		/* use automatic gain */
	R_10_CHROMA_CNTL_2, 0x06,		/* chroma: active adaptive combfilter */
	R_11_MODE_DELAY_CNTL, 0x00,
	R_12_RT_SIGNAL_CNTL, 0x9d,		/* RTS0 output control: VGATE */
	R_13_RT_X_PORT_OUT_CNTL, 0x80,		/* ITU656 standard mode, RTCO output enable RTCE */
	R_14_ANAL_ADC_COMPAT_CNTL, 0x00,
	R_18_RAW_DATA_GAIN_CNTL, 0x40,		/* gain 0x00 = nominal */
	R_19_RAW_DATA_OFF_CNTL, 0x80,
	R_1A_COLOR_KILL_LVL_CNTL, 0x77,		/* recommended value */
	R_1B_MISC_TVVCRDET, 0x42,		/* recommended value */
	R_1C_ENHAN_COMB_CTRL1, 0xa9,		/* recommended value */
	R_1D_ENHAN_COMB_CTRL2, 0x01,		/* recommended value */

	R_80_GLOBAL_CNTL_1, 0x0,		/* No tasks enabled at init */

		/* Power Device Control */
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,	/* reset device */
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0,	/* set device programmed, all in operational mode */
	0x00, 0x00	/* terminator */
};

/* Used to reset saa7113, saa7114 and saa7115: toggles the scaler through
 * reset while the I-port output is disabled. */
static const unsigned char saa7115_cfg_reset_scaler[] = {
	R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x00,	/* disable I-port output */
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,		/* reset scaler */
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0,		/* activate scaler */
	R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01,	/* enable I-port output */
	0x00, 0x00	/* terminator */
};

/* ============== SAA7115 VIDEO templates ============= */

static const unsigned char saa7115_cfg_60hz_video[] = {
	R_80_GLOBAL_CNTL_1, 0x00,		/* reset tasks */
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,	/* reset scaler */

	R_15_VGATE_START_FID_CHG, 0x03,
	R_16_VGATE_STOP, 0x11,
	R_17_MISC_VGATE_CONF_AND_MSB, 0x9c,

	R_08_SYNC_CNTL, 0x68,			/* 0xB0: auto detection, 0x68 = NTSC */
	R_0E_CHROMA_CNTL_1, 0x07,		/* video autodetection is on */

	R_5A_V_OFF_FOR_SLICER, 0x06,		/* standard 60hz value for ITU656 line counting */

	/* Task A */
	R_90_A_TASK_HANDLING_CNTL, 0x80,
	R_91_A_X_PORT_FORMATS_AND_CONF, 0x48,
	R_92_A_X_PORT_INPUT_REFERENCE_SIGNAL, 0x40,
	R_93_A_I_PORT_OUTPUT_FORMATS_AND_CONF, 0x84,

	/* hoffset low (input), 0x0002 is minimum */
	R_94_A_HORIZ_INPUT_WINDOW_START, 0x01,
	R_95_A_HORIZ_INPUT_WINDOW_START_MSB, 0x00,

	/* hsize low (input), 0x02d0 = 720 */
	R_96_A_HORIZ_INPUT_WINDOW_LENGTH, 0xd0,
	R_97_A_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02,

	R_98_A_VERT_INPUT_WINDOW_START, 0x05,
	R_99_A_VERT_INPUT_WINDOW_START_MSB, 0x00,

	R_9A_A_VERT_INPUT_WINDOW_LENGTH, 0x0c,
	R_9B_A_VERT_INPUT_WINDOW_LENGTH_MSB, 0x00,

	R_9C_A_HORIZ_OUTPUT_WINDOW_LENGTH, 0xa0,
	R_9D_A_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x05,

	R_9E_A_VERT_OUTPUT_WINDOW_LENGTH, 0x0c,
	R_9F_A_VERT_OUTPUT_WINDOW_LENGTH_MSB, 0x00,

	/* Task B */
	R_C0_B_TASK_HANDLING_CNTL, 0x00,
	R_C1_B_X_PORT_FORMATS_AND_CONF, 0x08,
	R_C2_B_INPUT_REFERENCE_SIGNAL_DEFINITION, 0x00,
	R_C3_B_I_PORT_FORMATS_AND_CONF, 0x80,

	/* 0x0002 is minimum */
	R_C4_B_HORIZ_INPUT_WINDOW_START, 0x02,
	R_C5_B_HORIZ_INPUT_WINDOW_START_MSB, 0x00,

	/* 0x02d0 = 720 */
	R_C6_B_HORIZ_INPUT_WINDOW_LENGTH, 0xd0,
	R_C7_B_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02,

	/* vwindow start 0x12 = 18 */
	R_C8_B_VERT_INPUT_WINDOW_START, 0x12,
	R_C9_B_VERT_INPUT_WINDOW_START_MSB, 0x00,

	/* vwindow length 0xf8 = 248 */
	R_CA_B_VERT_INPUT_WINDOW_LENGTH, VRES_60HZ>>1,
	R_CB_B_VERT_INPUT_WINDOW_LENGTH_MSB, VRES_60HZ>>9,

	/* hwindow 0x02d0 = 720 */
	R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH, 0xd0,
	R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x02,

	R_F0_LFCO_PER_LINE, 0xad,		/* Set PLL Register. 60hz 525 lines per frame, 27 MHz */
	R_F1_P_I_PARAM_SELECT, 0x05,		/* low bit with 0xF0 */
	R_F5_PULSGEN_LINE_LENGTH, 0xad,
	R_F6_PULSE_A_POS_LSB_AND_PULSEGEN_CONFIG, 0x01,

	0x00, 0x00	/* terminator */
};

static const unsigned char saa7115_cfg_50hz_video[] = {
	R_80_GLOBAL_CNTL_1, 0x00,
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,	/* reset scaler */

	R_15_VGATE_START_FID_CHG, 0x37,		/* VGATE start */
	R_16_VGATE_STOP, 0x16,
	R_17_MISC_VGATE_CONF_AND_MSB, 0x99,

	R_08_SYNC_CNTL, 0x28,			/* 0x28 = PAL */
	R_0E_CHROMA_CNTL_1, 0x07,

	R_5A_V_OFF_FOR_SLICER, 0x03,		/* standard 50hz value */

	/* Task A */
	R_90_A_TASK_HANDLING_CNTL, 0x81,
	R_91_A_X_PORT_FORMATS_AND_CONF, 0x48,
	R_92_A_X_PORT_INPUT_REFERENCE_SIGNAL, 0x40,
	R_93_A_I_PORT_OUTPUT_FORMATS_AND_CONF, 0x84,

	/* This is weird: the datasheet says that you should use 2 as the minimum value, */
	/* but Hauppauge uses 0, and changing that to 2 causes indeed problems (for 50hz) */
	/* hoffset low (input), 0x0002 is minimum */
	R_94_A_HORIZ_INPUT_WINDOW_START, 0x00,
	R_95_A_HORIZ_INPUT_WINDOW_START_MSB, 0x00,
	/* hsize low (input), 0x02d0 = 720 */
	R_96_A_HORIZ_INPUT_WINDOW_LENGTH, 0xd0,
	R_97_A_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02,

	R_98_A_VERT_INPUT_WINDOW_START, 0x03,
	R_99_A_VERT_INPUT_WINDOW_START_MSB, 0x00,

	/* vsize 0x12 = 18 */
	R_9A_A_VERT_INPUT_WINDOW_LENGTH, 0x12,
	R_9B_A_VERT_INPUT_WINDOW_LENGTH_MSB, 0x00,

	/* hsize 0x05a0 = 1440 */
	R_9C_A_HORIZ_OUTPUT_WINDOW_LENGTH, 0xa0,
	R_9D_A_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x05,	/* hsize hi (output) */
	R_9E_A_VERT_OUTPUT_WINDOW_LENGTH, 0x12,		/* vsize low (output), 0x12 = 18 */
	R_9F_A_VERT_OUTPUT_WINDOW_LENGTH_MSB, 0x00,	/* vsize hi (output) */

	/* Task B */
	R_C0_B_TASK_HANDLING_CNTL, 0x00,
	R_C1_B_X_PORT_FORMATS_AND_CONF, 0x08,
	R_C2_B_INPUT_REFERENCE_SIGNAL_DEFINITION, 0x00,
	R_C3_B_I_PORT_FORMATS_AND_CONF, 0x80,
	/* This is weird: the datasheet says that you should use 2 as the minimum value, */
	/* but
Hauppauge uses 0, and changing that to 2 causes indeed problems (for 50hz) */
	/* hoffset low (input), 0x0002 is minimum. See comment above. */
	R_C4_B_HORIZ_INPUT_WINDOW_START, 0x00,
	R_C5_B_HORIZ_INPUT_WINDOW_START_MSB, 0x00,
	/* hsize 0x02d0 = 720 */
	R_C6_B_HORIZ_INPUT_WINDOW_LENGTH, 0xd0,
	R_C7_B_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02,
	/* voffset 0x16 = 22 */
	R_C8_B_VERT_INPUT_WINDOW_START, 0x16,
	R_C9_B_VERT_INPUT_WINDOW_START_MSB, 0x00,
	/* vsize 0x0120 = 288 */
	R_CA_B_VERT_INPUT_WINDOW_LENGTH, 0x20,
	R_CB_B_VERT_INPUT_WINDOW_LENGTH_MSB, 0x01,
	/* hsize 0x02d0 = 720 */
	R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH, 0xd0,
	R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x02,

	R_F0_LFCO_PER_LINE, 0xb0,		/* Set PLL Register. 50hz 625 lines per frame, 27 MHz */
	R_F1_P_I_PARAM_SELECT, 0x05,		/* low bit with 0xF0, (was 0x05) */
	R_F5_PULSGEN_LINE_LENGTH, 0xb0,
	R_F6_PULSE_A_POS_LSB_AND_PULSEGEN_CONFIG, 0x01,

	0x00, 0x00	/* terminator */
};

/* ============== SAA7115 VIDEO templates (end) ======= */

/* Enable raw VBI capturing: both tasks (A for VBI, B for video) active. */
static const unsigned char saa7115_cfg_vbi_on[] = {
	R_80_GLOBAL_CNTL_1, 0x00,			/* reset tasks */
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,		/* reset scaler */
	R_80_GLOBAL_CNTL_1, 0x30,			/* Activate both tasks */
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0,		/* activate scaler */
	R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01,	/* Enable I-port output */

	0x00, 0x00	/* terminator */
};

/* Disable raw VBI capturing: only video task B remains active. */
static const unsigned char saa7115_cfg_vbi_off[] = {
	R_80_GLOBAL_CNTL_1, 0x00,			/* reset tasks */
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,		/* reset scaler */
	R_80_GLOBAL_CNTL_1, 0x20,			/* Activate only task "B" */
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0,		/* activate scaler */
	R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01,	/* Enable I-port output */

	0x00, 0x00	/* terminator */
};

/* Miscellaneous one-time setup: I/X port config, scaler defaults for both
 * tasks, PLL2 and pulse generators, and the VBI slicer (initially off). */
static const unsigned char saa7115_init_misc[] = {
	R_81_V_SYNC_FLD_ID_SRC_SEL_AND_RETIMED_V_F, 0x01,
	R_83_X_PORT_I_O_ENA_AND_OUT_CLK, 0x01,
	R_84_I_PORT_SIGNAL_DEF, 0x20,
	R_85_I_PORT_SIGNAL_POLAR, 0x21,
	R_86_I_PORT_FIFO_FLAG_CNTL_AND_ARBIT, 0xc5,
	R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01,

	/* Task A */
	R_A0_A_HORIZ_PRESCALING, 0x01,
	R_A1_A_ACCUMULATION_LENGTH, 0x00,
	R_A2_A_PRESCALER_DC_GAIN_AND_FIR_PREFILTER, 0x00,

	/* Configure controls at nominal value*/
	R_A4_A_LUMA_BRIGHTNESS_CNTL, 0x80,
	R_A5_A_LUMA_CONTRAST_CNTL, 0x40,
	R_A6_A_CHROMA_SATURATION_CNTL, 0x40,

	/* note: 2 x zoom ensures that VBI lines have same length as video lines. */
	R_A8_A_HORIZ_LUMA_SCALING_INC, 0x00,
	R_A9_A_HORIZ_LUMA_SCALING_INC_MSB, 0x02,

	R_AA_A_HORIZ_LUMA_PHASE_OFF, 0x00,

	/* must be horiz lum scaling / 2 */
	R_AC_A_HORIZ_CHROMA_SCALING_INC, 0x00,
	R_AD_A_HORIZ_CHROMA_SCALING_INC_MSB, 0x01,

	/* must be offset luma / 2 */
	R_AE_A_HORIZ_CHROMA_PHASE_OFF, 0x00,

	R_B0_A_VERT_LUMA_SCALING_INC, 0x00,
	R_B1_A_VERT_LUMA_SCALING_INC_MSB, 0x04,

	R_B2_A_VERT_CHROMA_SCALING_INC, 0x00,
	R_B3_A_VERT_CHROMA_SCALING_INC_MSB, 0x04,

	R_B4_A_VERT_SCALING_MODE_CNTL, 0x01,

	R_B8_A_VERT_CHROMA_PHASE_OFF_00, 0x00,
	R_B9_A_VERT_CHROMA_PHASE_OFF_01, 0x00,
	R_BA_A_VERT_CHROMA_PHASE_OFF_10, 0x00,
	R_BB_A_VERT_CHROMA_PHASE_OFF_11, 0x00,

	R_BC_A_VERT_LUMA_PHASE_OFF_00, 0x00,
	R_BD_A_VERT_LUMA_PHASE_OFF_01, 0x00,
	R_BE_A_VERT_LUMA_PHASE_OFF_10, 0x00,
	R_BF_A_VERT_LUMA_PHASE_OFF_11, 0x00,

	/* Task B */
	R_D0_B_HORIZ_PRESCALING, 0x01,
	R_D1_B_ACCUMULATION_LENGTH, 0x00,
	R_D2_B_PRESCALER_DC_GAIN_AND_FIR_PREFILTER, 0x00,

	/* Configure controls at nominal value*/
	R_D4_B_LUMA_BRIGHTNESS_CNTL, 0x80,
	R_D5_B_LUMA_CONTRAST_CNTL, 0x40,
	R_D6_B_CHROMA_SATURATION_CNTL, 0x40,

	/* hor lum scaling 0x0400 = 1 */
	R_D8_B_HORIZ_LUMA_SCALING_INC, 0x00,
	R_D9_B_HORIZ_LUMA_SCALING_INC_MSB, 0x04,

	R_DA_B_HORIZ_LUMA_PHASE_OFF, 0x00,

	/* must be hor lum scaling / 2 */
	R_DC_B_HORIZ_CHROMA_SCALING, 0x00,
	R_DD_B_HORIZ_CHROMA_SCALING_MSB, 0x02,

	/* must be offset luma / 2 */
	R_DE_B_HORIZ_PHASE_OFFSET_CRHOMA, 0x00,

	R_E0_B_VERT_LUMA_SCALING_INC, 0x00,
	R_E1_B_VERT_LUMA_SCALING_INC_MSB, 0x04,

	R_E2_B_VERT_CHROMA_SCALING_INC, 0x00,
	R_E3_B_VERT_CHROMA_SCALING_INC_MSB, 0x04,

	R_E4_B_VERT_SCALING_MODE_CNTL, 0x01,

	R_E8_B_VERT_CHROMA_PHASE_OFF_00, 0x00,
	R_E9_B_VERT_CHROMA_PHASE_OFF_01, 0x00,
	R_EA_B_VERT_CHROMA_PHASE_OFF_10, 0x00,
	R_EB_B_VERT_CHROMA_PHASE_OFF_11, 0x00,

	R_EC_B_VERT_LUMA_PHASE_OFF_00, 0x00,
	R_ED_B_VERT_LUMA_PHASE_OFF_01, 0x00,
	R_EE_B_VERT_LUMA_PHASE_OFF_10, 0x00,
	R_EF_B_VERT_LUMA_PHASE_OFF_11, 0x00,

	R_F2_NOMINAL_PLL2_DTO, 0x50,		/* crystal clock = 24.576 MHz, target = 27MHz */
	R_F3_PLL_INCREMENT, 0x46,
	R_F4_PLL2_STATUS, 0x00,
	R_F7_PULSE_A_POS_MSB, 0x4b,		/* not the recommended settings! */
	R_F8_PULSE_B_POS, 0x00,
	R_F9_PULSE_B_POS_MSB, 0x4b,
	R_FA_PULSE_C_POS, 0x00,
	R_FB_PULSE_C_POS_MSB, 0x4b,

	/* PLL2 lock detection settings: 71 lines 50% phase error */
	R_FF_S_PLL_MAX_PHASE_ERR_THRESH_NUM_LINES, 0x88,

	/* Turn off VBI */
	R_40_SLICER_CNTL_1, 0x20,		/* No framing code errors allowed. */
	R_41_LCR_BASE, 0xff,
	R_41_LCR_BASE+1, 0xff,
	R_41_LCR_BASE+2, 0xff,
	R_41_LCR_BASE+3, 0xff,
	R_41_LCR_BASE+4, 0xff,
	R_41_LCR_BASE+5, 0xff,
	R_41_LCR_BASE+6, 0xff,
	R_41_LCR_BASE+7, 0xff,
	R_41_LCR_BASE+8, 0xff,
	R_41_LCR_BASE+9, 0xff,
	R_41_LCR_BASE+10, 0xff,
	R_41_LCR_BASE+11, 0xff,
	R_41_LCR_BASE+12, 0xff,
	R_41_LCR_BASE+13, 0xff,
	R_41_LCR_BASE+14, 0xff,
	R_41_LCR_BASE+15, 0xff,
	R_41_LCR_BASE+16, 0xff,
	R_41_LCR_BASE+17, 0xff,
	R_41_LCR_BASE+18, 0xff,
	R_41_LCR_BASE+19, 0xff,
	R_41_LCR_BASE+20, 0xff,
	R_41_LCR_BASE+21, 0xff,
	R_41_LCR_BASE+22, 0xff,
	R_58_PROGRAM_FRAMING_CODE, 0x40,
	R_59_H_OFF_FOR_SLICER, 0x47,
	R_5B_FLD_OFF_AND_MSB_FOR_H_AND_V_OFF, 0x83,
	R_5D_DID, 0xbd,
	R_5E_SDID, 0x35,

	R_02_INPUT_CNTL_1, 0xc4,		/* input tuner -> input 4, amplifier active */

	R_80_GLOBAL_CNTL_1, 0x20,		/* enable task B */
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,
	R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0,
	0x00, 0x00	/* terminator */
};

/* Return 1 if byte c has odd parity (odd number of set bits), else 0. */
static int saa711x_odd_parity(u8 c)
{
	c ^= (c >> 4);
	c ^= (c >> 2);
	c ^= (c >> 1);

	return c & 1;
}

/* Decode a 26-byte biphase-coded VPS packet from p into 13 bytes at dst.
 * Returns 0 on success; non-zero if any input byte was not a valid biphase
 * code (error flags accumulate in the table entries' high nibble). */
static int saa711x_decode_vps(u8 *dst, u8 *p)
{
	static const u8 biphase_tbl[] = {
		0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
		0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
		0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
		0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
		0xd0, 0x58, 0x50, 0xd0,
0x94, 0x1c, 0x14, 0x94, 0x90, 0x18, 0x10, 0x90,
		0xd0, 0x58, 0x50, 0xd0, 0xf0, 0x78, 0x70, 0xf0,
		0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0,
		0xf0, 0x78, 0x70, 0xf0, 0xe1, 0x69, 0x61, 0xe1,
		0xa5, 0x2d, 0x25, 0xa5, 0xa1, 0x29, 0x21, 0xa1,
		0xe1, 0x69, 0x61, 0xe1, 0xc3, 0x4b, 0x43, 0xc3,
		0x87, 0x0f, 0x07, 0x87, 0x83, 0x0b, 0x03, 0x83,
		0xc3, 0x4b, 0x43, 0xc3, 0xc1, 0x49, 0x41, 0xc1,
		0x85, 0x0d, 0x05, 0x85, 0x81, 0x09, 0x01, 0x81,
		0xc1, 0x49, 0x41, 0xc1, 0xe1, 0x69, 0x61, 0xe1,
		0xa5, 0x2d, 0x25, 0xa5, 0xa1, 0x29, 0x21, 0xa1,
		0xe1, 0x69, 0x61, 0xe1, 0xe0, 0x68, 0x60, 0xe0,
		0xa4, 0x2c, 0x24, 0xa4, 0xa0, 0x28, 0x20, 0xa0,
		0xe0, 0x68, 0x60, 0xe0, 0xc2, 0x4a, 0x42, 0xc2,
		0x86, 0x0e, 0x06, 0x86, 0x82, 0x0a, 0x02, 0x82,
		0xc2, 0x4a, 0x42, 0xc2, 0xc0, 0x48, 0x40, 0xc0,
		0x84, 0x0c, 0x04, 0x84, 0x80, 0x08, 0x00, 0x80,
		0xc0, 0x48, 0x40, 0xc0, 0xe0, 0x68, 0x60, 0xe0,
		0xa4, 0x2c, 0x24, 0xa4, 0xa0, 0x28, 0x20, 0xa0,
		0xe0, 0x68, 0x60, 0xe0, 0xf0, 0x78, 0x70, 0xf0,
		0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0,
		0xf0, 0x78, 0x70, 0xf0, 0xd2, 0x5a, 0x52, 0xd2,
		0x96, 0x1e, 0x16, 0x96, 0x92, 0x1a, 0x12, 0x92,
		0xd2, 0x5a, 0x52, 0xd2, 0xd0, 0x58, 0x50, 0xd0,
		0x94, 0x1c, 0x14, 0x94, 0x90, 0x18, 0x10, 0x90,
		0xd0, 0x58, 0x50, 0xd0, 0xf0, 0x78, 0x70, 0xf0,
		0xb4, 0x3c, 0x34, 0xb4, 0xb0, 0x38, 0x30, 0xb0,
		0xf0, 0x78, 0x70, 0xf0,
	};
	int i;
	u8 c, err = 0;

	/* each output byte is built from the low nibbles of two table
	 * lookups; 13 output bytes from 26 input bytes */
	for (i = 0; i < 2 * 13; i += 2) {
		err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]];
		c = (biphase_tbl[p[i + 1]] & 0xf) | ((biphase_tbl[p[i]] & 0xf) << 4);
		dst[i / 2] = c;
	}
	return err & 0xf0;
}

/* Decode a WSS (wide screen signalling) packet from 16 biphase-coded input
 * bytes. Returns the 16-bit WSS word, or -1 on a biphase or parity error. */
static int saa711x_decode_wss(u8 *p)
{
	static const int wss_bits[8] = {
		0, 0, 0, 1, 0, 1, 1, 1
	};
	unsigned char parity;
	int wss = 0;
	int i;

	for (i = 0; i < 16; i++) {
		int b1 = wss_bits[p[i] & 7];
		int b2 = wss_bits[(p[i] >> 3) & 7];

		/* the two half-bits of a valid biphase symbol must differ */
		if (b1 == b2)
			return -1;
		wss |= b2 << i;
	}
	/* odd parity over the low 4 bits */
	parity = wss & 15;
	parity ^= parity >> 2;
	parity ^= parity >> 1;

	if (!(parity & 1))
		return -1;

	return wss;
}

/* Program the audio master clock for sample frequency freq (32000-48000 Hz).
 * No-op on chips without an audio clock (saa7111, saa7113). */
static int saa711x_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
{
	struct saa711x_state *state = to_state(sd);
	u32 acpf;
	u32 acni;
	u32 hz;
	u64 f;
	u8 acc = 0;	/* reg 0x3a, audio clock control */

	/* Checks for chips that don't have audio clock (saa7111, saa7113) */
	if (!saa711x_has_reg(state->ident, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD))
		return 0;

	v4l2_dbg(1, debug, sd, "set audio clock freq: %d\n", freq);

	/* sanity check */
	if (freq < 32000 || freq > 48000)
		return -EINVAL;

	/* hz is the refresh rate times 100 */
	hz = (state->std & V4L2_STD_525_60) ? 5994 : 5000;
	/* acpf = (256 * freq) / field_frequency == (256 * 100 * freq) / hz */
	acpf = (25600 * freq) / hz;
	/* acni = (256 * freq * 2^23) / crystal_frequency =
		  (freq * 2^(8+23)) / crystal_frequency =
		  (freq << 31) / crystal_frequency */
	f = freq;
	f = f << 31;
	do_div(f, state->crystal_freq);
	acni = f;
	if (state->ucgc) {
		acpf = acpf * state->cgcdiv / 16;
		acni = acni * state->cgcdiv / 16;
		acc = 0x80;
		if (state->cgcdiv == 3)
			acc |= 0x40;
	}
	if (state->apll)
		acc |= 0x08;

	if (state->double_asclk) {
		acpf <<= 1;
		acni <<= 1;
	}
	saa711x_write(sd, R_38_CLK_RATIO_AMXCLK_TO_ASCLK, 0x03);
	saa711x_write(sd, R_39_CLK_RATIO_ASCLK_TO_ALRCLK, 0x10 << state->double_asclk);
	saa711x_write(sd, R_3A_AUD_CLK_GEN_BASIC_SETUP, acc);

	/* acpf and acni are written as little-endian multi-byte values */
	saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD, acpf & 0xff);
	saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD+1, (acpf >> 8) & 0xff);
	saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD+2, (acpf >> 16) & 0x03);
	saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC, acni & 0xff);
	saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC+1, (acni >> 8) & 0xff);
	saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC+2, (acni >> 16) & 0x3f);
	state->audclk_freq = freq;
	return 0;
}

/* Refresh the volatile chroma gain control from the hardware when AGC is on. */
static int saa711x_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);
	struct saa711x_state *state = to_state(sd);

	switch (ctrl->id) {
	case V4L2_CID_CHROMA_AGC:
		/* chroma gain cluster */
		if (state->agc->val)
			state->gain->val = saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f;
		break;
	}
	return 0;
}

/* Write a changed V4L2 control value to the corresponding decoder register. */
static int saa711x_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);
	struct saa711x_state *state = to_state(sd);

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		saa711x_write(sd, R_0A_LUMA_BRIGHT_CNTL, ctrl->val);
		break;
	case V4L2_CID_CONTRAST:
		saa711x_write(sd, R_0B_LUMA_CONTRAST_CNTL, ctrl->val);
		break;
	case V4L2_CID_SATURATION:
		saa711x_write(sd, R_0C_CHROMA_SAT_CNTL, ctrl->val);
		break;
	case V4L2_CID_HUE:
		saa711x_write(sd, R_0D_CHROMA_HUE_CNTL, ctrl->val);
		break;
	case V4L2_CID_CHROMA_AGC:
		/* chroma gain cluster: bit 7 clear = automatic gain,
		 * bit 7 set = manual gain from the gain control */
		if (state->agc->val)
			saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, state->gain->val);
		else
			saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, state->gain->val | 0x80);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Set the output window size and program the task B scaler accordingly.
 * width is limited to 1..1440, height to 1..480 (60 Hz) or 1..576 (50 Hz). */
static int saa711x_set_size(struct v4l2_subdev *sd, int width, int height)
{
	struct saa711x_state *state = to_state(sd);
	int HPSC, HFSC;
	int VSCY;
	int res;
	int is_50hz = state->std & V4L2_STD_625_50;
	int Vsrc = is_50hz ? 576 : 480;

	v4l2_dbg(1, debug, sd, "decoder set size to %ix%i\n", width, height);

	/* FIXME need better bounds checking here */
	if ((width < 1) || (width > 1440))
		return -EINVAL;
	if ((height < 1) || (height > Vsrc))
		return -EINVAL;

	if (!saa711x_has_reg(state->ident, R_D0_B_HORIZ_PRESCALING)) {
		/* Decoder only supports 720 columns and 480 or 576 lines */
		if (width != 720)
			return -EINVAL;
		if (height != Vsrc)
			return -EINVAL;
	}

	state->width = width;
	state->height = height;

	if (!saa711x_has_reg(state->ident, R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH))
		return 0;

	/* probably have a valid size, let's set it */
	/* Set output width/height */
	/* width */

	saa711x_write(sd, R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH, (u8) (width & 0xff));
	saa711x_write(sd, R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, (u8) ((width >> 8) & 0xff));

	/* Vertical Scaling uses height/2 */
	res = height / 2;

	/* On 60Hz, it is using a higher Vertical Output Size */
	if (!is_50hz)
		res += (VRES_60HZ - 480) >> 1;

	/* height */
	saa711x_write(sd, R_CE_B_VERT_OUTPUT_WINDOW_LENGTH, (u8) (res & 0xff));
	saa711x_write(sd, R_CF_B_VERT_OUTPUT_WINDOW_LENGTH_MSB, (u8) ((res >> 8) & 0xff));

	/* Scaling settings */
	/* Hprescaler is floor(inres/outres) */
	HPSC = (int)(720 / width);
	/* 0 is not allowed (div. by zero) */
	HPSC = HPSC ? HPSC : 1;
	HFSC = (int)((1024 * 720) / (HPSC * width));
	/* FIXME hardcodes to "Task B"
	 * write H prescaler integer */
	saa711x_write(sd, R_D0_B_HORIZ_PRESCALING, (u8) (HPSC & 0x3f));

	v4l2_dbg(1, debug, sd, "Hpsc: 0x%05x, Hfsc: 0x%05x\n", HPSC, HFSC);
	/* write H fine-scaling (luminance) */
	saa711x_write(sd, R_D8_B_HORIZ_LUMA_SCALING_INC, (u8) (HFSC & 0xff));
	saa711x_write(sd, R_D9_B_HORIZ_LUMA_SCALING_INC_MSB, (u8) ((HFSC >> 8) & 0xff));
	/* write H fine-scaling (chrominance)
	 * must be lum/2, so i'll just bitshift :) */
	saa711x_write(sd, R_DC_B_HORIZ_CHROMA_SCALING, (u8) ((HFSC >> 1) & 0xff));
	saa711x_write(sd, R_DD_B_HORIZ_CHROMA_SCALING_MSB, (u8) ((HFSC >> 9) & 0xff));

	VSCY = (int)((1024 * Vsrc) / height);
	v4l2_dbg(1, debug, sd, "Vsrc: %d, Vscy: 0x%05x\n", Vsrc, VSCY);

	/* Correct Contrast and Luminance */
	saa711x_write(sd, R_D5_B_LUMA_CONTRAST_CNTL, (u8) (64 * 1024 / VSCY));
	saa711x_write(sd, R_D6_B_CHROMA_SATURATION_CNTL, (u8) (64 * 1024 / VSCY));

	/* write V fine-scaling (luminance) */
	saa711x_write(sd, R_E0_B_VERT_LUMA_SCALING_INC, (u8) (VSCY & 0xff));
	saa711x_write(sd, R_E1_B_VERT_LUMA_SCALING_INC_MSB, (u8) ((VSCY >> 8) & 0xff));
	/* write V fine-scaling (chrominance) */
	saa711x_write(sd, R_E2_B_VERT_CHROMA_SCALING_INC, (u8) (VSCY & 0xff));
	saa711x_write(sd, R_E3_B_VERT_CHROMA_SCALING_INC_MSB, (u8) ((VSCY >> 8) & 0xff));

	saa711x_writeregs(sd, saa7115_cfg_reset_scaler);

	/* Activates task "B" */
	saa711x_write(sd, R_80_GLOBAL_CNTL_1,
		saa711x_read(sd, R_80_GLOBAL_CNTL_1) | 0x20);

	return 0;
}

static void saa711x_set_v4lstd(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct saa711x_state *state = to_state(sd);

	/* Prevent unnecessary standard changes. During a standard
	   change the I-Port is temporarily disabled.
Any devices reading from that port can get confused.
	   Note that s_std is also used to switch from
	   radio to TV mode, so if a s_std is broadcast to
	   all I2C devices then you do not want to have an unwanted
	   side-effect here. */
	if (std == state->std)
		return;

	state->std = std;

	// This works for NTSC-M, SECAM-L and the 50Hz PAL variants.
	if (std & V4L2_STD_525_60) {
		v4l2_dbg(1, debug, sd, "decoder set standard 60 Hz\n");
		saa711x_writeregs(sd, saa7115_cfg_60hz_video);
		saa711x_set_size(sd, 720, 480);
	} else {
		v4l2_dbg(1, debug, sd, "decoder set standard 50 Hz\n");
		saa711x_writeregs(sd, saa7115_cfg_50hz_video);
		saa711x_set_size(sd, 720, 576);
	}

	/* Register 0E - Bits D6-D4 on NO-AUTO mode
		(SAA7111 and SAA7113 doesn't have auto mode)
	    50 Hz / 625 lines           60 Hz / 525 lines
	000 PAL BGDHI (4.43Mhz)         NTSC M (3.58MHz)
	001 NTSC 4.43 (50 Hz)           PAL 4.43 (60 Hz)
	010 Combination-PAL N (3.58MHz) NTSC 4.43 (60 Hz)
	011 NTSC N (3.58MHz)            PAL M (3.58MHz)
	100 reserved                    NTSC-Japan (3.58MHz)
	*/
	if (state->ident <= V4L2_IDENT_SAA7113) {
		u8 reg = saa711x_read(sd, R_0E_CHROMA_CNTL_1) & 0x8f;

		if (std == V4L2_STD_PAL_M) {
			reg |= 0x30;
		} else if (std == V4L2_STD_PAL_Nc) {
			reg |= 0x20;
		} else if (std == V4L2_STD_PAL_60) {
			reg |= 0x10;
		} else if (std == V4L2_STD_NTSC_M_JP) {
			reg |= 0x40;
		} else if (std & V4L2_STD_SECAM) {
			reg |= 0x50;
		}
		saa711x_write(sd, R_0E_CHROMA_CNTL_1, reg);
	} else {
		/* restart task B if needed */
		int taskb = saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10;

		if (taskb && state->ident == V4L2_IDENT_SAA7114) {
			saa711x_writeregs(sd, saa7115_cfg_vbi_on);
		}

		/* switch audio mode too! */
		saa711x_s_clock_freq(sd, state->audclk_freq);
	}
}

/* setup the sliced VBI lcr registers according to the sliced VBI format */
static void saa711x_set_lcr(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
{
	struct saa711x_state *state = to_state(sd);
	int is_50hz = (state->std & V4L2_STD_625_50);
	u8 lcr[24];
	int i, x;

#if 1
	/* saa7113/7114/7118 VBI support are experimental */
	if (!saa711x_has_reg(state->ident, R_41_LCR_BASE))
		return;

#else
	/* SAA7113 and SAA7118 also should support VBI - Need testing */
	if (state->ident != V4L2_IDENT_SAA7115)
		return;
#endif

	/* 0xff = VBI data pass-through (no slicing) on that line */
	for (i = 0; i <= 23; i++)
		lcr[i] = 0xff;

	if (fmt == NULL) {
		/* raw VBI */
		if (is_50hz)
			for (i = 6; i <= 23; i++)
				lcr[i] = 0xdd;
		else
			for (i = 10; i <= 21; i++)
				lcr[i] = 0xdd;
	} else {
		/* sliced VBI */
		/* first clear lines that cannot be captured */
		if (is_50hz) {
			for (i = 0; i <= 5; i++)
				fmt->service_lines[0][i] =
					fmt->service_lines[1][i] = 0;
		}
		else {
			for (i = 0; i <= 9; i++)
				fmt->service_lines[0][i] =
					fmt->service_lines[1][i] = 0;
			for (i = 22; i <= 23; i++)
				fmt->service_lines[0][i] =
					fmt->service_lines[1][i] = 0;
		}

		/* Now set the lcr values according to the specified service */
		for (i = 6; i <= 23; i++) {
			lcr[i] = 0;
			for (x = 0; x <= 1; x++) {
				switch (fmt->service_lines[1-x][i]) {
				case 0:
					lcr[i] |= 0xf << (4 * x);
					break;
				case V4L2_SLICED_TELETEXT_B:
					lcr[i] |= 1 << (4 * x);
					break;
				case V4L2_SLICED_CAPTION_525:
					lcr[i] |= 4 << (4 * x);
					break;
				case V4L2_SLICED_WSS_625:
					lcr[i] |= 5 << (4 * x);
					break;
				case V4L2_SLICED_VPS:
					lcr[i] |= 7 << (4 * x);
					break;
				}
			}
		}
	}

	/* write the lcr registers */
	for (i = 2; i <= 23; i++) {
		saa711x_write(sd, i - 2 + R_41_LCR_BASE, lcr[i]);
	}

	/* enable/disable raw VBI capturing */
	saa711x_writeregs(sd, fmt == NULL ?
				saa7115_cfg_vbi_on :
				saa7115_cfg_vbi_off);
}

/* Read the LCR registers back into a v4l2_sliced_vbi_format description. */
static int saa711x_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *sliced)
{
	/* maps an LCR nibble back to a V4L2_SLICED_* service code */
	static u16 lcr2vbi[] = {
		0, V4L2_SLICED_TELETEXT_B, 0,	/* 1 */
		0, V4L2_SLICED_CAPTION_525,	/* 4 */
		V4L2_SLICED_WSS_625, 0,		/* 5 */
		V4L2_SLICED_VPS, 0, 0, 0, 0,	/* 7 */
		0, 0, 0, 0
	};
	int i;

	memset(sliced->service_lines, 0, sizeof(sliced->service_lines));
	sliced->service_set = 0;
	/* done if using raw VBI */
	if (saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10)
		return 0;
	for (i = 2; i <= 23; i++) {
		u8 v = saa711x_read(sd, i - 2 + R_41_LCR_BASE);

		sliced->service_lines[0][i] = lcr2vbi[v >> 4];
		sliced->service_lines[1][i] = lcr2vbi[v & 0xf];
		sliced->service_set |=
			sliced->service_lines[0][i] | sliced->service_lines[1][i];
	}
	return 0;
}

/* Switch to raw VBI capture mode. */
static int saa711x_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
{
	saa711x_set_lcr(sd, NULL);
	return 0;
}

/* Switch to sliced VBI capture with the given per-line service setup. */
static int saa711x_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
{
	saa711x_set_lcr(sd, fmt);
	return 0;
}

/* Validate and apply a media-bus video format (only FIXED code supported). */
static int saa711x_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt)
{
	if (fmt->code != V4L2_MBUS_FMT_FIXED)
		return -EINVAL;
	fmt->field = V4L2_FIELD_INTERLACED;
	fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
	return saa711x_set_size(sd, fmt->width, fmt->height);
}

/* Decode the sliced VBI data stream as created by the saa7115.
   The format is described in the saa7115 datasheet in Tables 25 and 26
   and in Figure 33.
   The current implementation uses SAV/EAV codes and not the ancillary data
   headers. The vbi->p pointer points to the R_5E_SDID byte right after the
   SAV code.
*/
static int saa711x_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi)
{
	struct saa711x_state *state = to_state(sd);
	static const char vbi_no_data_pattern[] = {
		0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0
	};
	u8 *p = vbi->p;
	u32 wss;
	int id1, id2;	/* the ID1 and ID2 bytes from the internal header */

	vbi->type = 0;	/* mark result as a failure */
	id1 = p[2];
	id2 = p[3];
	/* Note: the field bit is inverted for 60 Hz video */
	if (state->std & V4L2_STD_525_60)
		id1 ^= 0x40;

	/* Skip internal header, p now points to the start of the payload */
	p += 4;
	vbi->p = p;

	/* calculate field and line number of the VBI packet (1-23) */
	vbi->is_second_field = ((id1 & 0x40) != 0);
	vbi->line = (id1 & 0x3f) << 3;
	vbi->line |= (id2 & 0x70) >> 4;

	/* Obtain data type */
	id2 &= 0xf;

	/* If the VBI slicer does not detect any signal it will fill up
	   the payload buffer with 0xa0 bytes. */
	if (!memcmp(p, vbi_no_data_pattern, sizeof(vbi_no_data_pattern)))
		return 0;

	/* decode payloads */
	switch (id2) {
	case 1:
		vbi->type = V4L2_SLICED_TELETEXT_B;
		break;
	case 4:
		if (!saa711x_odd_parity(p[0]) || !saa711x_odd_parity(p[1]))
			return 0;
		vbi->type = V4L2_SLICED_CAPTION_525;
		break;
	case 5:
		/* wss is u32; -1 from the decoder compares against 0xffffffff */
		wss = saa711x_decode_wss(p);
		if (wss == -1)
			return 0;
		p[0] = wss & 0xff;
		p[1] = wss >> 8;
		vbi->type = V4L2_SLICED_WSS_625;
		break;
	case 7:
		if (saa711x_decode_vps(p, p) != 0)
			return 0;
		vbi->type = V4L2_SLICED_VPS;
		break;
	default:
		break;
	}
	return 0;
}

/* ============ SAA7115 AUDIO settings (end) ============= */

/* Report signal presence via the horizontal-lock bit of status byte 2. */
static int saa711x_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
	struct saa711x_state *state = to_state(sd);
	int status;

	if (state->radio)
		return 0;
	status = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);

	v4l2_dbg(1, debug, sd, "status: 0x%02x\n", status);
	vt->signal = ((status & (1 << 6)) == 0) ? 0xffff : 0x0;
	return 0;
}

/* Leave radio mode (if active) and switch the decoder to the given standard. */
static int saa711x_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct saa711x_state *state = to_state(sd);

	state->radio = 0;
	saa711x_set_v4lstd(sd, std);
	return 0;
}

/* Enter radio mode: video standard handling is suspended. */
static int saa711x_s_radio(struct v4l2_subdev *sd)
{
	struct saa711x_state *state = to_state(sd);

	state->radio = 1;
	return 0;
}

/* Select the video input (composite/S-Video) and I-port output routing. */
static int saa711x_s_routing(struct v4l2_subdev *sd,
			     u32 input, u32 output, u32 config)
{
	struct saa711x_state *state = to_state(sd);
	u8 mask = (state->ident <= V4L2_IDENT_SAA7111A) ? 0xf8 : 0xf0;

	v4l2_dbg(1, debug, sd, "decoder set input %d output %d\n", input, output);

	/* saa7111/3 does not have these inputs */
	if (state->ident <= V4L2_IDENT_SAA7113 &&
	    (input == SAA7115_COMPOSITE4 ||
	     input == SAA7115_COMPOSITE5)) {
		return -EINVAL;
	}
	if (input > SAA7115_SVIDEO3)
		return -EINVAL;
	if (state->input == input && state->output == output)
		return 0;
	v4l2_dbg(1, debug, sd, "now setting %s input %s output\n",
		(input >= SAA7115_SVIDEO0) ? "S-Video" : "Composite",
		(output == SAA7115_IPORT_ON) ? "iport on" : "iport off");
	state->input = input;

	/* saa7111 has slightly different input numbering */
	if (state->ident <= V4L2_IDENT_SAA7111A) {
		if (input >= SAA7115_COMPOSITE4)
			input -= 2;
		/* saa7111 specific */
		saa711x_write(sd, R_10_CHROMA_CNTL_2,
			      (saa711x_read(sd, R_10_CHROMA_CNTL_2) & 0x3f) |
			      ((output & 0xc0) ^ 0x40));
		saa711x_write(sd, R_13_RT_X_PORT_OUT_CNTL,
			      (saa711x_read(sd, R_13_RT_X_PORT_OUT_CNTL) & 0xf0) |
			      ((output & 2) ? 0x0a : 0));
	}

	/* select mode */
	saa711x_write(sd, R_02_INPUT_CNTL_1,
		      (saa711x_read(sd, R_02_INPUT_CNTL_1) & mask) | input);

	/* bypass chrominance trap for S-Video modes */
	saa711x_write(sd, R_09_LUMA_CNTL,
		      (saa711x_read(sd, R_09_LUMA_CNTL) & 0x7f) |
		      (state->input >= SAA7115_SVIDEO0 ? 0x80 : 0x0));

	state->output = output;
	if (state->ident == V4L2_IDENT_SAA7114 ||
			state->ident == V4L2_IDENT_SAA7115) {
		saa711x_write(sd, R_83_X_PORT_I_O_ENA_AND_OUT_CLK,
			      (saa711x_read(sd, R_83_X_PORT_I_O_ENA_AND_OUT_CLK) & 0xfe) |
			      (state->output & 0x01));
	}
	if (state->ident > V4L2_IDENT_SAA7111A) {
		if (config & SAA7115_IDQ_IS_DEFAULT)
			saa711x_write(sd, R_85_I_PORT_SIGNAL_POLAR, 0x20);
		else
			saa711x_write(sd, R_85_I_PORT_SIGNAL_POLAR, 0x21);
	}
	return 0;
}

/* Drive the saa7111 GPIO pin (bit 7 of register 0x11); other chips: -EINVAL. */
static int saa711x_s_gpio(struct v4l2_subdev *sd, u32 val)
{
	struct saa711x_state *state = to_state(sd);

	if (state->ident > V4L2_IDENT_SAA7111A)
		return -EINVAL;
	saa711x_write(sd, 0x11, (saa711x_read(sd, 0x11) & 0x7f) |
		(val ? 0x80 : 0));
	return 0;
}

/* Enable or disable the I-port output stream. */
static int saa711x_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct saa711x_state *state = to_state(sd);

	v4l2_dbg(1, debug, sd, "%s output\n",
			enable ? "enable" : "disable");

	if (state->enable == enable)
		return 0;
	state->enable = enable;
	if (!saa711x_has_reg(state->ident, R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED))
		return 0;
	saa711x_write(sd, R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, state->enable);
	return 0;
}

/* Configure the crystal frequency and audio-clock-related flags, then
 * reprogram the audio clock generator. */
static int saa711x_s_crystal_freq(struct v4l2_subdev *sd, u32 freq, u32 flags)
{
	struct saa711x_state *state = to_state(sd);

	if (freq != SAA7115_FREQ_32_11_MHZ && freq != SAA7115_FREQ_24_576_MHZ)
		return -EINVAL;
	state->crystal_freq = freq;
	state->double_asclk = flags & SAA7115_FREQ_FL_DOUBLE_ASCLK;
	state->cgcdiv = (flags & SAA7115_FREQ_FL_CGCDIV) ?
3 : 4;
	state->ucgc = flags & SAA7115_FREQ_FL_UCGC;
	state->apll = flags & SAA7115_FREQ_FL_APLL;
	saa711x_s_clock_freq(sd, state->audclk_freq);
	return 0;
}

/* Reset the decoder by cycling the scaler through its reset sequence. */
static int saa711x_reset(struct v4l2_subdev *sd, u32 val)
{
	v4l2_dbg(1, debug, sd, "decoder RESET\n");
	saa711x_writeregs(sd, saa7115_cfg_reset_scaler);
	return 0;
}

/* Read back decoded sliced VBI data (WSS or closed caption/XDS) from the
 * slicer readout registers. */
static int saa711x_g_vbi_data(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *data)
{
	/* Note: the internal field ID is inverted for NTSC,
	   so data->field 0 maps to the saa7115 even field,
	   whereas for PAL it maps to the saa7115 odd field. */
	switch (data->id) {
	case V4L2_SLICED_WSS_625:
		if (saa711x_read(sd, 0x6b) & 0xc0)
			return -EIO;
		data->data[0] = saa711x_read(sd, 0x6c);
		data->data[1] = saa711x_read(sd, 0x6d);
		return 0;
	case V4L2_SLICED_CAPTION_525:
		if (data->field == 0) {
			/* CC */
			if (saa711x_read(sd, 0x66) & 0x30)
				return -EIO;
			data->data[0] = saa711x_read(sd, 0x69);
			data->data[1] = saa711x_read(sd, 0x6a);
			return 0;
		}
		/* XDS */
		if (saa711x_read(sd, 0x66) & 0xc0)
			return -EIO;
		data->data[0] = saa711x_read(sd, 0x67);
		data->data[1] = saa711x_read(sd, 0x68);
		return 0;
	default:
		return -EINVAL;
	}
}

/* Narrow the standards mask in *std based on what the decoder detects. */
static int saa711x_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
	struct saa711x_state *state = to_state(sd);
	int reg1f, reg1e;

	/*
	 * The V4L2 core already initializes std with all supported
	 * Standards. All driver needs to do is to mask it, to remove
	 * standards that don't apply from the mask
	 */

	reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);

	if (state->ident == V4L2_IDENT_SAA7115) {
		reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC);

		v4l2_dbg(1, debug, sd, "Status byte 1 (0x1e)=0x%02x\n", reg1e);

		switch (reg1e & 0x03) {
		case 1:
			*std &= V4L2_STD_NTSC;
			break;
		case 2:
			/*
			 * V4L2_STD_PAL just cover the european PAL standards.
			 * This is wrong, as the device could also be using an
			 * other PAL standard.
			 */
			*std &= V4L2_STD_PAL | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc |
				V4L2_STD_PAL_M | V4L2_STD_PAL_60;
			break;
		case 3:
			*std &= V4L2_STD_SECAM;
			break;
		default:
			/* Can't detect anything */
			break;
		}
	}

	v4l2_dbg(1, debug, sd, "Status byte 2 (0x1f)=0x%02x\n", reg1f);

	/* horizontal/vertical not locked */
	if (reg1f & 0x40)
		goto ret;

	if (reg1f & 0x20)
		*std &= V4L2_STD_525_60;
	else
		*std &= V4L2_STD_625_50;

ret:
	v4l2_dbg(1, debug, sd, "detected std mask = %08Lx\n", *std);

	return 0;
}

/* Report V4L2_IN_ST_NO_SIGNAL unless both status bytes indicate lock. */
static int saa711x_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
	struct saa711x_state *state = to_state(sd);
	int reg1e = 0x80;	/* default passes the check on non-saa7115 chips */
	int reg1f;

	*status = V4L2_IN_ST_NO_SIGNAL;
	if (state->ident == V4L2_IDENT_SAA7115)
		reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC);
	reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
	if ((reg1f & 0xc1) == 0x81 && (reg1e & 0xc0) == 0x80)
		*status = 0;
	return 0;
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug register read (requires CAP_SYS_ADMIN). */
static int saa711x_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!v4l2_chip_match_i2c_client(client, &reg->match))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	reg->val = saa711x_read(sd, reg->reg & 0xff);
	reg->size = 1;
	return 0;
}

/* Debug register write (requires CAP_SYS_ADMIN). */
static int saa711x_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!v4l2_chip_match_i2c_client(client, &reg->match))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	saa711x_write(sd, reg->reg & 0xff, reg->val & 0xff);
	return 0;
}
#endif

/* Report which saa711x variant was detected at probe time. */
static int saa711x_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
	struct saa711x_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return v4l2_chip_ident_i2c_client(client, chip, state->ident, 0);
}

/* Dump current decoder status (signal, frequency, format, size, controls). */
static int saa711x_log_status(struct v4l2_subdev *sd)
{
	struct saa711x_state *state = to_state(sd);
	int reg1e, reg1f;
	int signalOk;
	int vcr;

	v4l2_info(sd, "Audio frequency: %d Hz\n", state->audclk_freq);
	if (state->ident != V4L2_IDENT_SAA7115) {
		/* status for the saa7114 */
		reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
		signalOk = (reg1f & 0xc1) == 0x81;
		v4l2_info(sd, "Video signal: %s\n", signalOk ? "ok" : "bad");
		v4l2_info(sd, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz");
		return 0;
	}

	/* status for the saa7115 */
	reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC);
	reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);

	signalOk = (reg1f & 0xc1) == 0x81 && (reg1e & 0xc0) == 0x80;
	vcr = !(reg1f & 0x10);

	if (state->input >= 6)
		v4l2_info(sd, "Input: S-Video %d\n", state->input - 6);
	else
		v4l2_info(sd, "Input: Composite %d\n", state->input);
	v4l2_info(sd, "Video signal: %s\n", signalOk ? (vcr ? "VCR" : "broadcast/DVD") : "bad");
	v4l2_info(sd, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz");

	switch (reg1e & 0x03) {
	case 1:
		v4l2_info(sd, "Detected format: NTSC\n");
		break;
	case 2:
		v4l2_info(sd, "Detected format: PAL\n");
		break;
	case 3:
		v4l2_info(sd, "Detected format: SECAM\n");
		break;
	default:
		v4l2_info(sd, "Detected format: BW/No color\n");
		break;
	}
	v4l2_info(sd, "Width, Height: %d, %d\n", state->width, state->height);
	v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
	return 0;
}

/* ----------------------------------------------------------------------- */

static const struct v4l2_ctrl_ops saa711x_ctrl_ops = {
	.s_ctrl = saa711x_s_ctrl,
	.g_volatile_ctrl = saa711x_g_volatile_ctrl,
};

static const struct v4l2_subdev_core_ops saa711x_core_ops = {
	.log_status = saa711x_log_status,
	.g_chip_ident = saa711x_g_chip_ident,
	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
	.g_ctrl = v4l2_subdev_g_ctrl,
	.s_ctrl = v4l2_subdev_s_ctrl,
	.queryctrl = v4l2_subdev_queryctrl,
	.querymenu = v4l2_subdev_querymenu,
	.s_std = saa711x_s_std,
	.reset = saa711x_reset,
	.s_gpio = saa711x_s_gpio,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = saa711x_g_register,
	.s_register = saa711x_s_register,
#endif
};

static const struct v4l2_subdev_tuner_ops saa711x_tuner_ops = {
	.s_radio = saa711x_s_radio,
	.g_tuner = saa711x_g_tuner,
};

static const struct v4l2_subdev_audio_ops saa711x_audio_ops = {
	.s_clock_freq = saa711x_s_clock_freq,
};

static const struct v4l2_subdev_video_ops saa711x_video_ops = {
	.s_routing = saa711x_s_routing,
	.s_crystal_freq = saa711x_s_crystal_freq,
	.s_mbus_fmt = saa711x_s_mbus_fmt,
	.s_stream = saa711x_s_stream,
	.querystd = saa711x_querystd,
	.g_input_status = saa711x_g_input_status,
};

static const struct v4l2_subdev_vbi_ops saa711x_vbi_ops = {
	.g_vbi_data = saa711x_g_vbi_data,
	.decode_vbi_line = saa711x_decode_vbi_line,
	.g_sliced_fmt = saa711x_g_sliced_fmt,
	.s_sliced_fmt = saa711x_s_sliced_fmt,
	.s_raw_fmt = saa711x_s_raw_fmt,
};

static const struct v4l2_subdev_ops saa711x_ops = {
	.core = &saa711x_core_ops,
	.tuner = &saa711x_tuner_ops,
	.audio = &saa711x_audio_ops,
	.video = &saa711x_video_ops,
	.vbi = &saa711x_vbi_ops,
};

/* ----------------------------------------------------------------------- */

static int saa711x_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct saa711x_state *state;
	struct v4l2_subdev *sd;
	struct v4l2_ctrl_handler *hdl;
	int i;
	char name[17];
	char chip_id;
	int autodetect = !id || id->driver_data == 1;

	/* Check if the adapter supports the needed features */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	/* read the 16-digit chip version string via register 0 */
	for (i = 0; i < 0x0f; i++) {
		i2c_smbus_write_byte_data(client, 0, i);
		name[i] = (i2c_smbus_read_byte_data(client, 0) & 0x0f) + '0';
		if (name[i] > '9')
			name[i] += 'a' - '9' - 1;
	}
	name[i] = '\0';

	chip_id = name[5];

	/* Check whether this chip is part of the saa711x series */
	if (memcmp(name + 1, "f711", 4)) {
		v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n",
			client->addr << 1, name);
		return -ENODEV;
	}

	/* Safety check */
	if (!autodetect && id->name[6] != chip_id) {
		v4l_warn(client, "found saa711%c while
%s was expected\n", chip_id, id->name); } snprintf(client->name, sizeof(client->name), "saa711%c", chip_id); v4l_info(client, "saa711%c found (%s) @ 0x%x (%s)\n", chip_id, name, client->addr << 1, client->adapter->name); state = kzalloc(sizeof(struct saa711x_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &saa711x_ops); hdl = &state->hdl; v4l2_ctrl_handler_init(hdl, 6); /* add in ascending ID order */ v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_CONTRAST, 0, 127, 1, 64); v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_SATURATION, 0, 127, 1, 64); v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); state->agc = v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_CHROMA_AGC, 0, 1, 1, 1); state->gain = v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops, V4L2_CID_CHROMA_GAIN, 0, 127, 1, 40); sd->ctrl_handler = hdl; if (hdl->error) { int err = hdl->error; v4l2_ctrl_handler_free(hdl); kfree(state); return err; } v4l2_ctrl_auto_cluster(2, &state->agc, 0, true); state->input = -1; state->output = SAA7115_IPORT_ON; state->enable = 1; state->radio = 0; switch (chip_id) { case '1': state->ident = V4L2_IDENT_SAA7111; if (saa711x_read(sd, R_00_CHIP_VERSION) & 0xf0) { v4l_info(client, "saa7111a variant found\n"); state->ident = V4L2_IDENT_SAA7111A; } break; case '3': state->ident = V4L2_IDENT_SAA7113; break; case '4': state->ident = V4L2_IDENT_SAA7114; break; case '5': state->ident = V4L2_IDENT_SAA7115; break; case '8': state->ident = V4L2_IDENT_SAA7118; break; default: state->ident = V4L2_IDENT_SAA7111; v4l2_info(sd, "WARNING: Chip is not known - Falling back to saa7111\n"); break; } state->audclk_freq = 48000; v4l2_dbg(1, debug, sd, "writing init values\n"); /* init to 60hz/48khz */ state->crystal_freq = SAA7115_FREQ_24_576_MHZ; switch (state->ident) { case V4L2_IDENT_SAA7111: case V4L2_IDENT_SAA7111A: 
saa711x_writeregs(sd, saa7111_init); break; case V4L2_IDENT_SAA7113: saa711x_writeregs(sd, saa7113_init); break; default: state->crystal_freq = SAA7115_FREQ_32_11_MHZ; saa711x_writeregs(sd, saa7115_init_auto_input); } if (state->ident > V4L2_IDENT_SAA7111A) saa711x_writeregs(sd, saa7115_init_misc); saa711x_set_v4lstd(sd, V4L2_STD_NTSC); v4l2_ctrl_handler_setup(hdl); v4l2_dbg(1, debug, sd, "status: (1E) 0x%02x, (1F) 0x%02x\n", saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC), saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC)); return 0; } /* ----------------------------------------------------------------------- */ static int saa711x_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(sd->ctrl_handler); kfree(to_state(sd)); return 0; } static const struct i2c_device_id saa711x_id[] = { { "saa7115_auto", 1 }, /* autodetect */ { "saa7111", 0 }, { "saa7113", 0 }, { "saa7114", 0 }, { "saa7115", 0 }, { "saa7118", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, saa711x_id); static struct i2c_driver saa711x_driver = { .driver = { .owner = THIS_MODULE, .name = "saa7115", }, .probe = saa711x_probe, .remove = saa711x_remove, .id_table = saa711x_id, }; module_i2c_driver(saa711x_driver);
gpl-2.0
Tesla-Redux-Devices/android_kernel_samsung_trlte
drivers/net/ethernet/intel/e1000e/mac.c
2594
52986
/******************************************************************************* Intel PRO/1000 Linux driver Copyright(c) 1999 - 2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "e1000.h" /** * e1000e_get_bus_info_pcie - Get PCIe bus information * @hw: pointer to the HW structure * * Determines and stores the system bus information for a particular * network interface. The following bus information is determined and stored: * bus speed, bus width, type (PCIe), and PCIe function. 
**/ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_bus_info *bus = &hw->bus; struct e1000_adapter *adapter = hw->adapter; u16 pcie_link_status, cap_offset; cap_offset = adapter->pdev->pcie_cap; if (!cap_offset) { bus->width = e1000_bus_width_unknown; } else { pci_read_config_word(adapter->pdev, cap_offset + PCIE_LINK_STATUS, &pcie_link_status); bus->width = (enum e1000_bus_width)((pcie_link_status & PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); } mac->ops.set_lan_id(hw); return 0; } /** * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices * * @hw: pointer to the HW structure * * Determines the LAN function id by reading memory-mapped registers * and swaps the port value if requested. **/ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) { struct e1000_bus_info *bus = &hw->bus; u32 reg; /* The status register reports the correct function number * for the device regardless of function swap state. */ reg = er32(STATUS); bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; } /** * e1000_set_lan_id_single_port - Set LAN id for a single port device * @hw: pointer to the HW structure * * Sets the LAN function id to zero for a single port device. **/ void e1000_set_lan_id_single_port(struct e1000_hw *hw) { struct e1000_bus_info *bus = &hw->bus; bus->func = 0; } /** * e1000_clear_vfta_generic - Clear VLAN filter table * @hw: pointer to the HW structure * * Clears the register array which contains the VLAN filter table by * setting all the values to 0. 
**/ void e1000_clear_vfta_generic(struct e1000_hw *hw) { u32 offset; for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); e1e_flush(); } } /** * e1000_write_vfta_generic - Write value to VLAN filter table * @hw: pointer to the HW structure * @offset: register offset in VLAN filter table * @value: register value written to VLAN filter table * * Writes value at the given offset in the register array which stores * the VLAN filter table. **/ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) { E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); e1e_flush(); } /** * e1000e_init_rx_addrs - Initialize receive address's * @hw: pointer to the HW structure * @rar_count: receive address registers * * Setup the receive address registers by setting the base receive address * register to the devices MAC address and clearing all the other receive * address registers to 0. **/ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) { u32 i; u8 mac_addr[ETH_ALEN] = { 0 }; /* Setup the receive address */ e_dbg("Programming MAC Address into RAR[0]\n"); hw->mac.ops.rar_set(hw, hw->mac.addr, 0); /* Zero out the other (rar_entry_count - 1) receive addresses */ e_dbg("Clearing RAR[1-%u]\n", rar_count - 1); for (i = 1; i < rar_count; i++) hw->mac.ops.rar_set(hw, mac_addr, i); } /** * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr * @hw: pointer to the HW structure * * Checks the nvm for an alternate MAC address. An alternate MAC address * can be setup by pre-boot software and must be treated like a permanent * address and must override the actual permanent MAC address. If an * alternate MAC address is found it is programmed into RAR0, replacing * the permanent address that was installed into RAR0 by the Si on reset. * This function will return SUCCESS unless it encounters an error while * reading the EEPROM. 
**/ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) { u32 i; s32 ret_val; u16 offset, nvm_alt_mac_addr_offset, nvm_data; u8 alt_mac_addr[ETH_ALEN]; ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); if (ret_val) return ret_val; /* not supported on 82573 */ if (hw->mac.type == e1000_82573) return 0; ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, &nvm_alt_mac_addr_offset); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } if ((nvm_alt_mac_addr_offset == 0xFFFF) || (nvm_alt_mac_addr_offset == 0x0000)) /* There is no Alternate MAC Address */ return 0; if (hw->bus.func == E1000_FUNC_1) nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; for (i = 0; i < ETH_ALEN; i += 2) { offset = nvm_alt_mac_addr_offset + (i >> 1); ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } alt_mac_addr[i] = (u8)(nvm_data & 0xFF); alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); } /* if multicast bit is set, the alternate address will not be used */ if (is_multicast_ether_addr(alt_mac_addr)) { e_dbg("Ignoring Alternate Mac Address with MC bit set\n"); return 0; } /* We have a valid alternate MAC address, and we want to treat it the * same as the normal permanent MAC address stored by the HW into the * RAR. Do this by mapping this address into RAR0. */ hw->mac.ops.rar_set(hw, alt_mac_addr, 0); return 0; } /** * e1000e_rar_set_generic - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register * * Sets the receive address array register at index to the address passed * in by addr. 
**/ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) rar_high |= E1000_RAH_AV; /* Some bridges will combine consecutive 32-bit writes into * a single burst write, which will malfunction on some parts. * The flushes avoid this. */ ew32(RAL(index), rar_low); e1e_flush(); ew32(RAH(index), rar_high); e1e_flush(); } /** * e1000_hash_mc_addr - Generate a multicast hash value * @hw: pointer to the HW structure * @mc_addr: pointer to a multicast address * * Generates a multicast address hash value which is used to determine * the multicast filter table array address and new table value. **/ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) { u32 hash_value, hash_mask; u8 bit_shift = 0; /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; /* For a mc_filter_type of 0, bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. */ while (hash_mask >> bit_shift != 0xFF) bit_shift++; /* The portion of the address that is used for the hash table * is determined by the mc_filter_type setting. * The algorithm is such that there is a total of 8 bits of shifting. * The bit_shift for a mc_filter_type of 0 represents the number of * left-shifts where the MSB of mc_addr[5] would still fall within * the hash_mask. Case 0 does this exactly. Since there are a total * of 8 bits of shifting, then mc_addr[4] will shift right the * remaining number of bits. Thus 8 - bit_shift. 
The rest of the * cases are a variation of this algorithm...essentially raising the * number of bits to shift mc_addr[5] left, while still keeping the * 8-bit shifting total. * * For example, given the following Destination MAC Address and an * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), * we can see that the bit_shift for case 0 is 4. These are the hash * values resulting from each mc_filter_type... * [0] [1] [2] [3] [4] [5] * 01 AA 00 12 34 56 * LSB MSB * * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 */ switch (hw->mac.mc_filter_type) { default: case 0: break; case 1: bit_shift += 1; break; case 2: bit_shift += 2; break; case 3: bit_shift += 4; break; } hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | (((u16)mc_addr[5]) << bit_shift))); return hash_value; } /** * e1000e_update_mc_addr_list_generic - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program * @mc_addr_count: number of multicast addresses to program * * Updates entire Multicast Table Array. * The caller must have a packed mc_addr_list of multicast addresses. 
**/ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count) { u32 hash_value, hash_bit, hash_reg; int i; /* clear mta_shadow */ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); /* update mta_shadow from mc_addr_list */ for (i = 0; (u32)i < mc_addr_count; i++) { hash_value = e1000_hash_mc_addr(hw, mc_addr_list); hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); hash_bit = hash_value & 0x1F; hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); mc_addr_list += (ETH_ALEN); } /* replace the entire MTA table */ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); e1e_flush(); } /** * e1000e_clear_hw_cntrs_base - Clear base hardware counters * @hw: pointer to the HW structure * * Clears the base hardware counters by reading the counter registers. **/ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) { er32(CRCERRS); er32(SYMERRS); er32(MPC); er32(SCC); er32(ECOL); er32(MCC); er32(LATECOL); er32(COLC); er32(DC); er32(SEC); er32(RLEC); er32(XONRXC); er32(XONTXC); er32(XOFFRXC); er32(XOFFTXC); er32(FCRUC); er32(GPRC); er32(BPRC); er32(MPRC); er32(GPTC); er32(GORCL); er32(GORCH); er32(GOTCL); er32(GOTCH); er32(RNBC); er32(RUC); er32(RFC); er32(ROC); er32(RJC); er32(TORL); er32(TORH); er32(TOTL); er32(TOTH); er32(TPR); er32(TPT); er32(MPTC); er32(BPTC); } /** * e1000e_check_for_copper_link - Check for link (Copper) * @hw: pointer to the HW structure * * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. **/ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; bool link; /* We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. 
The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) return 0; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. */ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) return 0; /* No link detected */ mac->get_link_status = false; /* Check if there was DownShift, must be checked * immediately after link-up */ e1000e_check_downshift(hw); /* If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ if (!mac->autoneg) return -E1000_ERR_CONFIG; /* Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ mac->ops.config_collision_dist(hw); /* Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. */ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) e_dbg("Error configuring flow control\n"); return ret_val; } /** * e1000e_check_for_fiber_link - Check for link (Fiber) * @hw: pointer to the HW structure * * Checks for link up on the hardware. If link is not up and we have * a signal, then we need to force link up. **/ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 rxcw; u32 ctrl; u32 status; s32 ret_val; ctrl = er32(CTRL); status = er32(STATUS); rxcw = er32(RXCW); /* If we don't have link (auto-negotiation failed or link partner * cannot auto-negotiate), the cable is plugged in (we have signal), * and our link partner is not trying to auto-negotiate with us (we * are receiving idles or data), we need to force link up. We also * need to give auto-negotiation time to complete, in case the cable * was just plugged in. 
The autoneg_failed flag does this. */ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { if (!mac->autoneg_failed) { mac->autoneg_failed = true; return 0; } e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); /* Force link-up and also force full-duplex. */ ctrl = er32(CTRL); ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); ew32(CTRL, ctrl); /* Configure Flow Control after forcing link up. */ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) { e_dbg("Error configuring flow control\n"); return ret_val; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { /* If we are forcing link and we are receiving /C/ ordered * sets, re-enable auto-negotiation in the TXCW register * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. */ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); mac->serdes_has_link = true; } return 0; } /** * e1000e_check_for_serdes_link - Check for link (Serdes) * @hw: pointer to the HW structure * * Checks for link up on the hardware. If link is not up and we have * a signal, then we need to force link up. **/ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 rxcw; u32 ctrl; u32 status; s32 ret_val; ctrl = er32(CTRL); status = er32(STATUS); rxcw = er32(RXCW); /* If we don't have link (auto-negotiation failed or link partner * cannot auto-negotiate), and our link partner is not trying to * auto-negotiate with us (we are receiving idles or data), * we need to force link up. We also need to give auto-negotiation * time to complete. 
*/ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { if (!mac->autoneg_failed) { mac->autoneg_failed = true; return 0; } e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); /* Force link-up and also force full-duplex. */ ctrl = er32(CTRL); ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); ew32(CTRL, ctrl); /* Configure Flow Control after forcing link up. */ ret_val = e1000e_config_fc_after_link_up(hw); if (ret_val) { e_dbg("Error configuring flow control\n"); return ret_val; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { /* If we are forcing link and we are receiving /C/ ordered * sets, re-enable auto-negotiation in the TXCW register * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. */ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); mac->serdes_has_link = true; } else if (!(E1000_TXCW_ANE & er32(TXCW))) { /* If we force link for non-auto-negotiation switch, check * link status based on MAC synchronization for internal * serdes media type. */ /* SYNCH bit and IV bit are sticky. */ usleep_range(10, 20); rxcw = er32(RXCW); if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { mac->serdes_has_link = true; e_dbg("SERDES: Link up - forced.\n"); } } else { mac->serdes_has_link = false; e_dbg("SERDES: Link down - force failed.\n"); } } if (E1000_TXCW_ANE & er32(TXCW)) { status = er32(STATUS); if (status & E1000_STATUS_LU) { /* SYNCH bit and IV bit are sticky, so reread rxcw. 
*/ usleep_range(10, 20); rxcw = er32(RXCW); if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { mac->serdes_has_link = true; e_dbg("SERDES: Link up - autoneg completed successfully.\n"); } else { mac->serdes_has_link = false; e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n"); } } else { mac->serdes_has_link = false; e_dbg("SERDES: Link down - no sync.\n"); } } else { mac->serdes_has_link = false; e_dbg("SERDES: Link down - autoneg failed\n"); } } return 0; } /** * e1000_set_default_fc_generic - Set flow control default values * @hw: pointer to the HW structure * * Read the EEPROM for the default values for flow control and store the * values. **/ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) { s32 ret_val; u16 nvm_data; /* Read and store word 0x0F of the EEPROM. This word contains bits * that determine the hardware's default PAUSE (flow control) mode, * a bit that determines whether the HW defaults to enabling or * disabling auto-negotiation, and the direction of the * SW defined pins. If there is no SW over-ride of the flow * control setting, then the variable hw->fc will * be initialized based on a value in the EEPROM. */ ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); if (ret_val) { e_dbg("NVM Read Error\n"); return ret_val; } if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) hw->fc.requested_mode = e1000_fc_none; else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) hw->fc.requested_mode = e1000_fc_tx_pause; else hw->fc.requested_mode = e1000_fc_full; return 0; } /** * e1000e_setup_link_generic - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow * control. Calls the appropriate media-specific link configuration * function. Assuming the adapter has a valid link partner, a valid link * should be established. 
Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ s32 e1000e_setup_link_generic(struct e1000_hw *hw) { s32 ret_val; /* In the case of the phy reset being blocked, we already have a link. * We do not need to set it up again. */ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) return 0; /* If requested flow control is set to default, set flow control * based on the EEPROM flow control settings. */ if (hw->fc.requested_mode == e1000_fc_default) { ret_val = e1000_set_default_fc_generic(hw); if (ret_val) return ret_val; } /* Save off the requested flow control mode for use later. Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Call the necessary media_type subroutine to configure the link. */ ret_val = hw->mac.ops.setup_physical_interface(hw); if (ret_val) return ret_val; /* Initialize the flow control address, type, and PAUSE timer * registers to their default values. This is done even if flow * control is disabled, because it does not hurt anything to * initialize these registers. */ e_dbg("Initializing the Flow Control address, type and timer regs\n"); ew32(FCT, FLOW_CONTROL_TYPE); ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); ew32(FCTTV, hw->fc.pause_time); return e1000e_set_fc_watermarks(hw); } /** * e1000_commit_fc_settings_generic - Configure flow control * @hw: pointer to the HW structure * * Write the flow control settings to the Transmit Config Word Register (TXCW) * base on the flow control settings in e1000_mac_info. **/ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 txcw; /* Check for a software override of the flow control settings, and * setup the device accordingly. 
If auto-negotiation is enabled, then * software will have to set the "PAUSE" bits to the correct value in * the Transmit Config Word Register (TXCW) and re-start auto- * negotiation. However, if auto-negotiation is disabled, then * software will have to manually configure the two flow control enable * bits in the CTRL register. * * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames but we * do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. */ switch (hw->fc.current_mode) { case e1000_fc_none: /* Flow control completely disabled by a software over-ride. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); break; case e1000_fc_rx_pause: /* Rx Flow control is enabled and Tx Flow control is disabled * by a software over-ride. Since there really isn't a way to * advertise that we are capable of Rx Pause ONLY, we will * advertise that we support both symmetric and asymmetric Rx * PAUSE. Later, we will disable the adapter's ability to send * PAUSE frames. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); break; case e1000_fc_tx_pause: /* Tx Flow control is enabled, and Rx Flow control is disabled, * by a software over-ride. */ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); break; case e1000_fc_full: /* Flow control (both Rx and Tx) is enabled by a software * over-ride. 
*/ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); break; default: e_dbg("Flow control param set incorrectly\n"); return -E1000_ERR_CONFIG; break; } ew32(TXCW, txcw); mac->txcw = txcw; return 0; } /** * e1000_poll_fiber_serdes_link_generic - Poll for link up * @hw: pointer to the HW structure * * Polls for link up by reading the status register, if link fails to come * up with auto-negotiation, then the link is forced if a signal is detected. **/ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 i, status; s32 ret_val; /* If we have a signal (the cable is plugged in, or assumed true for * serdes media) then poll for a "Link-Up" indication in the Device * Status Register. Time-out if a link isn't seen in 500 milliseconds * seconds (Auto-negotiation should complete in less than 500 * milliseconds even if the other end is doing it in SW). */ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { usleep_range(10000, 20000); status = er32(STATUS); if (status & E1000_STATUS_LU) break; } if (i == FIBER_LINK_UP_LIMIT) { e_dbg("Never got a valid link from auto-neg!!!\n"); mac->autoneg_failed = true; /* AutoNeg failed to achieve a link, so we'll call * mac->check_for_link. This routine will force the * link up if we detect a signal. This will allow us to * communicate with non-autonegotiating link partners. */ ret_val = mac->ops.check_for_link(hw); if (ret_val) { e_dbg("Error while checking for link\n"); return ret_val; } mac->autoneg_failed = false; } else { mac->autoneg_failed = false; e_dbg("Valid Link Found\n"); } return 0; } /** * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes * @hw: pointer to the HW structure * * Configures collision distance and flow control for fiber and serdes * links. Upon successful setup, poll for link. 
**/
s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	ctrl = er32(CTRL);

	/* Take the link out of reset */
	ctrl &= ~E1000_CTRL_LRST;

	hw->mac.ops.config_collision_dist(hw);

	ret_val = e1000_commit_fc_settings_generic(hw);
	if (ret_val)
		return ret_val;

	/* Since auto-negotiation is enabled, take the link out of reset (the
	 * link will be in reset, because we previously reset the chip). This
	 * will restart auto-negotiation.  If auto-negotiation is successful
	 * then the link-up status bit will be set and the flow control enable
	 * bits (RFCE and TFCE) will be set according to their negotiated value.
	 */
	e_dbg("Auto-negotiation enabled\n");

	ew32(CTRL, ctrl);
	e1e_flush();
	usleep_range(1000, 2000);

	/* For these adapters, the SW definable pin 1 is set when the optics
	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
	 * indication.
	 */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
	} else {
		e_dbg("No signal detected\n");
	}

	return ret_val;
}

/**
 * e1000e_config_collision_dist_generic - Configure collision distance
 * @hw: pointer to the HW structure
 *
 * Configures the collision distance to the default value and is used
 * during link setup.
 **/
void e1000e_config_collision_dist_generic(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = er32(TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	ew32(TCTL, tctl);
	e1e_flush();
}

/**
 * e1000e_set_fc_watermarks - Set flow control high/low watermarks
 * @hw: pointer to the HW structure
 *
 * Sets the flow control high/low threshold (watermark) registers.  If
 * flow control XON frame transmission is enabled, then set XON frame
 * transmission as well.
 **/
s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
{
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	ew32(FCRTL, fcrtl);
	ew32(FCRTH, fcrth);

	return 0;
}

/**
 * e1000e_force_mac_fc - Force the MAC's flow control settings
 * @hw: pointer to the HW structure
 *
 * Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
 * device control register to reflect the adapter settings.  TFCE and RFCE
 * need to be explicitly set by software when a copper PHY is used because
 * autonegotiation is managed by the PHY rather than the MAC.  Software must
 * also configure these bits when link is forced on a fiber connection.
 **/
s32 e1000e_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;

	ctrl = er32(CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disables flow control
	 * according to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          frames but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		e_dbg("Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ew32(CTRL, ctrl);

	return 0;
}

/**
 * e1000e_config_fc_after_link_up - Configures flow control after link
 * @hw: pointer to the HW structure
 *
 * Checks the status of auto-negotiation after link up to ensure that the
 * speed and duplex were not forced.  If the link needed to be forced, then
 * flow control needs to be forced also.  If auto-negotiation is enabled
 * and did not fail, then we configure flow control based on our link
 * partner.
 **/
s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = e1000e_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = e1000e_force_mac_fc(hw);
	}

	if (ret_val) {
		e_dbg("Error forcing flow control settings\n");
		return ret_val;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
		if (ret_val)
			return ret_val;

		if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) {
			e_dbg("Copper PHY and Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto-Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg);
		if (ret_val)
			return ret_val;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
		    (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				e_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				e_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
			 (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			e_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
			 (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
			 !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			e_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			e_dbg("Flow Control = NONE.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
		if (ret_val) {
			e_dbg("Error getting link speed and duplex\n");
			return ret_val;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			e_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
	    mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = er32(PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			e_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto-Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = er32(PCS_ANADV);
		pcs_lp_ability_reg = er32(PCS_LPAB);

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
		 * Page Ability Register (PCS_LPAB) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				e_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				e_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			e_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			e_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			e_dbg("Flow Control = NONE.\n");
		}

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		pcs_ctrl_reg = er32(PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		ew32(PCS_LCTL, pcs_ctrl_reg);

		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			e_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	return 0;
}

/**
 * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Read the status register for the current speed/duplex and store the current
 * speed and duplex for copper connections.
 **/
s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				       u16 *duplex)
{
	u32 status;

	status = er32(STATUS);
	if (status & E1000_STATUS_SPEED_1000)
		*speed = SPEED_1000;
	else if (status & E1000_STATUS_SPEED_100)
		*speed = SPEED_100;
	else
		*speed = SPEED_10;

	if (status & E1000_STATUS_FD)
		*duplex = FULL_DUPLEX;
	else
		*duplex = HALF_DUPLEX;

	e_dbg("%u Mbps, %s Duplex\n",
	      *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
	      *duplex == FULL_DUPLEX ? "Full" : "Half");

	return 0;
}

/**
 * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Sets the speed and duplex to gigabit full duplex (the only possible option)
 * for fiber/serdes links.
 **/
s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused
					     *hw, u16 *speed, u16 *duplex)
{
	*speed = SPEED_1000;
	*duplex = FULL_DUPLEX;

	return 0;
}

/**
 * e1000e_get_hw_semaphore - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 **/
s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = er32(SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usleep_range(50, 100);
		i++;
	}

	if (i == timeout) {
		e_dbg("Driver can't access device - SMBI bit is set.\n");
		return -E1000_ERR_NVM;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (er32(SWSM) & E1000_SWSM_SWESMBI)
			break;

		usleep_range(50, 100);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000e_put_hw_semaphore(hw);
		e_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 * e1000e_put_hw_semaphore - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used to access the PHY or NVM
 **/
void e1000e_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = er32(SWSM);
	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
	ew32(SWSM, swsm);
}

/**
 * e1000e_get_auto_rd_done - Check for auto read completion
 * @hw: pointer to the HW structure
 *
 * Check EEPROM for Auto Read done bit.
 **/
s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (er32(EECD) & E1000_EECD_AUTO_RD)
			break;
		usleep_range(1000, 2000);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		e_dbg("Auto read by HW from NVM has not completed.\n");
		return -E1000_ERR_RESET;
	}

	return 0;
}

/**
 * e1000e_valid_led_default - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration.  If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
		*data = ID_LED_DEFAULT;

	return 0;
}

/**
 * e1000e_id_led_init_generic - store LED configurations in mode1/mode2
 * @hw: pointer to the HW structure
 *
 * Reads the NVM default LED settings and derives the LEDCTL register values
 * for LED "mode1" (normally on) and "mode2" (normally off), saved in the
 * mac struct for later use by the LED on/off routines.
 **/
s32 e1000e_id_led_init_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* Each nibble of 'data' configures one LED; each LED occupies one
	 * byte of the LEDCTL register.
	 */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return 0;
}

/**
 * e1000e_setup_led_generic - Configures SW controllable LED
 * @hw: pointer to the HW structure
 *
 * This prepares the SW controllable LED for use and saves the current state
 * of the LED so it can be later restored.
 **/
s32 e1000e_setup_led_generic(struct e1000_hw *hw)
{
	u32 ledctl;

	if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
		return -E1000_ERR_CONFIG;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		ledctl = er32(LEDCTL);
		hw->mac.ledctl_default = ledctl;
		/* Turn off LED0 */
		ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
			    E1000_LEDCTL_LED0_MODE_MASK);
		ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
			   E1000_LEDCTL_LED0_MODE_SHIFT);
		ew32(LEDCTL, ledctl);
	} else if (hw->phy.media_type == e1000_media_type_copper) {
		ew32(LEDCTL, hw->mac.ledctl_mode1);
	}

	return 0;
}

/**
 * e1000e_cleanup_led_generic - Set LED config to default operation
 * @hw: pointer to the HW structure
 *
 * Remove the current LED configuration and set the LED configuration
 * to the default value, saved from the EEPROM.
 **/
s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
{
	ew32(LEDCTL, hw->mac.ledctl_default);
	return 0;
}

/**
 * e1000e_blink_led_generic - Blink LED
 * @hw: pointer to the HW structure
 *
 * Blink the LEDs which are set to be on.
 **/
s32 e1000e_blink_led_generic(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		    (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (0x0E)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on"
		 * so it must be changed accordingly when the mode is
		 * "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	ew32(LEDCTL, ledctl_blink);

	return 0;
}

/**
 * e1000e_led_on_generic - Turn LED on
 * @hw: pointer to the HW structure
 *
 * Turn LED on.
 **/
s32 e1000e_led_on_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->phy.media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode2);
		break;
	default:
		break;
	}

	return 0;
}

/**
 * e1000e_led_off_generic - Turn LED off
 * @hw: pointer to the HW structure
 *
 * Turn LED off.
 **/
s32 e1000e_led_off_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->phy.media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}

/**
 * e1000e_set_pcie_no_snoop - Set PCI-express capabilities
 * @hw: pointer to the HW structure
 * @no_snoop: bitmap of snoop events
 *
 * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
 **/
void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
{
	u32 gcr;

	if (no_snoop) {
		gcr = er32(GCR);
		gcr &= ~(PCIE_NO_SNOOP_ALL);
		gcr |= no_snoop;
		ew32(GCR, gcr);
	}
}

/**
 * e1000e_disable_pcie_master - Disables PCI-express master access
 * @hw: pointer to the HW structure
 *
 * Returns 0 if successful, else returns -10
 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
 * the master requests to be disabled.
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests.
 **/
s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;

	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	ew32(CTRL, ctrl);

	while (timeout) {
		if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		usleep_range(100, 200);
		timeout--;
	}

	if (!timeout) {
		e_dbg("Master requests are pending.\n");
		return -E1000_ERR_MASTER_REQUESTS_PENDING;
	}

	return 0;
}

/**
 * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
 * @hw: pointer to the HW structure
 *
 * Reset the Adaptive Interframe Spacing throttle to default values.
 **/
void e1000e_reset_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	if (!mac->adaptive_ifs) {
		e_dbg("Not in Adaptive IFS mode!\n");
		return;
	}

	mac->current_ifs_val = 0;
	mac->ifs_min_val = IFS_MIN;
	mac->ifs_max_val = IFS_MAX;
	mac->ifs_step_size = IFS_STEP;
	mac->ifs_ratio = IFS_RATIO;

	mac->in_ifs_mode = false;
	ew32(AIT, 0);
}

/**
 * e1000e_update_adaptive - Update Adaptive Interframe Spacing
 * @hw: pointer to the HW structure
 *
 * Update the Adaptive Interframe Spacing Throttle value based on the
 * time between transmitted packets and time between collisions.
 **/
void e1000e_update_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	if (!mac->adaptive_ifs) {
		e_dbg("Not in Adaptive IFS mode!\n");
		return;
	}

	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
			mac->in_ifs_mode = true;
			if (mac->current_ifs_val < mac->ifs_max_val) {
				/* Ramp the IFS value up from the minimum in
				 * fixed steps while collisions remain high.
				 */
				if (!mac->current_ifs_val)
					mac->current_ifs_val = mac->ifs_min_val;
				else
					mac->current_ifs_val +=
					    mac->ifs_step_size;
				ew32(AIT, mac->current_ifs_val);
			}
		}
	} else {
		if (mac->in_ifs_mode &&
		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
			mac->current_ifs_val = 0;
			mac->in_ifs_mode = false;
			ew32(AIT, 0);
		}
	}
}
gpl-2.0
davidmueller13/flo-1
drivers/video/msm/vidc/720p/ddl/vcd_ddl_interrupt_handler.c
3618
29418
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <media/msm/vidc_type.h>
#include "vidc.h"
#include "vcd_ddl_utils.h"
#include "vcd_ddl_metadata.h"

#if DEBUG
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...)
#endif

static void ddl_decoder_input_done_callback(
	struct ddl_client_context *ddl, u32 frame_transact_end);
static u32 ddl_decoder_output_done_callback(
	struct ddl_client_context *ddl, u32 frame_transact_end);
static u32 ddl_get_frame
    (struct vcd_frame_data *frame, u32 frame_type);
static void ddl_getdec_profilelevel
    (struct ddl_decoder_data *decoder, u32 profile, u32 level);

/* DMA-init completion: verifies the command state matches DDL_CMD_DMA_INIT,
 * then kicks off the firmware CPU boot (ddl_core_start_cpu).
 */
static void ddl_dma_done_callback(struct ddl_context *ddl_context)
{
	if (!DDLCOMMAND_STATE_IS(ddl_context, DDL_CMD_DMA_INIT)) {
		VIDC_LOGERR_STRING("UNKWN_DMADONE");
		return;
	}
	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
	VIDC_LOG_STRING("DMA_DONE");
	ddl_core_start_cpu(ddl_context);
}

/* Firmware CPU start completion: programs the deblocking line buffer,
 * marks the device initialized and reports VCD_EVT_RESP_DEVICE_INIT to
 * the client, then returns the DDL to idle.
 */
static void ddl_cpu_started_callback(struct ddl_context *ddl_context)
{
	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
	VIDC_LOG_STRING("CPU-STARTED");

	if (!vidc_720p_cpu_start()) {
		/* CPU failed to come up -- treat as a hardware-fatal error */
		ddl_hw_fatal_cb(ddl_context);
		return;
	}

	vidc_720p_set_deblock_line_buffer(
		ddl_context->db_line_buffer.align_physical_addr,
		ddl_context->db_line_buffer.buffer_size);
	ddl_context->device_state = DDL_DEVICE_INITED;
	ddl_context->ddl_callback(VCD_EVT_RESP_DEVICE_INIT, VCD_S_SUCCESS,
		NULL, 0, NULL, ddl_context->client_data);
	DDL_IDLE(ddl_context);
}

/* EOS completion for a decoder client.  Returns true when the transaction
 * is finished (client notified or fatal), false when a mid-stream
 * resolution change restarts codec initialization instead.
 */
static u32 ddl_eos_done_callback(struct ddl_context *ddl_context)
{
	struct ddl_client_context *ddl = ddl_context->current_ddl;
	u32 displaystatus, resl_change;

	if (!DDLCOMMAND_STATE_IS(ddl_context, DDL_CMD_EOS)) {
		VIDC_LOGERR_STRING("UNKWN_EOSDONE");
		ddl_client_fatal_cb(ddl_context);
		return true;
	}

	if (!ddl ||
	    !ddl->decoding ||
	    !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE)
	    ) {
		VIDC_LOG_STRING("STATE-CRITICAL-EOSDONE");
		ddl_client_fatal_cb(ddl_context);
		return true;
	}
	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);

	vidc_720p_eos_info(&displaystatus, &resl_change);
	if ((enum vidc_720p_display_status)displaystatus
		!= VIDC_720P_EMPTY_BUFFER) {
		VIDC_LOG_STRING("EOSDONE-EMPTYBUF-ISSUE");
	}

	ddl_decode_dynamic_property(ddl, false);
	if (resl_change == 0x1) {
		/* Resolution changed at EOS: re-run header parsing using the
		 * current input frame as the new sequence header.
		 */
		ddl->codec_data.decoder.header_in_start = false;
		ddl->codec_data.decoder.decode_config.sequence_header =
			ddl->input_frame.vcd_frm.physical;
		ddl->codec_data.decoder.decode_config.sequence_header_len =
			ddl->input_frame.vcd_frm.data_len;
		ddl_decode_init_codec(ddl);
		return false;
	}
	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME);
	VIDC_LOG_STRING("EOS_DONE");
	ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE, VCD_S_SUCCESS,
		NULL, 0, (u32 *) ddl, ddl_context->client_data);
	DDL_IDLE(ddl_context);

	return true;
}

/* Channel-set completion: advances the client to INITCODEC.  For decoders
 * with an up-front header this triggers codec init; otherwise the client is
 * told the session started.  Encoders always proceed to codec init.
 * Returns true only when the DDL was returned to idle here.
 */
static u32 ddl_channel_set_callback(struct ddl_context *ddl_context)
{
	struct ddl_client_context *ddl = ddl_context->current_ddl;
	u32 return_status = false;

	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
	VIDC_DEBUG_REGISTER_LOG;

	if (!ddl ||
	    !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_CHDONE)
	    ) {
		VIDC_LOG_STRING("STATE-CRITICAL-CHSET");
		DDL_IDLE(ddl_context);
		return return_status;
	}
	VIDC_LOG_STRING("Channel-set");
	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC);

	if (ddl->decoding) {
		if (vidc_msg_timing)
			ddl_calc_core_proc_time(__func__, DEC_OP_TIME);
		if (ddl->codec_data.decoder.header_in_start) {
			ddl_decode_init_codec(ddl);
		} else {
			ddl_context->ddl_callback(VCD_EVT_RESP_START,
				VCD_S_SUCCESS, NULL, 0, (u32 *) ddl,
				ddl_context->client_data);
			DDL_IDLE(ddl_context);
			return_status = true;
		}
	} else {
		ddl_encode_init_codec(ddl);
	}
	return return_status;
}

/* Encoder codec-init completion: fetches the generated sequence header (if a
 * buffer was supplied) and reports VCD_EVT_RESP_START to the client.
 */
static void ddl_init_codec_done_callback(struct ddl_context *ddl_context)
{
	struct ddl_client_context *ddl = ddl_context->current_ddl;
	struct ddl_encoder_data *encoder;

	if (!ddl || ddl->decoding ||
	    !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE)
	    ) {
		VIDC_LOG_STRING("STATE-CRITICAL-INITCODEC");
		ddl_client_fatal_cb(ddl_context);
		return;
	}
	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME);
	VIDC_LOG_STRING("INIT_CODEC_DONE");

	encoder = &ddl->codec_data.encoder;
	if (encoder->seq_header.virtual_base_addr) {
		vidc_720p_encode_get_header(&encoder->seq_header.
			buffer_size);
	}

	ddl_context->ddl_callback(VCD_EVT_RESP_START, VCD_S_SUCCESS,
		NULL, 0, (u32 *) ddl, ddl_context->client_data);
	DDL_IDLE(ddl_context);
}

/* Decoder header-parse completion: captures sequence parameters from the
 * hardware, derives frame geometry and buffer requirements, and decides
 * whether the client must be asked to reconfigure its output buffers.
 * Returns true when the interrupt transaction is complete here, false when
 * DPB-set processing continues.
 */
static u32 ddl_header_done_callback(struct ddl_context *ddl_context)
{
	struct ddl_client_context *ddl = ddl_context->current_ddl;
	struct ddl_decoder_data *decoder;
	struct vidc_720p_seq_hdr_info seq_hdr_info;
	u32 process_further = true;
	u32 seq_hdr_only_frame = false;
	u32 need_reconfig = true;
	struct vcd_frame_data *input_vcd_frm;
	struct ddl_frame_data_tag *reconfig_payload = NULL;
	u32 reconfig_payload_size = 0;

	if (!ddl ||
	    !ddl->decoding ||
	    !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE)
	    ) {
		VIDC_LOG_STRING("STATE-CRITICAL-HDDONE");
		ddl_client_fatal_cb(ddl_context);
		return true;
	}
	if (vidc_msg_timing)
		ddl_calc_core_proc_time(__func__, DEC_OP_TIME);
	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_DPB);
	VIDC_LOG_STRING("HEADER_DONE");
	VIDC_DEBUG_REGISTER_LOG;

	vidc_720p_decode_get_seq_hdr_info(&seq_hdr_info);

	decoder = &(ddl->codec_data.decoder);
	decoder->frame_size.width = seq_hdr_info.img_size_x;
	decoder->frame_size.height = seq_hdr_info.img_size_y;
	decoder->min_dpb_num = seq_hdr_info.min_num_dpb;
	decoder->y_cb_cr_size = seq_hdr_info.min_dpb_size;
	decoder->progressive_only = 1 - seq_hdr_info.progressive;
	if (!seq_hdr_info.img_size_x || !seq_hdr_info.img_size_y) {
		VIDC_LOGERR_STRING("FATAL: ZeroImageSize");
		ddl_client_fatal_cb(ddl_context);
		return process_further;
	}
	/* Data-partitioned MPEG4 beyond the DP size limit is unsupported */
	if (seq_hdr_info.data_partitioned == 0x1 &&
	    decoder->codec.codec == VCD_CODEC_MPEG4 &&
	    seq_hdr_info.img_size_x > DDL_MAX_DP_FRAME_WIDTH &&
	    seq_hdr_info.img_size_y > DDL_MAX_DP_FRAME_HEIGHT) {
		ddl_client_fatal_cb(ddl_context);
		return process_further;
	}
	ddl_getdec_profilelevel(decoder, seq_hdr_info.profile,
		seq_hdr_info.level);
	ddl_calculate_stride(&decoder->frame_size,
		!decoder->progressive_only,
		decoder->codec.codec);
	if (decoder->buf_format.buffer_format == VCD_BUFFER_FORMAT_TILE_4x2) {
		decoder->frame_size.stride =
			DDL_TILE_ALIGN(decoder->frame_size.width,
				DDL_TILE_ALIGN_WIDTH);
		decoder->frame_size.scan_lines =
			DDL_TILE_ALIGN(decoder->frame_size.height,
				DDL_TILE_ALIGN_HEIGHT);
	}
	if (seq_hdr_info.crop_exists) {
		decoder->frame_size.width -=
			(seq_hdr_info.crop_right_offset +
			 seq_hdr_info.crop_left_offset);
		decoder->frame_size.height -=
			(seq_hdr_info.crop_top_offset +
			 seq_hdr_info.crop_bottom_offset);
	}
	ddl_set_default_decoder_buffer_req(decoder, false);
	if (decoder->header_in_start) {
		/* Header came with the start command -- publish the derived
		 * geometry/requirements and report session start.
		 */
		decoder->client_frame_size = decoder->frame_size;
		decoder->client_output_buf_req =
			decoder->actual_output_buf_req;
		decoder->client_input_buf_req =
			decoder->actual_input_buf_req;
		ddl_context->ddl_callback(VCD_EVT_RESP_START, VCD_S_SUCCESS,
			NULL, 0, (u32 *) ddl, ddl_context->client_data);
		DDL_IDLE(ddl_context);
	} else {
		DBG("%s(): Client data: WxH(%u x %u) SxSL(%u x %u) Sz(%u)\n",
			__func__, decoder->client_frame_size.width,
			decoder->client_frame_size.height,
			decoder->client_frame_size.stride,
			decoder->client_frame_size.scan_lines,
			decoder->client_output_buf_req.sz);
		DBG("%s(): DDL data: WxH(%u x %u) SxSL(%u x %u) Sz(%u)\n",
			__func__, decoder->frame_size.width,
			decoder->frame_size.height,
			decoder->frame_size.stride,
			decoder->frame_size.scan_lines,
			decoder->actual_output_buf_req.sz);
		DBG("%s(): min_dpb_num = %d actual_count = %d\n", __func__,
			decoder->min_dpb_num,
			decoder->client_output_buf_req.actual_count);
		input_vcd_frm = &(ddl->input_frame.vcd_frm);
		/* Reconfiguration is unnecessary only when the client's
		 * existing geometry and buffer provisioning already satisfy
		 * what the parsed header requires.
		 */
		if (decoder->frame_size.width ==
			decoder->client_frame_size.width &&
		    decoder->frame_size.height ==
			decoder->client_frame_size.height &&
		    decoder->frame_size.stride ==
			decoder->client_frame_size.stride &&
		    decoder->frame_size.scan_lines ==
			decoder->client_frame_size.scan_lines &&
		    decoder->actual_output_buf_req.sz <=
			decoder->client_output_buf_req.sz &&
		    decoder->actual_output_buf_req.actual_count <=
			decoder->client_output_buf_req.actual_count &&
		    decoder->progressive_only)
			need_reconfig = false;
		if ((input_vcd_frm->data_len <= seq_hdr_info.dec_frm_size ||
		     (input_vcd_frm->flags & VCD_FRAME_FLAG_CODECCONFIG)) &&
		    (!need_reconfig ||
		     !(input_vcd_frm->flags & VCD_FRAME_FLAG_EOS))) {
			/* Input held only the sequence header -- consume it
			 * fully and complete the INPUT_DONE immediately.
			 */
			input_vcd_frm->flags |= VCD_FRAME_FLAG_CODECCONFIG;
			seq_hdr_only_frame = true;
			input_vcd_frm->data_len = 0;
			ddl->input_frame.frm_trans_end = !need_reconfig;
			ddl_context->ddl_callback(VCD_EVT_RESP_INPUT_DONE,
				VCD_S_SUCCESS, &ddl->input_frame,
				sizeof(struct ddl_frame_data_tag),
				(u32 *) ddl, ddl->ddl_context->client_data);
		} else if (decoder->codec.codec != VCD_CODEC_H263) {
			/* Skip past the header bytes already consumed by HW */
			input_vcd_frm->offset += seq_hdr_info.dec_frm_size;
			input_vcd_frm->data_len -= seq_hdr_info.dec_frm_size;
		}
		if (need_reconfig) {
			decoder->client_frame_size = decoder->frame_size;
			decoder->client_output_buf_req =
				decoder->actual_output_buf_req;
			decoder->client_input_buf_req =
				decoder->actual_input_buf_req;
			if (!seq_hdr_only_frame) {
				reconfig_payload = &ddl->input_frame;
				reconfig_payload_size =
					sizeof(struct ddl_frame_data_tag);
			}
			ddl_context->ddl_callback(VCD_EVT_IND_OUTPUT_RECONFIG,
				VCD_S_SUCCESS, reconfig_payload,
				reconfig_payload_size,
				(u32 *) ddl, ddl_context->client_data);
		}
		if (!need_reconfig && !seq_hdr_only_frame) {
			if (ddl_decode_set_buffers(ddl) == VCD_S_SUCCESS)
				process_further = false;
			else
				ddl_client_fatal_cb(ddl_context);
		} else
			DDL_IDLE(ddl_context);
	}
	return process_further;
}

/* DPB-set completion: clears the cached display-info geometry and starts
 * the first frame-run.  Always returns false (frame-run continues).
 */
static u32 ddl_dpb_buffers_set_done_callback(struct ddl_context *ddl_context)
{
	struct ddl_client_context *ddl = ddl_context->current_ddl;

	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
	if (!ddl ||
	    !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPBDONE)
	    ) {
		VIDC_LOG_STRING("STATE-CRITICAL-DPBDONE");
		ddl_client_fatal_cb(ddl_context);
		return true;
	}
	if (vidc_msg_timing) {
		ddl_calc_core_proc_time(__func__, DEC_OP_TIME);
		ddl_reset_core_time_variables(DEC_OP_TIME);
	}
	VIDC_LOG_STRING("INTR_DPBDONE");
	ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME);
	ddl->codec_data.decoder.dec_disp_info.img_size_x = 0;
	ddl->codec_data.decoder.dec_disp_info.img_size_y = 0;
	ddl_decode_frame_run(ddl);
	return false;
}

/* Encoder frame-run completion: propagates the encoded size/frame type to
 * the output frame and issues INPUT_DONE (OUTPUT_DONE/EOS follow below).
 */
static void ddl_encoder_frame_run_callback(struct ddl_context *ddl_context)
{
	struct ddl_client_context *ddl = ddl_context->current_ddl;
	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
	u32 eos_present = false;

	if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE)
	    ) {
		VIDC_LOG_STRING("STATE-CRITICAL-ENCFRMRUN");
		ddl_client_fatal_cb(ddl_context);
		return;
	}

	VIDC_LOG_STRING("ENC_FRM_RUN_DONE");

	ddl_move_command_state(ddl_context, DDL_CMD_INVALID);
	vidc_720p_enc_frame_info(&encoder->enc_frame_info);

	ddl->output_frame.vcd_frm.ip_frm_tag =
		ddl->input_frame.vcd_frm.ip_frm_tag;
	ddl->output_frame.vcd_frm.data_len =
		encoder->enc_frame_info.enc_size;
	ddl->output_frame.vcd_frm.flags |= VCD_FRAME_FLAG_ENDOFFRAME;
	ddl_get_frame
	    (&(ddl->output_frame.vcd_frm), encoder->enc_frame_info.frame);
	ddl_process_encoder_metadata(ddl);

	ddl_encode_dynamic_property(ddl, false);

	ddl->input_frame.frm_trans_end = false;
	ddl_context->ddl_callback(VCD_EVT_RESP_INPUT_DONE, VCD_S_SUCCESS,
		&(ddl->input_frame), sizeof(struct ddl_frame_data_tag),
		(u32 *) ddl, ddl_context->client_data);

	if (vidc_msg_timing)
		ddl_calc_core_proc_time(__func__, ENC_OP_TIME);

	/* check the presence of EOS
*/ eos_present = ((VCD_FRAME_FLAG_EOS & ddl->input_frame.vcd_frm.flags)); ddl->output_frame.frm_trans_end = !eos_present; ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS, &(ddl->output_frame), sizeof(struct ddl_frame_data_tag), (u32 *) ddl, ddl_context->client_data); if (eos_present) { VIDC_LOG_STRING("ENC-EOS_DONE"); ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE, VCD_S_SUCCESS, NULL, 0, (u32 *)ddl, ddl_context->client_data); } ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME); DDL_IDLE(ddl_context); } static u32 ddl_decoder_frame_run_callback(struct ddl_context *ddl_context) { struct ddl_client_context *ddl = ddl_context->current_ddl; struct vidc_720p_dec_disp_info *dec_disp_info = &(ddl->codec_data.decoder.dec_disp_info); u32 callback_end = false; u32 status = true, eos_present = false;; if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE)) { VIDC_LOG_STRING("STATE-CRITICAL-DECFRMRUN"); ddl_client_fatal_cb(ddl_context); return true; } VIDC_LOG_STRING("DEC_FRM_RUN_DONE"); ddl_move_command_state(ddl_context, DDL_CMD_INVALID); vidc_720p_decode_display_info(dec_disp_info); ddl_decode_dynamic_property(ddl, false); if (dec_disp_info->resl_change) { VIDC_LOG_STRING ("DEC_FRM_RUN_DONE: RECONFIG"); ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE); ddl_move_command_state(ddl_context, DDL_CMD_EOS); vidc_720p_submit_command(ddl->channel_id, VIDC_720P_CMD_FRAMERUN_REALLOCATE); return false; } if ((VCD_FRAME_FLAG_EOS & ddl->input_frame.vcd_frm.flags)) { callback_end = false; eos_present = true; } if (dec_disp_info->disp_status == VIDC_720P_DECODE_ONLY || dec_disp_info->disp_status == VIDC_720P_DECODE_AND_DISPLAY) { if (!eos_present) callback_end = (dec_disp_info->disp_status == VIDC_720P_DECODE_ONLY); ddl_decoder_input_done_callback(ddl, callback_end); } if (dec_disp_info->disp_status == VIDC_720P_DECODE_AND_DISPLAY || dec_disp_info->disp_status == VIDC_720P_DISPLAY_ONLY) { if (!eos_present) callback_end = (dec_disp_info->disp_status == 
VIDC_720P_DECODE_AND_DISPLAY); if (ddl_decoder_output_done_callback(ddl, callback_end) != VCD_S_SUCCESS) return true; } if (dec_disp_info->disp_status == VIDC_720P_DISPLAY_ONLY || dec_disp_info->disp_status == VIDC_720P_EMPTY_BUFFER) { /* send the same input once again for decoding */ ddl_decode_frame_run(ddl); /* client need to ignore the interrupt */ status = false; } else if (eos_present) { /* send EOS command to HW */ ddl_decode_eos_run(ddl); /* client need to ignore the interrupt */ status = false; } else { ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME); /* move to Idle */ DDL_IDLE(ddl_context); } return status; } static u32 ddl_eos_frame_done_callback(struct ddl_context *ddl_context) { struct ddl_client_context *ddl = ddl_context->current_ddl; struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder); struct vidc_720p_dec_disp_info *dec_disp_info = &(decoder->dec_disp_info); if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE)) { VIDC_LOGERR_STRING("STATE-CRITICAL-EOSFRMRUN"); ddl_client_fatal_cb(ddl_context); return true; } VIDC_LOG_STRING("EOS_FRM_RUN_DONE"); ddl_move_command_state(ddl_context, DDL_CMD_INVALID); vidc_720p_decode_display_info(dec_disp_info); ddl_decode_dynamic_property(ddl, false); if (dec_disp_info->disp_status == VIDC_720P_DISPLAY_ONLY) { if (ddl_decoder_output_done_callback(ddl, false) != VCD_S_SUCCESS) return true; } else VIDC_LOG_STRING("STATE-CRITICAL-WRONG-DISP-STATUS"); ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_SET_MASK); ddl_move_command_state(ddl_context, DDL_CMD_EOS); vidc_720p_submit_command(ddl->channel_id, VIDC_720P_CMD_FRAMERUN); return false; } static void ddl_channel_end_callback(struct ddl_context *ddl_context) { struct ddl_client_context *ddl; ddl_move_command_state(ddl_context, DDL_CMD_INVALID); VIDC_LOG_STRING("CH_END_DONE"); ddl = ddl_context->current_ddl; if (!ddl || !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_CHEND) ) { VIDC_LOG_STRING("STATE-CRITICAL-CHEND"); DDL_IDLE(ddl_context); return; 
} ddl_release_client_internal_buffers(ddl); ddl_context->ddl_callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS, NULL, 0, (u32 *) ddl, ddl_context->client_data); ddl_move_client_state(ddl, DDL_CLIENT_OPEN); DDL_IDLE(ddl_context); } static u32 ddl_operation_done_callback(struct ddl_context *ddl_context) { u32 return_status = true; switch (ddl_context->cmd_state) { case DDL_CMD_DECODE_FRAME: { return_status = ddl_decoder_frame_run_callback( ddl_context); break; } case DDL_CMD_ENCODE_FRAME: { ddl_encoder_frame_run_callback(ddl_context); break; } case DDL_CMD_CHANNEL_SET: { return_status = ddl_channel_set_callback( ddl_context); break; } case DDL_CMD_INIT_CODEC: { ddl_init_codec_done_callback(ddl_context); break; } case DDL_CMD_HEADER_PARSE: { return_status = ddl_header_done_callback( ddl_context); break; } case DDL_CMD_DECODE_SET_DPB: { return_status = ddl_dpb_buffers_set_done_callback( ddl_context); break; } case DDL_CMD_CHANNEL_END: { ddl_channel_end_callback(ddl_context); break; } case DDL_CMD_EOS: { return_status = ddl_eos_frame_done_callback( ddl_context); break; } case DDL_CMD_CPU_RESET: { ddl_cpu_started_callback(ddl_context); break; } default: { VIDC_LOG_STRING("UNKWN_OPDONE"); return_status = false; break; } } return return_status; } static u32 ddl_process_intr_status(struct ddl_context *ddl_context, u32 int_status) { u32 status = true; switch (int_status) { case VIDC_720P_INTR_FRAME_DONE: { status = ddl_operation_done_callback(ddl_context); break; } case VIDC_720P_INTR_DMA_DONE: { ddl_dma_done_callback(ddl_context); status = false; break; } case VIDC_720P_INTR_FW_DONE: { status = ddl_eos_done_callback(ddl_context); break; } case VIDC_720P_INTR_BUFFER_FULL: { VIDC_LOGERR_STRING("BUF_FULL_INTR"); ddl_hw_fatal_cb(ddl_context); break; } default: { VIDC_LOGERR_STRING("UNKWN_INTR"); break; } } return status; } void ddl_read_and_clear_interrupt(void) { struct ddl_context *ddl_context; ddl_context = ddl_get_context(); if (!ddl_context->core_virtual_base_addr) { 
VIDC_LOGERR_STRING("SPURIOUS_INTERRUPT"); return; } vidc_720p_get_interrupt_status(&ddl_context->intr_status, &ddl_context->cmd_err_status, &ddl_context->disp_pic_err_status, &ddl_context->op_failed ); vidc_720p_interrupt_done_clear(); } u32 ddl_process_core_response(void) { struct ddl_context *ddl_context; u32 return_status = true; ddl_context = ddl_get_context(); if (!ddl_context->core_virtual_base_addr) { VIDC_LOGERR_STRING("UNKWN_INTR"); return false; } if (!ddl_handle_core_errors(ddl_context)) { return_status = ddl_process_intr_status(ddl_context, ddl_context->intr_status); } if (ddl_context->interrupt_clr) (*ddl_context->interrupt_clr)(); return return_status; } static void ddl_decoder_input_done_callback( struct ddl_client_context *ddl, u32 frame_transact_end) { struct vidc_720p_dec_disp_info *dec_disp_info = &(ddl->codec_data.decoder.dec_disp_info); struct vcd_frame_data *input_vcd_frm = &(ddl->input_frame.vcd_frm); ddl_get_frame(input_vcd_frm, dec_disp_info-> input_frame); input_vcd_frm->interlaced = (dec_disp_info-> input_is_interlace); input_vcd_frm->offset += dec_disp_info->input_bytes_consumed; input_vcd_frm->data_len -= dec_disp_info->input_bytes_consumed; ddl->input_frame.frm_trans_end = frame_transact_end; if (vidc_msg_timing) ddl_calc_core_proc_time(__func__, DEC_IP_TIME); ddl->ddl_context->ddl_callback( VCD_EVT_RESP_INPUT_DONE, VCD_S_SUCCESS, &ddl->input_frame, sizeof(struct ddl_frame_data_tag), (void *)ddl, ddl->ddl_context->client_data); } static u32 ddl_decoder_output_done_callback( struct ddl_client_context *ddl, u32 frame_transact_end) { struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder); struct vidc_720p_dec_disp_info *dec_disp_info = &(decoder->dec_disp_info); struct ddl_frame_data_tag *output_frame = &ddl->output_frame; struct vcd_frame_data *output_vcd_frm = &(output_frame->vcd_frm); u32 vcd_status; u32 free_luma_dpb = 0; output_vcd_frm->physical = (u8 *)dec_disp_info->y_addr; if (decoder->codec.codec == VCD_CODEC_MPEG4 || 
decoder->codec.codec == VCD_CODEC_VC1 || decoder->codec.codec == VCD_CODEC_VC1_RCV || (decoder->codec.codec >= VCD_CODEC_DIVX_3 && decoder->codec.codec <= VCD_CODEC_XVID)){ vidc_720p_decode_skip_frm_details(&free_luma_dpb); if (free_luma_dpb) output_vcd_frm->physical = (u8 *) free_luma_dpb; } vcd_status = ddl_decoder_dpb_transact( decoder, output_frame, DDL_DPB_OP_MARK_BUSY); if (vcd_status != VCD_S_SUCCESS) { VIDC_LOGERR_STRING("CorruptedOutputBufferAddress"); ddl_hw_fatal_cb(ddl->ddl_context); return vcd_status; } output_vcd_frm->ip_frm_tag = dec_disp_info->tag_top; if (dec_disp_info->crop_exists == 0x1) { output_vcd_frm->dec_op_prop.disp_frm.left = dec_disp_info->crop_left_offset; output_vcd_frm->dec_op_prop.disp_frm.top = dec_disp_info->crop_top_offset; output_vcd_frm->dec_op_prop.disp_frm.right = dec_disp_info->img_size_x - dec_disp_info->crop_right_offset; output_vcd_frm->dec_op_prop.disp_frm.bottom = dec_disp_info->img_size_y - dec_disp_info->crop_bottom_offset; } else { output_vcd_frm->dec_op_prop.disp_frm.left = 0; output_vcd_frm->dec_op_prop.disp_frm.top = 0; output_vcd_frm->dec_op_prop.disp_frm.right = dec_disp_info->img_size_x; output_vcd_frm->dec_op_prop.disp_frm.bottom = dec_disp_info->img_size_y; } if (!dec_disp_info->disp_is_interlace) { output_vcd_frm->interlaced = false; output_vcd_frm->intrlcd_ip_frm_tag = VCD_FRAMETAG_INVALID; } else { output_vcd_frm->interlaced = true; output_vcd_frm->intrlcd_ip_frm_tag = dec_disp_info->tag_bottom; } output_vcd_frm->offset = 0; output_vcd_frm->data_len = decoder->y_cb_cr_size; if (free_luma_dpb) { output_vcd_frm->data_len = 0; output_vcd_frm->flags |= VCD_FRAME_FLAG_DECODEONLY; } output_vcd_frm->flags |= VCD_FRAME_FLAG_ENDOFFRAME; ddl_process_decoder_metadata(ddl); output_frame->frm_trans_end = frame_transact_end; if (vidc_msg_timing) ddl_calc_core_proc_time(__func__, DEC_OP_TIME); ddl->ddl_context->ddl_callback( VCD_EVT_RESP_OUTPUT_DONE, vcd_status, output_frame, sizeof(struct ddl_frame_data_tag), (void *)ddl, 
ddl->ddl_context->client_data); return vcd_status; } static u32 ddl_get_frame (struct vcd_frame_data *frame, u32 frametype) { enum vidc_720p_frame vidc_frame = (enum vidc_720p_frame)frametype; u32 status = true; switch (vidc_frame) { case VIDC_720P_IFRAME: { frame->flags |= VCD_FRAME_FLAG_SYNCFRAME; frame->frame = VCD_FRAME_I; break; } case VIDC_720P_PFRAME: { frame->frame = VCD_FRAME_P; break; } case VIDC_720P_BFRAME: { frame->frame = VCD_FRAME_B; break; } case VIDC_720P_NOTCODED: { frame->frame = VCD_FRAME_NOTCODED; frame->data_len = 0; break; } case VIDC_720P_IDRFRAME: { frame->flags |= VCD_FRAME_FLAG_SYNCFRAME; frame->frame = VCD_FRAME_IDR; break; } default: { VIDC_LOG_STRING("CRITICAL-FRAMETYPE"); status = false; break; } } return status; } static void ddl_getmpeg4_declevel(enum vcd_codec_level *codec_level, u32 level) { switch (level) { case VIDC_720P_MPEG4_LEVEL0: { *codec_level = VCD_LEVEL_MPEG4_0; break; } case VIDC_720P_MPEG4_LEVEL0b: { *codec_level = VCD_LEVEL_MPEG4_0b; break; } case VIDC_720P_MPEG4_LEVEL1: { *codec_level = VCD_LEVEL_MPEG4_1; break; } case VIDC_720P_MPEG4_LEVEL2: { *codec_level = VCD_LEVEL_MPEG4_2; break; } case VIDC_720P_MPEG4_LEVEL3: { *codec_level = VCD_LEVEL_MPEG4_3; break; } case VIDC_720P_MPEG4_LEVEL3b: { *codec_level = VCD_LEVEL_MPEG4_3b; break; } case VIDC_720P_MPEG4_LEVEL4a: { *codec_level = VCD_LEVEL_MPEG4_4a; break; } case VIDC_720P_MPEG4_LEVEL5: { *codec_level = VCD_LEVEL_MPEG4_5; break; } case VIDC_720P_MPEG4_LEVEL6: { *codec_level = VCD_LEVEL_MPEG4_6; break; } } } static void ddl_geth264_declevel(enum vcd_codec_level *codec_level, u32 level) { switch (level) { case VIDC_720P_H264_LEVEL1: { *codec_level = VCD_LEVEL_H264_1; break; } case VIDC_720P_H264_LEVEL1b: { *codec_level = VCD_LEVEL_H264_1b; break; } case VIDC_720P_H264_LEVEL1p1: { *codec_level = VCD_LEVEL_H264_1p1; break; } case VIDC_720P_H264_LEVEL1p2: { *codec_level = VCD_LEVEL_H264_1p2; break; } case VIDC_720P_H264_LEVEL1p3: { *codec_level = VCD_LEVEL_H264_1p3; 
break; } case VIDC_720P_H264_LEVEL2: { *codec_level = VCD_LEVEL_H264_2; break; } case VIDC_720P_H264_LEVEL2p1: { *codec_level = VCD_LEVEL_H264_2p1; break; } case VIDC_720P_H264_LEVEL2p2: { *codec_level = VCD_LEVEL_H264_2p2; break; } case VIDC_720P_H264_LEVEL3: { *codec_level = VCD_LEVEL_H264_3; break; } case VIDC_720P_H264_LEVEL3p1: { *codec_level = VCD_LEVEL_H264_3p1; break; } case VIDC_720P_H264_LEVEL3p2: { *codec_level = VCD_LEVEL_H264_3p2; break; } } } static void ddl_get_vc1_dec_level( enum vcd_codec_level *codec_level, u32 level, enum vcd_codec_profile vc1_profile) { if (vc1_profile == VCD_PROFILE_VC1_ADVANCE) { switch (level) { case VIDC_720P_VC1_LEVEL0: { *codec_level = VCD_LEVEL_VC1_A_0; break; } case VIDC_720P_VC1_LEVEL1: { *codec_level = VCD_LEVEL_VC1_A_1; break; } case VIDC_720P_VC1_LEVEL2: { *codec_level = VCD_LEVEL_VC1_A_2; break; } case VIDC_720P_VC1_LEVEL3: { *codec_level = VCD_LEVEL_VC1_A_3; break; } case VIDC_720P_VC1_LEVEL4: { *codec_level = VCD_LEVEL_VC1_A_4; break; } } return; } else if (vc1_profile == VCD_PROFILE_VC1_MAIN) { switch (level) { case VIDC_720P_VC1_LEVEL_LOW: { *codec_level = VCD_LEVEL_VC1_M_LOW; break; } case VIDC_720P_VC1_LEVEL_MED: { *codec_level = VCD_LEVEL_VC1_M_MEDIUM; break; } case VIDC_720P_VC1_LEVEL_HIGH: { *codec_level = VCD_LEVEL_VC1_M_HIGH; break; } } } else if (vc1_profile == VCD_PROFILE_VC1_SIMPLE) { switch (level) { case VIDC_720P_VC1_LEVEL_LOW: { *codec_level = VCD_LEVEL_VC1_S_LOW; break; } case VIDC_720P_VC1_LEVEL_MED: { *codec_level = VCD_LEVEL_VC1_S_MEDIUM; break; } } } } static void ddl_get_mpeg2_dec_level(enum vcd_codec_level *codec_level, u32 level) { switch (level) { case VIDCL_720P_MPEG2_LEVEL_LOW: { *codec_level = VCD_LEVEL_MPEG2_LOW; break; } case VIDCL_720P_MPEG2_LEVEL_MAIN: { *codec_level = VCD_LEVEL_MPEG2_MAIN; break; } case VIDCL_720P_MPEG2_LEVEL_HIGH14: { *codec_level = VCD_LEVEL_MPEG2_HIGH_14; break; } } } static void ddl_getdec_profilelevel(struct ddl_decoder_data *decoder, u32 profile, u32 level) { 
enum vcd_codec_profile codec_profile = VCD_PROFILE_UNKNOWN; enum vcd_codec_level codec_level = VCD_LEVEL_UNKNOWN; switch (decoder->codec.codec) { case VCD_CODEC_MPEG4: { if (profile == VIDC_720P_PROFILE_MPEG4_SP) codec_profile = VCD_PROFILE_MPEG4_SP; else if (profile == VIDC_720P_PROFILE_MPEG4_ASP) codec_profile = VCD_PROFILE_MPEG4_ASP; ddl_getmpeg4_declevel(&codec_level, level); break; } case VCD_CODEC_H264: { if (profile == VIDC_720P_PROFILE_H264_BASELINE) codec_profile = VCD_PROFILE_H264_BASELINE; else if (profile == VIDC_720P_PROFILE_H264_MAIN) codec_profile = VCD_PROFILE_H264_MAIN; else if (profile == VIDC_720P_PROFILE_H264_HIGH) codec_profile = VCD_PROFILE_H264_HIGH; ddl_geth264_declevel(&codec_level, level); break; } default: case VCD_CODEC_H263: { break; } case VCD_CODEC_VC1: case VCD_CODEC_VC1_RCV: { if (profile == VIDC_720P_PROFILE_VC1_SP) codec_profile = VCD_PROFILE_VC1_SIMPLE; else if (profile == VIDC_720P_PROFILE_VC1_MAIN) codec_profile = VCD_PROFILE_VC1_MAIN; else if (profile == VIDC_720P_PROFILE_VC1_ADV) codec_profile = VCD_PROFILE_VC1_ADVANCE; ddl_get_vc1_dec_level(&codec_level, level, profile); break; } case VCD_CODEC_MPEG2: { if (profile == VIDC_720P_PROFILE_MPEG2_MAIN) codec_profile = VCD_PROFILE_MPEG2_MAIN; else if (profile == VIDC_720P_PROFILE_MPEG2_SP) codec_profile = VCD_PROFILE_MPEG2_SIMPLE; ddl_get_mpeg2_dec_level(&codec_level, level); break; } } decoder->profile.profile = codec_profile; decoder->level.level = codec_level; }
gpl-2.0
RolanDroid/lge_MonsterKernel-JB-Stock
drivers/staging/bcm/Bcmchar.c
3874
62996
#include <linux/fs.h> #include "headers.h" /*************************************************************** * Function - bcm_char_open() * * Description - This is the "open" entry point for the character * driver. * * Parameters - inode: Pointer to the Inode structure of char device * filp : File pointer of the char device * * Returns - Zero(Success) ****************************************************************/ static int bcm_char_open(struct inode *inode, struct file * filp) { PMINI_ADAPTER Adapter = NULL; PPER_TARANG_DATA pTarang = NULL; Adapter = GET_BCM_ADAPTER(gblpnetdev); pTarang = kzalloc(sizeof(PER_TARANG_DATA), GFP_KERNEL); if (!pTarang) return -ENOMEM; pTarang->Adapter = Adapter; pTarang->RxCntrlMsgBitMask = 0xFFFFFFFF & ~(1 << 0xB); down(&Adapter->RxAppControlQueuelock); pTarang->next = Adapter->pTarangs; Adapter->pTarangs = pTarang; up(&Adapter->RxAppControlQueuelock); /* Store the Adapter structure */ filp->private_data = pTarang; /* Start Queuing the control response Packets */ atomic_inc(&Adapter->ApplicationRunning); nonseekable_open(inode, filp); return 0; } static int bcm_char_release(struct inode *inode, struct file *filp) { PPER_TARANG_DATA pTarang, tmp, ptmp; PMINI_ADAPTER Adapter = NULL; struct sk_buff *pkt, *npkt; pTarang = (PPER_TARANG_DATA)filp->private_data; if (pTarang == NULL) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "ptarang is null\n"); return 0; } Adapter = pTarang->Adapter; down(&Adapter->RxAppControlQueuelock); tmp = Adapter->pTarangs; for (ptmp = NULL; tmp; ptmp = tmp, tmp = tmp->next) { if (tmp == pTarang) break; } if (tmp) { if (!ptmp) Adapter->pTarangs = tmp->next; else ptmp->next = tmp->next; } else { up(&Adapter->RxAppControlQueuelock); return 0; } pkt = pTarang->RxAppControlHead; while (pkt) { npkt = pkt->next; kfree_skb(pkt); pkt = npkt; } up(&Adapter->RxAppControlQueuelock); /* Stop Queuing the control response Packets */ atomic_dec(&Adapter->ApplicationRunning); kfree(pTarang); /* remove this filp from the 
asynchronously notified filp's */ filp->private_data = NULL; return 0; } static ssize_t bcm_char_read(struct file *filp, char __user *buf, size_t size, loff_t *f_pos) { PPER_TARANG_DATA pTarang = filp->private_data; PMINI_ADAPTER Adapter = pTarang->Adapter; struct sk_buff *Packet = NULL; ssize_t PktLen = 0; int wait_ret_val = 0; unsigned long ret = 0; wait_ret_val = wait_event_interruptible(Adapter->process_read_wait_queue, (pTarang->RxAppControlHead || Adapter->device_removed)); if ((wait_ret_val == -ERESTARTSYS)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Exiting as i've been asked to exit!!!\n"); return wait_ret_val; } if (Adapter->device_removed) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device Removed... Killing the Apps...\n"); return -ENODEV; } if (FALSE == Adapter->fw_download_done) return -EACCES; down(&Adapter->RxAppControlQueuelock); if (pTarang->RxAppControlHead) { Packet = pTarang->RxAppControlHead; DEQUEUEPACKET(pTarang->RxAppControlHead, pTarang->RxAppControlTail); pTarang->AppCtrlQueueLen--; } up(&Adapter->RxAppControlQueuelock); if (Packet) { PktLen = Packet->len; ret = copy_to_user(buf, Packet->data, min_t(size_t, PktLen, size)); if (ret) { dev_kfree_skb(Packet); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Returning from copy to user failure\n"); return -EFAULT; } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read %zd Bytes From Adapter packet = %p by process %d!\n", PktLen, Packet, current->pid); dev_kfree_skb(Packet); } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "<\n"); return PktLen; } static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) { PPER_TARANG_DATA pTarang = filp->private_data; void __user *argp = (void __user *)arg; PMINI_ADAPTER Adapter = pTarang->Adapter; INT Status = STATUS_FAILURE; int timeout = 0; IOCTL_BUFFER IoBuffer; int bytes; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to 
control IOCTL cmd=0x%X arg=0x%lX", cmd, arg); if (_IOC_TYPE(cmd) != BCM_IOCTL) return -EFAULT; if (_IOC_DIR(cmd) & _IOC_READ) Status = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd)); else if (_IOC_DIR(cmd) & _IOC_WRITE) Status = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd)); else if (_IOC_NONE == (_IOC_DIR(cmd) & _IOC_NONE)) Status = STATUS_SUCCESS; if (Status) return -EFAULT; if (Adapter->device_removed) return -EFAULT; if (FALSE == Adapter->fw_download_done) { switch (cmd) { case IOCTL_MAC_ADDR_REQ: case IOCTL_LINK_REQ: case IOCTL_CM_REQUEST: case IOCTL_SS_INFO_REQ: case IOCTL_SEND_CONTROL_MESSAGE: case IOCTL_IDLE_REQ: case IOCTL_BCM_GPIO_SET_REQUEST: case IOCTL_BCM_GPIO_STATUS_REQUEST: return -EACCES; default: break; } } Status = vendorextnIoctl(Adapter, cmd, arg); if (Status != CONTINUE_COMMON_PATH) return Status; switch (cmd) { /* Rdms for Swin Idle... */ case IOCTL_BCM_REGISTER_READ_PRIVATE: { RDM_BUFFER sRdmBuffer = {0}; PCHAR temp_buff; UINT Bufflen; u16 temp_value; /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.InputLength > sizeof(sRdmBuffer)) return -EINVAL; if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) return -EFAULT; if (IoBuffer.OutputLength > USHRT_MAX || IoBuffer.OutputLength == 0) { return -EINVAL; } Bufflen = IoBuffer.OutputLength; temp_value = 4 - (Bufflen % 4); Bufflen += temp_value % 4; temp_buff = kmalloc(Bufflen, GFP_KERNEL); if (!temp_buff) return -ENOMEM; bytes = rdmalt(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, Bufflen); if (bytes > 0) { Status = STATUS_SUCCESS; if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) { kfree(temp_buff); return -EFAULT; } } else { Status = bytes; } kfree(temp_buff); break; } case IOCTL_BCM_REGISTER_WRITE_PRIVATE: { WRM_BUFFER sWrmBuffer = {0}; UINT uiTempVar = 0; /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if 
(IoBuffer.InputLength > sizeof(sWrmBuffer)) return -EINVAL; /* Get WrmBuffer structure */ if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) return -EFAULT; uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK; if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) && ((uiTempVar == EEPROM_REJECT_REG_1) || (uiTempVar == EEPROM_REJECT_REG_2) || (uiTempVar == EEPROM_REJECT_REG_3) || (uiTempVar == EEPROM_REJECT_REG_4))) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n"); return -EFAULT; } Status = wrmalt(Adapter, (UINT)sWrmBuffer.Register, (PUINT)sWrmBuffer.Data, sizeof(ULONG)); if (Status == STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n"); } else { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n"); Status = -EFAULT; } break; } case IOCTL_BCM_REGISTER_READ: case IOCTL_BCM_EEPROM_REGISTER_READ: { RDM_BUFFER sRdmBuffer = {0}; PCHAR temp_buff = NULL; UINT uiTempVar = 0; if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Rdms\n"); return -EACCES; } /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.InputLength > sizeof(sRdmBuffer)) return -EINVAL; if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) return -EFAULT; if (IoBuffer.OutputLength > USHRT_MAX || IoBuffer.OutputLength == 0) { return -EINVAL; } temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL); if (!temp_buff) return STATUS_FAILURE; if ((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) || ((ULONG)sRdmBuffer.Register & 0x3)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Done On invalid Address : %x Access Denied.\n", (int)sRdmBuffer.Register); kfree(temp_buff); return -EINVAL; } uiTempVar = 
sRdmBuffer.Register & EEPROM_REJECT_MASK; bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength); if (bytes > 0) { Status = STATUS_SUCCESS; if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) { kfree(temp_buff); return -EFAULT; } } else { Status = bytes; } kfree(temp_buff); break; } case IOCTL_BCM_REGISTER_WRITE: case IOCTL_BCM_EEPROM_REGISTER_WRITE: { WRM_BUFFER sWrmBuffer = {0}; UINT uiTempVar = 0; if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Wrms\n"); return -EACCES; } /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.InputLength > sizeof(sWrmBuffer)) return -EINVAL; /* Get WrmBuffer structure */ if (copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) return -EFAULT; if ((((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) || ((ULONG)sWrmBuffer.Register & 0x3)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n", (int)sWrmBuffer.Register); return -EINVAL; } uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK; if (!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) && ((uiTempVar == EEPROM_REJECT_REG_1) || (uiTempVar == EEPROM_REJECT_REG_2) || (uiTempVar == EEPROM_REJECT_REG_3) || (uiTempVar == EEPROM_REJECT_REG_4)) && (cmd == IOCTL_BCM_REGISTER_WRITE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n"); return -EFAULT; } Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register, (PUINT)sWrmBuffer.Data, sWrmBuffer.Length); if (Status == STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "WRM Done\n"); } else { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM Failed\n"); Status = -EFAULT; } break; } case 
IOCTL_BCM_GPIO_SET_REQUEST: { UCHAR ucResetValue[4]; UINT value = 0; UINT uiBit = 0; UINT uiOperation = 0; GPIO_INFO gpio_info = {0}; if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode"); return -EACCES; } if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.InputLength > sizeof(gpio_info)) return -EINVAL; if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength)) return -EFAULT; uiBit = gpio_info.uiGpioNumber; uiOperation = gpio_info.uiGpioValue; value = (1<<uiBit); if (IsReqGpioIsLedInNVM(Adapter, value) == FALSE) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to LED !!!", value); Status = -EINVAL; break; } /* Set - setting 1 */ if (uiOperation) { /* Set the gpio output register */ Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, (PUINT)(&value), sizeof(UINT)); if (Status == STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n"); } else { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to set the %dth GPIO\n", uiBit); break; } } else { /* Set the gpio output register */ Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)(&value), sizeof(UINT)); if (Status == STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO bit\n"); } else { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to clear the %dth GPIO\n", uiBit); break; } } bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT)); if (bytes < 0) { Status = bytes; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO_MODE_REGISTER read failed"); break; } else { Status = STATUS_SUCCESS; } /* Set the gpio mode 
register to output */ *(UINT *)ucResetValue |= (1<<uiBit); Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT)); if (Status == STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Set the GPIO to output Mode\n"); } else { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Failed to put GPIO in Output Mode\n"); break; } } break; case BCM_LED_THREAD_STATE_CHANGE_REQ: { USER_THREAD_REQ threadReq = {0}; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "User made LED thread InActive"); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "GPIO Can't be set/clear in Low power Mode"); Status = -EACCES; break; } if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.InputLength > sizeof(threadReq)) return -EINVAL; if (copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength)) return -EFAULT; /* if LED thread is running(Actively or Inactively) set it state to make inactive */ if (Adapter->LEDInfo.led_thread_running) { if (threadReq.ThreadState == LED_THREAD_ACTIVATION_REQ) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Activating thread req"); Adapter->DriverState = LED_THREAD_ACTIVE; } else { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DeActivating Thread req....."); Adapter->DriverState = LED_THREAD_INACTIVE; } /* signal thread. 
*/ wake_up(&Adapter->LEDInfo.notify_led_event); } } break; case IOCTL_BCM_GPIO_STATUS_REQUEST: { ULONG uiBit = 0; UCHAR ucRead[4]; GPIO_INFO gpio_info = {0}; if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) return -EACCES; if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.InputLength > sizeof(gpio_info)) return -EINVAL; if (copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength)) return -EFAULT; uiBit = gpio_info.uiGpioNumber; /* Set the gpio output register */ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucRead, sizeof(UINT)); if (bytes < 0) { Status = bytes; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Failed\n"); return Status; } else { Status = STATUS_SUCCESS; } } break; case IOCTL_BCM_GPIO_MULTI_REQUEST: { UCHAR ucResetValue[4]; GPIO_MULTI_INFO gpio_multi_info[MAX_IDX]; PGPIO_MULTI_INFO pgpio_multi_info = (PGPIO_MULTI_INFO)gpio_multi_info; memset(pgpio_multi_info, 0, MAX_IDX * sizeof(GPIO_MULTI_INFO)); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) return -EINVAL; if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.InputLength > sizeof(gpio_multi_info)) return -EINVAL; if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength)) return -EFAULT; if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_info[WIMAX_IDX].uiGPIOMask) == FALSE) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!", pgpio_multi_info[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap); Status = -EINVAL; break; } /* Set the gpio output register */ if ((pgpio_multi_info[WIMAX_IDX].uiGPIOMask) & (pgpio_multi_info[WIMAX_IDX].uiGPIOCommand)) { /* Set 1's in GPIO OUTPUT REGISTER */ *(UINT *)ucResetValue = pgpio_multi_info[WIMAX_IDX].uiGPIOMask & 
pgpio_multi_info[WIMAX_IDX].uiGPIOCommand & pgpio_multi_info[WIMAX_IDX].uiGPIOValue; if (*(UINT *) ucResetValue) Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_SET_REG, (PUINT)ucResetValue, sizeof(ULONG)); if (Status != STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_SET_REG Failed."); return Status; } /* Clear to 0's in GPIO OUTPUT REGISTER */ *(UINT *)ucResetValue = (pgpio_multi_info[WIMAX_IDX].uiGPIOMask & pgpio_multi_info[WIMAX_IDX].uiGPIOCommand & (~(pgpio_multi_info[WIMAX_IDX].uiGPIOValue))); if (*(UINT *) ucResetValue) Status = wrmaltWithLock(Adapter, BCM_GPIO_OUTPUT_CLR_REG, (PUINT)ucResetValue, sizeof(ULONG)); if (Status != STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to BCM_GPIO_OUTPUT_CLR_REG Failed."); return Status; } } if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) { bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT)); if (bytes < 0) { Status = bytes; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM to GPIO_PIN_STATE_REGISTER Failed."); return Status; } else { Status = STATUS_SUCCESS; } pgpio_multi_info[WIMAX_IDX].uiGPIOValue = (*(UINT *)ucResetValue & pgpio_multi_info[WIMAX_IDX].uiGPIOMask); } Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_info, IoBuffer.OutputLength); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Failed while copying Content to IOBufer for user space err:%d", Status); return -EFAULT; } } break; case IOCTL_BCM_GPIO_MODE_REQUEST: { UCHAR ucResetValue[4]; GPIO_MULTI_MODE gpio_multi_mode[MAX_IDX]; PGPIO_MULTI_MODE pgpio_multi_mode = (PGPIO_MULTI_MODE)gpio_multi_mode; if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) return -EINVAL; if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.InputLength > sizeof(gpio_multi_mode)) return -EINVAL; if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, 
IoBuffer.InputLength)) return -EFAULT; bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT)); if (bytes < 0) { Status = bytes; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read of GPIO_MODE_REGISTER failed"); return Status; } else { Status = STATUS_SUCCESS; } /* Validating the request */ if (IsReqGpioIsLedInNVM(Adapter, pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) == FALSE) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!", pgpio_multi_mode[WIMAX_IDX].uiGPIOMask, Adapter->gpioBitMap); Status = -EINVAL; break; } if (pgpio_multi_mode[WIMAX_IDX].uiGPIOMask) { /* write all OUT's (1's) */ *(UINT *) ucResetValue |= (pgpio_multi_mode[WIMAX_IDX].uiGPIOMode & pgpio_multi_mode[WIMAX_IDX].uiGPIOMask); /* write all IN's (0's) */ *(UINT *) ucResetValue &= ~((~pgpio_multi_mode[WIMAX_IDX].uiGPIOMode) & pgpio_multi_mode[WIMAX_IDX].uiGPIOMask); /* Currently implemented return the modes of all GPIO's * else needs to bit AND with mask */ pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue; Status = wrmaltWithLock(Adapter, GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(ULONG)); if (Status == STATUS_SUCCESS) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "WRM to GPIO_MODE_REGISTER Done"); } else { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to GPIO_MODE_REGISTER Failed"); Status = -EFAULT; break; } } else { /* if uiGPIOMask is 0 then return mode register configuration */ pgpio_multi_mode[WIMAX_IDX].uiGPIOMode = *(UINT *)ucResetValue; } Status = copy_to_user(IoBuffer.OutputBuffer, &gpio_multi_mode, IoBuffer.OutputLength); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Failed while copying Content to IOBufer for user space err:%d", Status); return -EFAULT; } } break; case IOCTL_MAC_ADDR_REQ: case IOCTL_LINK_REQ: case IOCTL_CM_REQUEST: case IOCTL_SS_INFO_REQ: case IOCTL_SEND_CONTROL_MESSAGE: case 
IOCTL_IDLE_REQ: { PVOID pvBuffer = NULL; /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.InputLength < sizeof(struct link_request)) return -EINVAL; if (IoBuffer.InputLength > MAX_CNTL_PKT_SIZE) return -EINVAL; pvBuffer = memdup_user(IoBuffer.InputBuffer, IoBuffer.InputLength); if (IS_ERR(pvBuffer)) return PTR_ERR(pvBuffer); down(&Adapter->LowPowerModeSync); Status = wait_event_interruptible_timeout(Adapter->lowpower_mode_wait_queue, !Adapter->bPreparingForLowPowerMode, (1 * HZ)); if (Status == -ERESTARTSYS) goto cntrlEnd; if (Adapter->bPreparingForLowPowerMode) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Preparing Idle Mode is still True - Hence Rejecting control message\n"); Status = STATUS_FAILURE; goto cntrlEnd; } Status = CopyBufferToControlPacket(Adapter, (PVOID)pvBuffer); cntrlEnd: up(&Adapter->LowPowerModeSync); kfree(pvBuffer); break; } case IOCTL_BCM_BUFFER_DOWNLOAD_START: { if (down_trylock(&Adapter->NVMRdmWrmLock)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n"); return -EACCES; } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid); if (down_trylock(&Adapter->fw_download_sema)) return -EBUSY; Adapter->bBinDownloaded = FALSE; Adapter->fw_download_process_pid = current->pid; Adapter->bCfgDownloaded = FALSE; Adapter->fw_download_done = FALSE; netif_carrier_off(Adapter->dev); netif_stop_queue(Adapter->dev); Status = reset_card_proc(Adapter); if (Status) { pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name); up(&Adapter->fw_download_sema); up(&Adapter->NVMRdmWrmLock); return Status; } mdelay(10); up(&Adapter->NVMRdmWrmLock); return Status; } case IOCTL_BCM_BUFFER_DOWNLOAD: { FIRMWARE_INFO *psFwInfo = NULL; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID 
=0x%x!!!!\n", current->pid); if (!down_trylock(&Adapter->fw_download_sema)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Invalid way to download buffer. Use Start and then call this!!!\n"); up(&Adapter->fw_download_sema); Status = -EINVAL; return Status; } /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) { up(&Adapter->fw_download_sema); return -EFAULT; } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Length for FW DLD is : %lx\n", IoBuffer.InputLength); if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO)) { up(&Adapter->fw_download_sema); return -EINVAL; } psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL); if (!psFwInfo) { up(&Adapter->fw_download_sema); return -ENOMEM; } if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength)) { up(&Adapter->fw_download_sema); return -EFAULT; } if (!psFwInfo->pvMappedFirmwareAddress || (psFwInfo->u32FirmwareLength == 0)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n", psFwInfo->u32FirmwareLength); up(&Adapter->fw_download_sema); Status = -EINVAL; return Status; } Status = bcm_ioctl_fw_download(Adapter, psFwInfo); if (Status != STATUS_SUCCESS) { if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR) BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n"); else BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n"); /* up(&Adapter->fw_download_sema); */ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { Adapter->DriverState = DRIVER_INIT; Adapter->LEDInfo.bLedInitDone = FALSE; wake_up(&Adapter->LEDInfo.notify_led_event); } } if (Status != STATUS_SUCCESS) up(&Adapter->fw_download_sema); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n"); kfree(psFwInfo); return Status; } case IOCTL_BCM_BUFFER_DOWNLOAD_STOP: { if (!down_trylock(&Adapter->fw_download_sema)) { up(&Adapter->fw_download_sema); 
return -EINVAL; } if (down_trylock(&Adapter->NVMRdmWrmLock)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "FW download blocked as EEPROM Read/Write is in progress\n"); up(&Adapter->fw_download_sema); return -EACCES; } Adapter->bBinDownloaded = TRUE; Adapter->bCfgDownloaded = TRUE; atomic_set(&Adapter->CurrNumFreeTxDesc, 0); Adapter->CurrNumRecvDescs = 0; Adapter->downloadDDR = 0; /* setting the Mips to Run */ Status = run_card_proc(Adapter); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n"); up(&Adapter->fw_download_sema); up(&Adapter->NVMRdmWrmLock); return Status; } else { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Firm Download Over...\n"); } mdelay(10); /* Wait for MailBox Interrupt */ if (StartInterruptUrb((PS_INTERFACE_ADAPTER)Adapter->pvInterfaceAdapter)) BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n"); timeout = 5*HZ; Adapter->waiting_to_fw_download_done = FALSE; wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue, Adapter->waiting_to_fw_download_done, timeout); Adapter->fw_download_process_pid = INVALID_PID; Adapter->fw_download_done = TRUE; atomic_set(&Adapter->CurrNumFreeTxDesc, 0); Adapter->CurrNumRecvDescs = 0; Adapter->PrevNumRecvDescs = 0; atomic_set(&Adapter->cntrlpktCnt, 0); Adapter->LinkUpStatus = 0; Adapter->LinkStatus = 0; if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) { Adapter->DriverState = FW_DOWNLOAD_DONE; wake_up(&Adapter->LEDInfo.notify_led_event); } if (!timeout) Status = -ENODEV; up(&Adapter->fw_download_sema); up(&Adapter->NVMRdmWrmLock); return Status; } case IOCTL_BE_BUCKET_SIZE: Status = 0; if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg)) Status = -EFAULT; break; case IOCTL_RTPS_BUCKET_SIZE: Status = 0; if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg)) Status = -EFAULT; break; case IOCTL_CHIP_RESET: { INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock); if (NVMAccess) { 
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, " IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n"); return -EACCES; } down(&Adapter->RxAppControlQueuelock); Status = reset_card_proc(Adapter); flushAllAppQ(); up(&Adapter->RxAppControlQueuelock); up(&Adapter->NVMRdmWrmLock); ResetCounters(Adapter); break; } case IOCTL_QOS_THRESHOLD: { USHORT uiLoopIndex; Status = 0; for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) { if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold, (unsigned long __user *)arg)) { Status = -EFAULT; break; } } break; } case IOCTL_DUMP_PACKET_INFO: DumpPackInfo(Adapter); DumpPhsRules(&Adapter->stBCMPhsContext); Status = STATUS_SUCCESS; break; case IOCTL_GET_PACK_INFO: if (copy_to_user(argp, &Adapter->PackInfo, sizeof(PacketInfo)*NO_OF_QUEUES)) return -EFAULT; Status = STATUS_SUCCESS; break; case IOCTL_BCM_SWITCH_TRANSFER_MODE: { UINT uiData = 0; if (copy_from_user(&uiData, argp, sizeof(UINT))) return -EFAULT; if (uiData) { /* Allow All Packets */ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n"); Adapter->TransferMode = ETH_PACKET_TUNNELING_MODE; } else { /* Allow IP only Packets */ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: IP_PACKET_ONLY_MODE\n"); Adapter->TransferMode = IP_PACKET_ONLY_MODE; } Status = STATUS_SUCCESS; break; } case IOCTL_BCM_GET_DRIVER_VERSION: { ulong len; /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; len = min_t(ulong, IoBuffer.OutputLength, strlen(VER_FILEVERSION_STR) + 1); if (copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, len)) return -EFAULT; Status = STATUS_SUCCESS; break; } case IOCTL_BCM_GET_CURRENT_STATUS: { LINK_STATE link_state; /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, 
"copy_from_user failed..\n"); return -EFAULT; } if (IoBuffer.OutputLength != sizeof(link_state)) { Status = -EINVAL; break; } memset(&link_state, 0, sizeof(link_state)); link_state.bIdleMode = Adapter->IdleMode; link_state.bShutdownMode = Adapter->bShutStatus; link_state.ucLinkStatus = Adapter->LinkStatus; if (copy_to_user(IoBuffer.OutputBuffer, &link_state, min_t(size_t, sizeof(link_state), IoBuffer.OutputLength))) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n"); return -EFAULT; } Status = STATUS_SUCCESS; break; } case IOCTL_BCM_SET_MAC_TRACING: { UINT tracing_flag; /* copy ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (copy_from_user(&tracing_flag, IoBuffer.InputBuffer, sizeof(UINT))) return -EFAULT; if (tracing_flag) Adapter->pTarangs->MacTracingEnabled = TRUE; else Adapter->pTarangs->MacTracingEnabled = FALSE; break; } case IOCTL_BCM_GET_DSX_INDICATION: { ULONG ulSFId = 0; if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.OutputLength < sizeof(stLocalSFAddIndicationAlt)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Mismatch req: %lx needed is =0x%zx!!!", IoBuffer.OutputLength, sizeof(stLocalSFAddIndicationAlt)); return -EINVAL; } if (copy_from_user(&ulSFId, IoBuffer.InputBuffer, sizeof(ulSFId))) return -EFAULT; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Get DSX Data SF ID is =%lx\n", ulSFId); get_dsx_sf_data_to_application(Adapter, ulSFId, IoBuffer.OutputBuffer); Status = STATUS_SUCCESS; } break; case IOCTL_BCM_GET_HOST_MIBS: { PVOID temp_buff; if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.OutputLength != sizeof(S_MIBS_HOST_STATS_MIBS)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Length Check failed %lu %zd\n", IoBuffer.OutputLength, sizeof(S_MIBS_HOST_STATS_MIBS)); return -EINVAL; } /* FIXME: HOST_STATS are too big for kmalloc (122048)! 
*/ temp_buff = kzalloc(sizeof(S_MIBS_HOST_STATS_MIBS), GFP_KERNEL); if (!temp_buff) return STATUS_FAILURE; Status = ProcessGetHostMibs(Adapter, temp_buff); GetDroppedAppCntrlPktMibs(temp_buff, pTarang); if (Status != STATUS_FAILURE) if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS))) { kfree(temp_buff); return -EFAULT; } kfree(temp_buff); break; } case IOCTL_BCM_WAKE_UP_DEVICE_FROM_IDLE: if ((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode) && (TRUE == Adapter->IdleMode)) { Adapter->usIdleModePattern = ABORT_IDLE_MODE; Adapter->bWakeUpDevice = TRUE; wake_up(&Adapter->process_rx_cntrlpkt); } Status = STATUS_SUCCESS; break; case IOCTL_BCM_BULK_WRM: { PBULKWRM_BUFFER pBulkBuffer; UINT uiTempVar = 0; PCHAR pvBuffer = NULL; if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Device in Idle/Shutdown Mode, Blocking Wrms\n"); Status = -EACCES; break; } /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.InputLength < sizeof(ULONG) * 2) return -EINVAL; pvBuffer = memdup_user(IoBuffer.InputBuffer, IoBuffer.InputLength); if (IS_ERR(pvBuffer)) return PTR_ERR(pvBuffer); pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer; if (((ULONG)pBulkBuffer->Register & 0x0F000000) != 0x0F000000 || ((ULONG)pBulkBuffer->Register & 0x3)) { kfree(pvBuffer); BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n", (int)pBulkBuffer->Register); Status = -EINVAL; break; } uiTempVar = pBulkBuffer->Register & EEPROM_REJECT_MASK; if (!((Adapter->pstargetparams->m_u32Customize)&VSG_MODE) && ((uiTempVar == EEPROM_REJECT_REG_1) || (uiTempVar == EEPROM_REJECT_REG_2) || (uiTempVar == EEPROM_REJECT_REG_3) || (uiTempVar == EEPROM_REJECT_REG_4)) && (cmd == IOCTL_BCM_REGISTER_WRITE)) { kfree(pvBuffer); BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "EEPROM 
Access Denied, not in VSG Mode\n"); Status = -EFAULT; break; } if (pBulkBuffer->SwapEndian == FALSE) Status = wrmWithLock(Adapter, (UINT)pBulkBuffer->Register, (PCHAR)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG)); else Status = wrmaltWithLock(Adapter, (UINT)pBulkBuffer->Register, (PUINT)pBulkBuffer->Values, IoBuffer.InputLength - 2*sizeof(ULONG)); if (Status != STATUS_SUCCESS) BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n"); kfree(pvBuffer); break; } case IOCTL_BCM_GET_NVM_SIZE: if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH) { if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiNVMDSDSize, sizeof(UINT))) return -EFAULT; } Status = STATUS_SUCCESS; break; case IOCTL_BCM_CAL_INIT: { UINT uiSectorSize = 0 ; if (Adapter->eNVMType == NVM_FLASH) { if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (copy_from_user(&uiSectorSize, IoBuffer.InputBuffer, sizeof(UINT))) return -EFAULT; if ((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) { if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize, sizeof(UINT))) return -EFAULT; } else { if (IsFlash2x(Adapter)) { if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize, sizeof(UINT))) return -EFAULT; } else { if ((TRUE == Adapter->bShutStatus) || (TRUE == Adapter->IdleMode)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device is in Idle/Shutdown Mode\n"); return -EACCES; } Adapter->uiSectorSize = uiSectorSize; BcmUpdateSectorSize(Adapter, Adapter->uiSectorSize); } } Status = STATUS_SUCCESS; } else { Status = STATUS_FAILURE; } } break; case IOCTL_BCM_SET_DEBUG: #ifdef DEBUG { USER_BCM_DBG_STATE sUserDebugState; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In SET_DEBUG ioctl\n"); if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer, 
sizeof(USER_BCM_DBG_STATE))) return -EFAULT; BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ", sUserDebugState.OnOff, sUserDebugState.Type); /* sUserDebugState.Subtype <<= 1; */ sUserDebugState.Subtype = 1 << sUserDebugState.Subtype; BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "actual Subtype=0x%x\n", sUserDebugState.Subtype); /* Update new 'DebugState' in the Adapter */ Adapter->stDebugState.type |= sUserDebugState.Type; /* Subtype: A bitmap of 32 bits for Subtype per Type. * Valid indexes in 'subtype' array: 1,2,4,8 * corresponding to valid Type values. Hence we can use the 'Type' field * as the index value, ignoring the array entries 0,3,5,6,7 ! */ if (sUserDebugState.OnOff) Adapter->stDebugState.subtype[sUserDebugState.Type] |= sUserDebugState.Subtype; else Adapter->stDebugState.subtype[sUserDebugState.Type] &= ~sUserDebugState.Subtype; BCM_SHOW_DEBUG_BITMAP(Adapter); } #endif break; case IOCTL_BCM_NVM_READ: case IOCTL_BCM_NVM_WRITE: { NVM_READWRITE stNVMReadWrite; PUCHAR pReadData = NULL; ULONG ulDSDMagicNumInUsrBuff = 0; struct timeval tv0, tv1; memset(&tv0, 0, sizeof(struct timeval)); memset(&tv1, 0, sizeof(struct timeval)); if ((Adapter->eNVMType == NVM_FLASH) && (Adapter->uiFlashLayoutMajorVersion == 0)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n"); return -EFAULT; } if (IsFlash2x(Adapter)) { if ((Adapter->eActiveDSD != DSD0) && (Adapter->eActiveDSD != DSD1) && (Adapter->eActiveDSD != DSD2)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No DSD is active..hence NVM Command is blocked"); return STATUS_FAILURE; } } /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (copy_from_user(&stNVMReadWrite, (IOCTL_BCM_NVM_READ == cmd) ? 
IoBuffer.OutputBuffer : IoBuffer.InputBuffer, sizeof(NVM_READWRITE))) return -EFAULT; /* * Deny the access if the offset crosses the cal area limit. */ if (stNVMReadWrite.uiNumBytes > Adapter->uiNVMDSDSize) return STATUS_FAILURE; if (stNVMReadWrite.uiOffset > Adapter->uiNVMDSDSize - stNVMReadWrite.uiNumBytes) { /* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */ return STATUS_FAILURE; } pReadData = memdup_user(stNVMReadWrite.pBuffer, stNVMReadWrite.uiNumBytes); if (IS_ERR(pReadData)) return PTR_ERR(pReadData); do_gettimeofday(&tv0); if (IOCTL_BCM_NVM_READ == cmd) { down(&Adapter->NVMRdmWrmLock); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n"); up(&Adapter->NVMRdmWrmLock); kfree(pReadData); return -EACCES; } Status = BeceemNVMRead(Adapter, (PUINT)pReadData, stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); up(&Adapter->NVMRdmWrmLock); if (Status != STATUS_SUCCESS) { kfree(pReadData); return Status; } if (copy_to_user(stNVMReadWrite.pBuffer, pReadData, stNVMReadWrite.uiNumBytes)) { kfree(pReadData); return -EFAULT; } } else { down(&Adapter->NVMRdmWrmLock); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n"); up(&Adapter->NVMRdmWrmLock); kfree(pReadData); return -EACCES; } Adapter->bHeaderChangeAllowed = TRUE; if (IsFlash2x(Adapter)) { /* * New Requirement:- * DSD section updation will be allowed in two case:- * 1. if DSD sig is present in DSD header means dongle is ok and updation is fruitfull * 2. if point 1 failes then user buff should have DSD sig. 
this point ensures that if dongle is * corrupted then user space program first modify the DSD header with valid DSD sig so * that this as well as further write may be worthwhile. * * This restriction has been put assuming that if DSD sig is corrupted, DSD * data won't be considered valid. */ Status = BcmFlash2xCorruptSig(Adapter, Adapter->eActiveDSD); if (Status != STATUS_SUCCESS) { if (((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) != Adapter->uiNVMDSDSize) || (stNVMReadWrite.uiNumBytes < SIGNATURE_SIZE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DSD Sig is present neither in Flash nor User provided Input.."); up(&Adapter->NVMRdmWrmLock); kfree(pReadData); return Status; } ulDSDMagicNumInUsrBuff = ntohl(*(PUINT)(pReadData + stNVMReadWrite.uiNumBytes - SIGNATURE_SIZE)); if (ulDSDMagicNumInUsrBuff != DSD_IMAGE_MAGIC_NUMBER) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "DSD Sig is present neither in Flash nor User provided Input.."); up(&Adapter->NVMRdmWrmLock); kfree(pReadData); return Status; } } } Status = BeceemNVMWrite(Adapter, (PUINT)pReadData, stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes, stNVMReadWrite.bVerify); if (IsFlash2x(Adapter)) BcmFlash2xWriteSig(Adapter, Adapter->eActiveDSD); Adapter->bHeaderChangeAllowed = FALSE; up(&Adapter->NVMRdmWrmLock); if (Status != STATUS_SUCCESS) { kfree(pReadData); return Status; } } do_gettimeofday(&tv1); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n", (tv1.tv_sec - tv0.tv_sec)*1000 + (tv1.tv_usec - tv0.tv_usec)/1000); kfree(pReadData); return STATUS_SUCCESS; } case IOCTL_BCM_FLASH2X_SECTION_READ: { FLASH2X_READWRITE sFlash2xRead = {0}; PUCHAR pReadBuff = NULL ; UINT NOB = 0; UINT BuffSize = 0; UINT ReadBytes = 0; UINT ReadOffset = 0; void __user *OutPutBuff; if (IsFlash2x(Adapter) != TRUE) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map"); return -EINVAL; } 
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called"); if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; /* Reading FLASH 2.x READ structure */ if (copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer, sizeof(FLASH2X_READWRITE))) return -EFAULT; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.Section :%x", sFlash2xRead.Section); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.offset :%x", sFlash2xRead.offset); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.numOfBytes :%x", sFlash2xRead.numOfBytes); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.bVerify :%x\n", sFlash2xRead.bVerify); /* This was internal to driver for raw read. now it has ben exposed to user space app. */ if (validateFlash2xReadWrite(Adapter, &sFlash2xRead) == FALSE) return STATUS_FAILURE; NOB = sFlash2xRead.numOfBytes; if (NOB > Adapter->uiSectorSize) BuffSize = Adapter->uiSectorSize; else BuffSize = NOB; ReadOffset = sFlash2xRead.offset ; OutPutBuff = IoBuffer.OutputBuffer; pReadBuff = (PCHAR)kzalloc(BuffSize , GFP_KERNEL); if (pReadBuff == NULL) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for Flash 2.x Read Structure"); return -ENOMEM; } down(&Adapter->NVMRdmWrmLock); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n"); up(&Adapter->NVMRdmWrmLock); kfree(pReadBuff); return -EACCES; } while (NOB) { if (NOB > Adapter->uiSectorSize) ReadBytes = Adapter->uiSectorSize; else ReadBytes = NOB; /* Reading the data from Flash 2.x */ Status = BcmFlash2xBulkRead(Adapter, (PUINT)pReadBuff, sFlash2xRead.Section, ReadOffset, ReadBytes); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, 
DBG_LVL_ALL, "Flash 2x read err with Status :%d", Status); break; } BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pReadBuff, ReadBytes); Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy to use failed with status :%d", Status); up(&Adapter->NVMRdmWrmLock); kfree(pReadBuff); return -EFAULT; } NOB = NOB - ReadBytes; if (NOB) { ReadOffset = ReadOffset + ReadBytes; OutPutBuff = OutPutBuff + ReadBytes ; } } up(&Adapter->NVMRdmWrmLock); kfree(pReadBuff); } break; case IOCTL_BCM_FLASH2X_SECTION_WRITE: { FLASH2X_READWRITE sFlash2xWrite = {0}; PUCHAR pWriteBuff; void __user *InputAddr; UINT NOB = 0; UINT BuffSize = 0; UINT WriteOffset = 0; UINT WriteBytes = 0; if (IsFlash2x(Adapter) != TRUE) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map"); return -EINVAL; } /* First make this False so that we can enable the Sector Permission Check in BeceemFlashBulkWrite */ Adapter->bAllDSDWriteAllow = FALSE; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_WRITE Called"); if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; /* Reading FLASH 2.x READ structure */ if (copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer, sizeof(FLASH2X_READWRITE))) return -EFAULT; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.Section :%x", sFlash2xWrite.Section); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.offset :%d", sFlash2xWrite.offset); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.numOfBytes :%x", sFlash2xWrite.numOfBytes); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\nsFlash2xRead.bVerify :%x\n", sFlash2xWrite.bVerify); if ((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1) && (sFlash2xWrite.Section != VSA2)) { BCM_DEBUG_PRINT(Adapter, 
DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Only VSA write is allowed"); return -EINVAL; } if (validateFlash2xReadWrite(Adapter, &sFlash2xWrite) == FALSE) return STATUS_FAILURE; InputAddr = sFlash2xWrite.pDataBuff; WriteOffset = sFlash2xWrite.offset; NOB = sFlash2xWrite.numOfBytes; if (NOB > Adapter->uiSectorSize) BuffSize = Adapter->uiSectorSize; else BuffSize = NOB ; pWriteBuff = kmalloc(BuffSize, GFP_KERNEL); if (pWriteBuff == NULL) return -ENOMEM; /* extracting the remainder of the given offset. */ WriteBytes = Adapter->uiSectorSize; if (WriteOffset % Adapter->uiSectorSize) WriteBytes = Adapter->uiSectorSize - (WriteOffset % Adapter->uiSectorSize); if (NOB < WriteBytes) WriteBytes = NOB; down(&Adapter->NVMRdmWrmLock); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n"); up(&Adapter->NVMRdmWrmLock); kfree(pWriteBuff); return -EACCES; } BcmFlash2xCorruptSig(Adapter, sFlash2xWrite.Section); do { Status = copy_from_user(pWriteBuff, InputAddr, WriteBytes); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to user failed with status :%d", Status); up(&Adapter->NVMRdmWrmLock); kfree(pWriteBuff); return -EFAULT; } BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pWriteBuff, WriteBytes); /* Writing the data from Flash 2.x */ Status = BcmFlash2xBulkWrite(Adapter, (PUINT)pWriteBuff, sFlash2xWrite.Section, WriteOffset, WriteBytes, sFlash2xWrite.bVerify); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash 2x read err with Status :%d", Status); break; } NOB = NOB - WriteBytes; if (NOB) { WriteOffset = WriteOffset + WriteBytes; InputAddr = InputAddr + WriteBytes; if (NOB > Adapter->uiSectorSize) WriteBytes = Adapter->uiSectorSize; else WriteBytes = NOB; } } while (NOB > 0); BcmFlash2xWriteSig(Adapter, sFlash2xWrite.Section); up(&Adapter->NVMRdmWrmLock); 
kfree(pWriteBuff); } break; case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP: { PFLASH2X_BITMAP psFlash2xBitMap; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called"); if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.OutputLength != sizeof(FLASH2X_BITMAP)) return -EINVAL; psFlash2xBitMap = kzalloc(sizeof(FLASH2X_BITMAP), GFP_KERNEL); if (psFlash2xBitMap == NULL) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory is not available"); return -ENOMEM; } /* Reading the Flash Sectio Bit map */ down(&Adapter->NVMRdmWrmLock); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n"); up(&Adapter->NVMRdmWrmLock); kfree(psFlash2xBitMap); return -EACCES; } BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap); up(&Adapter->NVMRdmWrmLock); if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP))) { kfree(psFlash2xBitMap); return -EFAULT; } kfree(psFlash2xBitMap); } break; case IOCTL_BCM_SET_ACTIVE_SECTION: { FLASH2X_SECTION_VAL eFlash2xSectionVal = 0; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SET_ACTIVE_SECTION Called"); if (IsFlash2x(Adapter) != TRUE) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map"); return -EINVAL; } Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed"); return -EFAULT; } Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT)); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed"); return -EFAULT; } down(&Adapter->NVMRdmWrmLock); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { 
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n"); up(&Adapter->NVMRdmWrmLock); return -EACCES; } Status = BcmSetActiveSection(Adapter, eFlash2xSectionVal); if (Status) BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Failed to make it's priority Highest. Status %d", Status); up(&Adapter->NVMRdmWrmLock); } break; case IOCTL_BCM_IDENTIFY_ACTIVE_SECTION: { /* Right Now we are taking care of only DSD */ Adapter->bAllDSDWriteAllow = FALSE; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called"); Status = STATUS_SUCCESS; } break; case IOCTL_BCM_COPY_SECTION: { FLASH2X_COPY_SECTION sCopySectStrut = {0}; Status = STATUS_SUCCESS; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_COPY_SECTION Called"); Adapter->bAllDSDWriteAllow = FALSE; if (IsFlash2x(Adapter) != TRUE) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map"); return -EINVAL; } Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status); return -EFAULT; } Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(FLASH2X_COPY_SECTION)); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status); return -EFAULT; } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source SEction :%x", sCopySectStrut.SrcSection); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Destination SEction :%x", sCopySectStrut.DstSection); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "offset :%x", sCopySectStrut.offset); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "NOB :%x", sCopySectStrut.numOfBytes); if (IsSectionExistInFlash(Adapter, sCopySectStrut.SrcSection) == FALSE) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, 
"Source Section<%x> does not exixt in Flash ", sCopySectStrut.SrcSection); return -EINVAL; } if (IsSectionExistInFlash(Adapter, sCopySectStrut.DstSection) == FALSE) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Destinatio Section<%x> does not exixt in Flash ", sCopySectStrut.DstSection); return -EINVAL; } if (sCopySectStrut.SrcSection == sCopySectStrut.DstSection) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source and Destination section should be different"); return -EINVAL; } down(&Adapter->NVMRdmWrmLock); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n"); up(&Adapter->NVMRdmWrmLock); return -EACCES; } if (sCopySectStrut.SrcSection == ISO_IMAGE1 || sCopySectStrut.SrcSection == ISO_IMAGE2) { if (IsNonCDLessDevice(Adapter)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Device is Non-CDLess hence won't have ISO !!"); Status = -EINVAL; } else if (sCopySectStrut.numOfBytes == 0) { Status = BcmCopyISO(Adapter, sCopySectStrut); } else { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Partial Copy of ISO section is not Allowed.."); Status = STATUS_FAILURE; } up(&Adapter->NVMRdmWrmLock); return Status; } Status = BcmCopySection(Adapter, sCopySectStrut.SrcSection, sCopySectStrut.DstSection, sCopySectStrut.offset, sCopySectStrut.numOfBytes); up(&Adapter->NVMRdmWrmLock); } break; case IOCTL_BCM_GET_FLASH_CS_INFO: { Status = STATUS_SUCCESS; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_GET_FLASH_CS_INFO Called"); Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed"); return -EFAULT; } if (Adapter->eNVMType != NVM_FLASH) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Connected device does not have flash"); Status = -EINVAL; break; } if 
(IsFlash2x(Adapter) == TRUE) { if (IoBuffer.OutputLength < sizeof(FLASH2X_CS_INFO)) return -EINVAL; if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(FLASH2X_CS_INFO))) return -EFAULT; } else { if (IoBuffer.OutputLength < sizeof(FLASH_CS_INFO)) return -EINVAL; if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(FLASH_CS_INFO))) return -EFAULT; } } break; case IOCTL_BCM_SELECT_DSD: { UINT SectOfset = 0; FLASH2X_SECTION_VAL eFlash2xSectionVal; eFlash2xSectionVal = NO_SECTION_VAL; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SELECT_DSD Called"); if (IsFlash2x(Adapter) != TRUE) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash Does not have 2.x map"); return -EINVAL; } Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed"); return -EFAULT; } Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT)); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed"); return -EFAULT; } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read Section :%d", eFlash2xSectionVal); if ((eFlash2xSectionVal != DSD0) && (eFlash2xSectionVal != DSD1) && (eFlash2xSectionVal != DSD2)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Passed section<%x> is not DSD section", eFlash2xSectionVal); return STATUS_FAILURE; } SectOfset = BcmGetSectionValStartOffset(Adapter, eFlash2xSectionVal); if (SectOfset == INVALID_OFFSET) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Provided Section val <%d> does not exixt in Flash 2.x", eFlash2xSectionVal); return -EINVAL; } Adapter->bAllDSDWriteAllow = TRUE; Adapter->ulFlashCalStart = SectOfset; Adapter->eActiveDSD = eFlash2xSectionVal; } Status = STATUS_SUCCESS; break; case IOCTL_BCM_NVM_RAW_READ: { NVM_READWRITE stNVMRead; INT NOB ; INT BuffSize ; INT ReadOffset = 0; UINT ReadBytes = 0 ; PUCHAR 
pReadBuff; void __user *OutPutBuff; if (Adapter->eNVMType != NVM_FLASH) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "NVM TYPE is not Flash"); return -EINVAL; } /* Copy Ioctl Buffer structure */ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n"); return -EFAULT; } if (copy_from_user(&stNVMRead, IoBuffer.OutputBuffer, sizeof(NVM_READWRITE))) return -EFAULT; NOB = stNVMRead.uiNumBytes; /* In Raw-Read max Buff size : 64MB */ if (NOB > DEFAULT_BUFF_SIZE) BuffSize = DEFAULT_BUFF_SIZE; else BuffSize = NOB; ReadOffset = stNVMRead.uiOffset; OutPutBuff = stNVMRead.pBuffer; pReadBuff = kzalloc(BuffSize , GFP_KERNEL); if (pReadBuff == NULL) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Memory allocation failed for Flash 2.x Read Structure"); Status = -ENOMEM; break; } down(&Adapter->NVMRdmWrmLock); if ((Adapter->IdleMode == TRUE) || (Adapter->bShutStatus == TRUE) || (Adapter->bPreparingForLowPowerMode == TRUE)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Device is in Idle/Shutdown Mode\n"); kfree(pReadBuff); up(&Adapter->NVMRdmWrmLock); return -EACCES; } Adapter->bFlashRawRead = TRUE; while (NOB) { if (NOB > DEFAULT_BUFF_SIZE) ReadBytes = DEFAULT_BUFF_SIZE; else ReadBytes = NOB; /* Reading the data from Flash 2.x */ Status = BeceemNVMRead(Adapter, (PUINT)pReadBuff, ReadOffset, ReadBytes); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Flash 2x read err with Status :%d", Status); break; } BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pReadBuff, ReadBytes); Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to use failed with status :%d", Status); up(&Adapter->NVMRdmWrmLock); kfree(pReadBuff); return -EFAULT; } NOB = NOB - ReadBytes; if (NOB) { ReadOffset = ReadOffset + ReadBytes; OutPutBuff = OutPutBuff + ReadBytes; } } Adapter->bFlashRawRead = 
FALSE; up(&Adapter->NVMRdmWrmLock); kfree(pReadBuff); break; } case IOCTL_BCM_CNTRLMSG_MASK: { ULONG RxCntrlMsgBitMask = 0; /* Copy Ioctl Buffer structure */ Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of Ioctl buffer is failed from user space"); return -EFAULT; } if (IoBuffer.InputLength != sizeof(unsigned long)) { Status = -EINVAL; break; } Status = copy_from_user(&RxCntrlMsgBitMask, IoBuffer.InputBuffer, IoBuffer.InputLength); if (Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of control bit mask failed from user space"); return -EFAULT; } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask); pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask; } break; case IOCTL_BCM_GET_DEVICE_DRIVER_INFO: { DEVICE_DRIVER_INFO DevInfo; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n"); DevInfo.MaxRDMBufferSize = BUFFER_4K; DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START; DevInfo.u32RxAlignmentCorrection = 0; DevInfo.u32NVMType = Adapter->eNVMType; DevInfo.u32InterfaceType = BCM_USB; if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.OutputLength < sizeof(DevInfo)) return -EINVAL; if (copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo))) return -EFAULT; } break; case IOCTL_BCM_TIME_SINCE_NET_ENTRY: { ST_TIME_ELAPSED stTimeElapsedSinceNetEntry = {0}; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_TIME_SINCE_NET_ENTRY called"); if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) return -EFAULT; if (IoBuffer.OutputLength < sizeof(ST_TIME_ELAPSED)) return -EINVAL; stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = get_seconds() - Adapter->liTimeSinceLastNetEntry; if (copy_to_user(IoBuffer.OutputBuffer, 
&stTimeElapsedSinceNetEntry, sizeof(ST_TIME_ELAPSED))) return -EFAULT; } break; case IOCTL_CLOSE_NOTIFICATION: BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_CLOSE_NOTIFICATION"); break; default: pr_info(DRV_NAME ": unknown ioctl cmd=%#x\n", cmd); Status = STATUS_FAILURE; break; } return Status; } static const struct file_operations bcm_fops = { .owner = THIS_MODULE, .open = bcm_char_open, .release = bcm_char_release, .read = bcm_char_read, .unlocked_ioctl = bcm_char_ioctl, .llseek = no_llseek, }; int register_control_device_interface(PMINI_ADAPTER Adapter) { if (Adapter->major > 0) return Adapter->major; Adapter->major = register_chrdev(0, DEV_NAME, &bcm_fops); if (Adapter->major < 0) { pr_err(DRV_NAME ": could not created character device\n"); return Adapter->major; } Adapter->pstCreatedClassDevice = device_create(bcm_class, NULL, MKDEV(Adapter->major, 0), Adapter, DEV_NAME); if (IS_ERR(Adapter->pstCreatedClassDevice)) { pr_err(DRV_NAME ": class device create failed\n"); unregister_chrdev(Adapter->major, DEV_NAME); return PTR_ERR(Adapter->pstCreatedClassDevice); } return 0; } void unregister_control_device_interface(PMINI_ADAPTER Adapter) { if (Adapter->major > 0) { device_destroy(bcm_class, MKDEV(Adapter->major, 0)); unregister_chrdev(Adapter->major, DEV_NAME); } }
gpl-2.0
Pafcholini/Beta_TW
arch/um/drivers/mmapper_kern.c
4386
2898
/* * arch/um/drivers/mmapper_kern.c * * BRIEF MODULE DESCRIPTION * * Copyright (C) 2000 RidgeRun, Inc. * Author: RidgeRun, Inc. * Greg Lonnon glonnon@ridgerun.com or info@ridgerun.com * */ #include <linux/stddef.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/uaccess.h> #include <mem_user.h> /* These are set in mmapper_init, which is called at boot time */ static unsigned long mmapper_size; static unsigned long p_buf; static char *v_buf; static ssize_t mmapper_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(buf, count, ppos, v_buf, mmapper_size); } static ssize_t mmapper_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { if (*ppos > mmapper_size) return -EINVAL; return simple_write_to_buffer(v_buf, mmapper_size, ppos, buf, count); } static long mmapper_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return -ENOIOCTLCMD; } static int mmapper_mmap(struct file *file, struct vm_area_struct *vma) { int ret = -EINVAL; int size; if (vma->vm_pgoff != 0) goto out; size = vma->vm_end - vma->vm_start; if (size > mmapper_size) return -EFAULT; /* * XXX A comment above remap_pfn_range says it should only be * called when the mm semaphore is held */ if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size, vma->vm_page_prot)) goto out; ret = 0; out: return ret; } static int mmapper_open(struct inode *inode, struct file *file) { return 0; } static int mmapper_release(struct inode *inode, struct file *file) { return 0; } static const struct file_operations mmapper_fops = { .owner = THIS_MODULE, .read = mmapper_read, .write = mmapper_write, .unlocked_ioctl = mmapper_ioctl, .mmap = mmapper_mmap, .open = mmapper_open, .release = mmapper_release, .llseek = default_llseek, }; /* * No locking needed - only used (and modified) by below initcall and exitcall. 
*/ static struct miscdevice mmapper_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "mmapper", .fops = &mmapper_fops }; static int __init mmapper_init(void) { int err; printk(KERN_INFO "Mapper v0.1\n"); v_buf = (char *) find_iomem("mmapper", &mmapper_size); if (mmapper_size == 0) { printk(KERN_ERR "mmapper_init - find_iomem failed\n"); return -ENODEV; } p_buf = __pa(v_buf); err = misc_register(&mmapper_dev); if (err) { printk(KERN_ERR "mmapper - misc_register failed, err = %d\n", err); return err; } return 0; } static void mmapper_exit(void) { misc_deregister(&mmapper_dev); } module_init(mmapper_init); module_exit(mmapper_exit); MODULE_AUTHOR("Greg Lonnon <glonnon@ridgerun.com>"); MODULE_DESCRIPTION("DSPLinux simulator mmapper driver"); MODULE_LICENSE("GPL");
gpl-2.0
n-aizu/rowboat-kernel
arch/arm/mach-clps711x/p720t-leds.c
4898
1558
/* * linux/arch/arm/mach-clps711x/leds.c * * Integrator LED control routines * * Copyright (C) 2000 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/leds.h> #include <asm/system.h> #include <asm/mach-types.h> #include <asm/hardware/clps7111.h> #include <asm/hardware/ep7212.h> static void p720t_leds_event(led_event_t ledevt) { unsigned long flags; u32 pddr; local_irq_save(flags); switch(ledevt) { case led_idle_start: break; case led_idle_end: break; case led_timer: pddr = clps_readb(PDDR); clps_writeb(pddr ^ 1, PDDR); break; default: break; } local_irq_restore(flags); } static int __init leds_init(void) { if (machine_is_p720t()) leds_event = p720t_leds_event; return 0; } arch_initcall(leds_init);
gpl-2.0
tommytarts/QuantumKernelM8-GPe
drivers/mfd/rc5t583-irq.c
5154
11797
/* * Interrupt driver for RICOH583 power management chip. * * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved. * Author: Laxman dewangan <ldewangan@nvidia.com> * * based on code * Copyright (C) 2011 RICOH COMPANY,LTD * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/mfd/rc5t583.h> enum int_type { SYS_INT = 0x1, DCDC_INT = 0x2, RTC_INT = 0x4, ADC_INT = 0x8, GPIO_INT = 0x10, }; static int gpedge_add[] = { RC5T583_GPIO_GPEDGE2, RC5T583_GPIO_GPEDGE2 }; static int irq_en_add[] = { RC5T583_INT_EN_SYS1, RC5T583_INT_EN_SYS2, RC5T583_INT_EN_DCDC, RC5T583_INT_EN_RTC, RC5T583_INT_EN_ADC1, RC5T583_INT_EN_ADC2, RC5T583_INT_EN_ADC3, RC5T583_GPIO_EN_INT }; static int irq_mon_add[] = { RC5T583_INT_MON_SYS1, RC5T583_INT_MON_SYS2, RC5T583_INT_MON_DCDC, RC5T583_INT_MON_RTC, RC5T583_INT_IR_ADCL, RC5T583_INT_IR_ADCH, RC5T583_INT_IR_ADCEND, RC5T583_INT_IR_GPIOF, RC5T583_INT_IR_GPIOR }; static int irq_clr_add[] = { RC5T583_INT_IR_SYS1, RC5T583_INT_IR_SYS2, RC5T583_INT_IR_DCDC, RC5T583_INT_IR_RTC, RC5T583_INT_IR_ADCL, RC5T583_INT_IR_ADCH, RC5T583_INT_IR_ADCEND, RC5T583_INT_IR_GPIOF, RC5T583_INT_IR_GPIOR }; static int main_int_type[] = { SYS_INT, SYS_INT, DCDC_INT, RTC_INT, ADC_INT, ADC_INT, ADC_INT, GPIO_INT, GPIO_INT, }; struct rc5t583_irq_data { u8 int_type; u8 master_bit; u8 int_en_bit; u8 mask_reg_index; int grp_index; }; 
#define RC5T583_IRQ(_int_type, _master_bit, _grp_index, \ _int_bit, _mask_ind) \ { \ .int_type = _int_type, \ .master_bit = _master_bit, \ .grp_index = _grp_index, \ .int_en_bit = _int_bit, \ .mask_reg_index = _mask_ind, \ } static const struct rc5t583_irq_data rc5t583_irqs[RC5T583_MAX_IRQS] = { [RC5T583_IRQ_ONKEY] = RC5T583_IRQ(SYS_INT, 0, 0, 0, 0), [RC5T583_IRQ_ACOK] = RC5T583_IRQ(SYS_INT, 0, 1, 1, 0), [RC5T583_IRQ_LIDOPEN] = RC5T583_IRQ(SYS_INT, 0, 2, 2, 0), [RC5T583_IRQ_PREOT] = RC5T583_IRQ(SYS_INT, 0, 3, 3, 0), [RC5T583_IRQ_CLKSTP] = RC5T583_IRQ(SYS_INT, 0, 4, 4, 0), [RC5T583_IRQ_ONKEY_OFF] = RC5T583_IRQ(SYS_INT, 0, 5, 5, 0), [RC5T583_IRQ_WD] = RC5T583_IRQ(SYS_INT, 0, 7, 7, 0), [RC5T583_IRQ_EN_PWRREQ1] = RC5T583_IRQ(SYS_INT, 0, 8, 0, 1), [RC5T583_IRQ_EN_PWRREQ2] = RC5T583_IRQ(SYS_INT, 0, 9, 1, 1), [RC5T583_IRQ_PRE_VINDET] = RC5T583_IRQ(SYS_INT, 0, 10, 2, 1), [RC5T583_IRQ_DC0LIM] = RC5T583_IRQ(DCDC_INT, 1, 0, 0, 2), [RC5T583_IRQ_DC1LIM] = RC5T583_IRQ(DCDC_INT, 1, 1, 1, 2), [RC5T583_IRQ_DC2LIM] = RC5T583_IRQ(DCDC_INT, 1, 2, 2, 2), [RC5T583_IRQ_DC3LIM] = RC5T583_IRQ(DCDC_INT, 1, 3, 3, 2), [RC5T583_IRQ_CTC] = RC5T583_IRQ(RTC_INT, 2, 0, 0, 3), [RC5T583_IRQ_YALE] = RC5T583_IRQ(RTC_INT, 2, 5, 5, 3), [RC5T583_IRQ_DALE] = RC5T583_IRQ(RTC_INT, 2, 6, 6, 3), [RC5T583_IRQ_WALE] = RC5T583_IRQ(RTC_INT, 2, 7, 7, 3), [RC5T583_IRQ_AIN1L] = RC5T583_IRQ(ADC_INT, 3, 0, 0, 4), [RC5T583_IRQ_AIN2L] = RC5T583_IRQ(ADC_INT, 3, 1, 1, 4), [RC5T583_IRQ_AIN3L] = RC5T583_IRQ(ADC_INT, 3, 2, 2, 4), [RC5T583_IRQ_VBATL] = RC5T583_IRQ(ADC_INT, 3, 3, 3, 4), [RC5T583_IRQ_VIN3L] = RC5T583_IRQ(ADC_INT, 3, 4, 4, 4), [RC5T583_IRQ_VIN8L] = RC5T583_IRQ(ADC_INT, 3, 5, 5, 4), [RC5T583_IRQ_AIN1H] = RC5T583_IRQ(ADC_INT, 3, 6, 0, 5), [RC5T583_IRQ_AIN2H] = RC5T583_IRQ(ADC_INT, 3, 7, 1, 5), [RC5T583_IRQ_AIN3H] = RC5T583_IRQ(ADC_INT, 3, 8, 2, 5), [RC5T583_IRQ_VBATH] = RC5T583_IRQ(ADC_INT, 3, 9, 3, 5), [RC5T583_IRQ_VIN3H] = RC5T583_IRQ(ADC_INT, 3, 10, 4, 5), [RC5T583_IRQ_VIN8H] = RC5T583_IRQ(ADC_INT, 3, 11, 5, 
5), [RC5T583_IRQ_ADCEND] = RC5T583_IRQ(ADC_INT, 3, 12, 0, 6), [RC5T583_IRQ_GPIO0] = RC5T583_IRQ(GPIO_INT, 4, 0, 0, 7), [RC5T583_IRQ_GPIO1] = RC5T583_IRQ(GPIO_INT, 4, 1, 1, 7), [RC5T583_IRQ_GPIO2] = RC5T583_IRQ(GPIO_INT, 4, 2, 2, 7), [RC5T583_IRQ_GPIO3] = RC5T583_IRQ(GPIO_INT, 4, 3, 3, 7), [RC5T583_IRQ_GPIO4] = RC5T583_IRQ(GPIO_INT, 4, 4, 4, 7), [RC5T583_IRQ_GPIO5] = RC5T583_IRQ(GPIO_INT, 4, 5, 5, 7), [RC5T583_IRQ_GPIO6] = RC5T583_IRQ(GPIO_INT, 4, 6, 6, 7), [RC5T583_IRQ_GPIO7] = RC5T583_IRQ(GPIO_INT, 4, 7, 7, 7), }; static void rc5t583_irq_lock(struct irq_data *irq_data) { struct rc5t583 *rc5t583 = irq_data_get_irq_chip_data(irq_data); mutex_lock(&rc5t583->irq_lock); } static void rc5t583_irq_unmask(struct irq_data *irq_data) { struct rc5t583 *rc5t583 = irq_data_get_irq_chip_data(irq_data); unsigned int __irq = irq_data->irq - rc5t583->irq_base; const struct rc5t583_irq_data *data = &rc5t583_irqs[__irq]; rc5t583->group_irq_en[data->grp_index] |= 1 << data->grp_index; rc5t583->intc_inten_reg |= 1 << data->master_bit; rc5t583->irq_en_reg[data->mask_reg_index] |= 1 << data->int_en_bit; } static void rc5t583_irq_mask(struct irq_data *irq_data) { struct rc5t583 *rc5t583 = irq_data_get_irq_chip_data(irq_data); unsigned int __irq = irq_data->irq - rc5t583->irq_base; const struct rc5t583_irq_data *data = &rc5t583_irqs[__irq]; rc5t583->group_irq_en[data->grp_index] &= ~(1 << data->grp_index); if (!rc5t583->group_irq_en[data->grp_index]) rc5t583->intc_inten_reg &= ~(1 << data->master_bit); rc5t583->irq_en_reg[data->mask_reg_index] &= ~(1 << data->int_en_bit); } static int rc5t583_irq_set_type(struct irq_data *irq_data, unsigned int type) { struct rc5t583 *rc5t583 = irq_data_get_irq_chip_data(irq_data); unsigned int __irq = irq_data->irq - rc5t583->irq_base; const struct rc5t583_irq_data *data = &rc5t583_irqs[__irq]; int val = 0; int gpedge_index; int gpedge_bit_pos; /* Supporting only trigger level inetrrupt */ if ((data->int_type & GPIO_INT) && (type & IRQ_TYPE_EDGE_BOTH)) { 
gpedge_index = data->int_en_bit / 4; gpedge_bit_pos = data->int_en_bit % 4; if (type & IRQ_TYPE_EDGE_FALLING) val |= 0x2; if (type & IRQ_TYPE_EDGE_RISING) val |= 0x1; rc5t583->gpedge_reg[gpedge_index] &= ~(3 << gpedge_bit_pos); rc5t583->gpedge_reg[gpedge_index] |= (val << gpedge_bit_pos); rc5t583_irq_unmask(irq_data); return 0; } return -EINVAL; } static void rc5t583_irq_sync_unlock(struct irq_data *irq_data) { struct rc5t583 *rc5t583 = irq_data_get_irq_chip_data(irq_data); int i; int ret; for (i = 0; i < ARRAY_SIZE(rc5t583->gpedge_reg); i++) { ret = rc5t583_write(rc5t583->dev, gpedge_add[i], rc5t583->gpedge_reg[i]); if (ret < 0) dev_warn(rc5t583->dev, "Error in writing reg 0x%02x error: %d\n", gpedge_add[i], ret); } for (i = 0; i < ARRAY_SIZE(rc5t583->irq_en_reg); i++) { ret = rc5t583_write(rc5t583->dev, irq_en_add[i], rc5t583->irq_en_reg[i]); if (ret < 0) dev_warn(rc5t583->dev, "Error in writing reg 0x%02x error: %d\n", irq_en_add[i], ret); } ret = rc5t583_write(rc5t583->dev, RC5T583_INTC_INTEN, rc5t583->intc_inten_reg); if (ret < 0) dev_warn(rc5t583->dev, "Error in writing reg 0x%02x error: %d\n", RC5T583_INTC_INTEN, ret); mutex_unlock(&rc5t583->irq_lock); } #ifdef CONFIG_PM_SLEEP static int rc5t583_irq_set_wake(struct irq_data *irq_data, unsigned int on) { struct rc5t583 *rc5t583 = irq_data_get_irq_chip_data(irq_data); return irq_set_irq_wake(rc5t583->chip_irq, on); } #else #define rc5t583_irq_set_wake NULL #endif static irqreturn_t rc5t583_irq(int irq, void *data) { struct rc5t583 *rc5t583 = data; uint8_t int_sts[RC5T583_MAX_INTERRUPT_MASK_REGS]; uint8_t master_int; int i; int ret; unsigned int rtc_int_sts = 0; /* Clear the status */ for (i = 0; i < RC5T583_MAX_INTERRUPT_MASK_REGS; i++) int_sts[i] = 0; ret = rc5t583_read(rc5t583->dev, RC5T583_INTC_INTMON, &master_int); if (ret < 0) { dev_err(rc5t583->dev, "Error in reading reg 0x%02x error: %d\n", RC5T583_INTC_INTMON, ret); return IRQ_HANDLED; } for (i = 0; i < RC5T583_MAX_INTERRUPT_MASK_REGS; ++i) { if 
(!(master_int & main_int_type[i])) continue; ret = rc5t583_read(rc5t583->dev, irq_mon_add[i], &int_sts[i]); if (ret < 0) { dev_warn(rc5t583->dev, "Error in reading reg 0x%02x error: %d\n", irq_mon_add[i], ret); int_sts[i] = 0; continue; } if (main_int_type[i] & RTC_INT) { rtc_int_sts = 0; if (int_sts[i] & 0x1) rtc_int_sts |= BIT(6); if (int_sts[i] & 0x2) rtc_int_sts |= BIT(7); if (int_sts[i] & 0x4) rtc_int_sts |= BIT(0); if (int_sts[i] & 0x8) rtc_int_sts |= BIT(5); } ret = rc5t583_write(rc5t583->dev, irq_clr_add[i], ~int_sts[i]); if (ret < 0) dev_warn(rc5t583->dev, "Error in reading reg 0x%02x error: %d\n", irq_clr_add[i], ret); if (main_int_type[i] & RTC_INT) int_sts[i] = rtc_int_sts; } /* Merge gpio interrupts for rising and falling case*/ int_sts[7] |= int_sts[8]; /* Call interrupt handler if enabled */ for (i = 0; i < RC5T583_MAX_IRQS; ++i) { const struct rc5t583_irq_data *data = &rc5t583_irqs[i]; if ((int_sts[data->mask_reg_index] & (1 << data->int_en_bit)) && (rc5t583->group_irq_en[data->master_bit] & (1 << data->grp_index))) handle_nested_irq(rc5t583->irq_base + i); } return IRQ_HANDLED; } static struct irq_chip rc5t583_irq_chip = { .name = "rc5t583-irq", .irq_mask = rc5t583_irq_mask, .irq_unmask = rc5t583_irq_unmask, .irq_bus_lock = rc5t583_irq_lock, .irq_bus_sync_unlock = rc5t583_irq_sync_unlock, .irq_set_type = rc5t583_irq_set_type, .irq_set_wake = rc5t583_irq_set_wake, }; int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base) { int i, ret; if (!irq_base) { dev_warn(rc5t583->dev, "No interrupt support on IRQ base\n"); return -EINVAL; } mutex_init(&rc5t583->irq_lock); /* Initailize all int register to 0 */ for (i = 0; i < RC5T583_MAX_INTERRUPT_MASK_REGS; i++) { ret = rc5t583_write(rc5t583->dev, irq_en_add[i], rc5t583->irq_en_reg[i]); if (ret < 0) dev_warn(rc5t583->dev, "Error in writing reg 0x%02x error: %d\n", irq_en_add[i], ret); } for (i = 0; i < RC5T583_MAX_GPEDGE_REG; i++) { ret = rc5t583_write(rc5t583->dev, gpedge_add[i], 
rc5t583->gpedge_reg[i]); if (ret < 0) dev_warn(rc5t583->dev, "Error in writing reg 0x%02x error: %d\n", gpedge_add[i], ret); } ret = rc5t583_write(rc5t583->dev, RC5T583_INTC_INTEN, 0x0); if (ret < 0) dev_warn(rc5t583->dev, "Error in writing reg 0x%02x error: %d\n", RC5T583_INTC_INTEN, ret); /* Clear all interrupts in case they woke up active. */ for (i = 0; i < RC5T583_MAX_INTERRUPT_MASK_REGS; i++) { ret = rc5t583_write(rc5t583->dev, irq_clr_add[i], 0); if (ret < 0) dev_warn(rc5t583->dev, "Error in writing reg 0x%02x error: %d\n", irq_clr_add[i], ret); } rc5t583->irq_base = irq_base; rc5t583->chip_irq = irq; for (i = 0; i < RC5T583_MAX_IRQS; i++) { int __irq = i + rc5t583->irq_base; irq_set_chip_data(__irq, rc5t583); irq_set_chip_and_handler(__irq, &rc5t583_irq_chip, handle_simple_irq); irq_set_nested_thread(__irq, 1); #ifdef CONFIG_ARM set_irq_flags(__irq, IRQF_VALID); #endif } ret = request_threaded_irq(irq, NULL, rc5t583_irq, IRQF_ONESHOT, "rc5t583", rc5t583); if (ret < 0) dev_err(rc5t583->dev, "Error in registering interrupt error: %d\n", ret); return ret; } int rc5t583_irq_exit(struct rc5t583 *rc5t583) { if (rc5t583->chip_irq) free_irq(rc5t583->chip_irq, rc5t583); return 0; }
gpl-2.0
LeroViten/LerNex-Ancora-Kernel
drivers/gpu/drm/i915/dvo_tfp410.c
5666
7925
/* * Copyright © 2007 Dave Mueller * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
* * Authors: * Dave Mueller <dave.mueller@gmx.ch> * */ #include "dvo.h" /* register definitions according to the TFP410 data sheet */ #define TFP410_VID 0x014C #define TFP410_DID 0x0410 #define TFP410_VID_LO 0x00 #define TFP410_VID_HI 0x01 #define TFP410_DID_LO 0x02 #define TFP410_DID_HI 0x03 #define TFP410_REV 0x04 #define TFP410_CTL_1 0x08 #define TFP410_CTL_1_TDIS (1<<6) #define TFP410_CTL_1_VEN (1<<5) #define TFP410_CTL_1_HEN (1<<4) #define TFP410_CTL_1_DSEL (1<<3) #define TFP410_CTL_1_BSEL (1<<2) #define TFP410_CTL_1_EDGE (1<<1) #define TFP410_CTL_1_PD (1<<0) #define TFP410_CTL_2 0x09 #define TFP410_CTL_2_VLOW (1<<7) #define TFP410_CTL_2_MSEL_MASK (0x7<<4) #define TFP410_CTL_2_MSEL (1<<4) #define TFP410_CTL_2_TSEL (1<<3) #define TFP410_CTL_2_RSEN (1<<2) #define TFP410_CTL_2_HTPLG (1<<1) #define TFP410_CTL_2_MDI (1<<0) #define TFP410_CTL_3 0x0A #define TFP410_CTL_3_DK_MASK (0x7<<5) #define TFP410_CTL_3_DK (1<<5) #define TFP410_CTL_3_DKEN (1<<4) #define TFP410_CTL_3_CTL_MASK (0x7<<1) #define TFP410_CTL_3_CTL (1<<1) #define TFP410_USERCFG 0x0B #define TFP410_DE_DLY 0x32 #define TFP410_DE_CTL 0x33 #define TFP410_DE_CTL_DEGEN (1<<6) #define TFP410_DE_CTL_VSPOL (1<<5) #define TFP410_DE_CTL_HSPOL (1<<4) #define TFP410_DE_CTL_DEDLY8 (1<<0) #define TFP410_DE_TOP 0x34 #define TFP410_DE_CNT_LO 0x36 #define TFP410_DE_CNT_HI 0x37 #define TFP410_DE_LIN_LO 0x38 #define TFP410_DE_LIN_HI 0x39 #define TFP410_H_RES_LO 0x3A #define TFP410_H_RES_HI 0x3B #define TFP410_V_RES_LO 0x3C #define TFP410_V_RES_HI 0x3D struct tfp410_priv { bool quiet; }; static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, } }; out_buf[0] = addr; out_buf[1] = 0; if (i2c_transfer(adapter, msgs, 2) == 2) { 
*ch = in_buf[0]; return true; }; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", addr, adapter->name, dvo->slave_addr); } return false; } static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", addr, adapter->name, dvo->slave_addr); } return false; } static int tfp410_getid(struct intel_dvo_device *dvo, int addr) { uint8_t ch1, ch2; if (tfp410_readb(dvo, addr+0, &ch1) && tfp410_readb(dvo, addr+1, &ch2)) return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF); return -1; } /* Ti TFP410 driver for chip on i2c bus */ static bool tfp410_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { /* this will detect the tfp410 chip on the specified i2c bus */ struct tfp410_priv *tfp; int id; tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL); if (tfp == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = tfp; tfp->quiet = true; if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s " "Slave %d.\n", id, adapter->name, dvo->slave_addr); goto out; } if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s " "Slave %d.\n", id, adapter->name, dvo->slave_addr); goto out; } tfp->quiet = false; return true; out: kfree(tfp); return false; } static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) { enum drm_connector_status ret = connector_status_disconnected; uint8_t ctl2; if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { if (ctl2 & TFP410_CTL_2_RSEN) ret = connector_status_connected; else ret = 
connector_status_disconnected; } return ret; } static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { return MODE_OK; } static void tfp410_mode_set(struct intel_dvo_device *dvo, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* As long as the basics are set up, since we don't have clock dependencies * in the mode setup, we can just leave the registers alone and everything * will work fine. */ /* don't do much */ return; } /* set the tfp410 power state */ static void tfp410_dpms(struct intel_dvo_device *dvo, int mode) { uint8_t ctl1; if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) return; if (mode == DRM_MODE_DPMS_ON) ctl1 |= TFP410_CTL_1_PD; else ctl1 &= ~TFP410_CTL_1_PD; tfp410_writeb(dvo, TFP410_CTL_1, ctl1); } static void tfp410_dump_regs(struct intel_dvo_device *dvo) { uint8_t val, val2; tfp410_readb(dvo, TFP410_REV, &val); DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_1, &val); DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_2, &val); DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_3, &val); DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val); tfp410_readb(dvo, TFP410_USERCFG, &val); DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_DLY, &val); DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_CTL, &val); DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_TOP, &val); DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, TFP410_H_RES_LO, &val); tfp410_readb(dvo, TFP410_H_RES_HI, &val2); DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, 
TFP410_V_RES_LO, &val); tfp410_readb(dvo, TFP410_V_RES_HI, &val2); DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); } static void tfp410_destroy(struct intel_dvo_device *dvo) { struct tfp410_priv *tfp = dvo->dev_priv; if (tfp) { kfree(tfp); dvo->dev_priv = NULL; } } struct intel_dvo_dev_ops tfp410_ops = { .init = tfp410_init, .detect = tfp410_detect, .mode_valid = tfp410_mode_valid, .mode_set = tfp410_mode_set, .dpms = tfp410_dpms, .dump_regs = tfp410_dump_regs, .destroy = tfp410_destroy, };
gpl-2.0
DeviceTREE/android_kernel_samsung_jf
drivers/xen/xen-pciback/conf_space_header.c
6434
8786
/* * PCI Backend - Handles the virtual fields in the configuration space headers. * * Author: Ryan Wilson <hap9@epoch.ncsc.mil> */ #include <linux/kernel.h> #include <linux/pci.h> #include "pciback.h" #include "conf_space.h" struct pci_bar_info { u32 val; u32 len_val; int which; }; #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) { int i; int ret; ret = xen_pcibk_read_config_word(dev, offset, value, data); if (!pci_is_enabled(dev)) return ret; for (i = 0; i < PCI_ROM_RESOURCE; i++) { if (dev->resource[i].flags & IORESOURCE_IO) *value |= PCI_COMMAND_IO; if (dev->resource[i].flags & IORESOURCE_MEM) *value |= PCI_COMMAND_MEMORY; } return ret; } static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) { struct xen_pcibk_dev_data *dev_data; int err; dev_data = pci_get_drvdata(dev); if (!pci_is_enabled(dev) && is_enable_cmd(value)) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: enable\n", pci_name(dev)); err = pci_enable_device(dev); if (err) return err; if (dev_data) dev_data->enable_intx = 1; } else if (pci_is_enabled(dev) && !is_enable_cmd(value)) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: disable\n", pci_name(dev)); pci_disable_device(dev); if (dev_data) dev_data->enable_intx = 0; } if (!dev->is_busmaster && is_master_cmd(value)) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n", pci_name(dev)); pci_set_master(dev); } if (value & PCI_COMMAND_INVALIDATE) { if (unlikely(verbose_request)) printk(KERN_DEBUG DRV_NAME ": %s: enable memory-write-invalidate\n", pci_name(dev)); err = pci_set_mwi(dev); if (err) { printk(KERN_WARNING DRV_NAME ": %s: cannot enable " "memory-write-invalidate (%d)\n", pci_name(dev), err); value &= ~PCI_COMMAND_INVALIDATE; } } return pci_write_config_word(dev, offset, value); } static 
int rom_write(struct pci_dev *dev, int offset, u32 value, void *data) { struct pci_bar_info *bar = data; if (unlikely(!bar)) { printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n", pci_name(dev)); return XEN_PCI_ERR_op_failed; } /* A write to obtain the length must happen as a 32-bit write. * This does not (yet) support writing individual bytes */ if (value == ~PCI_ROM_ADDRESS_ENABLE) bar->which = 1; else { u32 tmpval; pci_read_config_dword(dev, offset, &tmpval); if (tmpval != bar->val && value == bar->val) { /* Allow restoration of bar value. */ pci_write_config_dword(dev, offset, bar->val); } bar->which = 0; } /* Do we need to support enabling/disabling the rom address here? */ return 0; } /* For the BARs, only allow writes which write ~0 or * the correct resource information * (Needed for when the driver probes the resource usage) */ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) { struct pci_bar_info *bar = data; if (unlikely(!bar)) { printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n", pci_name(dev)); return XEN_PCI_ERR_op_failed; } /* A write to obtain the length must happen as a 32-bit write. * This does not (yet) support writing individual bytes */ if (value == ~0) bar->which = 1; else { u32 tmpval; pci_read_config_dword(dev, offset, &tmpval); if (tmpval != bar->val && value == bar->val) { /* Allow restoration of bar value. */ pci_write_config_dword(dev, offset, bar->val); } bar->which = 0; } return 0; } static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data) { struct pci_bar_info *bar = data; if (unlikely(!bar)) { printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n", pci_name(dev)); return XEN_PCI_ERR_op_failed; } *value = bar->which ? 
bar->len_val : bar->val; return 0; } static inline void read_dev_bar(struct pci_dev *dev, struct pci_bar_info *bar_info, int offset, u32 len_mask) { int pos; struct resource *res = dev->resource; if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1) pos = PCI_ROM_RESOURCE; else { pos = (offset - PCI_BASE_ADDRESS_0) / 4; if (pos && ((res[pos - 1].flags & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64))) { bar_info->val = res[pos - 1].start >> 32; bar_info->len_val = res[pos - 1].end >> 32; return; } } bar_info->val = res[pos].start | (res[pos].flags & PCI_REGION_FLAG_MASK); bar_info->len_val = resource_size(&res[pos]); } static void *bar_init(struct pci_dev *dev, int offset) { struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); if (!bar) return ERR_PTR(-ENOMEM); read_dev_bar(dev, bar, offset, ~0); bar->which = 0; return bar; } static void *rom_init(struct pci_dev *dev, int offset) { struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); if (!bar) return ERR_PTR(-ENOMEM); read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE); bar->which = 0; return bar; } static void bar_reset(struct pci_dev *dev, int offset, void *data) { struct pci_bar_info *bar = data; bar->which = 0; } static void bar_release(struct pci_dev *dev, int offset, void *data) { kfree(data); } static int xen_pcibk_read_vendor(struct pci_dev *dev, int offset, u16 *value, void *data) { *value = dev->vendor; return 0; } static int xen_pcibk_read_device(struct pci_dev *dev, int offset, u16 *value, void *data) { *value = dev->device; return 0; } static int interrupt_read(struct pci_dev *dev, int offset, u8 * value, void *data) { *value = (u8) dev->irq; return 0; } static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data) { u8 cur_value; int err; err = pci_read_config_byte(dev, offset, &cur_value); if (err) goto out; if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START) || value 
== PCI_BIST_START) err = pci_write_config_byte(dev, offset, value); out: return err; } static const struct config_field header_common[] = { { .offset = PCI_VENDOR_ID, .size = 2, .u.w.read = xen_pcibk_read_vendor, }, { .offset = PCI_DEVICE_ID, .size = 2, .u.w.read = xen_pcibk_read_device, }, { .offset = PCI_COMMAND, .size = 2, .u.w.read = command_read, .u.w.write = command_write, }, { .offset = PCI_INTERRUPT_LINE, .size = 1, .u.b.read = interrupt_read, }, { .offset = PCI_INTERRUPT_PIN, .size = 1, .u.b.read = xen_pcibk_read_config_byte, }, { /* Any side effects of letting driver domain control cache line? */ .offset = PCI_CACHE_LINE_SIZE, .size = 1, .u.b.read = xen_pcibk_read_config_byte, .u.b.write = xen_pcibk_write_config_byte, }, { .offset = PCI_LATENCY_TIMER, .size = 1, .u.b.read = xen_pcibk_read_config_byte, }, { .offset = PCI_BIST, .size = 1, .u.b.read = xen_pcibk_read_config_byte, .u.b.write = bist_write, }, {} }; #define CFG_FIELD_BAR(reg_offset) \ { \ .offset = reg_offset, \ .size = 4, \ .init = bar_init, \ .reset = bar_reset, \ .release = bar_release, \ .u.dw.read = bar_read, \ .u.dw.write = bar_write, \ } #define CFG_FIELD_ROM(reg_offset) \ { \ .offset = reg_offset, \ .size = 4, \ .init = rom_init, \ .reset = bar_reset, \ .release = bar_release, \ .u.dw.read = bar_read, \ .u.dw.write = rom_write, \ } static const struct config_field header_0[] = { CFG_FIELD_BAR(PCI_BASE_ADDRESS_0), CFG_FIELD_BAR(PCI_BASE_ADDRESS_1), CFG_FIELD_BAR(PCI_BASE_ADDRESS_2), CFG_FIELD_BAR(PCI_BASE_ADDRESS_3), CFG_FIELD_BAR(PCI_BASE_ADDRESS_4), CFG_FIELD_BAR(PCI_BASE_ADDRESS_5), CFG_FIELD_ROM(PCI_ROM_ADDRESS), {} }; static const struct config_field header_1[] = { CFG_FIELD_BAR(PCI_BASE_ADDRESS_0), CFG_FIELD_BAR(PCI_BASE_ADDRESS_1), CFG_FIELD_ROM(PCI_ROM_ADDRESS1), {} }; int xen_pcibk_config_header_add_fields(struct pci_dev *dev) { int err; err = xen_pcibk_config_add_fields(dev, header_common); if (err) goto out; switch (dev->hdr_type) { case PCI_HEADER_TYPE_NORMAL: err = 
xen_pcibk_config_add_fields(dev, header_0); break; case PCI_HEADER_TYPE_BRIDGE: err = xen_pcibk_config_add_fields(dev, header_1); break; default: err = -EINVAL; printk(KERN_ERR DRV_NAME ": %s: Unsupported header type %d!\n", pci_name(dev), dev->hdr_type); break; } out: return err; }
gpl-2.0
nbars/SM-P600-linux-kernel
drivers/pci/hotplug/acpi_pcihp.c
7458
13447
/* * Common ACPI functions for hot plug platforms * * Copyright (C) 2006 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <kristen.c.accardi@intel.com> * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/acpi.h> #include <linux/pci-acpi.h> #include <linux/slab.h> #define MY_NAME "acpi_pcihp" #define dbg(fmt, arg...) do { if (debug_acpi) printk(KERN_DEBUG "%s: %s: " fmt , MY_NAME , __func__ , ## arg); } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg) #define warn(format, arg...) 
printk(KERN_WARNING "%s: " format , MY_NAME , ## arg) #define METHOD_NAME__SUN "_SUN" #define METHOD_NAME_OSHP "OSHP" static bool debug_acpi; static acpi_status decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx) { int i; union acpi_object *fields = record->package.elements; u32 revision = fields[1].integer.value; switch (revision) { case 1: if (record->package.count != 6) return AE_ERROR; for (i = 2; i < 6; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; hpx->t0 = &hpx->type0_data; hpx->t0->revision = revision; hpx->t0->cache_line_size = fields[2].integer.value; hpx->t0->latency_timer = fields[3].integer.value; hpx->t0->enable_serr = fields[4].integer.value; hpx->t0->enable_perr = fields[5].integer.value; break; default: printk(KERN_WARNING "%s: Type 0 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } static acpi_status decode_type1_hpx_record(union acpi_object *record, struct hotplug_params *hpx) { int i; union acpi_object *fields = record->package.elements; u32 revision = fields[1].integer.value; switch (revision) { case 1: if (record->package.count != 5) return AE_ERROR; for (i = 2; i < 5; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; hpx->t1 = &hpx->type1_data; hpx->t1->revision = revision; hpx->t1->max_mem_read = fields[2].integer.value; hpx->t1->avg_max_split = fields[3].integer.value; hpx->t1->tot_max_split = fields[4].integer.value; break; default: printk(KERN_WARNING "%s: Type 1 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } static acpi_status decode_type2_hpx_record(union acpi_object *record, struct hotplug_params *hpx) { int i; union acpi_object *fields = record->package.elements; u32 revision = fields[1].integer.value; switch (revision) { case 1: if (record->package.count != 18) return AE_ERROR; for (i = 2; i < 18; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; hpx->t2 = &hpx->type2_data; 
hpx->t2->revision = revision; hpx->t2->unc_err_mask_and = fields[2].integer.value; hpx->t2->unc_err_mask_or = fields[3].integer.value; hpx->t2->unc_err_sever_and = fields[4].integer.value; hpx->t2->unc_err_sever_or = fields[5].integer.value; hpx->t2->cor_err_mask_and = fields[6].integer.value; hpx->t2->cor_err_mask_or = fields[7].integer.value; hpx->t2->adv_err_cap_and = fields[8].integer.value; hpx->t2->adv_err_cap_or = fields[9].integer.value; hpx->t2->pci_exp_devctl_and = fields[10].integer.value; hpx->t2->pci_exp_devctl_or = fields[11].integer.value; hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value; hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value; hpx->t2->sec_unc_err_sever_and = fields[14].integer.value; hpx->t2->sec_unc_err_sever_or = fields[15].integer.value; hpx->t2->sec_unc_err_mask_and = fields[16].integer.value; hpx->t2->sec_unc_err_mask_or = fields[17].integer.value; break; default: printk(KERN_WARNING "%s: Type 2 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) { acpi_status status; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *package, *record, *fields; u32 type; int i; /* Clear the return buffer with zeros */ memset(hpx, 0, sizeof(struct hotplug_params)); status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer); if (ACPI_FAILURE(status)) return status; package = (union acpi_object *)buffer.pointer; if (package->type != ACPI_TYPE_PACKAGE) { status = AE_ERROR; goto exit; } for (i = 0; i < package->package.count; i++) { record = &package->package.elements[i]; if (record->type != ACPI_TYPE_PACKAGE) { status = AE_ERROR; goto exit; } fields = record->package.elements; if (fields[0].type != ACPI_TYPE_INTEGER || fields[1].type != ACPI_TYPE_INTEGER) { status = AE_ERROR; goto exit; } type = fields[0].integer.value; switch (type) { case 0: status = decode_type0_hpx_record(record, hpx); if 
(ACPI_FAILURE(status)) goto exit; break; case 1: status = decode_type1_hpx_record(record, hpx); if (ACPI_FAILURE(status)) goto exit; break; case 2: status = decode_type2_hpx_record(record, hpx); if (ACPI_FAILURE(status)) goto exit; break; default: printk(KERN_ERR "%s: Type %d record not supported\n", __func__, type); status = AE_ERROR; goto exit; } } exit: kfree(buffer.pointer); return status; } static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) { acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *package, *fields; int i; memset(hpp, 0, sizeof(struct hotplug_params)); status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); if (ACPI_FAILURE(status)) return status; package = (union acpi_object *) buffer.pointer; if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 4) { status = AE_ERROR; goto exit; } fields = package->package.elements; for (i = 0; i < 4; i++) { if (fields[i].type != ACPI_TYPE_INTEGER) { status = AE_ERROR; goto exit; } } hpp->t0 = &hpp->type0_data; hpp->t0->revision = 1; hpp->t0->cache_line_size = fields[0].integer.value; hpp->t0->latency_timer = fields[1].integer.value; hpp->t0->enable_serr = fields[2].integer.value; hpp->t0->enable_perr = fields[3].integer.value; exit: kfree(buffer.pointer); return status; } /* acpi_run_oshp - get control of hotplug from the firmware * * @handle - the handle of the hotplug controller. 
*/ static acpi_status acpi_run_oshp(acpi_handle handle) { acpi_status status; struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); /* run OSHP */ status = acpi_evaluate_object(handle, METHOD_NAME_OSHP, NULL, NULL); if (ACPI_FAILURE(status)) if (status != AE_NOT_FOUND) printk(KERN_ERR "%s:%s OSHP fails=0x%x\n", __func__, (char *)string.pointer, status); else dbg("%s:%s OSHP not found\n", __func__, (char *)string.pointer); else pr_debug("%s:%s OSHP passes\n", __func__, (char *)string.pointer); kfree(string.pointer); return status; } /* pci_get_hp_params * * @dev - the pci_dev for which we want parameters * @hpp - allocated by the caller */ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) { acpi_status status; acpi_handle handle, phandle; struct pci_bus *pbus; handle = NULL; for (pbus = dev->bus; pbus; pbus = pbus->parent) { handle = acpi_pci_get_bridge_handle(pbus); if (handle) break; } /* * _HPP settings apply to all child buses, until another _HPP is * encountered. If we don't find an _HPP for the input pci dev, * look for it in the parent device scope since that would apply to * this pci dev. */ while (handle) { status = acpi_run_hpx(handle, hpp); if (ACPI_SUCCESS(status)) return 0; status = acpi_run_hpp(handle, hpp); if (ACPI_SUCCESS(status)) return 0; if (acpi_is_root_bridge(handle)) break; status = acpi_get_parent(handle, &phandle); if (ACPI_FAILURE(status)) break; handle = phandle; } return -ENODEV; } EXPORT_SYMBOL_GPL(pci_get_hp_params); /** * acpi_get_hp_hw_control_from_firmware * @dev: the pci_dev of the bridge that has a hotplug controller * @flags: requested control bits for _OSC * * Attempt to take hotplug control from firmware. 
*/ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags) { acpi_status status; acpi_handle chandle, handle; struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; flags &= OSC_SHPC_NATIVE_HP_CONTROL; if (!flags) { err("Invalid flags %u specified!\n", flags); return -EINVAL; } /* * Per PCI firmware specification, we should run the ACPI _OSC * method to get control of hotplug hardware before using it. If * an _OSC is missing, we look for an OSHP to do the same thing. * To handle different BIOS behavior, we look for _OSC on a root * bridge preferentially (according to PCI fw spec). Later for * OSHP within the scope of the hotplug controller and its parents, * up to the host bridge under which this controller exists. */ handle = acpi_find_root_bridge_handle(pdev); if (handle) { acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); dbg("Trying to get hotplug control for %s\n", (char *)string.pointer); status = acpi_pci_osc_control_set(handle, &flags, flags); if (ACPI_SUCCESS(status)) goto got_one; if (status == AE_SUPPORT) goto no_control; kfree(string.pointer); string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; } handle = DEVICE_ACPI_HANDLE(&pdev->dev); if (!handle) { /* * This hotplug controller was not listed in the ACPI name * space at all. Try to get acpi handle of parent pci bus. 
*/ struct pci_bus *pbus; for (pbus = pdev->bus; pbus; pbus = pbus->parent) { handle = acpi_pci_get_bridge_handle(pbus); if (handle) break; } } while (handle) { acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); dbg("Trying to get hotplug control for %s \n", (char *)string.pointer); status = acpi_run_oshp(handle); if (ACPI_SUCCESS(status)) goto got_one; if (acpi_is_root_bridge(handle)) break; chandle = handle; status = acpi_get_parent(chandle, &handle); if (ACPI_FAILURE(status)) break; } no_control: dbg("Cannot get control of hotplug hardware for pci %s\n", pci_name(pdev)); kfree(string.pointer); return -ENODEV; got_one: dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(pdev), (char *)string.pointer); kfree(string.pointer); return 0; } EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); static int pcihp_is_ejectable(acpi_handle handle) { acpi_status status; acpi_handle tmp; unsigned long long removable; status = acpi_get_handle(handle, "_ADR", &tmp); if (ACPI_FAILURE(status)) return 0; status = acpi_get_handle(handle, "_EJ0", &tmp); if (ACPI_SUCCESS(status)) return 1; status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable); if (ACPI_SUCCESS(status) && removable) return 1; return 0; } /** * acpi_pcihp_check_ejectable - check if handle is ejectable ACPI PCI slot * @pbus: the PCI bus of the PCI slot corresponding to 'handle' * @handle: ACPI handle to check * * Return 1 if handle is ejectable PCI slot, 0 otherwise. 
*/ int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle) { acpi_handle bridge_handle, parent_handle; if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus))) return 0; if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle)))) return 0; if (bridge_handle != parent_handle) return 0; return pcihp_is_ejectable(handle); } EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable); static acpi_status check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) { int *found = (int *)context; if (pcihp_is_ejectable(handle)) { *found = 1; return AE_CTRL_TERMINATE; } return AE_OK; } /** * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots * @handle - handle of the PCI bus to scan * * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. */ int acpi_pci_detect_ejectable(acpi_handle handle) { int found = 0; if (!handle) return found; acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, check_hotplug, NULL, (void *)&found, NULL); return found; } EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable); module_param(debug_acpi, bool, 0644); MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not");
gpl-2.0
NachiketNamjoshi/BlackReactor_onyx
arch/arm/mach-imx/mx31lilly-db.c
7714
5404
/* * LILLY-1131 development board support * * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * * based on code for other MX31 boards, * * Copyright 2005-2007 Freescale Semiconductor * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com> * Copyright (C) 2009 Valentin Longchamp, EPFL Mobots group * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/common.h> #include <mach/iomux-mx3.h> #include <mach/board-mx31lilly.h> #include "devices-imx31.h" /* * This file contains board-specific initialization routines for the * LILLY-1131 development board. If you design an own baseboard for the * module, use this file as base for support code. 
*/ static unsigned int lilly_db_board_pins[] __initdata = { MX31_PIN_CTS1__CTS1, MX31_PIN_RTS1__RTS1, MX31_PIN_TXD1__TXD1, MX31_PIN_RXD1__RXD1, MX31_PIN_CTS2__CTS2, MX31_PIN_RTS2__RTS2, MX31_PIN_TXD2__TXD2, MX31_PIN_RXD2__RXD2, MX31_PIN_CSPI3_MOSI__RXD3, MX31_PIN_CSPI3_MISO__TXD3, MX31_PIN_CSPI3_SCLK__RTS3, MX31_PIN_CSPI3_SPI_RDY__CTS3, MX31_PIN_SD1_DATA3__SD1_DATA3, MX31_PIN_SD1_DATA2__SD1_DATA2, MX31_PIN_SD1_DATA1__SD1_DATA1, MX31_PIN_SD1_DATA0__SD1_DATA0, MX31_PIN_SD1_CLK__SD1_CLK, MX31_PIN_SD1_CMD__SD1_CMD, MX31_PIN_LD0__LD0, MX31_PIN_LD1__LD1, MX31_PIN_LD2__LD2, MX31_PIN_LD3__LD3, MX31_PIN_LD4__LD4, MX31_PIN_LD5__LD5, MX31_PIN_LD6__LD6, MX31_PIN_LD7__LD7, MX31_PIN_LD8__LD8, MX31_PIN_LD9__LD9, MX31_PIN_LD10__LD10, MX31_PIN_LD11__LD11, MX31_PIN_LD12__LD12, MX31_PIN_LD13__LD13, MX31_PIN_LD14__LD14, MX31_PIN_LD15__LD15, MX31_PIN_LD16__LD16, MX31_PIN_LD17__LD17, MX31_PIN_VSYNC3__VSYNC3, MX31_PIN_HSYNC__HSYNC, MX31_PIN_FPSHIFT__FPSHIFT, MX31_PIN_DRDY0__DRDY0, MX31_PIN_CONTRAST__CONTRAST, }; /* UART */ static const struct imxuart_platform_data uart_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; /* MMC support */ static int mxc_mmc1_get_ro(struct device *dev) { return gpio_get_value(IOMUX_TO_GPIO(MX31_PIN_LCS0)); } static int gpio_det, gpio_wp; #define MMC_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \ PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU) static int mxc_mmc1_init(struct device *dev, irq_handler_t detect_irq, void *data) { int ret; gpio_det = IOMUX_TO_GPIO(MX31_PIN_GPIO1_1); gpio_wp = IOMUX_TO_GPIO(MX31_PIN_LCS0); mxc_iomux_set_pad(MX31_PIN_SD1_DATA0, MMC_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_SD1_DATA1, MMC_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_SD1_DATA2, MMC_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_SD1_DATA3, MMC_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_SD1_CLK, MMC_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_SD1_CMD, MMC_PAD_CFG); ret = gpio_request(gpio_det, "MMC detect"); if (ret) return ret; ret = gpio_request(gpio_wp, "MMC w/p"); if (ret) goto 
exit_free_det; gpio_direction_input(gpio_det); gpio_direction_input(gpio_wp); ret = request_irq(IOMUX_TO_IRQ(MX31_PIN_GPIO1_1), detect_irq, IRQF_DISABLED | IRQF_TRIGGER_FALLING, "MMC detect", data); if (ret) goto exit_free_wp; return 0; exit_free_wp: gpio_free(gpio_wp); exit_free_det: gpio_free(gpio_det); return ret; } static void mxc_mmc1_exit(struct device *dev, void *data) { gpio_free(gpio_det); gpio_free(gpio_wp); free_irq(IOMUX_TO_IRQ(MX31_PIN_GPIO1_1), data); } static const struct imxmmc_platform_data mmc_pdata __initconst = { .get_ro = mxc_mmc1_get_ro, .init = mxc_mmc1_init, .exit = mxc_mmc1_exit, }; /* Framebuffer support */ static const struct ipu_platform_data ipu_data __initconst = { .irq_base = MXC_IPU_IRQ_START, }; static const struct fb_videomode fb_modedb = { /* 640x480 TFT panel (IPS-056T) */ .name = "CRT-VGA", .refresh = 64, .xres = 640, .yres = 480, .pixclock = 30000, .left_margin = 200, .right_margin = 2, .upper_margin = 2, .lower_margin = 2, .hsync_len = 3, .vsync_len = 1, .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_OE_ACT_HIGH, .vmode = FB_VMODE_NONINTERLACED, .flag = 0, }; static struct mx3fb_platform_data fb_pdata __initdata = { .name = "CRT-VGA", .mode = &fb_modedb, .num_modes = 1, }; #define LCD_VCC_EN_GPIO (7) static void __init mx31lilly_init_fb(void) { if (gpio_request(LCD_VCC_EN_GPIO, "LCD enable") != 0) { printk(KERN_WARNING "unable to request LCD_VCC_EN pin.\n"); return; } imx31_add_ipu_core(&ipu_data); imx31_add_mx3_sdc_fb(&fb_pdata); gpio_direction_output(LCD_VCC_EN_GPIO, 1); } void __init mx31lilly_db_init(void) { mxc_iomux_setup_multiple_pins(lilly_db_board_pins, ARRAY_SIZE(lilly_db_board_pins), "development board pins"); imx31_add_imx_uart0(&uart_pdata); imx31_add_imx_uart1(&uart_pdata); imx31_add_imx_uart2(&uart_pdata); imx31_add_mxc_mmc(0, &mmc_pdata); mx31lilly_init_fb(); }
gpl-2.0
TeamRegular/android_kernel_samsung_codinatmo
arch/mips/loongson/lemote-2f/reset.c
8738
3568
/* Board-specific reboot/shutdown routines * * Copyright (c) 2009 Philippe Vachon <philippe@cowpig.ca> * * Copyright (C) 2009 Lemote Inc. * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/io.h> #include <linux/delay.h> #include <linux/types.h> #include <asm/bootinfo.h> #include <loongson.h> #include <cs5536/cs5536.h> #include "ec_kb3310b.h" static void reset_cpu(void) { /* * reset cpu to full speed, this is needed when enabling cpu frequency * scalling */ LOONGSON_CHIPCFG0 |= 0x7; } /* reset support for fuloong2f */ static void fl2f_reboot(void) { reset_cpu(); /* send a reset signal to south bridge. * * NOTE: if enable "Power Management" in kernel, rtl8169 will not reset * normally with this reset operation and it will not work in PMON, but * you can type halt command and then reboot, seems the hardware reset * logic not work normally. */ { u32 hi, lo; _rdmsr(DIVIL_MSR_REG(DIVIL_SOFT_RESET), &hi, &lo); lo |= 0x00000001; _wrmsr(DIVIL_MSR_REG(DIVIL_SOFT_RESET), hi, lo); } } static void fl2f_shutdown(void) { u32 hi, lo, val; int gpio_base; /* get gpio base */ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_GPIO), &hi, &lo); gpio_base = lo & 0xff00; /* make cs5536 gpio13 output enable */ val = inl(gpio_base + GPIOL_OUT_EN); val &= ~(1 << (16 + 13)); val |= (1 << 13); outl(val, gpio_base + GPIOL_OUT_EN); mmiowb(); /* make cs5536 gpio13 output low level voltage. 
*/ val = inl(gpio_base + GPIOL_OUT_VAL) & ~(1 << (13)); val |= (1 << (16 + 13)); outl(val, gpio_base + GPIOL_OUT_VAL); mmiowb(); } /* reset support for yeeloong2f and mengloong2f notebook */ void ml2f_reboot(void) { reset_cpu(); /* sending an reset signal to EC(embedded controller) */ ec_write(REG_RESET, BIT_RESET_ON); } #define yl2f89_reboot ml2f_reboot /* menglong(7inches) laptop has different shutdown logic from 8.9inches */ #define EC_SHUTDOWN_IO_PORT_HIGH 0xff2d #define EC_SHUTDOWN_IO_PORT_LOW 0xff2e #define EC_SHUTDOWN_IO_PORT_DATA 0xff2f #define REG_SHUTDOWN_HIGH 0xFC #define REG_SHUTDOWN_LOW 0x29 #define BIT_SHUTDOWN_ON (1 << 1) static void ml2f_shutdown(void) { u8 val; u64 i; outb(REG_SHUTDOWN_HIGH, EC_SHUTDOWN_IO_PORT_HIGH); outb(REG_SHUTDOWN_LOW, EC_SHUTDOWN_IO_PORT_LOW); mmiowb(); val = inb(EC_SHUTDOWN_IO_PORT_DATA); outb(val & (~BIT_SHUTDOWN_ON), EC_SHUTDOWN_IO_PORT_DATA); mmiowb(); /* need enough wait here... how many microseconds needs? */ for (i = 0; i < 0x10000; i++) delay(); outb(val | BIT_SHUTDOWN_ON, EC_SHUTDOWN_IO_PORT_DATA); mmiowb(); } static void yl2f89_shutdown(void) { /* cpu-gpio0 output low */ LOONGSON_GPIODATA &= ~0x00000001; /* cpu-gpio0 as output */ LOONGSON_GPIOIE &= ~0x00000001; } void mach_prepare_reboot(void) { switch (mips_machtype) { case MACH_LEMOTE_FL2F: case MACH_LEMOTE_NAS: case MACH_LEMOTE_LL2F: fl2f_reboot(); break; case MACH_LEMOTE_ML2F7: ml2f_reboot(); break; case MACH_LEMOTE_YL2F89: yl2f89_reboot(); break; default: break; } } void mach_prepare_shutdown(void) { switch (mips_machtype) { case MACH_LEMOTE_FL2F: case MACH_LEMOTE_NAS: case MACH_LEMOTE_LL2F: fl2f_shutdown(); break; case MACH_LEMOTE_ML2F7: ml2f_shutdown(); break; case MACH_LEMOTE_YL2F89: yl2f89_shutdown(); break; default: break; } }
gpl-2.0
fdroid/gp-peak-kernel
drivers/usb/storage/initializers.c
9250
3567
/* Special Initializers for certain USB Mass Storage devices * * Current development and maintenance by: * (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net) * * This driver is based on the 'USB Mass Storage Class' document. This * describes in detail the protocol used to communicate with such * devices. Clearly, the designers had SCSI and ATAPI commands in * mind when they created this document. The commands are all very * similar to commands in the SCSI-II and ATAPI specifications. * * It is important to note that in a number of cases this class * exhibits class-specific exemptions from the USB specification. * Notably the usage of NAK, STALL and ACK differs from the norm, in * that they are used to communicate wait, failed and OK on commands. * * Also, for certain devices, the interrupt endpoint is used to convey * status of a command. * * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more * information about this driver. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/errno.h> #include "usb.h" #include "initializers.h" #include "debug.h" #include "transport.h" /* This places the Shuttle/SCM USB<->SCSI bridge devices in multi-target * mode */ int usb_stor_euscsi_init(struct us_data *us) { int result; US_DEBUGP("Attempting to init eUSCSI bridge...\n"); us->iobuf[0] = 0x1; result = usb_stor_control_msg(us, us->send_ctrl_pipe, 0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR, 0x01, 0x0, us->iobuf, 0x1, 5000); US_DEBUGP("-- result is %d\n", result); return 0; } /* This function is required to activate all four slots on the UCR-61S2B * flash reader */ int usb_stor_ucr61s2b_init(struct us_data *us) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap*) us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap*) us->iobuf; int res; unsigned int partial; static char init_string[] = "\xec\x0a\x06\x00$PCCHIPS"; US_DEBUGP("Sending UCR-61S2B initialization packet...\n"); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->Tag = 0; bcb->DataTransferLength = cpu_to_le32(0); bcb->Flags = bcb->Lun = 0; bcb->Length = sizeof(init_string) - 1; memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, init_string, sizeof(init_string) - 1); res = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, &partial); if (res) return -EIO; US_DEBUGP("Getting status packet...\n"); res = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &partial); if (res) return -EIO; return 0; } /* This places the HUAWEI E220 devices in multi-port mode */ int usb_stor_huawei_e220_init(struct us_data *us) { int result; result = usb_stor_control_msg(us, us->send_ctrl_pipe, USB_REQ_SET_FEATURE, USB_TYPE_STANDARD | USB_RECIP_DEVICE, 0x01, 0x0, NULL, 0x0, 1000); US_DEBUGP("Huawei mode set result is %d\n", result); return 0; }
gpl-2.0
halaszk/halaszk-UNIVERSAL5420
drivers/usb/storage/initializers.c
9250
3567
/* Special Initializers for certain USB Mass Storage devices * * Current development and maintenance by: * (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net) * * This driver is based on the 'USB Mass Storage Class' document. This * describes in detail the protocol used to communicate with such * devices. Clearly, the designers had SCSI and ATAPI commands in * mind when they created this document. The commands are all very * similar to commands in the SCSI-II and ATAPI specifications. * * It is important to note that in a number of cases this class * exhibits class-specific exemptions from the USB specification. * Notably the usage of NAK, STALL and ACK differs from the norm, in * that they are used to communicate wait, failed and OK on commands. * * Also, for certain devices, the interrupt endpoint is used to convey * status of a command. * * Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more * information about this driver. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/errno.h> #include "usb.h" #include "initializers.h" #include "debug.h" #include "transport.h" /* This places the Shuttle/SCM USB<->SCSI bridge devices in multi-target * mode */ int usb_stor_euscsi_init(struct us_data *us) { int result; US_DEBUGP("Attempting to init eUSCSI bridge...\n"); us->iobuf[0] = 0x1; result = usb_stor_control_msg(us, us->send_ctrl_pipe, 0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR, 0x01, 0x0, us->iobuf, 0x1, 5000); US_DEBUGP("-- result is %d\n", result); return 0; } /* This function is required to activate all four slots on the UCR-61S2B * flash reader */ int usb_stor_ucr61s2b_init(struct us_data *us) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap*) us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap*) us->iobuf; int res; unsigned int partial; static char init_string[] = "\xec\x0a\x06\x00$PCCHIPS"; US_DEBUGP("Sending UCR-61S2B initialization packet...\n"); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->Tag = 0; bcb->DataTransferLength = cpu_to_le32(0); bcb->Flags = bcb->Lun = 0; bcb->Length = sizeof(init_string) - 1; memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, init_string, sizeof(init_string) - 1); res = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, &partial); if (res) return -EIO; US_DEBUGP("Getting status packet...\n"); res = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &partial); if (res) return -EIO; return 0; } /* This places the HUAWEI E220 devices in multi-port mode */ int usb_stor_huawei_e220_init(struct us_data *us) { int result; result = usb_stor_control_msg(us, us->send_ctrl_pipe, USB_REQ_SET_FEATURE, USB_TYPE_STANDARD | USB_RECIP_DEVICE, 0x01, 0x0, NULL, 0x0, 1000); US_DEBUGP("Huawei mode set result is %d\n", result); return 0; }
gpl-2.0
YUPlayGodDev/android_kernel_caf_msm8916_64
kernel/exec_domain.c
10530
4393
/* * Handling of different ABIs (personalities). * * We group personalities into execution domains which have their * own handlers for kernel entry points, signal mapping, etc... * * 2001-05-06 Complete rewrite, Christoph Hellwig (hch@infradead.org) */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/module.h> #include <linux/personality.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/syscalls.h> #include <linux/sysctl.h> #include <linux/types.h> #include <linux/fs_struct.h> static void default_handler(int, struct pt_regs *); static struct exec_domain *exec_domains = &default_exec_domain; static DEFINE_RWLOCK(exec_domains_lock); static unsigned long ident_map[32] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }; struct exec_domain default_exec_domain = { .name = "Linux", /* name */ .handler = default_handler, /* lcall7 causes a seg fault. */ .pers_low = 0, /* PER_LINUX personality. */ .pers_high = 0, /* PER_LINUX personality. */ .signal_map = ident_map, /* Identity map signals. */ .signal_invmap = ident_map, /* - both ways. 
*/ }; static void default_handler(int segment, struct pt_regs *regp) { set_personality(0); if (current_thread_info()->exec_domain->handler != default_handler) current_thread_info()->exec_domain->handler(segment, regp); else send_sig(SIGSEGV, current, 1); } static struct exec_domain * lookup_exec_domain(unsigned int personality) { unsigned int pers = personality(personality); struct exec_domain *ep; read_lock(&exec_domains_lock); for (ep = exec_domains; ep; ep = ep->next) { if (pers >= ep->pers_low && pers <= ep->pers_high) if (try_module_get(ep->module)) goto out; } #ifdef CONFIG_MODULES read_unlock(&exec_domains_lock); request_module("personality-%d", pers); read_lock(&exec_domains_lock); for (ep = exec_domains; ep; ep = ep->next) { if (pers >= ep->pers_low && pers <= ep->pers_high) if (try_module_get(ep->module)) goto out; } #endif ep = &default_exec_domain; out: read_unlock(&exec_domains_lock); return (ep); } int register_exec_domain(struct exec_domain *ep) { struct exec_domain *tmp; int err = -EBUSY; if (ep == NULL) return -EINVAL; if (ep->next != NULL) return -EBUSY; write_lock(&exec_domains_lock); for (tmp = exec_domains; tmp; tmp = tmp->next) { if (tmp == ep) goto out; } ep->next = exec_domains; exec_domains = ep; err = 0; out: write_unlock(&exec_domains_lock); return (err); } int unregister_exec_domain(struct exec_domain *ep) { struct exec_domain **epp; epp = &exec_domains; write_lock(&exec_domains_lock); for (epp = &exec_domains; *epp; epp = &(*epp)->next) { if (ep == *epp) goto unregister; } write_unlock(&exec_domains_lock); return -EINVAL; unregister: *epp = ep->next; ep->next = NULL; write_unlock(&exec_domains_lock); return 0; } int __set_personality(unsigned int personality) { struct exec_domain *oep = current_thread_info()->exec_domain; current_thread_info()->exec_domain = lookup_exec_domain(personality); current->personality = personality; module_put(oep->module); return 0; } #ifdef CONFIG_PROC_FS static int execdomains_proc_show(struct seq_file *m, 
void *v) { struct exec_domain *ep; read_lock(&exec_domains_lock); for (ep = exec_domains; ep; ep = ep->next) seq_printf(m, "%d-%d\t%-16s\t[%s]\n", ep->pers_low, ep->pers_high, ep->name, module_name(ep->module)); read_unlock(&exec_domains_lock); return 0; } static int execdomains_proc_open(struct inode *inode, struct file *file) { return single_open(file, execdomains_proc_show, NULL); } static const struct file_operations execdomains_proc_fops = { .open = execdomains_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_execdomains_init(void) { proc_create("execdomains", 0, NULL, &execdomains_proc_fops); return 0; } module_init(proc_execdomains_init); #endif SYSCALL_DEFINE1(personality, unsigned int, personality) { unsigned int old = current->personality; if (personality != 0xffffffff) set_personality(personality); return old; } EXPORT_SYMBOL(register_exec_domain); EXPORT_SYMBOL(unregister_exec_domain); EXPORT_SYMBOL(__set_personality);
gpl-2.0
sfjro/aufs3-linux
arch/sh/mm/ioremap_fixed.c
13090
2628
/* * Re-map IO memory to kernel address space so that we can access it. * * These functions should only be used when it is necessary to map a * physical address space into the kernel address space before ioremap() * can be used, e.g. early in boot before paging_init(). * * Copyright (C) 2009 Matt Fleming */ #include <linux/vmalloc.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/bootmem.h> #include <linux/proc_fs.h> #include <asm/fixmap.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/addrspace.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/mmu.h> #include <asm/mmu_context.h> struct ioremap_map { void __iomem *addr; unsigned long size; unsigned long fixmap_addr; }; static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS]; void __init ioremap_fixed_init(void) { struct ioremap_map *map; int i; for (i = 0; i < FIX_N_IOREMAPS; i++) { map = &ioremap_maps[i]; map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i); } } void __init __iomem * ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) { enum fixed_addresses idx0, idx; struct ioremap_map *map; unsigned int nrpages; unsigned long offset; int i, slot; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(phys_addr + size) - phys_addr; slot = -1; for (i = 0; i < FIX_N_IOREMAPS; i++) { map = &ioremap_maps[i]; if (!map->addr) { map->size = size; slot = i; break; } } if (slot < 0) return NULL; /* * Mappings have to fit in the FIX_IOREMAP area. */ nrpages = size >> PAGE_SHIFT; if (nrpages > FIX_N_IOREMAPS) return NULL; /* * Ok, go for it.. 
*/ idx0 = FIX_IOREMAP_BEGIN + slot; idx = idx0; while (nrpages > 0) { pgprot_val(prot) |= _PAGE_WIRED; __set_fixmap(idx, phys_addr, prot); phys_addr += PAGE_SIZE; idx++; --nrpages; } map->addr = (void __iomem *)(offset + map->fixmap_addr); return map->addr; } int iounmap_fixed(void __iomem *addr) { enum fixed_addresses idx; struct ioremap_map *map; unsigned int nrpages; int i, slot; slot = -1; for (i = 0; i < FIX_N_IOREMAPS; i++) { map = &ioremap_maps[i]; if (map->addr == addr) { slot = i; break; } } /* * If we don't match, it's not for us. */ if (slot < 0) return -EINVAL; nrpages = map->size >> PAGE_SHIFT; idx = FIX_IOREMAP_BEGIN + slot + nrpages - 1; while (nrpages > 0) { __clear_fixmap(idx, __pgprot(_PAGE_WIRED)); --idx; --nrpages; } map->size = 0; map->addr = NULL; return 0; }
gpl-2.0
pengdonglin137/linux-4.4_tiny4412
arch/mips/pci/fixup-jmr3927.c
13858
2617
/* * * BRIEF MODULE DESCRIPTION * Board specific pci fixups. * * Copyright 2001 MontaVista Software Inc. * Author: MontaVista Software, Inc. * ppopov@mvista.com or source@mvista.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/types.h> #include <asm/txx9/pci.h> #include <asm/txx9/jmr3927.h> int __init jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { unsigned char irq = pin; /* IRQ rotation (PICMG) */ irq--; /* 0-3 */ if (slot == TX3927_PCIC_IDSEL_AD_TO_SLOT(23)) { /* PCI CardSlot (IDSEL=A23, DevNu=12) */ /* PCIA => PCIC (IDSEL=A23) */ /* NOTE: JMR3927 JP1 must be set to OPEN */ irq = (irq + 2) % 4; } else if (slot == TX3927_PCIC_IDSEL_AD_TO_SLOT(22)) { /* PCI CardSlot (IDSEL=A22, DevNu=11) */ /* PCIA => PCIA (IDSEL=A22) */ /* NOTE: JMR3927 JP1 must be set to OPEN */ irq = (irq + 0) % 4; } else { /* PCI Backplane */ if (txx9_pci_option & TXX9_PCI_OPT_PICMG) irq = (irq + 33 - slot) % 4; else irq = (irq + 3 + slot) % 4; } irq++; /* 1-4 */ switch (irq) { case 1: irq = JMR3927_IRQ_IOC_PCIA; break; case 2: irq = JMR3927_IRQ_IOC_PCIB; break; case 3: irq = JMR3927_IRQ_IOC_PCIC; break; case 4: irq = JMR3927_IRQ_IOC_PCID; break; } /* Check OnBoard Ethernet (IDSEL=A24, DevNu=13) */ if (dev->bus->parent == NULL && slot == TX3927_PCIC_IDSEL_AD_TO_SLOT(24)) irq = JMR3927_IRQ_ETHER0; return irq; }
gpl-2.0
jameshilliard/m8whl-3.4.0-g278eae8
scripts/dtc/data.c
35
5098
/* * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation. 2005. * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA */ #include "dtc.h" void data_free(struct data d) { struct marker *m, *nm; m = d.markers; while (m) { nm = m->next; free(m->ref); free(m); m = nm; } if (d.val) free(d.val); } struct data data_grow_for(struct data d, int xlen) { struct data nd; int newsize; if (xlen == 0) return d; nd = d; newsize = xlen; while ((d.len + xlen) > newsize) newsize *= 2; nd.val = xrealloc(d.val, newsize); return nd; } struct data data_copy_mem(const char *mem, int len) { struct data d; d = data_grow_for(empty_data, len); d.len = len; memcpy(d.val, mem, len); return d; } struct data data_copy_escape_string(const char *s, int len) { int i = 0; struct data d; char *q; d = data_grow_for(empty_data, strlen(s)+1); q = d.val; while (i < len) { char c = s[i++]; if (c == '\\') c = get_escape_char(s, &i); q[d.len++] = c; } q[d.len++] = '\0'; return d; } struct data data_copy_file(FILE *f, size_t maxlen) { struct data d = empty_data; while (!feof(f) && (d.len < maxlen)) { size_t chunksize, ret; if (maxlen == -1) chunksize = 4096; else chunksize = maxlen - d.len; d = data_grow_for(d, chunksize); ret = fread(d.val + d.len, 1, chunksize, f); if (ferror(f)) die("Error reading file into data: %s", strerror(errno)); if (d.len + ret < d.len) 
die("Overflow reading file into data\n"); d.len += ret; } return d; } struct data data_append_data(struct data d, const void *p, int len) { d = data_grow_for(d, len); memcpy(d.val + d.len, p, len); d.len += len; return d; } struct data data_insert_at_marker(struct data d, struct marker *m, const void *p, int len) { d = data_grow_for(d, len); memmove(d.val + m->offset + len, d.val + m->offset, d.len - m->offset); memcpy(d.val + m->offset, p, len); d.len += len; m = m->next; for_each_marker(m) m->offset += len; return d; } static struct data data_append_markers(struct data d, struct marker *m) { struct marker **mp = &d.markers; while (*mp) mp = &((*mp)->next); *mp = m; return d; } struct data data_merge(struct data d1, struct data d2) { struct data d; struct marker *m2 = d2.markers; d = data_append_markers(data_append_data(d1, d2.val, d2.len), m2); for_each_marker(m2) m2->offset += d1.len; d2.markers = NULL; data_free(d2); return d; } struct data data_append_integer(struct data d, uint64_t value, int bits) { uint8_t value_8; uint16_t value_16; uint32_t value_32; uint64_t value_64; switch (bits) { case 8: value_8 = value; return data_append_data(d, &value_8, 1); case 16: value_16 = cpu_to_fdt16(value); return data_append_data(d, &value_16, 2); case 32: value_32 = cpu_to_fdt32(value); return data_append_data(d, &value_32, 4); case 64: value_64 = cpu_to_fdt64(value); return data_append_data(d, &value_64, 8); default: die("Invalid literal size (%d)\n", bits); } } struct data data_append_re(struct data d, const struct fdt_reserve_entry *re) { struct fdt_reserve_entry bere; bere.address = cpu_to_fdt64(re->address); bere.size = cpu_to_fdt64(re->size); return data_append_data(d, &bere, sizeof(bere)); } struct data data_append_cell(struct data d, cell_t word) { return data_append_integer(d, word, sizeof(word) * 8); } struct data data_append_addr(struct data d, uint64_t addr) { return data_append_integer(d, addr, sizeof(addr) * 8); } struct data data_append_byte(struct data d, 
uint8_t byte) { return data_append_data(d, &byte, 1); } struct data data_append_zeroes(struct data d, int len) { d = data_grow_for(d, len); memset(d.val + d.len, 0, len); d.len += len; return d; } struct data data_append_align(struct data d, int align) { int newlen = ALIGN(d.len, align); return data_append_zeroes(d, newlen - d.len); } struct data data_add_marker(struct data d, enum markertype type, char *ref) { struct marker *m; m = xmalloc(sizeof(*m)); m->offset = d.len; m->type = type; m->ref = ref; m->next = NULL; return data_append_markers(d, m); } int data_is_one_string(struct data d) { int i; int len = d.len; if (len == 0) return 0; for (i = 0; i < len-1; i++) if (d.val[i] == '\0') return 0; if (d.val[len-1] != '\0') return 0; return 1; }
gpl-2.0
gleb-cloudius/openjdk8-hotspot
src/share/vm/shark/sharkCompiler.cpp
35
11940
/* * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * Copyright 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
* */ #include "precompiled.hpp" #include "ci/ciEnv.hpp" #include "ci/ciMethod.hpp" #include "code/debugInfoRec.hpp" #include "code/dependencies.hpp" #include "code/exceptionHandlerTable.hpp" #include "code/oopRecorder.hpp" #include "compiler/abstractCompiler.hpp" #include "compiler/oopMap.hpp" #include "shark/llvmHeaders.hpp" #include "shark/sharkBuilder.hpp" #include "shark/sharkCodeBuffer.hpp" #include "shark/sharkCompiler.hpp" #include "shark/sharkContext.hpp" #include "shark/sharkEntry.hpp" #include "shark/sharkFunction.hpp" #include "shark/sharkMemoryManager.hpp" #include "shark/sharkNativeWrapper.hpp" #include "shark/shark_globals.hpp" #include "utilities/debug.hpp" #include <fnmatch.h> using namespace llvm; namespace { cl::opt<std::string> MCPU("mcpu"); cl::list<std::string> MAttrs("mattr", cl::CommaSeparated); } SharkCompiler::SharkCompiler() : AbstractCompiler() { // Create the lock to protect the memory manager and execution engine _execution_engine_lock = new Monitor(Mutex::leaf, "SharkExecutionEngineLock"); MutexLocker locker(execution_engine_lock()); // Make LLVM safe for multithreading if (!llvm_start_multithreaded()) fatal("llvm_start_multithreaded() failed"); // Initialize the native target InitializeNativeTarget(); // MCJIT require a native AsmPrinter InitializeNativeTargetAsmPrinter(); // Create the two contexts which we'll use _normal_context = new SharkContext("normal"); _native_context = new SharkContext("native"); // Create the memory manager _memory_manager = new SharkMemoryManager(); // Finetune LLVM for the current host CPU. 
StringMap<bool> Features; bool gotCpuFeatures = llvm::sys::getHostCPUFeatures(Features); std::string cpu("-mcpu=" + llvm::sys::getHostCPUName()); std::vector<const char*> args; args.push_back(""); // program name args.push_back(cpu.c_str()); std::string mattr("-mattr="); if(gotCpuFeatures){ for(StringMap<bool>::iterator I = Features.begin(), E = Features.end(); I != E; ++I){ if(I->second){ std::string attr(I->first()); mattr+="+"+attr+","; } } args.push_back(mattr.c_str()); } args.push_back(0); // terminator cl::ParseCommandLineOptions(args.size() - 1, (char **) &args[0]); // Create the JIT std::string ErrorMsg; EngineBuilder builder(_normal_context->module()); builder.setMCPU(MCPU); builder.setMAttrs(MAttrs); builder.setJITMemoryManager(memory_manager()); builder.setEngineKind(EngineKind::JIT); builder.setErrorStr(&ErrorMsg); if (! fnmatch(SharkOptimizationLevel, "None", 0)) { tty->print_cr("Shark optimization level set to: None"); builder.setOptLevel(llvm::CodeGenOpt::None); } else if (! fnmatch(SharkOptimizationLevel, "Less", 0)) { tty->print_cr("Shark optimization level set to: Less"); builder.setOptLevel(llvm::CodeGenOpt::Less); } else if (! 
fnmatch(SharkOptimizationLevel, "Aggressive", 0)) { tty->print_cr("Shark optimization level set to: Aggressive"); builder.setOptLevel(llvm::CodeGenOpt::Aggressive); } // else Default is selected by, well, default :-) _execution_engine = builder.create(); if (!execution_engine()) { if (!ErrorMsg.empty()) printf("Error while creating Shark JIT: %s\n",ErrorMsg.c_str()); else printf("Unknown error while creating Shark JIT\n"); exit(1); } execution_engine()->addModule(_native_context->module()); // All done set_state(initialized); } void SharkCompiler::initialize() { ShouldNotCallThis(); } void SharkCompiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci) { assert(is_initialized(), "should be"); ResourceMark rm; const char *name = methodname( target->holder()->name()->as_utf8(), target->name()->as_utf8()); // Do the typeflow analysis ciTypeFlow *flow; if (entry_bci == InvocationEntryBci) flow = target->get_flow_analysis(); else flow = target->get_osr_flow_analysis(entry_bci); if (flow->failing()) return; if (SharkPrintTypeflowOf != NULL) { if (!fnmatch(SharkPrintTypeflowOf, name, 0)) flow->print_on(tty); } // Create the recorders Arena arena; env->set_oop_recorder(new OopRecorder(&arena)); OopMapSet oopmaps; env->set_debug_info(new DebugInformationRecorder(env->oop_recorder())); env->debug_info()->set_oopmaps(&oopmaps); env->set_dependencies(new Dependencies(env)); // Create the code buffer and builder CodeBuffer hscb("Shark", 256 * K, 64 * K); hscb.initialize_oop_recorder(env->oop_recorder()); MacroAssembler *masm = new MacroAssembler(&hscb); SharkCodeBuffer cb(masm); SharkBuilder builder(&cb); // Emit the entry point SharkEntry *entry = (SharkEntry *) cb.malloc(sizeof(SharkEntry)); // Build the LLVM IR for the method Function *function = SharkFunction::build(env, &builder, flow, name); if (env->failing()) { return; } // Generate native code. 
It's unpleasant that we have to drop into // the VM to do this -- it blocks safepoints -- but I can't see any // other way to handle the locking. { ThreadInVMfromNative tiv(JavaThread::current()); generate_native_code(entry, function, name); } // Install the method into the VM CodeOffsets offsets; offsets.set_value(CodeOffsets::Deopt, 0); offsets.set_value(CodeOffsets::Exceptions, 0); offsets.set_value(CodeOffsets::Verified_Entry, target->is_static() ? 0 : wordSize); ExceptionHandlerTable handler_table; ImplicitExceptionTable inc_table; env->register_method(target, entry_bci, &offsets, 0, &hscb, 0, &oopmaps, &handler_table, &inc_table, this, env->comp_level(), false, false); } nmethod* SharkCompiler::generate_native_wrapper(MacroAssembler* masm, methodHandle target, int compile_id, BasicType* arg_types, BasicType return_type) { assert(is_initialized(), "should be"); ResourceMark rm; const char *name = methodname( target->klass_name()->as_utf8(), target->name()->as_utf8()); // Create the code buffer and builder SharkCodeBuffer cb(masm); SharkBuilder builder(&cb); // Emit the entry point SharkEntry *entry = (SharkEntry *) cb.malloc(sizeof(SharkEntry)); // Build the LLVM IR for the method SharkNativeWrapper *wrapper = SharkNativeWrapper::build( &builder, target, name, arg_types, return_type); // Generate native code generate_native_code(entry, wrapper->function(), name); // Return the nmethod for installation in the VM return nmethod::new_native_nmethod(target, compile_id, masm->code(), 0, 0, wrapper->frame_size(), wrapper->receiver_offset(), wrapper->lock_offset(), wrapper->oop_maps()); } void SharkCompiler::generate_native_code(SharkEntry* entry, Function* function, const char* name) { // Print the LLVM bitcode, if requested if (SharkPrintBitcodeOf != NULL) { if (!fnmatch(SharkPrintBitcodeOf, name, 0)) function->dump(); } if (SharkVerifyFunction != NULL) { if (!fnmatch(SharkVerifyFunction, name, 0)) { verifyFunction(*function); } } // Compile to native code address 
code = NULL; context()->add_function(function); { MutexLocker locker(execution_engine_lock()); free_queued_methods(); #ifndef NDEBUG #if SHARK_LLVM_VERSION <= 31 #define setCurrentDebugType SetCurrentDebugType #endif if (SharkPrintAsmOf != NULL) { if (!fnmatch(SharkPrintAsmOf, name, 0)) { llvm::setCurrentDebugType(X86_ONLY("x86-emitter") NOT_X86("jit")); llvm::DebugFlag = true; } else { llvm::setCurrentDebugType(""); llvm::DebugFlag = false; } } #ifdef setCurrentDebugType #undef setCurrentDebugType #endif #endif // !NDEBUG memory_manager()->set_entry_for_function(function, entry); code = (address) execution_engine()->getPointerToFunction(function); } assert(code != NULL, "code must be != NULL"); entry->set_entry_point(code); entry->set_function(function); entry->set_context(context()); address code_start = entry->code_start(); address code_limit = entry->code_limit(); // Register generated code for profiling, etc if (JvmtiExport::should_post_dynamic_code_generated()) JvmtiExport::post_dynamic_code_generated(name, code_start, code_limit); // Print debug information, if requested if (SharkTraceInstalls) { tty->print_cr( " [%p-%p): %s (%d bytes code)", code_start, code_limit, name, code_limit - code_start); } } void SharkCompiler::free_compiled_method(address code) { // This method may only be called when the VM is at a safepoint. // All _thread_in_vm threads will be waiting for the safepoint to // finish with the exception of the VM thread, so we can consider // ourself the owner of the execution engine lock even though we // can't actually acquire it at this time. 
assert(Thread::current()->is_Compiler_thread(), "must be called by compiler thread"); assert_locked_or_safepoint(CodeCache_lock); SharkEntry *entry = (SharkEntry *) code; entry->context()->push_to_free_queue(entry->function()); } void SharkCompiler::free_queued_methods() { // The free queue is protected by the execution engine lock assert(execution_engine_lock()->owned_by_self(), "should be"); while (true) { Function *function = context()->pop_from_free_queue(); if (function == NULL) break; execution_engine()->freeMachineCodeForFunction(function); function->eraseFromParent(); } } const char* SharkCompiler::methodname(const char* klass, const char* method) { char *buf = NEW_RESOURCE_ARRAY(char, strlen(klass) + 2 + strlen(method) + 1); char *dst = buf; for (const char *c = klass; *c; c++) { if (*c == '/') *(dst++) = '.'; else *(dst++) = *c; } *(dst++) = ':'; *(dst++) = ':'; for (const char *c = method; *c; c++) { *(dst++) = *c; } *(dst++) = '\0'; return buf; }
gpl-2.0
z3bu/AH4222
kernel/linux/drivers/isdn/hisax/l3ni1.c
35
80833
/* $Id: l3ni1.c,v 2.8.2.3 2004/01/13 14:31:25 keil Exp $ * * NI1 D-channel protocol * * Author Matt Henderson & Guy Ellis * Copyright by Traverse Technologies Pty Ltd, www.travers.com.au * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * 2000.6.6 Initial implementation of routines for US NI1 * Layer 3 protocol based on the EURO/DSS1 D-channel protocol * driver written by Karsten Keil et al. * NI-1 Hall of Fame - Thanks to.... * Ragnar Paulson - for some handy code fragments * Will Scales - beta tester extraordinaire * Brett Whittacre - beta tester and remote devel system in Vegas * */ #include "hisax.h" #include "isdnl3.h" #include "l3ni1.h" #include <linux/ctype.h> extern char *HiSax_getrev(const char *revision); const char *ni1_revision = "$Revision: 2.8.2.3 $"; #define EXT_BEARER_CAPS 1 #define MsgHead(ptr, cref, mty) \ *ptr++ = 0x8; \ if (cref == -1) { \ *ptr++ = 0x0; \ } else { \ *ptr++ = 0x1; \ *ptr++ = cref^0x80; \ } \ *ptr++ = mty /**********************************************/ /* get a new invoke id for remote operations. 
*/ /* Only a return value != 0 is valid */ /**********************************************/ static unsigned char new_invoke_id(struct PStack *p) { unsigned char retval; int i; i = 32; /* maximum search depth */ retval = p->prot.ni1.last_invoke_id + 1; /* try new id */ while ((i) && (p->prot.ni1.invoke_used[retval >> 3] == 0xFF)) { p->prot.ni1.last_invoke_id = (retval & 0xF8) + 8; i--; } if (i) { while (p->prot.ni1.invoke_used[retval >> 3] & (1 << (retval & 7))) retval++; } else retval = 0; p->prot.ni1.last_invoke_id = retval; p->prot.ni1.invoke_used[retval >> 3] |= (1 << (retval & 7)); return(retval); } /* new_invoke_id */ /*************************/ /* free a used invoke id */ /*************************/ static void free_invoke_id(struct PStack *p, unsigned char id) { if (!id) return; /* 0 = invalid value */ p->prot.ni1.invoke_used[id >> 3] &= ~(1 << (id & 7)); } /* free_invoke_id */ /**********************************************************/ /* create a new l3 process and fill in ni1 specific data */ /**********************************************************/ static struct l3_process *ni1_new_l3_process(struct PStack *st, int cr) { struct l3_process *proc; if (!(proc = new_l3_process(st, cr))) return(NULL); proc->prot.ni1.invoke_id = 0; proc->prot.ni1.remote_operation = 0; proc->prot.ni1.uus1_data[0] = '\0'; return(proc); } /* ni1_new_l3_process */ /************************************************/ /* free a l3 process and all ni1 specific data */ /************************************************/ static void ni1_release_l3_process(struct l3_process *p) { free_invoke_id(p->st,p->prot.ni1.invoke_id); release_l3_process(p); } /* ni1_release_l3_process */ /********************************************************/ /* search a process with invoke id id and dummy callref */ /********************************************************/ static struct l3_process * l3ni1_search_dummy_proc(struct PStack *st, int id) { struct l3_process *pc = st->l3.proc; /* start of 
processes */ if (!id) return(NULL); while (pc) { if ((pc->callref == -1) && (pc->prot.ni1.invoke_id == id)) return(pc); pc = pc->next; } return(NULL); } /* l3ni1_search_dummy_proc */ /*******************************************************************/ /* called when a facility message with a dummy callref is received */ /* and a return result is delivered. id specifies the invoke id. */ /*******************************************************************/ static void l3ni1_dummy_return_result(struct PStack *st, int id, u_char *p, u_char nlen) { isdn_ctrl ic; struct IsdnCardState *cs; struct l3_process *pc = NULL; if ((pc = l3ni1_search_dummy_proc(st, id))) { L3DelTimer(&pc->timer); /* remove timer */ cs = pc->st->l1.hardware; ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = NI1_STAT_INVOKE_RES; ic.parm.ni1_io.hl_id = pc->prot.ni1.invoke_id; ic.parm.ni1_io.ll_id = pc->prot.ni1.ll_id; ic.parm.ni1_io.proc = pc->prot.ni1.proc; ic.parm.ni1_io.timeout= 0; ic.parm.ni1_io.datalen = nlen; ic.parm.ni1_io.data = p; free_invoke_id(pc->st, pc->prot.ni1.invoke_id); pc->prot.ni1.invoke_id = 0; /* reset id */ cs->iif.statcallb(&ic); ni1_release_l3_process(pc); } else l3_debug(st, "dummy return result id=0x%x result len=%d",id,nlen); } /* l3ni1_dummy_return_result */ /*******************************************************************/ /* called when a facility message with a dummy callref is received */ /* and a return error is delivered. id specifies the invoke id. 
*/ /*******************************************************************/ static void l3ni1_dummy_error_return(struct PStack *st, int id, ulong error) { isdn_ctrl ic; struct IsdnCardState *cs; struct l3_process *pc = NULL; if ((pc = l3ni1_search_dummy_proc(st, id))) { L3DelTimer(&pc->timer); /* remove timer */ cs = pc->st->l1.hardware; ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = NI1_STAT_INVOKE_ERR; ic.parm.ni1_io.hl_id = pc->prot.ni1.invoke_id; ic.parm.ni1_io.ll_id = pc->prot.ni1.ll_id; ic.parm.ni1_io.proc = pc->prot.ni1.proc; ic.parm.ni1_io.timeout= error; ic.parm.ni1_io.datalen = 0; ic.parm.ni1_io.data = NULL; free_invoke_id(pc->st, pc->prot.ni1.invoke_id); pc->prot.ni1.invoke_id = 0; /* reset id */ cs->iif.statcallb(&ic); ni1_release_l3_process(pc); } else l3_debug(st, "dummy return error id=0x%x error=0x%lx",id,error); } /* l3ni1_error_return */ /*******************************************************************/ /* called when a facility message with a dummy callref is received */ /* and a invoke is delivered. id specifies the invoke id. */ /*******************************************************************/ static void l3ni1_dummy_invoke(struct PStack *st, int cr, int id, int ident, u_char *p, u_char nlen) { isdn_ctrl ic; struct IsdnCardState *cs; l3_debug(st, "dummy invoke %s id=0x%x ident=0x%x datalen=%d", (cr == -1) ? 
"local" : "broadcast",id,ident,nlen); if (cr >= -1) return; /* ignore local data */ cs = st->l1.hardware; ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = NI1_STAT_INVOKE_BRD; ic.parm.ni1_io.hl_id = id; ic.parm.ni1_io.ll_id = 0; ic.parm.ni1_io.proc = ident; ic.parm.ni1_io.timeout= 0; ic.parm.ni1_io.datalen = nlen; ic.parm.ni1_io.data = p; cs->iif.statcallb(&ic); } /* l3ni1_dummy_invoke */ static void l3ni1_parse_facility(struct PStack *st, struct l3_process *pc, int cr, u_char * p) { int qd_len = 0; unsigned char nlen = 0, ilen, cp_tag; int ident, id; ulong err_ret; if (pc) st = pc->st; /* valid Stack */ else if ((!st) || (cr >= 0)) return; /* neither pc nor st specified */ p++; qd_len = *p++; if (qd_len == 0) { l3_debug(st, "qd_len == 0"); return; } if ((*p & 0x1F) != 0x11) { /* Service discriminator, supplementary service */ l3_debug(st, "supplementary service != 0x11"); return; } while (qd_len > 0 && !(*p & 0x80)) { /* extension ? */ p++; qd_len--; } if (qd_len < 2) { l3_debug(st, "qd_len < 2"); return; } p++; qd_len--; if ((*p & 0xE0) != 0xA0) { /* class and form */ l3_debug(st, "class and form != 0xA0"); return; } cp_tag = *p & 0x1F; /* remember tag value */ p++; qd_len--; if (qd_len < 1) { l3_debug(st, "qd_len < 1"); return; } if (*p & 0x80) { /* length format indefinite or limited */ nlen = *p++ & 0x7F; /* number of len bytes or indefinite */ if ((qd_len-- < ((!nlen) ? 
3 : (1 + nlen))) || (nlen > 1)) { l3_debug(st, "length format error or not implemented"); return; } if (nlen == 1) { nlen = *p++; /* complete length */ qd_len--; } else { qd_len -= 2; /* trailing null bytes */ if ((*(p+qd_len)) || (*(p+qd_len+1))) { l3_debug(st,"length format indefinite error"); return; } nlen = qd_len; } } else { nlen = *p++; qd_len--; } if (qd_len < nlen) { l3_debug(st, "qd_len < nlen"); return; } qd_len -= nlen; if (nlen < 2) { l3_debug(st, "nlen < 2"); return; } if (*p != 0x02) { /* invoke identifier tag */ l3_debug(st, "invoke identifier tag !=0x02"); return; } p++; nlen--; if (*p & 0x80) { /* length format */ l3_debug(st, "invoke id length format 2"); return; } ilen = *p++; nlen--; if (ilen > nlen || ilen == 0) { l3_debug(st, "ilen > nlen || ilen == 0"); return; } nlen -= ilen; id = 0; while (ilen > 0) { id = (id << 8) | (*p++ & 0xFF); /* invoke identifier */ ilen--; } switch (cp_tag) { /* component tag */ case 1: /* invoke */ if (nlen < 2) { l3_debug(st, "nlen < 2 22"); return; } if (*p != 0x02) { /* operation value */ l3_debug(st, "operation value !=0x02"); return; } p++; nlen--; ilen = *p++; nlen--; if (ilen > nlen || ilen == 0) { l3_debug(st, "ilen > nlen || ilen == 0 22"); return; } nlen -= ilen; ident = 0; while (ilen > 0) { ident = (ident << 8) | (*p++ & 0xFF); ilen--; } if (!pc) { l3ni1_dummy_invoke(st, cr, id, ident, p, nlen); return; } l3_debug(st, "invoke break"); break; case 2: /* return result */ /* if no process available handle separately */ if (!pc) { if (cr == -1) l3ni1_dummy_return_result(st, id, p, nlen); return; } if ((pc->prot.ni1.invoke_id) && (pc->prot.ni1.invoke_id == id)) { /* Diversion successful */ free_invoke_id(st,pc->prot.ni1.invoke_id); pc->prot.ni1.remote_result = 0; /* success */ pc->prot.ni1.invoke_id = 0; pc->redir_result = pc->prot.ni1.remote_result; st->l3.l3l4(st, CC_REDIR | INDICATION, pc); } /* Diversion successful */ else l3_debug(st,"return error unknown identifier"); break; case 3: /* return error */ 
			/* parse the ROSE return-error component: error tag,
			   length, then up to 4 bytes of error value */
			err_ret = 0;
			if (nlen < 2)
			{
				l3_debug(st, "return error nlen < 2");
				return;
			}
			if (*p != 0x02)
			{ /* result tag */
				l3_debug(st, "invoke error tag !=0x02");
				return;
			}
			p++;
			nlen--;
			if (*p > 4)
			{ /* length format */
				l3_debug(st, "invoke return errlen > 4 ");
				return;
			}
			ilen = *p++;
			nlen--;
			if (ilen > nlen || ilen == 0)
			{
				l3_debug(st, "error return ilen > nlen || ilen == 0");
				return;
			}
			nlen -= ilen;
			while (ilen > 0)
			{
				err_ret = (err_ret << 8) | (*p++ & 0xFF); /* error value */
				ilen--;
			}
			/* if no process available handle separately */
			if (!pc)
			{
				if (cr == -1)
					l3ni1_dummy_error_return(st, id, err_ret);
				return;
			}
			if ((pc->prot.ni1.invoke_id) && (pc->prot.ni1.invoke_id == id))
			{ /* Deflection error */
				free_invoke_id(st,pc->prot.ni1.invoke_id);
				pc->prot.ni1.remote_result = err_ret; /* result */
				pc->prot.ni1.invoke_id = 0;
				pc->redir_result = pc->prot.ni1.remote_result;
				st->l3.l3l4(st, CC_REDIR | INDICATION, pc);
			} /* Deflection error */
			else
				l3_debug(st,"return result unknown identifier");
			break;
		default:
			l3_debug(st, "facility default break tag=0x%02x",cp_tag);
			break;
	}
}

/* send a bare layer-3 message (callref + message type only) */
static void
l3ni1_message(struct l3_process *pc, u_char mt)
{
	struct sk_buff *skb;
	u_char *p;

	if (!(skb = l3_alloc_skb(4)))
		return;
	p = skb_put(skb, 4);
	MsgHead(p, pc->callref, mt);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
}

static void
l3ni1_message_plus_chid(struct l3_process *pc, u_char mt)
/* sends an l3 messages plus channel id - added GE 05/09/00 */
{
	struct sk_buff *skb;
	u_char tmp[16];
	u_char *p = tmp;
	u_char chid;

	/* 0x88: channel id IE coding for the BRI B-channel in use */
	chid = (u_char)(pc->para.bchannel & 0x03) | 0x88;
	MsgHead(p, pc->callref, mt);
	*p++ = IE_CHANNEL_ID;
	*p++ = 0x01;
	*p++ = chid;

	if (!(skb = l3_alloc_skb(7)))
		return;
	memcpy(skb_put(skb, 7), tmp, 7);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
}

/* send a layer-3 message carrying a single cause IE */
static void
l3ni1_message_cause(struct l3_process *pc, u_char mt, u_char cause)
{
	struct sk_buff *skb;
	u_char tmp[16];
	u_char *p = tmp;
	int l;

	MsgHead(p, pc->callref, mt);
	*p++ = IE_CAUSE;
	*p++ = 0x2;
	*p++ = 0x80; /* location: user */
	*p++ = cause | 0x80;

	l = p - tmp;
	if
(!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3ni1_status_send(struct l3_process *pc, u_char pr, void *arg) { u_char tmp[16]; u_char *p = tmp; int l; struct sk_buff *skb; MsgHead(p, pc->callref, MT_STATUS); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = pc->para.cause | 0x80; *p++ = IE_CALL_STATE; *p++ = 0x1; *p++ = pc->state & 0x3f; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3ni1_msg_without_setup(struct l3_process *pc, u_char pr, void *arg) { /* This routine is called if here was no SETUP made (checks in ni1up and in * l3ni1_setup) and a RELEASE_COMPLETE have to be sent with an error code * MT_STATUS_ENQUIRE in the NULL state is handled too */ u_char tmp[16]; u_char *p = tmp; int l; struct sk_buff *skb; switch (pc->para.cause) { case 81: /* invalid callreference */ case 88: /* incomp destination */ case 96: /* mandory IE missing */ case 100: /* invalid IE contents */ case 101: /* incompatible Callstate */ MsgHead(p, pc->callref, MT_RELEASE_COMPLETE); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = pc->para.cause | 0x80; break; default: printk(KERN_ERR "HiSax l3ni1_msg_without_setup wrong cause %d\n", pc->para.cause); return; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); ni1_release_l3_process(pc); } static int ie_ALERTING[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, IE_HLC, IE_USER_USER, -1}; static int ie_CALL_PROCEEDING[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_HLC, -1}; static int ie_CONNECT[] = {IE_BEARER, IE_CHANNEL_ID | IE_MANDATORY_1, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_DATE, IE_SIGNAL, IE_CONNECT_PN, IE_CONNECT_SUB, IE_LLC, IE_HLC, IE_USER_USER, -1}; static int ie_CONNECT_ACKNOWLEDGE[] = 
{IE_CHANNEL_ID, IE_DISPLAY, IE_SIGNAL, -1}; static int ie_DISCONNECT[] = {IE_CAUSE | IE_MANDATORY, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1}; static int ie_INFORMATION[] = {IE_COMPLETE, IE_DISPLAY, IE_KEYPAD, IE_SIGNAL, IE_CALLED_PN, -1}; static int ie_NOTIFY[] = {IE_BEARER, IE_NOTIFY | IE_MANDATORY, IE_DISPLAY, -1}; static int ie_PROGRESS[] = {IE_BEARER, IE_CAUSE, IE_FACILITY, IE_PROGRESS | IE_MANDATORY, IE_DISPLAY, IE_HLC, IE_USER_USER, -1}; static int ie_RELEASE[] = {IE_CAUSE | IE_MANDATORY_1, IE_FACILITY, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1}; /* a RELEASE_COMPLETE with errors don't require special actions static int ie_RELEASE_COMPLETE[] = {IE_CAUSE | IE_MANDATORY_1, IE_DISPLAY, IE_SIGNAL, IE_USER_USER, -1}; */ static int ie_RESUME_ACKNOWLEDGE[] = {IE_CHANNEL_ID| IE_MANDATORY, IE_FACILITY, IE_DISPLAY, -1}; static int ie_RESUME_REJECT[] = {IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1}; static int ie_SETUP[] = {IE_COMPLETE, IE_BEARER | IE_MANDATORY, IE_CHANNEL_ID| IE_MANDATORY, IE_FACILITY, IE_PROGRESS, IE_NET_FAC, IE_DISPLAY, IE_KEYPAD, IE_SIGNAL, IE_CALLING_PN, IE_CALLING_SUB, IE_CALLED_PN, IE_CALLED_SUB, IE_REDIR_NR, IE_LLC, IE_HLC, IE_USER_USER, -1}; static int ie_SETUP_ACKNOWLEDGE[] = {IE_CHANNEL_ID | IE_MANDATORY, IE_FACILITY, IE_PROGRESS, IE_DISPLAY, IE_SIGNAL, -1}; static int ie_STATUS[] = {IE_CAUSE | IE_MANDATORY, IE_CALL_STATE | IE_MANDATORY, IE_DISPLAY, -1}; static int ie_STATUS_ENQUIRY[] = {IE_DISPLAY, -1}; static int ie_SUSPEND_ACKNOWLEDGE[] = {IE_DISPLAY, IE_FACILITY, -1}; static int ie_SUSPEND_REJECT[] = {IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1}; /* not used * static int ie_CONGESTION_CONTROL[] = {IE_CONGESTION | IE_MANDATORY, * IE_CAUSE | IE_MANDATORY, IE_DISPLAY, -1}; * static int ie_USER_INFORMATION[] = {IE_MORE_DATA, IE_USER_USER | IE_MANDATORY, -1}; * static int ie_RESTART[] = {IE_CHANNEL_ID, IE_DISPLAY, IE_RESTART_IND | * IE_MANDATORY, -1}; */ static int ie_FACILITY[] = {IE_FACILITY | IE_MANDATORY, IE_DISPLAY, -1}; 
static int comp_required[] = {1,2,3,5,6,7,9,10,11,14,15,-1}; static int l3_valid_states[] = {0,1,2,3,4,6,7,8,9,10,11,12,15,17,19,25,-1}; struct ie_len { int ie; int len; }; static struct ie_len max_ie_len[] = { {IE_SEGMENT, 4}, {IE_BEARER, 12}, {IE_CAUSE, 32}, {IE_CALL_ID, 10}, {IE_CALL_STATE, 3}, {IE_CHANNEL_ID, 34}, {IE_FACILITY, 255}, {IE_PROGRESS, 4}, {IE_NET_FAC, 255}, {IE_NOTIFY, 3}, {IE_DISPLAY, 82}, {IE_DATE, 8}, {IE_KEYPAD, 34}, {IE_SIGNAL, 3}, {IE_INFORATE, 6}, {IE_E2E_TDELAY, 11}, {IE_TDELAY_SEL, 5}, {IE_PACK_BINPARA, 3}, {IE_PACK_WINSIZE, 4}, {IE_PACK_SIZE, 4}, {IE_CUG, 7}, {IE_REV_CHARGE, 3}, {IE_CALLING_PN, 24}, {IE_CALLING_SUB, 23}, {IE_CALLED_PN, 24}, {IE_CALLED_SUB, 23}, {IE_REDIR_NR, 255}, {IE_TRANS_SEL, 255}, {IE_RESTART_IND, 3}, {IE_LLC, 18}, {IE_HLC, 5}, {IE_USER_USER, 131}, {-1,0}, }; static int getmax_ie_len(u_char ie) { int i = 0; while (max_ie_len[i].ie != -1) { if (max_ie_len[i].ie == ie) return(max_ie_len[i].len); i++; } return(255); } static int ie_in_set(struct l3_process *pc, u_char ie, int *checklist) { int ret = 1; while (*checklist != -1) { if ((*checklist & 0xff) == ie) { if (ie & 0x80) return(-ret); else return(ret); } ret++; checklist++; } return(0); } static int check_infoelements(struct l3_process *pc, struct sk_buff *skb, int *checklist) { int *cl = checklist; u_char mt; u_char *p, ie; int l, newpos, oldpos; int err_seq = 0, err_len = 0, err_compr = 0, err_ureg = 0; u_char codeset = 0; u_char old_codeset = 0; u_char codelock = 1; p = skb->data; /* skip cr */ p++; l = (*p++) & 0xf; p += l; mt = *p++; oldpos = 0; while ((p - skb->data) < skb->len) { if ((*p & 0xf0) == 0x90) { /* shift codeset */ old_codeset = codeset; codeset = *p & 7; if (*p & 0x08) codelock = 0; else codelock = 1; if (pc->debug & L3_DEB_CHECK) l3_debug(pc->st, "check IE shift%scodeset %d->%d", codelock ? 
" locking ": " ", old_codeset, codeset); p++; continue; } if (!codeset) { /* only codeset 0 */ if ((newpos = ie_in_set(pc, *p, cl))) { if (newpos > 0) { if (newpos < oldpos) err_seq++; else oldpos = newpos; } } else { if (ie_in_set(pc, *p, comp_required)) err_compr++; else err_ureg++; } } ie = *p++; if (ie & 0x80) { l = 1; } else { l = *p++; p += l; l += 2; } if (!codeset && (l > getmax_ie_len(ie))) err_len++; if (!codelock) { if (pc->debug & L3_DEB_CHECK) l3_debug(pc->st, "check IE shift back codeset %d->%d", codeset, old_codeset); codeset = old_codeset; codelock = 1; } } if (err_compr | err_ureg | err_len | err_seq) { if (pc->debug & L3_DEB_CHECK) l3_debug(pc->st, "check IE MT(%x) %d/%d/%d/%d", mt, err_compr, err_ureg, err_len, err_seq); if (err_compr) return(ERR_IE_COMPREHENSION); if (err_ureg) return(ERR_IE_UNRECOGNIZED); if (err_len) return(ERR_IE_LENGTH); if (err_seq) return(ERR_IE_SEQUENCE); } return(0); } /* verify if a message type exists and contain no IE error */ static int l3ni1_check_messagetype_validity(struct l3_process *pc, int mt, void *arg) { switch (mt) { case MT_ALERTING: case MT_CALL_PROCEEDING: case MT_CONNECT: case MT_CONNECT_ACKNOWLEDGE: case MT_DISCONNECT: case MT_INFORMATION: case MT_FACILITY: case MT_NOTIFY: case MT_PROGRESS: case MT_RELEASE: case MT_RELEASE_COMPLETE: case MT_SETUP: case MT_SETUP_ACKNOWLEDGE: case MT_RESUME_ACKNOWLEDGE: case MT_RESUME_REJECT: case MT_SUSPEND_ACKNOWLEDGE: case MT_SUSPEND_REJECT: case MT_USER_INFORMATION: case MT_RESTART: case MT_RESTART_ACKNOWLEDGE: case MT_CONGESTION_CONTROL: case MT_STATUS: case MT_STATUS_ENQUIRY: if (pc->debug & L3_DEB_CHECK) l3_debug(pc->st, "l3ni1_check_messagetype_validity mt(%x) OK", mt); break; case MT_RESUME: /* RESUME only in user->net */ case MT_SUSPEND: /* SUSPEND only in user->net */ default: if (pc->debug & (L3_DEB_CHECK | L3_DEB_WARN)) l3_debug(pc->st, "l3ni1_check_messagetype_validity mt(%x) fail", mt); pc->para.cause = 97; l3ni1_status_send(pc, 0, NULL); return(1); } 
	return(0);
}

/* map check_infoelements() error codes onto Q.931 causes and,
   where required, answer with a STATUS message */
static void
l3ni1_std_ie_err(struct l3_process *pc, int ret)
{
	if (pc->debug & L3_DEB_CHECK)
		l3_debug(pc->st, "check_infoelements ret %d", ret);
	switch(ret)
	{
		case 0:
			break;
		case ERR_IE_COMPREHENSION:
			pc->para.cause = 96; /* mandatory IE missing */
			l3ni1_status_send(pc, 0, NULL);
			break;
		case ERR_IE_UNRECOGNIZED:
			pc->para.cause = 99; /* IE not implemented */
			l3ni1_status_send(pc, 0, NULL);
			break;
		case ERR_IE_LENGTH:
			pc->para.cause = 100; /* invalid IE contents */
			l3ni1_status_send(pc, 0, NULL);
			break;
		case ERR_IE_SEQUENCE:
		default:
			break;
	}
}

/* extract the B-channel number from the channel id IE of skb;
   returns 0..3 on success, -1 if absent, -2/-3 on coding errors */
static int
l3ni1_get_channel_id(struct l3_process *pc, struct sk_buff *skb)
{
	u_char *p;

	p = skb->data;
	if ((p = findie(p, skb->len, IE_CHANNEL_ID, 0)))
	{
		p++;
		if (*p != 1)
		{ /* len for BRI = 1 */
			if (pc->debug & L3_DEB_WARN)
				l3_debug(pc->st, "wrong chid len %d", *p);
			return (-2);
		}
		p++;
		if (*p & 0x60)
		{ /* only base rate interface */
			if (pc->debug & L3_DEB_WARN)
				l3_debug(pc->st, "wrong chid %x", *p);
			return (-3);
		}
		return(*p & 0x3);
	}
	else
		return(-1);
}

/* parse the cause IE of skb into pc->para.{cause,loc,diag};
   returns 0 on success, -1 if no cause IE present, >0 on coding errors
   (cause defaults to 31 if nothing valid is found) */
static int
l3ni1_get_cause(struct l3_process *pc, struct sk_buff *skb)
{
	u_char l, i=0;
	u_char *p;

	p = skb->data;
	pc->para.cause = 31;
	pc->para.loc = 0;
	if ((p = findie(p, skb->len, IE_CAUSE, 0)))
	{
		p++;
		l = *p++;
		if (l>30)
			return(1);
		if (l)
		{
			pc->para.loc = *p++;
			l--;
		}
		else
		{
			return(2);
		}
		if (l && !(pc->para.loc & 0x80))
		{
			l--;
			p++; /* skip recommendation */
		}
		if (l)
		{
			pc->para.cause = *p++;
			l--;
			if (!(pc->para.cause & 0x80))
				return(3);
		}
		else
			return(4);
		/* keep up to 6 diagnostic bytes */
		while (l && (i<6))
		{
			pc->para.diag[i++] = *p++;
			l--;
		}
	}
	else
		return(-1);
	return(0);
}

/* send message mt, appending pending user-user data (uus1_data)
   as an IA5 user-user IE if present; the buffer is consumed */
static void
l3ni1_msg_with_uus(struct l3_process *pc, u_char cmd)
{
	struct sk_buff *skb;
	u_char tmp[16+40];
	u_char *p = tmp;
	int l;

	MsgHead(p, pc->callref, cmd);

	if (pc->prot.ni1.uus1_data[0])
	{
		*p++ = IE_USER_USER; /* UUS info element */
		*p++ = strlen(pc->prot.ni1.uus1_data) + 1;
		*p++ = 0x04; /* IA5 chars */
		strcpy(p,pc->prot.ni1.uus1_data);
		p += strlen(pc->prot.ni1.uus1_data);
		pc->prot.ni1.uus1_data[0] = '\0'; /* consumed */
	}

	l = p - tmp;
	if (!(skb = l3_alloc_skb(l)))
		return;
	memcpy(skb_put(skb, l), tmp, l);
l3_msg(pc->st, DL_DATA | REQUEST, skb); } /* l3ni1_msg_with_uus */ static void l3ni1_release_req(struct l3_process *pc, u_char pr, void *arg) { StopAllL3Timer(pc); newl3state(pc, 19); if (!pc->prot.ni1.uus1_data[0]) l3ni1_message(pc, MT_RELEASE); else l3ni1_msg_with_uus(pc, MT_RELEASE); L3AddTimer(&pc->timer, T308, CC_T308_1); } static void l3ni1_release_cmpl(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; if ((ret = l3ni1_get_cause(pc, skb))>0) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "RELCMPL get_cause ret(%d)",ret); } else if (ret < 0) pc->para.cause = NO_CAUSE; StopAllL3Timer(pc); newl3state(pc, 0); pc->st->l3.l3l4(pc->st, CC_RELEASE | CONFIRM, pc); ni1_release_l3_process(pc); } #if EXT_BEARER_CAPS static u_char * EncodeASyncParams(u_char * p, u_char si2) { // 7c 06 88 90 21 42 00 bb p[0] = 0; p[1] = 0x40; // Intermediate rate: 16 kbit/s jj 2000.02.19 p[2] = 0x80; if (si2 & 32) // 7 data bits p[2] += 16; else // 8 data bits p[2] += 24; if (si2 & 16) // 2 stop bits p[2] += 96; else // 1 stop bit p[2] += 32; if (si2 & 8) // even parity p[2] += 2; else // no parity p[2] += 3; switch (si2 & 0x07) { case 0: p[0] = 66; // 1200 bit/s break; case 1: p[0] = 88; // 1200/75 bit/s break; case 2: p[0] = 87; // 75/1200 bit/s break; case 3: p[0] = 67; // 2400 bit/s break; case 4: p[0] = 69; // 4800 bit/s break; case 5: p[0] = 72; // 9600 bit/s break; case 6: p[0] = 73; // 14400 bit/s break; case 7: p[0] = 75; // 19200 bit/s break; } return p + 3; } static u_char EncodeSyncParams(u_char si2, u_char ai) { switch (si2) { case 0: return ai + 2; // 1200 bit/s case 1: return ai + 24; // 1200/75 bit/s case 2: return ai + 23; // 75/1200 bit/s case 3: return ai + 3; // 2400 bit/s case 4: return ai + 5; // 4800 bit/s case 5: return ai + 8; // 9600 bit/s case 6: return ai + 9; // 14400 bit/s case 7: return ai + 11; // 19200 bit/s case 8: return ai + 14; // 48000 bit/s case 9: return ai + 15; // 56000 bit/s case 15: return ai + 40; // negotiate 
bit/s default: break; } return ai; } static u_char DecodeASyncParams(u_char si2, u_char * p) { u_char info; switch (p[5]) { case 66: // 1200 bit/s break; // si2 don't change case 88: // 1200/75 bit/s si2 += 1; break; case 87: // 75/1200 bit/s si2 += 2; break; case 67: // 2400 bit/s si2 += 3; break; case 69: // 4800 bit/s si2 += 4; break; case 72: // 9600 bit/s si2 += 5; break; case 73: // 14400 bit/s si2 += 6; break; case 75: // 19200 bit/s si2 += 7; break; } info = p[7] & 0x7f; if ((info & 16) && (!(info & 8))) // 7 data bits si2 += 32; // else 8 data bits if ((info & 96) == 96) // 2 stop bits si2 += 16; // else 1 stop bit if ((info & 2) && (!(info & 1))) // even parity si2 += 8; // else no parity return si2; } static u_char DecodeSyncParams(u_char si2, u_char info) { info &= 0x7f; switch (info) { case 40: // bit/s negotiation failed ai := 165 not 175! return si2 + 15; case 15: // 56000 bit/s failed, ai := 0 not 169 ! return si2 + 9; case 14: // 48000 bit/s return si2 + 8; case 11: // 19200 bit/s return si2 + 7; case 9: // 14400 bit/s return si2 + 6; case 8: // 9600 bit/s return si2 + 5; case 5: // 4800 bit/s return si2 + 4; case 3: // 2400 bit/s return si2 + 3; case 23: // 75/1200 bit/s return si2 + 2; case 24: // 1200/75 bit/s return si2 + 1; default: // 1200 bit/s return si2; } } static u_char DecodeSI2(struct sk_buff *skb) { u_char *p; //, *pend=skb->data + skb->len; if ((p = findie(skb->data, skb->len, 0x7c, 0))) { switch (p[4] & 0x0f) { case 0x01: if (p[1] == 0x04) // sync. Bitratenadaption return DecodeSyncParams(160, p[5]); // V.110/X.30 else if (p[1] == 0x06) // async. Bitratenadaption return DecodeASyncParams(192, p); // V.110/X.30 break; case 0x08: // if (p[5] == 0x02) // sync. 
Bitratenadaption if (p[1] > 3) return DecodeSyncParams(176, p[5]); // V.120 break; } } return 0; } #endif static void l3ni1_setup_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb; u_char tmp[128]; u_char *p = tmp; u_char *teln; u_char *sub; u_char *sp; int l; MsgHead(p, pc->callref, MT_SETUP); teln = pc->para.setup.phone; *p++ = 0xa1; /* complete indicator */ /* * Set Bearer Capability, Map info from 1TR6-convention to NI1 */ switch (pc->para.setup.si1) { case 1: /* Telephony */ *p++ = IE_BEARER; *p++ = 0x3; /* Length */ *p++ = 0x90; /* 3.1khz Audio */ *p++ = 0x90; /* Circuit-Mode 64kbps */ *p++ = 0xa2; /* u-Law Audio */ break; case 5: /* Datatransmission 64k, BTX */ case 7: /* Datatransmission 64k */ default: *p++ = IE_BEARER; *p++ = 0x2; /* Length */ *p++ = 0x88; /* Coding Std. CCITT, unrestr. dig. Inform. */ *p++ = 0x90; /* Circuit-Mode 64kbps */ break; } sub = NULL; sp = teln; while (*sp) { if ('.' == *sp) { sub = sp; *sp = 0; } else sp++; } *p++ = IE_KEYPAD; *p++ = strlen(teln); while (*teln) *p++ = (*teln++) & 0x7F; if (sub) *sub++ = '.'; #if EXT_BEARER_CAPS if ((pc->para.setup.si2 >= 160) && (pc->para.setup.si2 <= 175)) { // sync. Bitratenadaption, V.110/X.30 *p++ = IE_LLC; *p++ = 0x04; *p++ = 0x88; *p++ = 0x90; *p++ = 0x21; *p++ = EncodeSyncParams(pc->para.setup.si2 - 160, 0x80); } else if ((pc->para.setup.si2 >= 176) && (pc->para.setup.si2 <= 191)) { // sync. Bitratenadaption, V.120 *p++ = IE_LLC; *p++ = 0x05; *p++ = 0x88; *p++ = 0x90; *p++ = 0x28; *p++ = EncodeSyncParams(pc->para.setup.si2 - 176, 0); *p++ = 0x82; } else if (pc->para.setup.si2 >= 192) { // async. Bitratenadaption, V.110/X.30 *p++ = IE_LLC; *p++ = 0x06; *p++ = 0x88; *p++ = 0x90; *p++ = 0x21; p = EncodeASyncParams(p, pc->para.setup.si2 - 192); } else { switch (pc->para.setup.si1) { case 1: /* Telephony */ *p++ = IE_LLC; *p++ = 0x3; /* Length */ *p++ = 0x90; /* Coding Std. 
CCITT, 3.1 kHz audio */ *p++ = 0x90; /* Circuit-Mode 64kbps */ *p++ = 0xa2; /* u-Law Audio */ break; case 5: /* Datatransmission 64k, BTX */ case 7: /* Datatransmission 64k */ default: *p++ = IE_LLC; *p++ = 0x2; /* Length */ *p++ = 0x88; /* Coding Std. CCITT, unrestr. dig. Inform. */ *p++ = 0x90; /* Circuit-Mode 64kbps */ break; } } #endif l = p - tmp; if (!(skb = l3_alloc_skb(l))) { return; } memcpy(skb_put(skb, l), tmp, l); L3DelTimer(&pc->timer); L3AddTimer(&pc->timer, T303, CC_T303); newl3state(pc, 1); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3ni1_call_proc(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int id, ret; if ((id = l3ni1_get_channel_id(pc, skb)) >= 0) { if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup answer with wrong chid %x", id); pc->para.cause = 100; l3ni1_status_send(pc, pr, NULL); return; } pc->para.bchannel = id; } else if (1 == pc->state) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup answer wrong chid (ret %d)", id); if (id == -1) pc->para.cause = 96; else pc->para.cause = 100; l3ni1_status_send(pc, pr, NULL); return; } /* Now we are on none mandatory IEs */ ret = check_infoelements(pc, skb, ie_CALL_PROCEEDING); if (ERR_IE_COMPREHENSION == ret) { l3ni1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); newl3state(pc, 3); L3AddTimer(&pc->timer, T310, CC_T310); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3ni1_std_ie_err(pc, ret); pc->st->l3.l3l4(pc->st, CC_PROCEEDING | INDICATION, pc); } static void l3ni1_setup_ack(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int id, ret; if ((id = l3ni1_get_channel_id(pc, skb)) >= 0) { if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup answer with wrong chid %x", id); pc->para.cause = 100; l3ni1_status_send(pc, pr, NULL); return; } pc->para.bchannel = id; } else { if 
(pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup answer wrong chid (ret %d)", id); if (id == -1) pc->para.cause = 96; else pc->para.cause = 100; l3ni1_status_send(pc, pr, NULL); return; } /* Now we are on none mandatory IEs */ ret = check_infoelements(pc, skb, ie_SETUP_ACKNOWLEDGE); if (ERR_IE_COMPREHENSION == ret) { l3ni1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); newl3state(pc, 2); L3AddTimer(&pc->timer, T304, CC_T304); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3ni1_std_ie_err(pc, ret); pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc); } static void l3ni1_disconnect(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; u_char *p; int ret; u_char cause = 0; StopAllL3Timer(pc); if ((ret = l3ni1_get_cause(pc, skb))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "DISC get_cause ret(%d)", ret); if (ret < 0) cause = 96; else if (ret > 0) cause = 100; } if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) l3ni1_parse_facility(pc->st, pc, pc->callref, p); ret = check_infoelements(pc, skb, ie_DISCONNECT); if (ERR_IE_COMPREHENSION == ret) cause = 96; else if ((!cause) && (ERR_IE_UNRECOGNIZED == ret)) cause = 99; ret = pc->state; newl3state(pc, 12); if (cause) newl3state(pc, 19); if (11 != ret) pc->st->l3.l3l4(pc->st, CC_DISCONNECT | INDICATION, pc); else if (!cause) l3ni1_release_req(pc, pr, NULL); if (cause) { l3ni1_message_cause(pc, MT_RELEASE, cause); L3AddTimer(&pc->timer, T308, CC_T308_1); } } static void l3ni1_connect(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; ret = check_infoelements(pc, skb, ie_CONNECT); if (ERR_IE_COMPREHENSION == ret) { l3ni1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); /* T310 */ newl3state(pc, 10); pc->para.chargeinfo = 0; /* here should inserted COLP handling KKe */ if (ret) l3ni1_std_ie_err(pc, ret); pc->st->l3.l3l4(pc->st, CC_SETUP | CONFIRM, pc); } static void l3ni1_alerting(struct l3_process *pc, u_char pr, void 
*arg) { struct sk_buff *skb = arg; int ret; ret = check_infoelements(pc, skb, ie_ALERTING); if (ERR_IE_COMPREHENSION == ret) { l3ni1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); /* T304 */ newl3state(pc, 4); if (ret) l3ni1_std_ie_err(pc, ret); pc->st->l3.l3l4(pc->st, CC_ALERTING | INDICATION, pc); } static void l3ni1_setup(struct l3_process *pc, u_char pr, void *arg) { u_char *p; int bcfound = 0; char tmp[80]; struct sk_buff *skb = arg; int id; int err = 0; /* * Bearer Capabilities */ p = skb->data; /* only the first occurence 'll be detected ! */ if ((p = findie(p, skb->len, 0x04, 0))) { if ((p[1] < 2) || (p[1] > 11)) err = 1; else { pc->para.setup.si2 = 0; switch (p[2] & 0x7f) { case 0x00: /* Speech */ case 0x10: /* 3.1 Khz audio */ pc->para.setup.si1 = 1; break; case 0x08: /* Unrestricted digital information */ pc->para.setup.si1 = 7; /* JIM, 05.11.97 I wanna set service indicator 2 */ #if EXT_BEARER_CAPS pc->para.setup.si2 = DecodeSI2(skb); #endif break; case 0x09: /* Restricted digital information */ pc->para.setup.si1 = 2; break; case 0x11: /* Unrestr. 
digital information with * tones/announcements ( or 7 kHz audio */ pc->para.setup.si1 = 3; break; case 0x18: /* Video */ pc->para.setup.si1 = 4; break; default: err = 2; break; } switch (p[3] & 0x7f) { case 0x40: /* packed mode */ pc->para.setup.si1 = 8; break; case 0x10: /* 64 kbit */ case 0x11: /* 2*64 kbit */ case 0x13: /* 384 kbit */ case 0x15: /* 1536 kbit */ case 0x17: /* 1920 kbit */ pc->para.moderate = p[3] & 0x7f; break; default: err = 3; break; } } if (pc->debug & L3_DEB_SI) l3_debug(pc->st, "SI=%d, AI=%d", pc->para.setup.si1, pc->para.setup.si2); if (err) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup with wrong bearer(l=%d:%x,%x)", p[1], p[2], p[3]); pc->para.cause = 100; l3ni1_msg_without_setup(pc, pr, NULL); return; } } else { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup without bearer capabilities"); /* ETS 300-104 1.3.3 */ pc->para.cause = 96; l3ni1_msg_without_setup(pc, pr, NULL); return; } /* * Channel Identification */ if ((id = l3ni1_get_channel_id(pc, skb)) >= 0) { if ((pc->para.bchannel = id)) { if ((3 == id) && (0x10 == pc->para.moderate)) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup with wrong chid %x", id); pc->para.cause = 100; l3ni1_msg_without_setup(pc, pr, NULL); return; } bcfound++; } else { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup without bchannel, call waiting"); bcfound++; } } else { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "setup with wrong chid ret %d", id); if (id == -1) pc->para.cause = 96; else pc->para.cause = 100; l3ni1_msg_without_setup(pc, pr, NULL); return; } /* Now we are on none mandatory IEs */ err = check_infoelements(pc, skb, ie_SETUP); if (ERR_IE_COMPREHENSION == err) { pc->para.cause = 96; l3ni1_msg_without_setup(pc, pr, NULL); return; } p = skb->data; if ((p = findie(p, skb->len, 0x70, 0))) iecpy(pc->para.setup.eazmsn, p, 1); else pc->para.setup.eazmsn[0] = 0; p = skb->data; if ((p = findie(p, skb->len, 0x71, 0))) { /* Called party subaddress */ if ((p[1] >= 2) && (p[2] == 
0x80) && (p[3] == 0x50)) { tmp[0] = '.'; iecpy(&tmp[1], p, 2); strcat(pc->para.setup.eazmsn, tmp); } else if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "wrong called subaddress"); } p = skb->data; if ((p = findie(p, skb->len, 0x6c, 0))) { pc->para.setup.plan = p[2]; if (p[2] & 0x80) { iecpy(pc->para.setup.phone, p, 1); pc->para.setup.screen = 0; } else { iecpy(pc->para.setup.phone, p, 2); pc->para.setup.screen = p[3]; } } else { pc->para.setup.phone[0] = 0; pc->para.setup.plan = 0; pc->para.setup.screen = 0; } p = skb->data; if ((p = findie(p, skb->len, 0x6d, 0))) { /* Calling party subaddress */ if ((p[1] >= 2) && (p[2] == 0x80) && (p[3] == 0x50)) { tmp[0] = '.'; iecpy(&tmp[1], p, 2); strcat(pc->para.setup.phone, tmp); } else if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "wrong calling subaddress"); } newl3state(pc, 6); if (err) /* STATUS for none mandatory IE errors after actions are taken */ l3ni1_std_ie_err(pc, err); pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc); } static void l3ni1_reset(struct l3_process *pc, u_char pr, void *arg) { ni1_release_l3_process(pc); } static void l3ni1_disconnect_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb; u_char tmp[16+40]; u_char *p = tmp; int l; u_char cause = 16; if (pc->para.cause != NO_CAUSE) cause = pc->para.cause; StopAllL3Timer(pc); MsgHead(p, pc->callref, MT_DISCONNECT); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = cause | 0x80; if (pc->prot.ni1.uus1_data[0]) { *p++ = IE_USER_USER; /* UUS info element */ *p++ = strlen(pc->prot.ni1.uus1_data) + 1; *p++ = 0x04; /* IA5 chars */ strcpy(p,pc->prot.ni1.uus1_data); p += strlen(pc->prot.ni1.uus1_data); pc->prot.ni1.uus1_data[0] = '\0'; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); newl3state(pc, 11); l3_msg(pc->st, DL_DATA | REQUEST, skb); L3AddTimer(&pc->timer, T305, CC_T305); } static void l3ni1_setup_rsp(struct l3_process *pc, u_char pr, void *arg) { if (!pc->para.bchannel) { if (pc->debug & 
L3_DEB_WARN)
			l3_debug(pc->st, "D-chan connect for waiting call");
		l3ni1_disconnect_req(pc, pr, arg);
		return;
	}
	newl3state(pc, 8); /* connect request */
	/* NOTE(review): same debug text as the waiting-call branch above;
	   presumably intended to trace the normal connect path - confirm */
	if (pc->debug & L3_DEB_WARN)
		l3_debug(pc->st, "D-chan connect for waiting call");
	l3ni1_message_plus_chid(pc, MT_CONNECT); /* GE 05/09/00 */
	L3DelTimer(&pc->timer);
	L3AddTimer(&pc->timer, T313, CC_T313);
}

/* peer acknowledged our CONNECT: enter active state and notify HL */
static void
l3ni1_connect_ack(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int ret;

	ret = check_infoelements(pc, skb, ie_CONNECT_ACKNOWLEDGE);
	if (ERR_IE_COMPREHENSION == ret)
	{
		l3ni1_std_ie_err(pc, ret);
		return;
	}
	newl3state(pc, 10); /* active */
	L3DelTimer(&pc->timer);
	if (ret)
		l3ni1_std_ie_err(pc, ret);
	pc->st->l3.l3l4(pc->st, CC_SETUP_COMPL | INDICATION, pc);
}

/* reject an incoming call with RELEASE COMPLETE and tear down */
static void
l3ni1_reject_req(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb;
	u_char tmp[16];
	u_char *p = tmp;
	int l;
	u_char cause = 21; /* default: call rejected */

	if (pc->para.cause != NO_CAUSE)
		cause = pc->para.cause;

	MsgHead(p, pc->callref, MT_RELEASE_COMPLETE);

	*p++ = IE_CAUSE;
	*p++ = 0x2;
	*p++ = 0x80;
	*p++ = cause | 0x80;

	l = p - tmp;
	if (!(skb = l3_alloc_skb(l)))
		return;
	memcpy(skb_put(skb, l), tmp, l);
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
	pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
	newl3state(pc, 0);
	ni1_release_l3_process(pc);
}

/* incoming RELEASE: parse cause and facility IEs, answer with
   RELEASE COMPLETE (with a cause IE if anything was malformed) */
static void
l3ni1_release(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	u_char *p;
	int ret, cause=0;

	StopAllL3Timer(pc);
	if ((ret = l3ni1_get_cause(pc, skb))>0)
	{
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "REL get_cause ret(%d)", ret);
	}
	else if (ret<0)
		pc->para.cause = NO_CAUSE;
	if ((p = findie(skb->data, skb->len, IE_FACILITY, 0)))
	{
		l3ni1_parse_facility(pc->st, pc, pc->callref, p);
	}
	/* a missing cause is only an error outside disconnect state 11 */
	if ((ret<0) && (pc->state != 11))
		cause = 96;
	else if (ret>0)
		cause = 100;
	ret = check_infoelements(pc, skb, ie_RELEASE);
	if (ERR_IE_COMPREHENSION == ret)
		cause = 96;
	else if ((ERR_IE_UNRECOGNIZED == ret) && (!cause))
		cause = 99;
	if (cause)
		l3ni1_message_cause(pc, MT_RELEASE_COMPLETE, cause);
	else
		l3ni1_message(pc, MT_RELEASE_COMPLETE);
	pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
	newl3state(pc, 0);
	ni1_release_l3_process(pc);
}

/* HL requested alerting: send ALERTING (with user-user data if pending) */
static void
l3ni1_alert_req(struct l3_process *pc, u_char pr, void *arg)
{
	newl3state(pc, 7); /* call received */
	if (!pc->prot.ni1.uus1_data[0])
		l3ni1_message(pc, MT_ALERTING);
	else
		l3ni1_msg_with_uus(pc, MT_ALERTING);
}

/* HL accepted the call: send CALL PROCEEDING and confirm to HL */
static void
l3ni1_proceed_req(struct l3_process *pc, u_char pr, void *arg)
{
	newl3state(pc, 9); /* incoming call proceeding */
	l3ni1_message(pc, MT_CALL_PROCEEDING);
	pc->st->l3.l3l4(pc->st, CC_PROCEED_SEND | INDICATION, pc);
}

/* HL wants overlap receiving: send SETUP ACKNOWLEDGE, restart T302 */
static void
l3ni1_setup_ack_req(struct l3_process *pc, u_char pr, void *arg)
{
	newl3state(pc, 25); /* overlap receiving */
	L3DelTimer(&pc->timer);
	L3AddTimer(&pc->timer, T302, CC_T302);
	l3ni1_message(pc, MT_SETUP_ACKNOWLEDGE);
}

/********************************************/
/* deliver a incoming display message to HL */
/********************************************/
static void
l3ni1_deliver_display(struct l3_process *pc, int pr, u_char *infp)
{	u_char len;
	isdn_ctrl ic;
	struct IsdnCardState *cs;
	char *p;

	if (*infp++ != IE_DISPLAY)
		return;
	if ((len = *infp++) > 80)
		return; /* total length <= 82 */
	if (!pc->chan)
		return;

	/* copy the display text into the control block and hand it up */
	p = ic.parm.display;
	while (len--)
		*p++ = *infp++;
	*p = '\0';
	ic.command = ISDN_STAT_DISPLAY;
	cs = pc->st->l1.hardware;
	ic.driver = cs->myid;
	ic.arg = pc->chan->chan;
	cs->iif.statcallb(&ic);
} /* l3ni1_deliver_display */

/* incoming PROGRESS: validate the progress IE coding before
   passing the indication up; malformed IEs trigger STATUS */
static void
l3ni1_progress(struct l3_process *pc, u_char pr, void *arg)
{
	struct sk_buff *skb = arg;
	int err = 0;
	u_char *p;

	if ((p = findie(skb->data, skb->len, IE_PROGRESS, 0)))
	{
		if (p[1] != 2)
		{
			err = 1;
			pc->para.cause = 100; /* invalid IE contents */
		}
		else if (!(p[2] & 0x70))
		{
			switch (p[2])
			{ /* valid location codings */
				case 0x80:
				case 0x81:
				case 0x82:
				case 0x84:
				case 0x85:
				case 0x87:
				case 0x8a:
					switch (p[3])
					{ /* valid progress descriptions */
						case 0x81:
						case 0x82:
						case 0x83:
						case 0x84:
						case 0x88:
							break;
						default:
							err = 2;
							pc->para.cause = 100;
							break;
					}
					break;
				default:
					err = 3;
					pc->para.cause = 100;
					break;
			}
		}
	}
	else
	{
		pc->para.cause = 96; /* mandatory IE missing */
		err = 4;
	}
	if (err)
	{
		if (pc->debug & L3_DEB_WARN)
l3_debug(pc->st, "progress error %d", err); l3ni1_status_send(pc, pr, NULL); return; } /* Now we are on none mandatory IEs */ err = check_infoelements(pc, skb, ie_PROGRESS); if (err) l3ni1_std_ie_err(pc, err); if (ERR_IE_COMPREHENSION != err) pc->st->l3.l3l4(pc->st, CC_PROGRESS | INDICATION, pc); } static void l3ni1_notify(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int err = 0; u_char *p; if ((p = findie(skb->data, skb->len, IE_NOTIFY, 0))) { if (p[1] != 1) { err = 1; pc->para.cause = 100; } else { switch (p[2]) { case 0x80: case 0x81: case 0x82: break; default: pc->para.cause = 100; err = 2; break; } } } else { pc->para.cause = 96; err = 3; } if (err) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "notify error %d", err); l3ni1_status_send(pc, pr, NULL); return; } /* Now we are on none mandatory IEs */ err = check_infoelements(pc, skb, ie_NOTIFY); if (err) l3ni1_std_ie_err(pc, err); if (ERR_IE_COMPREHENSION != err) pc->st->l3.l3l4(pc->st, CC_NOTIFY | INDICATION, pc); } static void l3ni1_status_enq(struct l3_process *pc, u_char pr, void *arg) { int ret; struct sk_buff *skb = arg; ret = check_infoelements(pc, skb, ie_STATUS_ENQUIRY); l3ni1_std_ie_err(pc, ret); pc->para.cause = 30; /* response to STATUS_ENQUIRY */ l3ni1_status_send(pc, pr, NULL); } static void l3ni1_information(struct l3_process *pc, u_char pr, void *arg) { int ret; struct sk_buff *skb = arg; u_char *p; char tmp[32]; ret = check_infoelements(pc, skb, ie_INFORMATION); if (ret) l3ni1_std_ie_err(pc, ret); if (pc->state == 25) { /* overlap receiving */ L3DelTimer(&pc->timer); p = skb->data; if ((p = findie(p, skb->len, 0x70, 0))) { iecpy(tmp, p, 1); strcat(pc->para.setup.eazmsn, tmp); pc->st->l3.l3l4(pc->st, CC_MORE_INFO | INDICATION, pc); } L3AddTimer(&pc->timer, T302, CC_T302); } } /******************************/ /* handle deflection requests */ /******************************/ static void l3ni1_redir_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff 
*skb; u_char tmp[128]; u_char *p = tmp; u_char *subp; u_char len_phone = 0; u_char len_sub = 0; int l; strcpy(pc->prot.ni1.uus1_data,pc->chan->setup.eazmsn); /* copy uus element if available */ if (!pc->chan->setup.phone[0]) { pc->para.cause = -1; l3ni1_disconnect_req(pc,pr,arg); /* disconnect immediately */ return; } /* only uus */ if (pc->prot.ni1.invoke_id) free_invoke_id(pc->st,pc->prot.ni1.invoke_id); if (!(pc->prot.ni1.invoke_id = new_invoke_id(pc->st))) return; MsgHead(p, pc->callref, MT_FACILITY); for (subp = pc->chan->setup.phone; (*subp) && (*subp != '.'); subp++) len_phone++; /* len of phone number */ if (*subp++ == '.') len_sub = strlen(subp) + 2; /* length including info subaddress element */ *p++ = 0x1c; /* Facility info element */ *p++ = len_phone + len_sub + 2 + 2 + 8 + 3 + 3; /* length of element */ *p++ = 0x91; /* remote operations protocol */ *p++ = 0xa1; /* invoke component */ *p++ = len_phone + len_sub + 2 + 2 + 8 + 3; /* length of data */ *p++ = 0x02; /* invoke id tag, integer */ *p++ = 0x01; /* length */ *p++ = pc->prot.ni1.invoke_id; /* invoke id */ *p++ = 0x02; /* operation value tag, integer */ *p++ = 0x01; /* length */ *p++ = 0x0D; /* Call Deflect */ *p++ = 0x30; /* sequence phone number */ *p++ = len_phone + 2 + 2 + 3 + len_sub; /* length */ *p++ = 0x30; /* Deflected to UserNumber */ *p++ = len_phone+2+len_sub; /* length */ *p++ = 0x80; /* NumberDigits */ *p++ = len_phone; /* length */ for (l = 0; l < len_phone; l++) *p++ = pc->chan->setup.phone[l]; if (len_sub) { *p++ = 0x04; /* called party subaddress */ *p++ = len_sub - 2; while (*subp) *p++ = *subp++; } *p++ = 0x01; /* screening identifier */ *p++ = 0x01; *p++ = pc->chan->setup.screen; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); } /* l3ni1_redir_req */ /********************************************/ /* handle deflection request in early state */ /********************************************/ static void 
l3ni1_redir_req_early(struct l3_process *pc, u_char pr, void *arg) { l3ni1_proceed_req(pc,pr,arg); l3ni1_redir_req(pc,pr,arg); } /* l3ni1_redir_req_early */ /***********************************************/ /* handle special commands for this protocol. */ /* Examples are call independant services like */ /* remote operations with dummy callref. */ /***********************************************/ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic) { u_char id; u_char temp[265]; u_char *p = temp; int i, l, proc_len; struct sk_buff *skb; struct l3_process *pc = NULL; switch (ic->arg) { case NI1_CMD_INVOKE: if (ic->parm.ni1_io.datalen < 0) return(-2); /* invalid parameter */ for (proc_len = 1, i = ic->parm.ni1_io.proc >> 8; i; i++) i = i >> 8; /* add one byte */ l = ic->parm.ni1_io.datalen + proc_len + 8; /* length excluding ie header */ if (l > 255) return(-2); /* too long */ if (!(id = new_invoke_id(st))) return(0); /* first get a invoke id -> return if no available */ i = -1; MsgHead(p, i, MT_FACILITY); /* build message head */ *p++ = 0x1C; /* Facility IE */ *p++ = l; /* length of ie */ *p++ = 0x91; /* remote operations */ *p++ = 0xA1; /* invoke */ *p++ = l - 3; /* length of invoke */ *p++ = 0x02; /* invoke id tag */ *p++ = 0x01; /* length is 1 */ *p++ = id; /* invoke id */ *p++ = 0x02; /* operation */ *p++ = proc_len; /* length of operation */ for (i = proc_len; i; i--) *p++ = (ic->parm.ni1_io.proc >> (i-1)) & 0xFF; memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */ l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */ if (ic->parm.ni1_io.timeout > 0) if (!(pc = ni1_new_l3_process(st, -1))) { free_invoke_id(st, id); return(-2); } pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */ pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */ if (!(skb = l3_alloc_skb(l))) { free_invoke_id(st, id); if (pc) ni1_release_l3_process(pc); return(-2); } memcpy(skb_put(skb, l), temp, l); if (pc) { pc->prot.ni1.invoke_id = 
id; /* remember id */ L3AddTimer(&pc->timer, ic->parm.ni1_io.timeout, CC_TNI1_IO | REQUEST); } l3_msg(st, DL_DATA | REQUEST, skb); ic->parm.ni1_io.hl_id = id; /* return id */ return(0); case NI1_CMD_INVOKE_ABORT: if ((pc = l3ni1_search_dummy_proc(st, ic->parm.ni1_io.hl_id))) { L3DelTimer(&pc->timer); /* remove timer */ ni1_release_l3_process(pc); return(0); } else { l3_debug(st, "l3ni1_cmd_global abort unknown id"); return(-2); } break; default: l3_debug(st, "l3ni1_cmd_global unknown cmd 0x%lx", ic->arg); return(-1); } /* switch ic-> arg */ return(-1); } /* l3ni1_cmd_global */ static void l3ni1_io_timer(struct l3_process *pc) { isdn_ctrl ic; struct IsdnCardState *cs = pc->st->l1.hardware; L3DelTimer(&pc->timer); /* remove timer */ ic.driver = cs->myid; ic.command = ISDN_STAT_PROT; ic.arg = NI1_STAT_INVOKE_ERR; ic.parm.ni1_io.hl_id = pc->prot.ni1.invoke_id; ic.parm.ni1_io.ll_id = pc->prot.ni1.ll_id; ic.parm.ni1_io.proc = pc->prot.ni1.proc; ic.parm.ni1_io.timeout= -1; ic.parm.ni1_io.datalen = 0; ic.parm.ni1_io.data = NULL; free_invoke_id(pc->st, pc->prot.ni1.invoke_id); pc->prot.ni1.invoke_id = 0; /* reset id */ cs->iif.statcallb(&ic); ni1_release_l3_process(pc); } /* l3ni1_io_timer */ static void l3ni1_release_ind(struct l3_process *pc, u_char pr, void *arg) { u_char *p; struct sk_buff *skb = arg; int callState = 0; p = skb->data; if ((p = findie(p, skb->len, IE_CALL_STATE, 0))) { p++; if (1 == *p++) callState = *p; } if (callState == 0) { /* ETS 300-104 7.6.1, 8.6.1, 10.6.1... 
   and 16.1
	 * set down layer 3 without sending any message */
		pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
		newl3state(pc, 0);
		ni1_release_l3_process(pc);
	} else {
		/* peer still has a call state: just ignore the message */
		pc->st->l3.l3l4(pc->st, CC_IGNORE | INDICATION, pc);
	}
}

/* intentional no-op handler used by the state tables */
static void
l3ni1_dummy(struct l3_process *pc, u_char pr, void *arg)
{
}

/* T302 (overlap receiving) expired: abort the setup with cause 28 */
static void
l3ni1_t302(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.loc = 0;
	pc->para.cause = 28;	/* invalid number */
	l3ni1_disconnect_req(pc, pr, NULL);
	pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
}

/*
 * T303 (SETUP supervision) expired: retransmit SETUP while the N303
 * retry budget lasts, otherwise give up with RELEASE COMPLETE.
 */
static void
l3ni1_t303(struct l3_process *pc, u_char pr, void *arg)
{
	if (pc->N303 > 0) {
		pc->N303--;
		L3DelTimer(&pc->timer);
		l3ni1_setup_req(pc, pr, arg);	/* re-send SETUP */
	} else {
		L3DelTimer(&pc->timer);
		l3ni1_message_cause(pc, MT_RELEASE_COMPLETE, 102);
		pc->st->l3.l3l4(pc->st, CC_NOSETUP_RSP, pc);
		ni1_release_l3_process(pc);
	}
}

/* T304 expired: disconnect with cause 102 (timer expiry, cf. t318/t319) */
static void
l3ni1_t304(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.loc = 0;
	pc->para.cause = 102;
	l3ni1_disconnect_req(pc, pr, NULL);
	pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
}

/*
 * T305 (disconnect supervision) expired: escalate to RELEASE with a
 * cause IE (default 16 = normal clearing), enter state 19 and start
 * the first T308 release supervision.
 */
static void
l3ni1_t305(struct l3_process *pc, u_char pr, void *arg)
{
	u_char tmp[16];
	u_char *p = tmp;
	int l;
	struct sk_buff *skb;
	u_char cause = 16;

	L3DelTimer(&pc->timer);
	if (pc->para.cause != NO_CAUSE)
		cause = pc->para.cause;

	MsgHead(p, pc->callref, MT_RELEASE);

	/* cause IE, same layout as elsewhere in this file */
	*p++ = IE_CAUSE;
	*p++ = 0x2;
	*p++ = 0x80;
	*p++ = cause | 0x80;

	l = p - tmp;
	if (!(skb = l3_alloc_skb(l)))
		return;	/* out of memory: drop the RELEASE */
	memcpy(skb_put(skb, l), tmp, l);
	newl3state(pc, 19);	/* release request */
	l3_msg(pc->st, DL_DATA | REQUEST, skb);
	L3AddTimer(&pc->timer, T308, CC_T308_1);
}

/* T310 (CALL PROCEEDING supervision) expired: abort with cause 102 */
static void
l3ni1_t310(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.loc = 0;
	pc->para.cause = 102;
	l3ni1_disconnect_req(pc, pr, NULL);
	pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc);
}

/* T313 (CONNECT supervision) expired: abort with cause 102 */
static void
l3ni1_t313(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.loc = 0;
	pc->para.cause = 102;
	l3ni1_disconnect_req(pc, pr, NULL);
	pc->st->l3.l3l4(pc->st,
CC_CONNECT_ERR, pc);
}

/* first T308 expiry: retransmit RELEASE once and re-arm as T308_2 */
static void
l3ni1_t308_1(struct l3_process *pc, u_char pr, void *arg)
{
	newl3state(pc, 19);	/* stay in release request */
	L3DelTimer(&pc->timer);
	l3ni1_message(pc, MT_RELEASE);
	L3AddTimer(&pc->timer, T308, CC_T308_2);
}

/* second T308 expiry: give up on the release and destroy the process */
static void
l3ni1_t308_2(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->st->l3.l3l4(pc->st, CC_RELEASE_ERR, pc);
	ni1_release_l3_process(pc);
}

/* T318 (RESUME supervision) expired: report the error and release */
static void
l3ni1_t318(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.cause = 102;	/* Timer expiry */
	pc->para.loc = 0;	/* local */
	pc->st->l3.l3l4(pc->st, CC_RESUME_ERR, pc);
	newl3state(pc, 19);
	l3ni1_message(pc, MT_RELEASE);
	L3AddTimer(&pc->timer, T308, CC_T308_1);
}

/* T319 (SUSPEND supervision) expired: report error, fall back to active */
static void
l3ni1_t319(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->para.cause = 102;	/* Timer expiry */
	pc->para.loc = 0;	/* local */
	pc->st->l3.l3l4(pc->st, CC_SUSPEND_ERR, pc);
	newl3state(pc, 10);	/* back to active */
}

/* restart request for this process: notify layer 4 and tear it down */
static void
l3ni1_restart(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
	ni1_release_l3_process(pc);
}

/*
 * Network sent MT_STATUS: validate the cause and call-state IEs, answer
 * with our own STATUS if anything is wrong, and (per ETS 300-104) shut
 * the call down when the peer reports cause 111 with call state 0.
 */
static void
l3ni1_status(struct l3_process *pc, u_char pr, void *arg)
{
	u_char *p;
	struct sk_buff *skb = arg;
	int ret;
	u_char cause = 0, callState = 0;

	if ((ret = l3ni1_get_cause(pc, skb))) {
		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "STATUS get_cause ret(%d)",ret);
		if (ret < 0)
			cause = 96;	/* cause IE missing */
		else if (ret > 0)
			cause = 100;	/* cause IE malformed */
	}
	if ((p = findie(skb->data, skb->len, IE_CALL_STATE, 0))) {
		p++;
		if (1 == *p++) {	/* call-state IE must have length 1 */
			callState = *p;
			if (!ie_in_set(pc, *p, l3_valid_states))
				cause = 100;
		} else
			cause = 100;
	} else
		cause = 96;
	if (!cause) { /*  no error before */
		ret = check_infoelements(pc, skb, ie_STATUS);
		if (ERR_IE_COMPREHENSION == ret)
			cause = 96;
		else if (ERR_IE_UNRECOGNIZED == ret)
			cause = 99;
	}
	if (cause) {
		u_char tmp;

		if (pc->debug & L3_DEB_WARN)
			l3_debug(pc->st, "STATUS error(%d/%d)",ret,cause);
		/* temporarily swap in the error cause for the outgoing
		 * STATUS; restored below only for cause 99 */
		tmp = pc->para.cause;
		pc->para.cause = cause;
		l3ni1_status_send(pc, 0, NULL);
		if (cause == 99)
pc->para.cause = tmp; else return; } cause = pc->para.cause; if (((cause & 0x7f) == 111) && (callState == 0)) { /* ETS 300-104 7.6.1, 8.6.1, 10.6.1... * if received MT_STATUS with cause == 111 and call * state == 0, then we must set down layer 3 */ pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc); newl3state(pc, 0); ni1_release_l3_process(pc); } } static void l3ni1_facility(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; ret = check_infoelements(pc, skb, ie_FACILITY); l3ni1_std_ie_err(pc, ret); { u_char *p; if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) l3ni1_parse_facility(pc->st, pc, pc->callref, p); } } static void l3ni1_suspend_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb; u_char tmp[32]; u_char *p = tmp; u_char i, l; u_char *msg = pc->chan->setup.phone; MsgHead(p, pc->callref, MT_SUSPEND); l = *msg++; if (l && (l <= 10)) { /* Max length 10 octets */ *p++ = IE_CALL_ID; *p++ = l; for (i = 0; i < l; i++) *p++ = *msg++; } else if (l) { l3_debug(pc->st, "SUS wrong CALL_ID len %d", l); return; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); newl3state(pc, 15); L3AddTimer(&pc->timer, T319, CC_T319); } static void l3ni1_suspend_ack(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; L3DelTimer(&pc->timer); newl3state(pc, 0); pc->para.cause = NO_CAUSE; pc->st->l3.l3l4(pc->st, CC_SUSPEND | CONFIRM, pc); /* We don't handle suspend_ack for IE errors now */ if ((ret = check_infoelements(pc, skb, ie_SUSPEND_ACKNOWLEDGE))) if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "SUSPACK check ie(%d)",ret); ni1_release_l3_process(pc); } static void l3ni1_suspend_rej(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int ret; if ((ret = l3ni1_get_cause(pc, skb))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "SUSP_REJ get_cause ret(%d)",ret); if (ret < 0) pc->para.cause = 96; else 
pc->para.cause = 100; l3ni1_status_send(pc, pr, NULL); return; } ret = check_infoelements(pc, skb, ie_SUSPEND_REJECT); if (ERR_IE_COMPREHENSION == ret) { l3ni1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_SUSPEND_ERR, pc); newl3state(pc, 10); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3ni1_std_ie_err(pc, ret); } static void l3ni1_resume_req(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb; u_char tmp[32]; u_char *p = tmp; u_char i, l; u_char *msg = pc->para.setup.phone; MsgHead(p, pc->callref, MT_RESUME); l = *msg++; if (l && (l <= 10)) { /* Max length 10 octets */ *p++ = IE_CALL_ID; *p++ = l; for (i = 0; i < l; i++) *p++ = *msg++; } else if (l) { l3_debug(pc->st, "RES wrong CALL_ID len %d", l); return; } l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(pc->st, DL_DATA | REQUEST, skb); newl3state(pc, 17); L3AddTimer(&pc->timer, T318, CC_T318); } static void l3ni1_resume_ack(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; int id, ret; if ((id = l3ni1_get_channel_id(pc, skb)) > 0) { if ((0 == id) || ((3 == id) && (0x10 == pc->para.moderate))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "resume ack with wrong chid %x", id); pc->para.cause = 100; l3ni1_status_send(pc, pr, NULL); return; } pc->para.bchannel = id; } else if (1 == pc->state) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "resume ack without chid (ret %d)", id); pc->para.cause = 96; l3ni1_status_send(pc, pr, NULL); return; } ret = check_infoelements(pc, skb, ie_RESUME_ACKNOWLEDGE); if (ERR_IE_COMPREHENSION == ret) { l3ni1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_RESUME | CONFIRM, pc); newl3state(pc, 10); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3ni1_std_ie_err(pc, ret); } static void l3ni1_resume_rej(struct l3_process *pc, u_char pr, void *arg) { struct sk_buff *skb = arg; 
int ret; if ((ret = l3ni1_get_cause(pc, skb))) { if (pc->debug & L3_DEB_WARN) l3_debug(pc->st, "RES_REJ get_cause ret(%d)",ret); if (ret < 0) pc->para.cause = 96; else pc->para.cause = 100; l3ni1_status_send(pc, pr, NULL); return; } ret = check_infoelements(pc, skb, ie_RESUME_REJECT); if (ERR_IE_COMPREHENSION == ret) { l3ni1_std_ie_err(pc, ret); return; } L3DelTimer(&pc->timer); pc->st->l3.l3l4(pc->st, CC_RESUME_ERR, pc); newl3state(pc, 0); if (ret) /* STATUS for none mandatory IE errors after actions are taken */ l3ni1_std_ie_err(pc, ret); ni1_release_l3_process(pc); } static void l3ni1_global_restart(struct l3_process *pc, u_char pr, void *arg) { u_char tmp[32]; u_char *p; u_char ri, ch = 0, chan = 0; int l; struct sk_buff *skb = arg; struct l3_process *up; newl3state(pc, 2); L3DelTimer(&pc->timer); p = skb->data; if ((p = findie(p, skb->len, IE_RESTART_IND, 0))) { ri = p[2]; l3_debug(pc->st, "Restart %x", ri); } else { l3_debug(pc->st, "Restart without restart IE"); ri = 0x86; } p = skb->data; if ((p = findie(p, skb->len, IE_CHANNEL_ID, 0))) { chan = p[2] & 3; ch = p[2]; if (pc->st->l3.debug) l3_debug(pc->st, "Restart for channel %d", chan); } newl3state(pc, 2); up = pc->st->l3.proc; while (up) { if ((ri & 7) == 7) up->st->lli.l4l3(up->st, CC_RESTART | REQUEST, up); else if (up->para.bchannel == chan) up->st->lli.l4l3(up->st, CC_RESTART | REQUEST, up); up = up->next; } p = tmp; MsgHead(p, pc->callref, MT_RESTART_ACKNOWLEDGE); if (chan) { *p++ = IE_CHANNEL_ID; *p++ = 1; *p++ = ch | 0x80; } *p++ = 0x79; /* RESTART Ind */ *p++ = 1; *p++ = ri; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); newl3state(pc, 0); l3_msg(pc->st, DL_DATA | REQUEST, skb); } static void l3ni1_dl_reset(struct l3_process *pc, u_char pr, void *arg) { pc->para.cause = 0x29; /* Temporary failure */ pc->para.loc = 0; l3ni1_disconnect_req(pc, pr, NULL); pc->st->l3.l3l4(pc->st, CC_SETUP_ERR, pc); } static void l3ni1_dl_release(struct l3_process *pc, u_char pr, 
void *arg)
{
	newl3state(pc, 0);
	pc->para.cause = 0x1b;	/* Destination out of order */
	pc->para.loc = 0;
	pc->st->l3.l3l4(pc->st, CC_RELEASE | INDICATION, pc);
	/* NOTE(review): this calls release_l3_process() while every other
	 * teardown path in this file uses ni1_release_l3_process() --
	 * confirm the generic variant is intended here */
	release_l3_process(pc);
}

/* data link went down: start T309 and ask layer 2 to re-establish */
static void
l3ni1_dl_reestablish(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);
	L3AddTimer(&pc->timer, T309, CC_T309);
	l3_msg(pc->st, DL_ESTABLISH | REQUEST, NULL);
}

/* data link came back while a call exists: audit it with a STATUS */
static void
l3ni1_dl_reest_status(struct l3_process *pc, u_char pr, void *arg)
{
	L3DelTimer(&pc->timer);

	pc->para.cause = 0x1F; /* normal, unspecified */
	l3ni1_status_send(pc, 0, NULL);
}

/*
 * Send the SPID (the part of the EAZMSN after ':') to the network in an
 * INFORMATION message with the dummy call reference, enter iNewState
 * and start the TSPID supervision timer.  A passed-in skb (the message
 * that triggered the retry) is consumed and freed first.
 */
static void l3ni1_SendSpid( struct l3_process *pc, u_char pr, struct sk_buff *skb, int iNewState )
{
	u_char         * p;
	char           * pSPID;
	struct Channel * pChan = pc->st->lli.userdata;
	int              l;

	if ( skb )
		dev_kfree_skb( skb);

	if ( !( pSPID = strchr( pChan->setup.eazmsn, ':' ) ) )
	{
		/* no SPID configured: give up and release the data link */
		printk( KERN_ERR "SPID not supplied in EAZMSN %s\n", pChan->setup.eazmsn );
		newl3state( pc, 0 );
		pc->st->l3.l3l2( pc->st, DL_RELEASE | REQUEST, NULL );
		return;
	}

	l = strlen( ++pSPID );	/* skip the ':' itself */
	if ( !( skb = l3_alloc_skb( 5+l ) ) )
	{
		printk( KERN_ERR "HiSax can't get memory to send SPID\n" );
		return;
	}

	/* 5-byte header: discriminator, dummy CR, msg type, IE, IE length */
	p = skb_put( skb, 5 );
	*p++ = PROTO_DIS_EURO;
	*p++ = 0;
	*p++ = MT_INFORMATION;
	*p++ = IE_SPID;
	*p++ = l;

	memcpy( skb_put( skb, l ), pSPID, l );

	newl3state( pc, iNewState );

	L3DelTimer( &pc->timer );
	L3AddTimer( &pc->timer, TSPID, CC_TSPID );

	pc->st->l3.l3l2( pc->st, DL_DATA | REQUEST, skb );
}

/* data link established: kick off SPID negotiation in state 20 */
static void l3ni1_spid_send( struct l3_process *pc, u_char pr, void *arg )
{
	l3ni1_SendSpid( pc, pr, arg, 20 );
}

/*
 * INFORMATION received during SPID negotiation: if it carries an
 * endpoint-ID IE on the dummy call reference, the SPID was accepted --
 * stop TSPID and confirm the data-link establishment upward.
 */
void l3ni1_spid_epid( struct l3_process *pc, u_char pr, void *arg )
{
	struct sk_buff *skb = arg;

	/* NOTE(review): skb->data[3] is read without checking skb->len >= 4;
	 * ni1up() only guarantees len >= 3 -- verify an extra length check
	 * is not needed here */
	if ( skb->data[ 1 ] == 0 )	/* dummy (zero-length) call reference */
		if ( skb->data[ 3 ] == IE_ENDPOINT_ID )
		{
			L3DelTimer( &pc->timer );
			newl3state( pc, 0 );
			l3_msg( pc->st, DL_ESTABLISH | CONFIRM, NULL );
		}
	dev_kfree_skb( skb);
}

/*
 * TSPID expired: retry the SPID in the next state (20..21), or give up
 * after state 22 and release the data link.
 */
static void l3ni1_spid_tout( struct l3_process *pc, u_char pr, void *arg )
{
	if ( pc->state < 22 )
		l3ni1_SendSpid( pc, pr, arg, pc->state+1 );
	else
	{
		L3DelTimer( &pc->timer );
dev_kfree_skb( arg); printk( KERN_ERR "SPID not accepted\n" ); newl3state( pc, 0 ); pc->st->l3.l3l2( pc->st, DL_RELEASE | REQUEST, NULL ); } } /* *INDENT-OFF* */ static struct stateentry downstatelist[] = { {SBIT(0), CC_SETUP | REQUEST, l3ni1_setup_req}, {SBIT(0), CC_RESUME | REQUEST, l3ni1_resume_req}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(6) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(25), CC_DISCONNECT | REQUEST, l3ni1_disconnect_req}, {SBIT(12), CC_RELEASE | REQUEST, l3ni1_release_req}, {ALL_STATES, CC_RESTART | REQUEST, l3ni1_restart}, {SBIT(6) | SBIT(25), CC_IGNORE | REQUEST, l3ni1_reset}, {SBIT(6) | SBIT(25), CC_REJECT | REQUEST, l3ni1_reject_req}, {SBIT(6) | SBIT(25), CC_PROCEED_SEND | REQUEST, l3ni1_proceed_req}, {SBIT(6), CC_MORE_INFO | REQUEST, l3ni1_setup_ack_req}, {SBIT(25), CC_MORE_INFO | REQUEST, l3ni1_dummy}, {SBIT(6) | SBIT(9) | SBIT(25), CC_ALERTING | REQUEST, l3ni1_alert_req}, {SBIT(6) | SBIT(7) | SBIT(9) | SBIT(25), CC_SETUP | RESPONSE, l3ni1_setup_rsp}, {SBIT(10), CC_SUSPEND | REQUEST, l3ni1_suspend_req}, {SBIT(7) | SBIT(9) | SBIT(25), CC_REDIR | REQUEST, l3ni1_redir_req}, {SBIT(6), CC_REDIR | REQUEST, l3ni1_redir_req_early}, {SBIT(9) | SBIT(25), CC_DISCONNECT | REQUEST, l3ni1_disconnect_req}, {SBIT(25), CC_T302, l3ni1_t302}, {SBIT(1), CC_T303, l3ni1_t303}, {SBIT(2), CC_T304, l3ni1_t304}, {SBIT(3), CC_T310, l3ni1_t310}, {SBIT(8), CC_T313, l3ni1_t313}, {SBIT(11), CC_T305, l3ni1_t305}, {SBIT(15), CC_T319, l3ni1_t319}, {SBIT(17), CC_T318, l3ni1_t318}, {SBIT(19), CC_T308_1, l3ni1_t308_1}, {SBIT(19), CC_T308_2, l3ni1_t308_2}, {SBIT(10), CC_T309, l3ni1_dl_release}, { SBIT( 20 ) | SBIT( 21 ) | SBIT( 22 ), CC_TSPID, l3ni1_spid_tout }, }; #define DOWNSLLEN \ (sizeof(downstatelist) / sizeof(struct stateentry)) static struct stateentry datastatelist[] = { {ALL_STATES, MT_STATUS_ENQUIRY, l3ni1_status_enq}, {ALL_STATES, MT_FACILITY, l3ni1_facility}, {SBIT(19), MT_STATUS, l3ni1_release_ind}, {ALL_STATES, MT_STATUS, l3ni1_status}, {SBIT(0), 
MT_SETUP, l3ni1_setup}, {SBIT(6) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25), MT_SETUP, l3ni1_dummy}, {SBIT(1) | SBIT(2), MT_CALL_PROCEEDING, l3ni1_call_proc}, {SBIT(1), MT_SETUP_ACKNOWLEDGE, l3ni1_setup_ack}, {SBIT(2) | SBIT(3), MT_ALERTING, l3ni1_alerting}, {SBIT(2) | SBIT(3), MT_PROGRESS, l3ni1_progress}, {SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25), MT_INFORMATION, l3ni1_information}, {SBIT(10) | SBIT(11) | SBIT(15), MT_NOTIFY, l3ni1_notify}, {SBIT(0) | SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(19) | SBIT(25), MT_RELEASE_COMPLETE, l3ni1_release_cmpl}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(12) | SBIT(15) | SBIT(17) | SBIT(25), MT_RELEASE, l3ni1_release}, {SBIT(19), MT_RELEASE, l3ni1_release_ind}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4) | SBIT(7) | SBIT(8) | SBIT(9) | SBIT(10) | SBIT(11) | SBIT(15) | SBIT(17) | SBIT(25), MT_DISCONNECT, l3ni1_disconnect}, {SBIT(19), MT_DISCONNECT, l3ni1_dummy}, {SBIT(1) | SBIT(2) | SBIT(3) | SBIT(4), MT_CONNECT, l3ni1_connect}, {SBIT(8), MT_CONNECT_ACKNOWLEDGE, l3ni1_connect_ack}, {SBIT(15), MT_SUSPEND_ACKNOWLEDGE, l3ni1_suspend_ack}, {SBIT(15), MT_SUSPEND_REJECT, l3ni1_suspend_rej}, {SBIT(17), MT_RESUME_ACKNOWLEDGE, l3ni1_resume_ack}, {SBIT(17), MT_RESUME_REJECT, l3ni1_resume_rej}, }; #define DATASLLEN \ (sizeof(datastatelist) / sizeof(struct stateentry)) static struct stateentry globalmes_list[] = { {ALL_STATES, MT_STATUS, l3ni1_status}, {SBIT(0), MT_RESTART, l3ni1_global_restart}, /* {SBIT(1), MT_RESTART_ACKNOWLEDGE, l3ni1_restart_ack}, */ { SBIT( 0 ), MT_DL_ESTABLISHED, l3ni1_spid_send }, { SBIT( 20 ) | SBIT( 21 ) | SBIT( 22 ), MT_INFORMATION, l3ni1_spid_epid }, }; #define GLOBALM_LEN \ (sizeof(globalmes_list) / sizeof(struct stateentry)) 
static struct stateentry manstatelist[] = { {SBIT(2), DL_ESTABLISH | INDICATION, l3ni1_dl_reset}, {SBIT(10), DL_ESTABLISH | CONFIRM, l3ni1_dl_reest_status}, {SBIT(10), DL_RELEASE | INDICATION, l3ni1_dl_reestablish}, {ALL_STATES, DL_RELEASE | INDICATION, l3ni1_dl_release}, }; #define MANSLLEN \ (sizeof(manstatelist) / sizeof(struct stateentry)) /* *INDENT-ON* */ static void global_handler(struct PStack *st, int mt, struct sk_buff *skb) { u_char tmp[16]; u_char *p = tmp; int l; int i; struct l3_process *proc = st->l3.global; if ( skb ) proc->callref = skb->data[2]; /* cr flag */ else proc->callref = 0; for (i = 0; i < GLOBALM_LEN; i++) if ((mt == globalmes_list[i].primitive) && ((1 << proc->state) & globalmes_list[i].state)) break; if (i == GLOBALM_LEN) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1 global state %d mt %x unhandled", proc->state, mt); } MsgHead(p, proc->callref, MT_STATUS); *p++ = IE_CAUSE; *p++ = 0x2; *p++ = 0x80; *p++ = 81 |0x80; /* invalid cr */ *p++ = 0x14; /* CallState */ *p++ = 0x1; *p++ = proc->state & 0x3f; l = p - tmp; if (!(skb = l3_alloc_skb(l))) return; memcpy(skb_put(skb, l), tmp, l); l3_msg(proc->st, DL_DATA | REQUEST, skb); } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1 global %d mt %x", proc->state, mt); } globalmes_list[i].rout(proc, mt, skb); } } static void ni1up(struct PStack *st, int pr, void *arg) { int i, mt, cr, cause, callState; char *ptr; u_char *p; struct sk_buff *skb = arg; struct l3_process *proc; switch (pr) { case (DL_DATA | INDICATION): case (DL_UNIT_DATA | INDICATION): break; case (DL_ESTABLISH | INDICATION): case (DL_RELEASE | INDICATION): case (DL_RELEASE | CONFIRM): l3_msg(st, pr, arg); return; break; case (DL_ESTABLISH | CONFIRM): global_handler( st, MT_DL_ESTABLISHED, NULL ); return; default: printk(KERN_ERR "HiSax ni1up unknown pr=%04x\n", pr); return; } if (skb->len < 3) { l3_debug(st, "ni1up frame too short(%d)", skb->len); dev_kfree_skb(skb); return; } if (skb->data[0] != PROTO_DIS_EURO) 
{ if (st->l3.debug & L3_DEB_PROTERR) { l3_debug(st, "ni1up%sunexpected discriminator %x message len %d", (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", skb->data[0], skb->len); } dev_kfree_skb(skb); return; } cr = getcallref(skb->data); if (skb->len < ((skb->data[1] & 0x0f) + 3)) { l3_debug(st, "ni1up frame too short(%d)", skb->len); dev_kfree_skb(skb); return; } mt = skb->data[skb->data[1] + 2]; if (st->l3.debug & L3_DEB_STATE) l3_debug(st, "ni1up cr %d", cr); if (cr == -2) { /* wrong Callref */ if (st->l3.debug & L3_DEB_WARN) l3_debug(st, "ni1up wrong Callref"); dev_kfree_skb(skb); return; } else if (cr == -1) { /* Dummy Callref */ if (mt == MT_FACILITY) { if ((p = findie(skb->data, skb->len, IE_FACILITY, 0))) { l3ni1_parse_facility(st, NULL, (pr == (DL_DATA | INDICATION)) ? -1 : -2, p); dev_kfree_skb(skb); return; } } else { global_handler(st, mt, skb); return; } if (st->l3.debug & L3_DEB_WARN) l3_debug(st, "ni1up dummy Callref (no facility msg or ie)"); dev_kfree_skb(skb); return; } else if ((((skb->data[1] & 0x0f) == 1) && (0==(cr & 0x7f))) || (((skb->data[1] & 0x0f) == 2) && (0==(cr & 0x7fff)))) { /* Global CallRef */ if (st->l3.debug & L3_DEB_STATE) l3_debug(st, "ni1up Global CallRef"); global_handler(st, mt, skb); dev_kfree_skb(skb); return; } else if (!(proc = getl3proc(st, cr))) { /* No transaction process exist, that means no call with * this callreference is active */ if (mt == MT_SETUP) { /* Setup creates a new transaction process */ if (skb->data[2] & 0x80) { /* Setup with wrong CREF flag */ if (st->l3.debug & L3_DEB_STATE) l3_debug(st, "ni1up wrong CRef flag"); dev_kfree_skb(skb); return; } if (!(proc = ni1_new_l3_process(st, cr))) { /* May be to answer with RELEASE_COMPLETE and * CAUSE 0x2f "Resource unavailable", but this * need a new_l3_process too ... 
arghh */ dev_kfree_skb(skb); return; } } else if (mt == MT_STATUS) { cause = 0; if ((ptr = findie(skb->data, skb->len, IE_CAUSE, 0)) != NULL) { ptr++; if (*ptr++ == 2) ptr++; cause = *ptr & 0x7f; } callState = 0; if ((ptr = findie(skb->data, skb->len, IE_CALL_STATE, 0)) != NULL) { ptr++; if (*ptr++ == 2) ptr++; callState = *ptr; } /* ETS 300-104 part 2.4.1 * if setup has not been made and a message type * MT_STATUS is received with call state == 0, * we must send nothing */ if (callState != 0) { /* ETS 300-104 part 2.4.2 * if setup has not been made and a message type * MT_STATUS is received with call state != 0, * we must send MT_RELEASE_COMPLETE cause 101 */ if ((proc = ni1_new_l3_process(st, cr))) { proc->para.cause = 101; l3ni1_msg_without_setup(proc, 0, NULL); } } dev_kfree_skb(skb); return; } else if (mt == MT_RELEASE_COMPLETE) { dev_kfree_skb(skb); return; } else { /* ETS 300-104 part 2 * if setup has not been made and a message type * (except MT_SETUP and RELEASE_COMPLETE) is received, * we must send MT_RELEASE_COMPLETE cause 81 */ dev_kfree_skb(skb); if ((proc = ni1_new_l3_process(st, cr))) { proc->para.cause = 81; l3ni1_msg_without_setup(proc, 0, NULL); } return; } } if (l3ni1_check_messagetype_validity(proc, mt, skb)) { dev_kfree_skb(skb); return; } if ((p = findie(skb->data, skb->len, IE_DISPLAY, 0)) != NULL) l3ni1_deliver_display(proc, pr, p); /* Display IE included */ for (i = 0; i < DATASLLEN; i++) if ((mt == datastatelist[i].primitive) && ((1 << proc->state) & datastatelist[i].state)) break; if (i == DATASLLEN) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1up%sstate %d mt %#x unhandled", (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", proc->state, mt); } if ((MT_RELEASE_COMPLETE != mt) && (MT_RELEASE != mt)) { proc->para.cause = 101; l3ni1_status_send(proc, pr, skb); } } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1up%sstate %d mt %x", (pr == (DL_DATA | INDICATION)) ? 
" " : "(broadcast) ", proc->state, mt); } datastatelist[i].rout(proc, pr, skb); } dev_kfree_skb(skb); return; } static void ni1down(struct PStack *st, int pr, void *arg) { int i, cr; struct l3_process *proc; struct Channel *chan; if ((DL_ESTABLISH | REQUEST) == pr) { l3_msg(st, pr, NULL); return; } else if (((CC_SETUP | REQUEST) == pr) || ((CC_RESUME | REQUEST) == pr)) { chan = arg; cr = newcallref(); cr |= 0x80; if ((proc = ni1_new_l3_process(st, cr))) { proc->chan = chan; chan->proc = proc; memcpy(&proc->para.setup, &chan->setup, sizeof(setup_parm)); proc->callref = cr; } } else { proc = arg; } if (!proc) { printk(KERN_ERR "HiSax ni1down without proc pr=%04x\n", pr); return; } if ( pr == (CC_TNI1_IO | REQUEST)) { l3ni1_io_timer(proc); /* timer expires */ return; } for (i = 0; i < DOWNSLLEN; i++) if ((pr == downstatelist[i].primitive) && ((1 << proc->state) & downstatelist[i].state)) break; if (i == DOWNSLLEN) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1down state %d prim %#x unhandled", proc->state, pr); } } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "ni1down state %d prim %#x", proc->state, pr); } downstatelist[i].rout(proc, pr, arg); } } static void ni1man(struct PStack *st, int pr, void *arg) { int i; struct l3_process *proc = arg; if (!proc) { printk(KERN_ERR "HiSax ni1man without proc pr=%04x\n", pr); return; } for (i = 0; i < MANSLLEN; i++) if ((pr == manstatelist[i].primitive) && ((1 << proc->state) & manstatelist[i].state)) break; if (i == MANSLLEN) { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "cr %d ni1man state %d prim %#x unhandled", proc->callref & 0x7f, proc->state, pr); } } else { if (st->l3.debug & L3_DEB_STATE) { l3_debug(st, "cr %d ni1man state %d prim %#x", proc->callref & 0x7f, proc->state, pr); } manstatelist[i].rout(proc, pr, arg); } } void setstack_ni1(struct PStack *st) { char tmp[64]; int i; st->lli.l4l3 = ni1down; st->lli.l4l3_proto = l3ni1_cmd_global; st->l2.l2l3 = ni1up; st->l3.l3ml3 = ni1man; st->l3.N303 = 1; 
st->prot.ni1.last_invoke_id = 0;
	st->prot.ni1.invoke_used[0] = 1; /* Bit 0 must always be set to 1 */
	/* clear the rest of the invoke-id usage bitmap */
	i = 1;
	while (i < 32)
		st->prot.ni1.invoke_used[i++] = 0;

	/* allocate the process used for the global call reference; a
	 * failure is logged but otherwise tolerated */
	if (!(st->l3.global = kmalloc(sizeof(struct l3_process), GFP_ATOMIC))) {
		printk(KERN_ERR "HiSax can't get memory for ni1 global CR\n");
	} else {
		st->l3.global->state = 0;
		st->l3.global->callref = 0;
		st->l3.global->next = NULL;
		st->l3.global->debug = L3_DEB_WARN;
		st->l3.global->st = st;
		st->l3.global->N303 = 1;
		st->l3.global->prot.ni1.invoke_id = 0;
		L3InitTimer(st->l3.global, &st->l3.global->timer);
	}
	/* HiSax_getrev() modifies its argument, hence the local copy */
	strcpy(tmp, ni1_revision);
	printk(KERN_INFO "HiSax: National ISDN-1 Rev. %s\n",
		HiSax_getrev(tmp));
}
gpl-2.0
9034725985/vlc
modules/demux/playlist/xspf.c
35
26377
/******************************************************************************* * xspf.c : XSPF playlist import functions ******************************************************************************* * Copyright (C) 2006-2011 VLC authors and VideoLAN * $Id$ * * Authors: Daniel Stränger <vlc at schmaller dot de> * Yoann Peronneau <yoann@videolan.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. 
 ******************************************************************************/
/**
 * \file modules/demux/playlist/xspf.c
 * \brief XSPF playlist import functions
 */

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <vlc_common.h>
#include <vlc_demux.h>
#include <vlc_xml.h>
#include <vlc_strings.h>
#include <vlc_url.h>
#include "playlist.h"

/* free and reset the current element's text value */
#define FREE_VALUE() do { free(psz_value);psz_value=NULL; } while(0)

/* parameter lists shared by the two kinds of element handlers:
 * "simple" handlers get the element's text, "complex" handlers keep
 * reading child nodes from the XML reader themselves */
#define SIMPLE_INTERFACE  (input_item_t *p_input,\
                           const char *psz_name,\
                           char *psz_value)
#define COMPLEX_INTERFACE (demux_t *p_demux,\
                           input_item_node_t *p_input_node,\
                           xml_reader_t *p_xml_reader,\
                           const char *psz_element)

/* prototypes */
static bool parse_playlist_node COMPLEX_INTERFACE;
static bool parse_tracklist_node COMPLEX_INTERFACE;
static bool parse_track_node COMPLEX_INTERFACE;
static bool parse_extension_node COMPLEX_INTERFACE;
static bool parse_extitem_node COMPLEX_INTERFACE;
static bool set_item_info SIMPLE_INTERFACE;
static bool set_option SIMPLE_INTERFACE;
static bool skip_element COMPLEX_INTERFACE;

/* datatypes */

/* maps an XSPF element name to its handler; cmplx selects which union
 * member is valid */
typedef struct
{
    const char *name;
    union
    {
        bool (*smpl) SIMPLE_INTERFACE;
        bool (*cmplx) COMPLEX_INTERFACE;
    } pf_handler;
    bool cmplx;
} xml_elem_hnd_t;

struct demux_sys_t
{
    input_item_t **pp_tracklist;   /* tracks indexed by their XSPF id */
    int i_tracklist_entries;       /* size of pp_tracklist */
    int i_track_id;                /* id of the track being parsed */
    char * psz_base;               /* xml:base for resolving locations */
};

static int Demux(demux_t *);

/**
 * \brief XSPF submodule initialization function
 */
int Import_xspf(vlc_object_t *p_this)
{
    DEMUX_BY_EXTENSION_OR_MIMETYPE(".xspf", "application/xspf+xml",
                                    "using XSPF playlist reader");
    return VLC_SUCCESS;
}

/* release the tracklist, the xml:base string and the sys struct */
void Close_xspf(vlc_object_t *p_this)
{
    demux_t *p_demux = (demux_t *)p_this;
    demux_sys_t *p_sys = p_demux->p_sys;
    for (int i = 0; i < p_sys->i_tracklist_entries; i++)
        if (p_sys->pp_tracklist[i])
            vlc_gc_decref(p_sys->pp_tracklist[i]);
    free(p_sys->pp_tracklist);
    free(p_sys->psz_base);
    free(p_sys);
}

/**
 * \brief demuxer function for XSPF parsing
 */
static int Demux(demux_t *p_demux)
{
    int i_ret = -1;
    xml_reader_t *p_xml_reader = NULL;
const char *name = NULL; input_item_t *p_current_input = GetCurrentItem(p_demux); p_demux->p_sys->pp_tracklist = NULL; p_demux->p_sys->i_tracklist_entries = 0; p_demux->p_sys->i_track_id = -1; p_demux->p_sys->psz_base = FindPrefix(p_demux); /* create new xml parser from stream */ p_xml_reader = xml_ReaderCreate(p_demux, p_demux->s); if (!p_xml_reader) goto end; /* locating the root node */ if (xml_ReaderNextNode(p_xml_reader, &name) != XML_READER_STARTELEM) { msg_Err(p_demux, "can't read xml stream"); goto end; } /* checking root node name */ if (strcmp(name, "playlist")) { msg_Err(p_demux, "invalid root node name <%s>", name); goto end; } input_item_node_t *p_subitems = input_item_node_Create(p_current_input); i_ret = parse_playlist_node(p_demux, p_subitems, p_xml_reader, "playlist") ? 0 : -1; for (int i = 0 ; i < p_demux->p_sys->i_tracklist_entries ; i++) { input_item_t *p_new_input = p_demux->p_sys->pp_tracklist[i]; if (p_new_input) { input_item_node_AppendItem(p_subitems, p_new_input); } } input_item_node_PostAndDelete(p_subitems); end: vlc_gc_decref(p_current_input); if (p_xml_reader) xml_ReaderDelete(p_xml_reader); return i_ret; /* Needed for correct operation of go back */ } static const xml_elem_hnd_t *get_handler(const xml_elem_hnd_t *tab, size_t n, const char *name) { for (size_t i = 0; i < n / sizeof(xml_elem_hnd_t); i++) if (!strcmp(name, tab[i].name)) return &tab[i]; return NULL; } #define get_handler(tab, name) get_handler(tab, sizeof tab, name) /** * \brief parse the root node of a XSPF playlist * \param p_demux demuxer instance * \param p_input_item current input item * \param p_xml_reader xml reader instance * \param psz_element name of element to parse */ static bool parse_playlist_node COMPLEX_INTERFACE { input_item_t *p_input_item = p_input_node->p_item; char *psz_value = NULL; bool b_version_found = false; int i_node; bool b_ret = false; const xml_elem_hnd_t *p_handler = NULL; static const xml_elem_hnd_t pl_elements[] = { {"title", {.smpl = 
set_item_info}, false }, {"creator", {.smpl = set_item_info}, false }, {"annotation", {.smpl = set_item_info}, false }, {"info", {NULL}, false }, {"location", {NULL}, false }, {"identifier", {NULL}, false }, {"image", {.smpl = set_item_info}, false }, {"date", {NULL}, false }, {"license", {NULL}, false }, {"attribution", {.cmplx = skip_element}, true }, {"link", {NULL}, false }, {"meta", {NULL}, false }, {"extension", {.cmplx = parse_extension_node}, true }, {"trackList", {.cmplx = parse_tracklist_node}, true }, }; /* read all playlist attributes */ const char *name, *value; while ((name = xml_ReaderNextAttr(p_xml_reader, &value)) != NULL) { /* attribute: version */ if (!strcmp(name, "version")) { b_version_found = true; if (strcmp(value, "0") && strcmp(value, "1")) msg_Warn(p_demux, "unsupported XSPF version %s", value); } /* attribute: xmlns */ else if (!strcmp(name, "xmlns") || !strcmp(name, "xmlns:vlc")) ; else if (!strcmp(name, "xml:base")) { free(p_demux->p_sys->psz_base); p_demux->p_sys->psz_base = strdup(value); } /* unknown attribute */ else msg_Warn(p_demux, "invalid <playlist> attribute: \"%s\"", name); } /* attribute version is mandatory !!! 
*/ if (!b_version_found) msg_Warn(p_demux, "<playlist> requires \"version\" attribute"); /* parse the child elements - we only take care of <trackList> */ psz_value = NULL; while ((i_node = xml_ReaderNextNode(p_xml_reader, &name)) > 0) switch (i_node) { /* element start tag */ case XML_READER_STARTELEM: if (!*name) { msg_Err(p_demux, "invalid XML stream"); goto end; } /* choose handler */ p_handler = get_handler(pl_elements, name); if (!p_handler) { msg_Err(p_demux, "unexpected element <%s>", name); goto end; } /* complex content is parsed in a separate function */ if (p_handler->cmplx) { FREE_VALUE(); if (!p_handler->pf_handler.cmplx(p_demux, p_input_node, p_xml_reader, p_handler->name)) return false; p_handler = NULL; } break; /* simple element content */ case XML_READER_TEXT: psz_value = strdup(name); if (unlikely(!name)) goto end; break; /* element end tag */ case XML_READER_ENDELEM: /* leave if the current parent node <playlist> is terminated */ if (!strcmp(name, psz_element)) { b_ret = true; goto end; } /* there MUST have been a start tag for that element name */ if (!p_handler || !p_handler->name || strcmp(p_handler->name, name)) { msg_Err(p_demux, "there's no open element left for <%s>", name); goto end; } if (p_handler->pf_handler.smpl) p_handler->pf_handler.smpl(p_input_item, p_handler->name, psz_value); FREE_VALUE(); p_handler = NULL; break; } end: free(psz_value); return b_ret; } /** * \brief parses the tracklist node which only may contain <track>s */ static bool parse_tracklist_node COMPLEX_INTERFACE { VLC_UNUSED(psz_element); const char *name; unsigned i_ntracks = 0; int i_node; /* now parse the <track>s */ while ((i_node = xml_ReaderNextNode(p_xml_reader, &name)) > 0) { if (i_node == XML_READER_STARTELEM) { if (strcmp(name, "track")) { msg_Err(p_demux, "unexpected child of <trackList>: <%s>", name); return false; } /* parse the track data in a separate function */ if (parse_track_node(p_demux, p_input_node, p_xml_reader, "track")) i_ntracks++; } 
else if (i_node == XML_READER_ENDELEM) break; } /* the <trackList> has to be terminated */ if (i_node != XML_READER_ENDELEM) { msg_Err(p_demux, "there's a missing </trackList>"); return false; } if (strcmp(name, "trackList")) { msg_Err(p_demux, "expected: </trackList>, found: </%s>", name); return false; } msg_Dbg(p_demux, "parsed %u tracks successfully", i_ntracks); return true; } /** * \brief parse one track element * \param COMPLEX_INTERFACE */ static bool parse_track_node COMPLEX_INTERFACE { input_item_t *p_input_item = p_input_node->p_item; const char *name; char *psz_value = NULL; const xml_elem_hnd_t *p_handler = NULL; demux_sys_t *p_sys = p_demux->p_sys; int i_node; static const xml_elem_hnd_t track_elements[] = { {"location", {NULL}, false }, {"identifier", {NULL}, false }, {"title", {.smpl = set_item_info}, false }, {"creator", {.smpl = set_item_info}, false }, {"annotation", {.smpl = set_item_info}, false }, {"info", {.smpl = set_item_info}, false }, {"image", {.smpl = set_item_info}, false }, {"album", {.smpl = set_item_info}, false }, {"trackNum", {.smpl = set_item_info}, false }, {"duration", {.smpl = set_item_info}, false }, {"link", {NULL}, false }, {"meta", {NULL}, false }, {"extension", {.cmplx = parse_extension_node}, true }, }; input_item_t *p_new_input = input_item_New(NULL, NULL); if (!p_new_input) return false; input_item_node_t *p_new_node = input_item_node_Create(p_new_input); /* reset i_track_id */ p_sys->i_track_id = -1; while ((i_node = xml_ReaderNextNode(p_xml_reader, &name)) > 0) switch (i_node) { /* element start tag */ case XML_READER_STARTELEM: if (!*name) { msg_Err(p_demux, "invalid XML stream"); goto end; } /* choose handler */ p_handler = get_handler(track_elements, name); if (!p_handler) { msg_Err(p_demux, "unexpected element <%s>", name); goto end; } /* complex content is parsed in a separate function */ if (p_handler->cmplx) { FREE_VALUE(); if (!p_handler->pf_handler.cmplx(p_demux, p_new_node, p_xml_reader, p_handler->name)) { 
input_item_node_Delete(p_new_node); return false; } p_handler = NULL; } break; /* simple element content */ case XML_READER_TEXT: free(psz_value); psz_value = strdup(name); if (unlikely(!psz_value)) goto end; break; /* element end tag */ case XML_READER_ENDELEM: /* leave if the current parent node <track> is terminated */ if (!strcmp(name, psz_element)) { free(psz_value); /* Make sure we have a URI */ char *psz_uri = input_item_GetURI(p_new_input); if (!psz_uri) input_item_SetURI(p_new_input, "vlc://nop"); else free(psz_uri); if (p_sys->i_track_id < 0 || (size_t)p_sys->i_track_id >= (SIZE_MAX / sizeof(p_new_input))) { input_item_node_AppendNode(p_input_node, p_new_node); vlc_gc_decref(p_new_input); return true; } if (p_sys->i_track_id >= p_sys->i_tracklist_entries) { input_item_t **pp; pp = realloc(p_sys->pp_tracklist, (p_sys->i_track_id + 1) * sizeof(*pp)); if (!pp) { vlc_gc_decref(p_new_input); input_item_node_Delete(p_new_node); return false; } p_sys->pp_tracklist = pp; while (p_sys->i_track_id >= p_sys->i_tracklist_entries) pp[p_sys->i_tracklist_entries++] = NULL; } else if (p_sys->pp_tracklist[p_sys->i_track_id] != NULL) { msg_Err(p_demux, "track ID %d collision", p_sys->i_track_id); vlc_gc_decref(p_new_input); input_item_node_Delete(p_new_node); return false; } p_sys->pp_tracklist[ p_sys->i_track_id ] = p_new_input; input_item_node_Delete(p_new_node); return true; } /* there MUST have been a start tag for that element name */ if (!p_handler || !p_handler->name || strcmp(p_handler->name, name)) { msg_Err(p_demux, "there's no open element left for <%s>", name); goto end; } /* special case: location */ if (!strcmp(p_handler->name, "location")) { if (psz_value == NULL) input_item_SetURI(p_new_input, "vlc://nop"); else /* FIXME (#4005): This is broken. Scheme-relative (//...) locations * and anchors (#...) are not resolved correctly. Also, * host-relative (/...) and directory-relative locations * ("relative path" in vernacular) should be resolved. 
* -- Courmisch */ if (p_sys->psz_base && !strstr(psz_value, "://")) { char* psz_tmp; if (asprintf(&psz_tmp, "%s%s", p_sys->psz_base, psz_value) == -1) { goto end; } input_item_SetURI(p_new_input, psz_tmp); free(psz_tmp); } else input_item_SetURI(p_new_input, psz_value); input_item_CopyOptions(p_input_item, p_new_input); } else { /* there MUST be an item */ if (p_handler->pf_handler.smpl) p_handler->pf_handler.smpl(p_new_input, p_handler->name, psz_value); } FREE_VALUE(); p_handler = NULL; break; } msg_Err(p_demux, "unexpected end of xml data"); end: input_item_node_Delete(p_new_node); free(psz_value); return false; } /** * \brief handles the supported <track> sub-elements */ static bool set_item_info SIMPLE_INTERFACE { /* exit if setting is impossible */ if (!psz_name || !psz_value || !p_input) return false; /* re-convert xml special characters inside psz_value */ resolve_xml_special_chars(psz_value); /* handle each info element in a separate "if" clause */ if (!strcmp(psz_name, "title")) input_item_SetTitle(p_input, psz_value); else if (!strcmp(psz_name, "creator")) input_item_SetArtist(p_input, psz_value); else if (!strcmp(psz_name, "album")) input_item_SetAlbum(p_input, psz_value); else if (!strcmp(psz_name, "trackNum")) input_item_SetTrackNum(p_input, psz_value); else if (!strcmp(psz_name, "duration")) { long i_num = atol(psz_value); input_item_SetDuration(p_input, (mtime_t) i_num*1000); } else if (!strcmp(psz_name, "annotation")) input_item_SetDescription(p_input, psz_value); else if (!strcmp(psz_name, "info")) input_item_SetURL(p_input, psz_value); else if (!strcmp(psz_name, "image") && *psz_value) input_item_SetArtURL(p_input, psz_value); return true; } /** * \brief handles the <vlc:option> elements */ static bool set_option SIMPLE_INTERFACE { /* exit if setting is impossible */ if (!psz_name || !psz_value || !p_input) return false; /* re-convert xml special characters inside psz_value */ resolve_xml_special_chars(psz_value); input_item_AddOption(p_input, 
psz_value, 0); return true; } /** * \brief parse the extension node of a XSPF playlist */ static bool parse_extension_node COMPLEX_INTERFACE { input_item_t *p_input_item = p_input_node->p_item; char *psz_value = NULL; char *psz_title = NULL; char *psz_application = NULL; int i_node; bool b_release_input_item = false; const xml_elem_hnd_t *p_handler = NULL; input_item_t *p_new_input = NULL; static const xml_elem_hnd_t pl_elements[] = { {"vlc:node", {.cmplx = parse_extension_node}, true }, {"vlc:item", {.cmplx = parse_extitem_node}, true }, {"vlc:id", {NULL}, false }, {"vlc:option", {.smpl = set_option}, false }, }; /* read all extension node attributes */ const char *name, *value; while ((name = xml_ReaderNextAttr(p_xml_reader, &value)) != NULL) { /* attribute: title */ if (!strcmp(name, "title")) { free(psz_title); psz_title = strdup(value); if (likely(psz_title != NULL)) resolve_xml_special_chars(psz_title); } /* extension attribute: application */ else if (!strcmp(name, "application")) { free(psz_application); psz_application = strdup(value); } /* unknown attribute */ else msg_Warn(p_demux, "invalid <%s> attribute:\"%s\"", psz_element, name); } /* attribute title is mandatory except for <extension> */ if (!strcmp(psz_element, "vlc:node")) { if (!psz_title) { msg_Warn(p_demux, "<vlc:node> requires \"title\" attribute"); return false; } p_new_input = input_item_NewWithType("vlc://nop", psz_title, 0, NULL, 0, -1, ITEM_TYPE_DIRECTORY); if (p_new_input) { p_input_node = input_item_node_AppendItem(p_input_node, p_new_input); p_input_item = p_new_input; b_release_input_item = true; } free(psz_title); } else if (!strcmp(psz_element, "extension")) { if (!psz_application) { msg_Warn(p_demux, "<extension> requires \"application\" attribute"); return false; } /* Skip the extension if the application is not vlc This will skip all children of the current node */ else if (strcmp(psz_application, "http://www.videolan.org/vlc/playlist/0")) { msg_Dbg(p_demux, "Skipping \"%s\" 
extension tag", psz_application); free(psz_application); /* Skip all children */ for (unsigned lvl = 1; lvl;) switch (xml_ReaderNextNode(p_xml_reader, NULL)) { case XML_READER_STARTELEM: lvl++; break; case XML_READER_ENDELEM: lvl--; break; case 0: case -1: return -1; } return true; } } free(psz_application); /* parse the child elements */ while ((i_node = xml_ReaderNextNode(p_xml_reader, &name)) > 0) { switch (i_node) { /* element start tag */ case XML_READER_STARTELEM: if (!*name) { msg_Err(p_demux, "invalid xml stream"); FREE_VALUE(); if (b_release_input_item) vlc_gc_decref(p_new_input); return false; } /* choose handler */ p_handler = get_handler(pl_elements, name); if (!p_handler) { msg_Err(p_demux, "unexpected element <%s>", name); FREE_VALUE(); if (b_release_input_item) vlc_gc_decref(p_new_input); return false; } /* complex content is parsed in a separate function */ if (p_handler->cmplx) { if (p_handler->pf_handler.cmplx(p_demux, p_input_node, p_xml_reader, p_handler->name)) { p_handler = NULL; FREE_VALUE(); } else { FREE_VALUE(); if (b_release_input_item) vlc_gc_decref(p_new_input); return false; } } break; case XML_READER_TEXT: /* simple element content */ FREE_VALUE(); psz_value = strdup(name); if (unlikely(!psz_value)) { FREE_VALUE(); if (b_release_input_item) vlc_gc_decref(p_new_input); return false; } break; /* element end tag */ case XML_READER_ENDELEM: /* leave if the current parent node is terminated */ if (!strcmp(name, psz_element)) { FREE_VALUE(); if (b_release_input_item) vlc_gc_decref(p_new_input); return true; } /* there MUST have been a start tag for that element name */ if (!p_handler || !p_handler->name || strcmp(p_handler->name, name)) { msg_Err(p_demux, "there's no open element left for <%s>", name); FREE_VALUE(); if (b_release_input_item) vlc_gc_decref(p_new_input); return false; } /* special tag <vlc:id> */ if (!strcmp(p_handler->name, "vlc:id")) { p_demux->p_sys->i_track_id = atoi(psz_value); } else if (p_handler->pf_handler.smpl) { 
p_handler->pf_handler.smpl(p_input_item, p_handler->name, psz_value); } FREE_VALUE(); p_handler = NULL; break; } } if (b_release_input_item) vlc_gc_decref(p_new_input); free(psz_value); return false; } /** * \brief parse the extension item node of a XSPF playlist */ static bool parse_extitem_node COMPLEX_INTERFACE { VLC_UNUSED(psz_element); input_item_t *p_new_input = NULL; int i_tid = -1; /* read all extension item attributes */ const char *name, *value; while ((name = xml_ReaderNextAttr(p_xml_reader, &value)) != NULL) { /* attribute: href */ if (!strcmp(name, "tid")) i_tid = atoi(value); /* unknown attribute */ else msg_Warn(p_demux, "invalid <vlc:item> attribute: \"%s\"", name); } /* attribute href is mandatory */ if (i_tid < 0) { msg_Warn(p_demux, "<vlc:item> requires \"tid\" attribute"); return false; } if (i_tid >= p_demux->p_sys->i_tracklist_entries) { msg_Warn(p_demux, "invalid \"tid\" attribute"); return false; } p_new_input = p_demux->p_sys->pp_tracklist[ i_tid ]; if (p_new_input) { input_item_node_AppendItem(p_input_node, p_new_input); vlc_gc_decref(p_new_input); p_demux->p_sys->pp_tracklist[i_tid] = NULL; } return true; } /** * \brief skips complex element content that we can't manage */ static bool skip_element COMPLEX_INTERFACE { VLC_UNUSED(p_demux); VLC_UNUSED(p_input_node); VLC_UNUSED(psz_element); for (unsigned lvl = 1; lvl;) switch (xml_ReaderNextNode(p_xml_reader, NULL)) { case XML_READER_STARTELEM: lvl++; break; case XML_READER_ENDELEM: lvl--; break; case 0: case -1: return false; } return true; }
gpl-2.0
temasek/GCC_SaberMod
libobjc/class.c
35
28413
/* GNU Objective C Runtime class related functions Copyright (C) 1993-2014 Free Software Foundation, Inc. Contributed by Kresten Krab Thorup and Dennis Glatting. Lock-free class table code designed and written from scratch by Nicola Pero, 2001. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* The code in this file critically affects class method invocation speed. This long preamble comment explains why, and the issues involved. One of the traditional weaknesses of the GNU Objective-C runtime is that class method invocations are slow. The reason is that when you write array = [NSArray new]; this gets basically compiled into the equivalent of array = [(objc_get_class ("NSArray")) new]; objc_get_class returns the class pointer corresponding to the string `NSArray'; and because of the lookup, the operation is more complicated and slow than a simple instance method invocation. Most high performance Objective-C code (using the GNU Objc runtime) I had the opportunity to read (or write) work around this problem by caching the class pointer: Class arrayClass = [NSArray class]; ... later on ... 
array = [arrayClass new]; array = [arrayClass new]; array = [arrayClass new]; In this case, you always perform a class lookup (the first one), but then all the [arrayClass new] methods run exactly as fast as an instance method invocation. It helps if you have many class method invocations to the same class. The long-term solution to this problem would be to modify the compiler to output tables of class pointers corresponding to all the class method invocations, and to add code to the runtime to update these tables - that should in the end allow class method invocations to perform precisely as fast as instance method invocations, because no class lookup would be involved. I think the Apple Objective-C runtime uses this technique. Doing this involves synchronized modifications in the runtime and in the compiler. As a first medicine to the problem, I [NP] have redesigned and rewritten the way the runtime is performing class lookup. This doesn't give as much speed as the other (definitive) approach, but at least a class method invocation now takes approximately 4.5 times an instance method invocation on my machine (it would take approx 12 times before the rewriting), which is a lot better. One of the main reason the new class lookup is so faster is because I implemented it in a way that can safely run multithreaded without using locks - a so-called `lock-free' data structure. The atomic operation is pointer assignment. The reason why in this problem lock-free data structures work so well is that you never remove classes from the table - and the difficult thing with lock-free data structures is freeing data when is removed from the structures. */ #include "objc-private/common.h" #include "objc-private/error.h" #include "objc/runtime.h" #include "objc/thr.h" #include "objc-private/module-abi-8.h" /* For CLS_ISCLASS and similar. */ #include "objc-private/runtime.h" /* the kitchen sink */ #include "objc-private/sarray.h" /* For sarray_put_at_safe. 
*/ #include "objc-private/selector.h" /* For sarray_put_at_safe. */ #include <string.h> /* For memset */ /* We use a table which maps a class name to the corresponding class pointer. The first part of this file defines this table, and functions to do basic operations on the table. The second part of the file implements some higher level Objective-C functionality for classes by using the functions provided in the first part to manage the table. */ /** ** Class Table Internals **/ /* A node holding a class */ typedef struct class_node { struct class_node *next; /* Pointer to next entry on the list. NULL indicates end of list. */ const char *name; /* The class name string */ int length; /* The class name string length */ Class pointer; /* The Class pointer */ } *class_node_ptr; /* A table containing classes is a class_node_ptr (pointing to the first entry in the table - if it is NULL, then the table is empty). */ /* We have 1024 tables. Each table contains all class names which have the same hash (which is a number between 0 and 1023). To look up a class_name, we compute its hash, and get the corresponding table. Once we have the table, we simply compare strings directly till we find the one which we want (using the length first). The number of tables is quite big on purpose (a normal big application has less than 1000 classes), so that you shouldn't normally get any collisions, and get away with a single comparison (which we can't avoid since we need to know that you have got the right thing). */ #define CLASS_TABLE_SIZE 1024 #define CLASS_TABLE_MASK 1023 static class_node_ptr class_table_array[CLASS_TABLE_SIZE]; /* The table writing mutex - we lock on writing to avoid conflicts between different writers, but we read without locks. That is possible because we assume pointer assignment to be an atomic operation. TODO: This is only true under certain circumstances, which should be clarified. 
*/ static objc_mutex_t __class_table_lock = NULL; /* CLASS_TABLE_HASH is how we compute the hash of a class name. It is a macro - *not* a function - arguments *are* modified directly. INDEX should be a variable holding an int; HASH should be a variable holding an int; CLASS_NAME should be a variable holding a (char *) to the class_name. After the macro is executed, INDEX contains the length of the string, and HASH the computed hash of the string; CLASS_NAME is untouched. */ #define CLASS_TABLE_HASH(INDEX, HASH, CLASS_NAME) \ HASH = 0; \ for (INDEX = 0; CLASS_NAME[INDEX] != '\0'; INDEX++) \ { \ HASH = (HASH << 4) ^ (HASH >> 28) ^ CLASS_NAME[INDEX]; \ } \ \ HASH = (HASH ^ (HASH >> 10) ^ (HASH >> 20)) & CLASS_TABLE_MASK; /* Setup the table. */ static void class_table_setup (void) { /* Start - nothing in the table. */ memset (class_table_array, 0, sizeof (class_node_ptr) * CLASS_TABLE_SIZE); /* The table writing mutex. */ __class_table_lock = objc_mutex_allocate (); } /* Insert a class in the table (used when a new class is registered). */ static void class_table_insert (const char *class_name, Class class_pointer) { int hash, length; class_node_ptr new_node; /* Find out the class name's hash and length. */ CLASS_TABLE_HASH (length, hash, class_name); /* Prepare the new node holding the class. */ new_node = objc_malloc (sizeof (struct class_node)); new_node->name = class_name; new_node->length = length; new_node->pointer = class_pointer; /* Lock the table for modifications. */ objc_mutex_lock (__class_table_lock); /* Insert the new node in the table at the beginning of the table at class_table_array[hash]. */ new_node->next = class_table_array[hash]; class_table_array[hash] = new_node; objc_mutex_unlock (__class_table_lock); } /* Get a class from the table. This does not need mutex protection. Currently, this function is called each time you call a static method, this is why it must be very fast. 
*/ static inline Class class_table_get_safe (const char *class_name) { class_node_ptr node; int length, hash; /* Compute length and hash. */ CLASS_TABLE_HASH (length, hash, class_name); node = class_table_array[hash]; if (node != NULL) { do { if (node->length == length) { /* Compare the class names. */ int i; for (i = 0; i < length; i++) { if ((node->name)[i] != class_name[i]) break; } if (i == length) { /* They are equal! */ return node->pointer; } } } while ((node = node->next) != NULL); } return Nil; } /* Enumerate over the class table. */ struct class_table_enumerator { int hash; class_node_ptr node; }; static Class class_table_next (struct class_table_enumerator **e) { struct class_table_enumerator *enumerator = *e; class_node_ptr next; if (enumerator == NULL) { *e = objc_malloc (sizeof (struct class_table_enumerator)); enumerator = *e; enumerator->hash = 0; enumerator->node = NULL; next = class_table_array[enumerator->hash]; } else next = enumerator->node->next; if (next != NULL) { enumerator->node = next; return enumerator->node->pointer; } else { enumerator->hash++; while (enumerator->hash < CLASS_TABLE_SIZE) { next = class_table_array[enumerator->hash]; if (next != NULL) { enumerator->node = next; return enumerator->node->pointer; } enumerator->hash++; } /* Ok - table finished - done. */ objc_free (enumerator); return Nil; } } #if 0 /* DEBUGGING FUNCTIONS */ /* Debugging function - print the class table. */ void class_table_print (void) { int i; for (i = 0; i < CLASS_TABLE_SIZE; i++) { class_node_ptr node; printf ("%d:\n", i); node = class_table_array[i]; while (node != NULL) { printf ("\t%s\n", node->name); node = node->next; } } } /* Debugging function - print an histogram of number of classes in function of hash key values. Useful to evaluate the hash function in real cases. 
*/ void class_table_print_histogram (void) { int i, j; int counter = 0; for (i = 0; i < CLASS_TABLE_SIZE; i++) { class_node_ptr node; node = class_table_array[i]; while (node != NULL) { counter++; node = node->next; } if (((i + 1) % 50) == 0) { printf ("%4d:", i + 1); for (j = 0; j < counter; j++) printf ("X"); printf ("\n"); counter = 0; } } printf ("%4d:", i + 1); for (j = 0; j < counter; j++) printf ("X"); printf ("\n"); } #endif /* DEBUGGING FUNCTIONS */ /** ** Objective-C runtime functions **/ /* From now on, the only access to the class table data structure should be via the class_table_* functions. */ /* This is a hook which is called by objc_get_class and objc_lookup_class if the runtime is not able to find the class. This may e.g. try to load in the class using dynamic loading. This hook was a public, global variable in the Traditional GNU Objective-C Runtime API (objc/objc-api.h). The modern GNU Objective-C Runtime API (objc/runtime.h) provides the objc_setGetUnknownClassHandler() function instead. */ Class (*_objc_lookup_class) (const char *name) = 0; /* !T:SAFE */ /* The handler currently in use. PS: if both __obj_get_unknown_class_handler and _objc_lookup_class are defined, __objc_get_unknown_class_handler is called first. */ static objc_get_unknown_class_handler __objc_get_unknown_class_handler = NULL; objc_get_unknown_class_handler objc_setGetUnknownClassHandler (objc_get_unknown_class_handler new_handler) { objc_get_unknown_class_handler old_handler = __objc_get_unknown_class_handler; __objc_get_unknown_class_handler = new_handler; return old_handler; } /* True when class links has been resolved. */ BOOL __objc_class_links_resolved = NO; /* !T:UNUSED */ void __objc_init_class_tables (void) { /* Allocate the class hash table. 
*/ if (__class_table_lock) return; objc_mutex_lock (__objc_runtime_mutex); class_table_setup (); objc_mutex_unlock (__objc_runtime_mutex); } /* This function adds a class to the class hash table, and assigns the class a number, unless it's already known. Return 'YES' if the class was added. Return 'NO' if the class was already known. */ BOOL __objc_add_class_to_hash (Class class) { Class existing_class; objc_mutex_lock (__objc_runtime_mutex); /* Make sure the table is there. */ assert (__class_table_lock); /* Make sure it's not a meta class. */ assert (CLS_ISCLASS (class)); /* Check to see if the class is already in the hash table. */ existing_class = class_table_get_safe (class->name); if (existing_class) { objc_mutex_unlock (__objc_runtime_mutex); return NO; } else { /* The class isn't in the hash table. Add the class and assign a class number. */ static unsigned int class_number = 1; CLS_SETNUMBER (class, class_number); CLS_SETNUMBER (class->class_pointer, class_number); ++class_number; class_table_insert (class->name, class); objc_mutex_unlock (__objc_runtime_mutex); return YES; } } Class objc_getClass (const char *name) { Class class; if (name == NULL) return Nil; class = class_table_get_safe (name); if (class) return class; if (__objc_get_unknown_class_handler) return (*__objc_get_unknown_class_handler) (name); if (_objc_lookup_class) return (*_objc_lookup_class) (name); return Nil; } Class objc_lookUpClass (const char *name) { if (name == NULL) return Nil; else return class_table_get_safe (name); } Class objc_getMetaClass (const char *name) { Class class = objc_getClass (name); if (class) return class->class_pointer; else return Nil; } Class objc_getRequiredClass (const char *name) { Class class = objc_getClass (name); if (class) return class; else _objc_abort ("objc_getRequiredClass ('%s') failed: class not found\n", name); } int objc_getClassList (Class *returnValue, int maxNumberOfClassesToReturn) { /* Iterate over all entries in the table. 
*/ int hash, count = 0; for (hash = 0; hash < CLASS_TABLE_SIZE; hash++) { class_node_ptr node = class_table_array[hash]; while (node != NULL) { if (returnValue) { if (count < maxNumberOfClassesToReturn) returnValue[count] = node->pointer; else return count; } count++; node = node->next; } } return count; } Class objc_allocateClassPair (Class super_class, const char *class_name, size_t extraBytes) { Class new_class; Class new_meta_class; if (class_name == NULL) return Nil; if (objc_getClass (class_name)) return Nil; if (super_class) { /* If you want to build a hierarchy of classes, you need to build and register them one at a time. The risk is that you are able to cause confusion by registering a subclass before the superclass or similar. */ if (CLS_IS_IN_CONSTRUCTION (super_class)) return Nil; } /* Technically, we should create the metaclass first, then use class_createInstance() to create the class. That complication would be relevant if we had class variables, but we don't, so we just ignore it and create everything directly and assume all classes have the same size. */ new_class = objc_calloc (1, sizeof (struct objc_class) + extraBytes); new_meta_class = objc_calloc (1, sizeof (struct objc_class) + extraBytes); /* We create an unresolved class, similar to one generated by the compiler. It will be resolved later when we register it. Note how the metaclass details are not that important; when the class is resolved, the ones that matter will be fixed up. */ new_class->class_pointer = new_meta_class; new_meta_class->class_pointer = 0; if (super_class) { /* Force the name of the superclass in place of the link to the actual superclass, which will be put there when the class is resolved. 
*/ const char *super_class_name = class_getName (super_class); new_class->super_class = (void *)super_class_name; new_meta_class->super_class = (void *)super_class_name; } else { new_class->super_class = (void *)0; new_meta_class->super_class = (void *)0; } new_class->name = objc_malloc (strlen (class_name) + 1); strcpy ((char*)new_class->name, class_name); new_meta_class->name = new_class->name; new_class->version = 0; new_meta_class->version = 0; new_class->info = _CLS_CLASS | _CLS_IN_CONSTRUCTION; new_meta_class->info = _CLS_META | _CLS_IN_CONSTRUCTION; if (super_class) new_class->instance_size = super_class->instance_size; else new_class->instance_size = 0; new_meta_class->instance_size = sizeof (struct objc_class); return new_class; } void objc_registerClassPair (Class class_) { if (class_ == Nil) return; if ((! CLS_ISCLASS (class_)) || (! CLS_IS_IN_CONSTRUCTION (class_))) return; if ((! CLS_ISMETA (class_->class_pointer)) || (! CLS_IS_IN_CONSTRUCTION (class_->class_pointer))) return; objc_mutex_lock (__objc_runtime_mutex); if (objc_getClass (class_->name)) { objc_mutex_unlock (__objc_runtime_mutex); return; } CLS_SET_NOT_IN_CONSTRUCTION (class_); CLS_SET_NOT_IN_CONSTRUCTION (class_->class_pointer); __objc_init_class (class_); /* Resolve class links immediately. No point in waiting. */ __objc_resolve_class_links (); objc_mutex_unlock (__objc_runtime_mutex); } void objc_disposeClassPair (Class class_) { if (class_ == Nil) return; if ((! CLS_ISCLASS (class_)) || (! CLS_IS_IN_CONSTRUCTION (class_))) return; if ((! CLS_ISMETA (class_->class_pointer)) || (! CLS_IS_IN_CONSTRUCTION (class_->class_pointer))) return; /* Undo any class_addIvar(). */ if (class_->ivars) { int i; for (i = 0; i < class_->ivars->ivar_count; i++) { struct objc_ivar *ivar = &(class_->ivars->ivar_list[i]); objc_free ((char *)ivar->ivar_name); objc_free ((char *)ivar->ivar_type); } objc_free (class_->ivars); } /* Undo any class_addMethod(). 
*/ if (class_->methods) { struct objc_method_list *list = class_->methods; while (list) { int i; struct objc_method_list *next = list->method_next; for (i = 0; i < list->method_count; i++) { struct objc_method *method = &(list->method_list[i]); objc_free ((char *)method->method_name); objc_free ((char *)method->method_types); } objc_free (list); list = next; } } /* Undo any class_addProtocol(). */ if (class_->protocols) { struct objc_protocol_list *list = class_->protocols; while (list) { struct objc_protocol_list *next = list->next; objc_free (list); list = next; } } /* Undo any class_addMethod() on the meta-class. */ if (class_->class_pointer->methods) { struct objc_method_list *list = class_->class_pointer->methods; while (list) { int i; struct objc_method_list *next = list->method_next; for (i = 0; i < list->method_count; i++) { struct objc_method *method = &(list->method_list[i]); objc_free ((char *)method->method_name); objc_free ((char *)method->method_types); } objc_free (list); list = next; } } /* Undo objc_allocateClassPair(). */ objc_free ((char *)(class_->name)); objc_free (class_->class_pointer); objc_free (class_); } /* Traditional GNU Objective-C Runtime API. Important: this method is called automatically by the compiler while messaging (if using the traditional ABI), so it is worth keeping it fast; don't make it just a wrapper around objc_getClass(). */ /* Note that this is roughly equivalent to objc_getRequiredClass(). */ /* Get the class object for the class named NAME. If NAME does not identify a known class, the hook _objc_lookup_class is called. If this fails, an error message is issued and the system aborts. 
*/ Class objc_get_class (const char *name) { Class class; class = class_table_get_safe (name); if (class) return class; if (__objc_get_unknown_class_handler) class = (*__objc_get_unknown_class_handler) (name); if ((!class) && _objc_lookup_class) class = (*_objc_lookup_class) (name); if (class) return class; _objc_abort ("objc runtime: cannot find class %s\n", name); return 0; } /* This is used by the compiler too. */ Class objc_get_meta_class (const char *name) { return objc_get_class (name)->class_pointer; } /* This is not used by GCC, but the clang compiler seems to use it when targeting the GNU runtime. That's wrong, but we have it to be compatible. */ Class objc_lookup_class (const char *name) { return objc_getClass (name); } /* This is used when the implementation of a method changes. It goes through all classes, looking for the ones that have these methods (either method_a or method_b; method_b can be NULL), and reloads the implementation for these. You should call this with the runtime mutex already locked. */ void __objc_update_classes_with_methods (struct objc_method *method_a, struct objc_method *method_b) { int hash; /* Iterate over all classes. */ for (hash = 0; hash < CLASS_TABLE_SIZE; hash++) { class_node_ptr node = class_table_array[hash]; while (node != NULL) { /* We execute this loop twice: the first time, we iterate over all methods in the class (instance methods), while the second time we iterate over all methods in the meta class (class methods). */ Class class = Nil; BOOL done = NO; while (done == NO) { struct objc_method_list * method_list; if (class == Nil) { /* The first time, we work on the class. */ class = node->pointer; } else { /* The second time, we work on the meta class. 
*/ class = class->class_pointer; done = YES; } method_list = class->methods; while (method_list) { int i; for (i = 0; i < method_list->method_count; ++i) { struct objc_method *method = &method_list->method_list[i]; /* If the method is one of the ones we are looking for, update the implementation. */ if (method == method_a) sarray_at_put_safe (class->dtable, (sidx) method_a->method_name->sel_id, method_a->method_imp); if (method == method_b) { if (method_b != NULL) sarray_at_put_safe (class->dtable, (sidx) method_b->method_name->sel_id, method_b->method_imp); } } method_list = method_list->method_next; } } node = node->next; } } } /* Resolve super/subclass links for all classes. The only thing we can be sure of is that the class_pointer for class objects point to the right meta class objects. */ void __objc_resolve_class_links (void) { struct class_table_enumerator *es = NULL; Class object_class = objc_get_class ("Object"); Class class1; assert (object_class); objc_mutex_lock (__objc_runtime_mutex); /* Assign subclass links. */ while ((class1 = class_table_next (&es))) { /* Make sure we have what we think we have. */ assert (CLS_ISCLASS (class1)); assert (CLS_ISMETA (class1->class_pointer)); /* The class_pointer of all meta classes point to Object's meta class. */ class1->class_pointer->class_pointer = object_class->class_pointer; if (! CLS_ISRESOLV (class1)) { CLS_SETRESOLV (class1); CLS_SETRESOLV (class1->class_pointer); if (class1->super_class) { Class a_super_class = objc_get_class ((char *) class1->super_class); assert (a_super_class); DEBUG_PRINTF ("making class connections for: %s\n", class1->name); /* Assign subclass links for superclass. */ class1->sibling_class = a_super_class->subclass_list; a_super_class->subclass_list = class1; /* Assign subclass links for meta class of superclass. 
*/ if (a_super_class->class_pointer) { class1->class_pointer->sibling_class = a_super_class->class_pointer->subclass_list; a_super_class->class_pointer->subclass_list = class1->class_pointer; } } else /* A root class, make its meta object be a subclass of Object. */ { class1->class_pointer->sibling_class = object_class->subclass_list; object_class->subclass_list = class1->class_pointer; } } } /* Assign superclass links. */ es = NULL; while ((class1 = class_table_next (&es))) { Class sub_class; for (sub_class = class1->subclass_list; sub_class; sub_class = sub_class->sibling_class) { sub_class->super_class = class1; if (CLS_ISCLASS (sub_class)) sub_class->class_pointer->super_class = class1->class_pointer; } } objc_mutex_unlock (__objc_runtime_mutex); } const char * class_getName (Class class_) { if (class_ == Nil) return "nil"; return class_->name; } BOOL class_isMetaClass (Class class_) { /* CLS_ISMETA includes the check for Nil class_. */ return CLS_ISMETA (class_); } /* Even inside libobjc it may be worth using class_getSuperclass instead of accessing class_->super_class directly because it resolves the class links if needed. If you access class_->super_class directly, make sure to deal with the situation where the class is not resolved yet! */ Class class_getSuperclass (Class class_) { if (class_ == Nil) return Nil; /* Classes that are in construction are not resolved, and still have the class name (instead of a class pointer) in the class_->super_class field. In that case we need to lookup the superclass name to return the superclass. We can not resolve the class until it is registered. */ if (CLS_IS_IN_CONSTRUCTION (class_)) { if (CLS_ISMETA (class_)) return object_getClass ((id)objc_lookUpClass ((const char *)(class_->super_class))); else return objc_lookUpClass ((const char *)(class_->super_class)); } /* If the class is not resolved yet, super_class would point to a string (the name of the super class) as opposed to the actual super class. 
In that case, we need to resolve the class links before we can return super_class. */ if (! CLS_ISRESOLV (class_)) __objc_resolve_class_links (); return class_->super_class; } int class_getVersion (Class class_) { if (class_ == Nil) return 0; return (int)(class_->version); } void class_setVersion (Class class_, int version) { if (class_ == Nil) return; class_->version = version; } size_t class_getInstanceSize (Class class_) { if (class_ == Nil) return 0; return class_->instance_size; }
gpl-2.0
NamanG/coreboot_final
src/vendorcode/amd/agesa/f16kb/Proc/GNB/Modules/GnbInitKB/PcieMidInitKB.c
35
14506
/* $NoKeywords:$ */ /** * @file * * PCIe mid post initialization. * * * * @xrefitem bom "File Content Label" "Release Content" * @e project: AGESA * @e sub-project: GNB * @e \$Revision: 88079 $ @e \$Date: 2013-02-15 15:28:53 -0600 (Fri, 15 Feb 2013) $ * */ /* ***************************************************************************** * * Copyright (c) 2008 - 2013, Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *************************************************************************** * */ /*---------------------------------------------------------------------------------------- * M O D U L E S U S E D *---------------------------------------------------------------------------------------- */ #include "AGESA.h" #include "amdlib.h" #include "Ids.h" #include "Gnb.h" #include "GnbPcie.h" #include "GnbPcieConfig.h" #include "GnbPcieInitLibV1.h" #include "GnbPcieInitLibV4.h" #include "GnbPcieInitLibV5.h" #include "GnbFamServices.h" #include "PcieLibKB.h" #include "PciePortServicesV4.h" #include "GnbRegistersKB.h" #include "Filecode.h" #define FILECODE PROC_GNB_MODULES_GNBINITKB_PCIEMIDINITKB_FILECODE /*---------------------------------------------------------------------------------------- * D E F I N I T I O N S A N D M A C R O S *---------------------------------------------------------------------------------------- */ extern CONST PCIE_PORT_REGISTER_TABLE_HEADER ROMDATA PortInitMidTableKB; /*---------------------------------------------------------------------------------------- * T Y P E D E F S A N D S T R U C T U R E S *---------------------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------------------- * P R O T O T Y P E S O F L O C A L F U N C T I O N S *---------------------------------------------------------------------------------------- */ AGESA_STATUS PcieMidInterfaceKB ( IN AMD_CONFIG_PARAMS *StdHeader ); /*----------------------------------------------------------------------------------------*/ /** * Callback to init various features on all active ports * * * * * @param[in] Engine Pointer to engine config descriptor * @param[in, out] Buffer Not used * @param[in] Pcie Pointer to global PCIe configuration * */ VOID STATIC PcieMidPortInitCallbackKB ( IN PCIe_ENGINE_CONFIG *Engine, IN OUT VOID *Buffer, IN PCIe_PLATFORM_CONFIG *Pcie ) { 
PciePortProgramRegisterTable (PortInitMidTableKB.Table, PortInitMidTableKB.Length, Engine, TRUE, Pcie); if (PcieConfigCheckPortStatus (Engine, INIT_STATUS_PCIE_TRAINING_SUCCESS) || Engine->Type.Port.PortData.LinkHotplug != HotplugDisabled) { PcieEnableSlotPowerLimitV5 (Engine, Pcie); // If StartLane == 4/7 and EndLane == 7/4, this is GFX port if (!(((Engine->EngineData.StartLane == 4) && (Engine->EngineData.EndLane == 7)) || ((Engine->EngineData.StartLane == 7) && (Engine->EngineData.EndLane == 4)))) { // Only count active Gpp ports *(UINT8 *)Buffer += 1; } } } /*----------------------------------------------------------------------------------------*/ /** * Callback to init ASPM on all active ports * * * * * @param[in] Engine Pointer to engine config descriptor * @param[in, out] Buffer PortCount * @param[in] Pcie Pointer to global PCIe configuration * */ VOID STATIC PcieMidAspmInitCallbackKB ( IN PCIe_ENGINE_CONFIG *Engine, IN OUT VOID *Buffer, IN PCIe_PLATFORM_CONFIG *Pcie ) { IDS_HDT_CONSOLE (GNB_TRACE, "PcieMidAspmInitCallbackKB Enter\n"); IDS_HDT_CONSOLE (GNB_TRACE, " PortCount = %02x\n", *(UINT8 *)Buffer); // If StartLane == 4/7 and EndLane == 7/4, this is GFX port if (!(((Engine->EngineData.StartLane == 4) && (Engine->EngineData.EndLane == 7)) || ((Engine->EngineData.StartLane == 7) && (Engine->EngineData.EndLane == 4)))) { // For GPP ports only set STRAP_MED_yTSx_COUNT=2, but only if active ports is > 2 switch (*(UINT8 *)Buffer) { case 0: case 1: break; case 2: PciePortRegisterRMW ( Engine, DxFxxE4_xA0_ADDRESS, DxFxxE4_xA0_Reserved_26_25_MASK | DxFxxE4_xA0_Reserved_28_28_MASK, (0 << DxFxxE4_xA0_Reserved_26_25_OFFSET) | (1 << DxFxxE4_xA0_Reserved_28_28_OFFSET), TRUE, Pcie ); break; default: PciePortRegisterRMW ( Engine, 0xC0, 0x30, 0x2 << 4, TRUE, Pcie ); break; } } PcieEnableAspm (Engine, Pcie); IDS_HDT_CONSOLE (GNB_TRACE, "PcieMidAspmInitCallbackKB Exit\n"); } /*----------------------------------------------------------------------------------------*/ /** 
* Master procedure to init various features on all active ports * * * * * @param[in] Pcie Pointer to global PCIe configuration * @retval AGESA_STATUS * */ AGESA_STATUS STATIC PcieMidPortInitKB ( IN PCIe_PLATFORM_CONFIG *Pcie ) { AGESA_STATUS Status; PCIE_LINK_SPEED_CAP GlobalSpeedCap; UINT8 PortCount; Status = AGESA_SUCCESS; PortCount = 0; PcieConfigRunProcForAllEngines ( DESCRIPTOR_ALLOCATED | DESCRIPTOR_PCIE_ENGINE, PcieMidPortInitCallbackKB, &PortCount, Pcie ); PcieConfigRunProcForAllEngines ( DESCRIPTOR_ALLOCATED | DESCRIPTOR_PCIE_ENGINE, PcieMidAspmInitCallbackKB, &PortCount, Pcie ); GlobalSpeedCap = PcieUtilGlobalGenCapability ( PCIE_PORT_GEN_CAP_BOOT | PCIE_GLOBAL_GEN_CAP_TRAINED_PORTS | PCIE_GLOBAL_GEN_CAP_HOTPLUG_PORTS, Pcie ); PcieSetVoltageKB (GlobalSpeedCap, Pcie); return Status; } /*----------------------------------------------------------------------------------------*/ /** * Clock gating * * * * @param[in] Wrapper Pointer to wrapper config descriptor * @param[in] Pcie Pointer to global PCIe configuration */ STATIC VOID PciePwrClockGatingKB ( IN PCIe_WRAPPER_CONFIG *Wrapper, IN PCIe_PLATFORM_CONFIG *Pcie ) { D0F0xE4_WRAP_8011_STRUCT D0F0xE4_WRAP_8011; D0F0xE4_WRAP_8012_STRUCT D0F0xE4_WRAP_8012; D0F0xE4_WRAP_8014_STRUCT D0F0xE4_WRAP_8014; D0F0xE4_WRAP_8015_STRUCT D0F0xE4_WRAP_8015; D0F0xE4_WRAP_8016_STRUCT D0F0xE4_WRAP_8016; UINT8 CoreId; IDS_HDT_CONSOLE (GNB_TRACE, "PciePwrClockGatingKB Enter\n"); D0F0xE4_WRAP_8014.Value = PcieRegisterRead ( Wrapper, WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8014_ADDRESS), Pcie ); D0F0xE4_WRAP_8015.Value = PcieRegisterRead ( Wrapper, WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8015_ADDRESS), Pcie ); D0F0xE4_WRAP_8012.Value = PcieRegisterRead ( Wrapper, WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8012_ADDRESS), Pcie ); D0F0xE4_WRAP_8011.Value = PcieRegisterRead ( Wrapper, WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8011_ADDRESS), Pcie ); if (Wrapper->Features.ClkGating == 0x1) { D0F0xE4_WRAP_8011.Field.TxclkPermGateLatency 
= 0; D0F0xE4_WRAP_8011.Field.Reserved_16_16 = 0x1; D0F0xE4_WRAP_8011.Field.TxclkPermGateEven = 0x1; D0F0xE4_WRAP_8011.Field.TxclkPermStop = 0; D0F0xE4_WRAP_8011.Field.TxclkDynGateEnable = 0x1; D0F0xE4_WRAP_8011.Field.TxclkDynGateLatency = 0; D0F0xE4_WRAP_8011.Field.TxclkRegsGateEnable = 0x1; D0F0xE4_WRAP_8011.Field.TxclkRegsGateLatency = 0; D0F0xE4_WRAP_8011.Field.TxclkLcntGateEnable = 0x1; D0F0xE4_WRAP_8012.Field.Pif1xIdleResumeLatency = 0x7; D0F0xE4_WRAP_8012.Field.Pif1xIdleGateEnable = 0x1; D0F0xE4_WRAP_8012.Field.Pif1xIdleGateLatency = 0; D0F0xE4_WRAP_8014.Field.TxclkPermGateEnable = 0x1; D0F0xE4_WRAP_8014.Field.TxclkPrbsGateEnable = 0x1; D0F0xE4_WRAP_8014.Field.PcieGatePifA1xEnable = 0x1; D0F0xE4_WRAP_8014.Field.PcieGatePifB1xEnable = 0x1; } if (Wrapper->Features.TxclkGatingPllPowerDown == 0x1) { D0F0xE4_WRAP_8014.Field.TxclkPermGateOnlyWhenPllPwrDn = 0x1; } PcieRegisterWrite ( Wrapper, WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8014_ADDRESS), D0F0xE4_WRAP_8014.Value, TRUE, Pcie ); PcieRegisterWrite ( Wrapper, WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8015_ADDRESS), D0F0xE4_WRAP_8015.Value, TRUE, Pcie ); PcieRegisterWrite ( Wrapper, WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8012_ADDRESS), D0F0xE4_WRAP_8012.Value, TRUE, Pcie ); PcieRegisterWrite ( Wrapper, WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8011_ADDRESS), D0F0xE4_WRAP_8011.Value, TRUE, Pcie ); for (CoreId = Wrapper->StartPcieCoreId; CoreId <= Wrapper->EndPcieCoreId; CoreId++) { PcieRegisterWriteField ( Wrapper, CORE_SPACE (CoreId, D0F0xE4_CORE_0011_ADDRESS), D0F0xE4_CORE_0011_DynClkLatency_OFFSET, D0F0xE4_CORE_0011_DynClkLatency_WIDTH, 0xf, TRUE, Pcie ); } if (Wrapper->Features.LclkGating == 0x1) { D0F0xE4_WRAP_8016.Value = PcieRegisterRead ( Wrapper, WRAP_SPACE (Wrapper->WrapId, D0F0xE4_WRAP_8016_ADDRESS), Pcie ); D0F0xE4_WRAP_8016.Field.LclkDynGateEnable = 0x1; D0F0xE4_WRAP_8016.Field.LclkGateFree = 0x1; D0F0xE4_WRAP_8016.Field.LclkDynGateLatency = 0x3F; PcieRegisterWrite ( Wrapper, WRAP_SPACE 
(Wrapper->WrapId, D0F0xE4_WRAP_8016_ADDRESS), D0F0xE4_WRAP_8016.Value, TRUE, Pcie ); } IDS_HDT_CONSOLE (GNB_TRACE, "PciePwrClockGatingKB Exit\n"); } /*----------------------------------------------------------------------------------------*/ /** * Per wrapper Pcie Late Init. * * * @param[in] Wrapper Pointer to wrapper configuration descriptor * @param[in] Buffer Pointer buffer * @param[in] Pcie Pointer to global PCIe configuration */ AGESA_STATUS STATIC PcieMidInitCallbackKB ( IN PCIe_WRAPPER_CONFIG *Wrapper, IN OUT VOID *Buffer, IN PCIe_PLATFORM_CONFIG *Pcie ) { PciePwrPowerDownUnusedLanesKB (Wrapper, Pcie); PciePwrClockGatingKB (Wrapper, Pcie); PcieLockRegisters (Wrapper, Pcie); return AGESA_SUCCESS; } /*----------------------------------------------------------------------------------------*/ /** * Pcie Late Init * * Late PCIe initialization * * @param[in] Pcie Pointer to global PCIe configuration * @retval AGESA_SUCCESS Topology successfully mapped * @retval AGESA_ERROR Topology can not be mapped */ AGESA_STATUS STATIC PcieMidInitKB ( IN PCIe_PLATFORM_CONFIG *Pcie ) { AGESA_STATUS AgesaStatus; AGESA_STATUS Status; IDS_HDT_CONSOLE (GNB_TRACE, "PcieMidInitKB Enter\n"); AgesaStatus = AGESA_SUCCESS; Status = PcieConfigRunProcForAllWrappers (DESCRIPTOR_ALL_WRAPPERS, PcieMidInitCallbackKB, NULL, Pcie); AGESA_STATUS_UPDATE (Status, AgesaStatus); IDS_HDT_CONSOLE (GNB_TRACE, "PcieMidInitKB Exit [0x%x]\n", AgesaStatus); return AgesaStatus; } /*----------------------------------------------------------------------------------------*/ /** * PCIe Mid Init * * * * @param[in] StdHeader Standard configuration header * @retval AGESA_STATUS */ AGESA_STATUS PcieMidInterfaceKB ( IN AMD_CONFIG_PARAMS *StdHeader ) { AGESA_STATUS AgesaStatus; AGESA_STATUS Status; PCIe_PLATFORM_CONFIG *Pcie; IDS_HDT_CONSOLE (GNB_TRACE, "PcieMidInterfaceKB Enter\n"); AgesaStatus = AGESA_SUCCESS; Status = PcieLocateConfigurationData (StdHeader, &Pcie); AGESA_STATUS_UPDATE (Status, AgesaStatus); if 
(Status == AGESA_SUCCESS) { PciePortsVisibilityControlV5 (UnhidePorts, Pcie); Status = PcieMidPortInitKB (Pcie); AGESA_STATUS_UPDATE (Status, AgesaStatus); ASSERT (Status == AGESA_SUCCESS); Status = PcieMidInitKB (Pcie); AGESA_STATUS_UPDATE (Status, AgesaStatus); ASSERT (Status == AGESA_SUCCESS); PciePortsVisibilityControlV5 (HidePorts, Pcie); } IDS_HDT_CONSOLE (GNB_TRACE, "PcieMidInterfaceKB Exit [0x%x]\n", AgesaStatus); return AgesaStatus; }
gpl-2.0
victoredwardocallaghan/m5a99fx
src/vendorcode/amd/agesa/f16kb/Proc/CPU/Family/0x16/KB/F16KbSharedMsrTable.c
35
4706
/* $NoKeywords:$ */ /** * @file * * AMD Family_16 Kabini Shared MSR table with values as defined in BKDG * * @xrefitem bom "File Content Label" "Release Content" * @e project: AGESA * @e sub-project: CPU/Family/0x16/KB * @e \$Revision: 86705 $ @e \$Date: 2013-01-24 17:34:21 -0600 (Thu, 24 Jan 2013) $ * */ /* ****************************************************************************** * * Copyright (c) 2008 - 2013, Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
****************************************************************************** */ /*---------------------------------------------------------------------------------------- * M O D U L E S U S E D *---------------------------------------------------------------------------------------- */ #include "AGESA.h" #include "amdlib.h" #include "cpuRegisters.h" #include "Table.h" #include "Filecode.h" CODE_GROUP (G3_DXE) RDATA_GROUP (G3_DXE) #define FILECODE PROC_CPU_FAMILY_0X16_KB_F16KBSHAREDMSRTABLE_FILECODE /*---------------------------------------------------------------------------------------- * D E F I N I T I O N S A N D M A C R O S *---------------------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------------------- * T Y P E D E F S A N D S T R U C T U R E S *---------------------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------------------- * P R O T O T Y P E S O F L O C A L F U N C T I O N S *---------------------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------------------- * E X P O R T E D F U N C T I O N S *---------------------------------------------------------------------------------------- */ // M S R T a b l e s // ---------------------- STATIC CONST MSR_TYPE_ENTRY_INITIALIZER ROMDATA F16KbSharedMsrRegisters[] = { // When there's a entry, change the number of entries below from 0 to (sizeof (F16KbSharedMsrRegisters) / sizeof (TABLE_ENTRY_FIELDS)) {0}, }; // Shared MSRs with Special Programming Requirements Table // ---------------------- STATIC CONST FAM_SPECIFIC_WORKAROUND_TYPE_ENTRY_INITIALIZER ROMDATA F16KbSharedMsrWorkarounds[] = { // When there's a entry, change the number of entries below from 0 to (sizeof (F16KbSharedMsrWorkarounds) / sizeof 
(TABLE_ENTRY_FIELDS)) {0}, }; CONST REGISTER_TABLE ROMDATA F16KbSharedMsrRegisterTable = { ComputeUnitPrimary, PERFORM_TP_AFTER_AP_LAUNCH, 0, //(sizeof (F16KbSharedMsrRegisters) / sizeof (TABLE_ENTRY_FIELDS)), (TABLE_ENTRY_FIELDS *) &F16KbSharedMsrRegisters, }; CONST REGISTER_TABLE ROMDATA F16KbSharedMsrWorkaroundTable = { ComputeUnitPrimary, PERFORM_TP_AFTER_AP_LAUNCH, 0, //(sizeof (F16KbSharedMsrWorkarounds) / sizeof (TABLE_ENTRY_FIELDS)), (TABLE_ENTRY_FIELDS *) &F16KbSharedMsrWorkarounds, };
gpl-2.0
rachitrawat/Vengeance-Kernel-MSM7x27-Nanhu
sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
291
20658
/* Copyright (c) 2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/control.h> #include <asm/dma.h> #include <linux/dma-mapping.h> #include <linux/android_pmem.h> #include <linux/of_device.h> #include "msm-pcm-q6-v2.h" #include "msm-pcm-routing-v2.h" static struct audio_locks the_locks; struct snd_msm { struct snd_card *card; struct snd_pcm *pcm; }; #define PLAYBACK_NUM_PERIODS 8 #define PLAYBACK_PERIOD_SIZE 2048 #define CAPTURE_NUM_PERIODS 16 #define CAPTURE_PERIOD_SIZE 512 static struct snd_pcm_hardware msm_pcm_hardware_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = CAPTURE_NUM_PERIODS * CAPTURE_PERIOD_SIZE, .period_bytes_min = CAPTURE_PERIOD_SIZE, .period_bytes_max = CAPTURE_PERIOD_SIZE, .periods_min = CAPTURE_NUM_PERIODS, .periods_max = CAPTURE_NUM_PERIODS, .fifo_size = 0, }; static struct snd_pcm_hardware msm_pcm_hardware_playback = { .info = (SNDRV_PCM_INFO_MMAP | 
SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = PLAYBACK_NUM_PERIODS * PLAYBACK_PERIOD_SIZE, .period_bytes_min = PLAYBACK_PERIOD_SIZE, .period_bytes_max = PLAYBACK_PERIOD_SIZE, .periods_min = PLAYBACK_NUM_PERIODS, .periods_max = PLAYBACK_NUM_PERIODS, .fifo_size = 0, }; /* Conventional and unconventional sample rate supported */ static unsigned int supported_sample_rates[] = { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 }; static uint32_t in_frame_info[CAPTURE_NUM_PERIODS][2]; static struct snd_pcm_hw_constraint_list constraints_sample_rates = { .count = ARRAY_SIZE(supported_sample_rates), .list = supported_sample_rates, .mask = 0, }; static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, void *priv) { struct msm_audio *prtd = priv; struct snd_pcm_substream *substream = prtd->substream; uint32_t *ptrmem = (uint32_t *)payload; uint32_t idx = 0; uint32_t size = 0; pr_err("%s\n", __func__); switch (opcode) { case ASM_DATA_EVENT_WRITE_DONE_V2: { pr_debug("ASM_DATA_EVENT_WRITE_DONE_V2\n"); pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem); prtd->pcm_irq_pos += prtd->pcm_count; if (atomic_read(&prtd->start)) snd_pcm_period_elapsed(substream); atomic_inc(&prtd->out_count); wake_up(&the_locks.write_wait); if (!atomic_read(&prtd->start)) break; if (!prtd->mmap_flag) break; if (q6asm_is_cpu_buf_avail_nolock(IN, prtd->audio_client, &size, &idx)) { pr_debug("%s:writing %d bytes of buffer to dsp 2\n", __func__, prtd->pcm_count); q6asm_write_nolock(prtd->audio_client, prtd->pcm_count, 0, 0, NO_TIMESTAMP); } break; } case ASM_DATA_EVENT_RENDERED_EOS: pr_debug("ASM_DATA_EVENT_RENDERED_EOS\n"); prtd->cmd_ack = 1; wake_up(&the_locks.eos_wait); break; case ASM_DATA_EVENT_READ_DONE_V2: { 
pr_debug("ASM_DATA_EVENT_READ_DONE_V2\n"); pr_debug("token = 0x%08x\n", token); in_frame_info[token][0] = payload[4]; in_frame_info[token][1] = payload[5]; prtd->pcm_irq_pos += in_frame_info[token][0]; pr_debug("pcm_irq_pos=%d\n", prtd->pcm_irq_pos); if (atomic_read(&prtd->start)) snd_pcm_period_elapsed(substream); if (atomic_read(&prtd->in_count) <= prtd->periods) atomic_inc(&prtd->in_count); wake_up(&the_locks.read_wait); if (prtd->mmap_flag && q6asm_is_cpu_buf_avail_nolock(OUT, prtd->audio_client, &size, &idx)) q6asm_read_nolock(prtd->audio_client); break; } case APR_BASIC_RSP_RESULT: { switch (payload[0]) { case ASM_SESSION_CMD_RUN_V2: if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) { atomic_set(&prtd->start, 1); break; } if (prtd->mmap_flag) { pr_debug("%s:writing %d bytes of buffer to dsp\n", __func__, prtd->pcm_count); q6asm_write_nolock(prtd->audio_client, prtd->pcm_count, 0, 0, NO_TIMESTAMP); } else { while (atomic_read(&prtd->out_needed)) { pr_debug("%s:writing %d bytes of buffer to dsp\n", __func__, prtd->pcm_count); q6asm_write_nolock(prtd->audio_client, prtd->pcm_count, 0, 0, NO_TIMESTAMP); atomic_dec(&prtd->out_needed); wake_up(&the_locks.write_wait); }; } atomic_set(&prtd->start, 1); break; default: pr_debug("%s:Payload = [0x%x]stat[0x%x]\n", __func__, payload[0], payload[1]); break; } } break; default: pr_debug("Not Supported Event opcode[0x%x]\n", opcode); break; } } static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; int ret; pr_debug("%s\n", __func__); prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; /* rate and channels are sent to audio driver */ prtd->samp_rate = runtime->rate; prtd->channel_mode = runtime->channels; if (prtd->enabled) return 0; ret = q6asm_media_format_block_pcm(prtd->audio_client, runtime->rate, runtime->channels); 
if (ret < 0) pr_info("%s: CMD Format block failed\n", __func__); atomic_set(&prtd->out_count, runtime->periods); prtd->enabled = 1; prtd->cmd_ack = 0; return 0; } static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; int ret = 0; int i = 0; pr_debug("%s\n", __func__); prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream); prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; /* rate and channels are sent to audio driver */ prtd->samp_rate = runtime->rate; prtd->channel_mode = runtime->channels; if (prtd->enabled) return 0; pr_debug("Samp_rate = %d\n", prtd->samp_rate); pr_debug("Channel = %d\n", prtd->channel_mode); ret = q6asm_enc_cfg_blk_pcm(prtd->audio_client, prtd->samp_rate, prtd->channel_mode); if (ret < 0) pr_debug("%s: cmd cfg pcm was block failed", __func__); for (i = 0; i < runtime->periods; i++) q6asm_read(prtd->audio_client); prtd->periods = runtime->periods; prtd->enabled = 1; return ret; } static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { int ret = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: pr_debug("%s: Trigger start\n", __func__); q6asm_run_nowait(prtd->audio_client, 0, 0, 0); break; case SNDRV_PCM_TRIGGER_STOP: pr_debug("SNDRV_PCM_TRIGGER_STOP\n"); atomic_set(&prtd->start, 0); if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) break; prtd->cmd_ack = 0; q6asm_cmd_nowait(prtd->audio_client, CMD_EOS); break; case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n"); q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE); atomic_set(&prtd->start, 0); break; default: ret = -EINVAL; break; } return ret; } static int msm_pcm_open(struct snd_pcm_substream *substream) { struct 
snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd; int ret = 0; pr_debug("%s\n", __func__); prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL); if (prtd == NULL) { pr_err("Failed to allocate memory for msm_audio\n"); return -ENOMEM; } prtd->substream = substream; prtd->audio_client = q6asm_audio_client_alloc( (app_cb)event_handler, prtd); if (!prtd->audio_client) { pr_info("%s: Could not allocate memory\n", __func__); kfree(prtd); return -ENOMEM; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { runtime->hw = msm_pcm_hardware_playback; ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM); if (ret < 0) { pr_err("%s: pcm out open failed\n", __func__); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return -ENOMEM; } } /* Capture path */ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { runtime->hw = msm_pcm_hardware_capture; ret = q6asm_open_read(prtd->audio_client, FORMAT_LINEAR_PCM); if (ret < 0) { pr_err("%s: pcm in open failed\n", __func__); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return -ENOMEM; } } pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session); prtd->session_id = prtd->audio_client->session; msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id, prtd->session_id, substream->stream); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) prtd->cmd_ack = 1; ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &constraints_sample_rates); if (ret < 0) pr_info("snd_pcm_hw_constraint_list failed\n"); /* Ensure that buffer size is a multiple of period size */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) pr_info("snd_pcm_hw_constraint_integer failed\n"); prtd->dsp_cnt = 0; runtime->private_data = prtd; return 0; } static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) 
{ int ret = 0; int fbytes = 0; int xfer = 0; char *bufptr = NULL; void *data = NULL; uint32_t idx = 0; uint32_t size = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; fbytes = frames_to_bytes(runtime, frames); pr_debug("%s: prtd->out_count = %d\n", __func__, atomic_read(&prtd->out_count)); ret = wait_event_timeout(the_locks.write_wait, (atomic_read(&prtd->out_count)), 5 * HZ); if (ret < 0) { pr_err("%s: wait_event_timeout failed\n", __func__); goto fail; } if (!atomic_read(&prtd->out_count)) { pr_err("%s: pcm stopped out_count 0\n", __func__); return 0; } data = q6asm_is_cpu_buf_avail(IN, prtd->audio_client, &size, &idx); bufptr = data; if (bufptr) { pr_debug("%s:fbytes =%d: xfer=%d size=%d\n", __func__, fbytes, xfer, size); xfer = fbytes; if (copy_from_user(bufptr, buf, xfer)) { ret = -EFAULT; goto fail; } buf += xfer; fbytes -= xfer; pr_debug("%s:fbytes = %d: xfer=%d\n", __func__, fbytes, xfer); if (atomic_read(&prtd->start)) { pr_debug("%s:writing %d bytes of buffer to dsp\n", __func__, xfer); ret = q6asm_write(prtd->audio_client, xfer, 0, 0, NO_TIMESTAMP); if (ret < 0) { ret = -EFAULT; goto fail; } } else atomic_inc(&prtd->out_needed); atomic_dec(&prtd->out_count); } fail: return ret; } static int msm_pcm_playback_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd = runtime->private_data; int dir = 0; int ret = 0; pr_debug("%s\n", __func__); dir = IN; ret = wait_event_timeout(the_locks.eos_wait, prtd->cmd_ack, 5 * HZ); if (ret < 0) pr_err("%s: CMD_EOS failed\n", __func__); q6asm_cmd(prtd->audio_client, CMD_CLOSE); q6asm_audio_client_buf_free_contiguous(dir, prtd->audio_client); msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id, SNDRV_PCM_STREAM_PLAYBACK); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return 0; } static int msm_pcm_capture_copy(struct 
snd_pcm_substream *substream, int channel, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; int fbytes = 0; int xfer; char *bufptr; void *data = NULL; static uint32_t idx; static uint32_t size; uint32_t offset = 0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = substream->runtime->private_data; pr_debug("%s\n", __func__); fbytes = frames_to_bytes(runtime, frames); pr_debug("appl_ptr %d\n", (int)runtime->control->appl_ptr); pr_debug("hw_ptr %d\n", (int)runtime->status->hw_ptr); pr_debug("avail_min %d\n", (int)runtime->control->avail_min); ret = wait_event_timeout(the_locks.read_wait, (atomic_read(&prtd->in_count)), 5 * HZ); if (ret < 0) { pr_debug("%s: wait_event_timeout failed\n", __func__); goto fail; } if (!atomic_read(&prtd->in_count)) { pr_debug("%s: pcm stopped in_count 0\n", __func__); return 0; } pr_debug("Checking if valid buffer is available...%08x\n", (unsigned int) data); data = q6asm_is_cpu_buf_avail(OUT, prtd->audio_client, &size, &idx); bufptr = data; pr_debug("Size = %d\n", size); pr_debug("fbytes = %d\n", fbytes); pr_debug("idx = %d\n", idx); if (bufptr) { xfer = fbytes; if (xfer > size) xfer = size; offset = in_frame_info[idx][1]; pr_debug("Offset value = %d\n", offset); if (copy_to_user(buf, bufptr+offset, xfer)) { pr_err("Failed to copy buf to user\n"); ret = -EFAULT; goto fail; } fbytes -= xfer; size -= xfer; in_frame_info[idx][1] += xfer; pr_debug("%s:fbytes = %d: size=%d: xfer=%d\n", __func__, fbytes, size, xfer); pr_debug(" Sending next buffer to dsp\n"); memset(&in_frame_info[idx], 0, sizeof(uint32_t) * 2); atomic_dec(&prtd->in_count); ret = q6asm_read(prtd->audio_client); if (ret < 0) { pr_err("q6asm read failed\n"); ret = -EFAULT; goto fail; } } else pr_err("No valid buffer\n"); pr_debug("Returning from capture_copy... 
%d\n", ret); fail: return ret; } static int msm_pcm_capture_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *soc_prtd = substream->private_data; struct msm_audio *prtd = runtime->private_data; int dir = OUT; pr_debug("%s\n", __func__); q6asm_cmd(prtd->audio_client, CMD_CLOSE); q6asm_audio_client_buf_free_contiguous(dir, prtd->audio_client); msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id, SNDRV_PCM_STREAM_CAPTURE); q6asm_audio_client_free(prtd->audio_client); kfree(prtd); return 0; } static int msm_pcm_copy(struct snd_pcm_substream *substream, int a, snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_copy(substream, a, hwoff, buf, frames); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_copy(substream, a, hwoff, buf, frames); return ret; } static int msm_pcm_close(struct snd_pcm_substream *substream) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_close(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_close(substream); return ret; } static int msm_pcm_prepare(struct snd_pcm_substream *substream) { int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ret = msm_pcm_playback_prepare(substream); else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ret = msm_pcm_capture_prepare(substream); return ret; } static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; if (prtd->pcm_irq_pos >= prtd->pcm_size) prtd->pcm_irq_pos = 0; pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos); return bytes_to_frames(runtime, (prtd->pcm_irq_pos)); } static int msm_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { int result = 
0; struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; pr_debug("%s\n", __func__); prtd->mmap_flag = 1; if (runtime->dma_addr && runtime->dma_bytes) { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); result = remap_pfn_range(vma, vma->vm_start, runtime->dma_addr >> PAGE_SHIFT, runtime->dma_bytes, vma->vm_page_prot); } else { pr_err("Physical address or size of buf is NULL"); return -EINVAL; } return result; } static int msm_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_pcm_runtime *runtime = substream->runtime; struct msm_audio *prtd = runtime->private_data; struct snd_dma_buffer *dma_buf = &substream->dma_buffer; struct audio_buffer *buf; int dir, ret; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) dir = IN; else dir = OUT; pr_debug("%s: before buf alloc\n", __func__); ret = q6asm_audio_client_buf_alloc_contiguous(dir, prtd->audio_client, runtime->hw.period_bytes_min, runtime->hw.periods_max); if (ret < 0) { pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret); return -ENOMEM; } pr_debug("%s: after buf alloc\n", __func__); buf = prtd->audio_client->port[dir].buf; if (buf == NULL || buf[0].data == NULL) return -ENOMEM; pr_debug("%s:buf = %p\n", __func__, buf); dma_buf->dev.type = SNDRV_DMA_TYPE_DEV; dma_buf->dev.dev = substream->pcm->card->dev; dma_buf->private_data = NULL; dma_buf->area = buf[0].data; dma_buf->addr = buf[0].phys; dma_buf->bytes = runtime->hw.buffer_bytes_max; if (!dma_buf->area) return -ENOMEM; snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; } static struct snd_pcm_ops msm_pcm_ops = { .open = msm_pcm_open, .copy = msm_pcm_copy, .hw_params = msm_pcm_hw_params, .close = msm_pcm_close, .ioctl = snd_pcm_lib_ioctl, .prepare = msm_pcm_prepare, .trigger = msm_pcm_trigger, .pointer = msm_pcm_pointer, .mmap = msm_pcm_mmap, }; static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card 
= rtd->card->snd_card; int ret = 0; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); return ret; } static struct snd_soc_platform_driver msm_soc_platform = { .ops = &msm_pcm_ops, .pcm_new = msm_asoc_pcm_new, }; static __devinit int msm_pcm_probe(struct platform_device *pdev) { if (pdev->dev.of_node) dev_set_name(&pdev->dev, "%s", "msm-pcm-dsp"); pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev)); return snd_soc_register_platform(&pdev->dev, &msm_soc_platform); } static int msm_pcm_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static const struct of_device_id msm_pcm_dt_match[] = { {.compatible = "qcom,msm-pcm-dsp"}, {} }; MODULE_DEVICE_TABLE(of, msm_pcm_dt_match); static struct platform_driver msm_pcm_driver = { .driver = { .name = "msm-pcm-dsp", .owner = THIS_MODULE, .of_match_table = msm_pcm_dt_match, }, .probe = msm_pcm_probe, .remove = __devexit_p(msm_pcm_remove), }; static int __init msm_soc_platform_init(void) { init_waitqueue_head(&the_locks.enable_wait); init_waitqueue_head(&the_locks.eos_wait); init_waitqueue_head(&the_locks.write_wait); init_waitqueue_head(&the_locks.read_wait); return platform_driver_register(&msm_pcm_driver); } module_init(msm_soc_platform_init); static void __exit msm_soc_platform_exit(void) { platform_driver_unregister(&msm_pcm_driver); } module_exit(msm_soc_platform_exit); MODULE_DESCRIPTION("PCM module platform driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
jeeb/spica-2.6.38
drivers/scsi/scsi_tgt_lib.c
291
16523
/* * SCSI target lib functions * * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu> * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/blkdev.h> #include <linux/hash.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_tgt.h> #include "scsi_tgt_priv.h" static struct workqueue_struct *scsi_tgtd; static struct kmem_cache *scsi_tgt_cmd_cache; /* * TODO: this struct will be killed when the block layer supports large bios * and James's work struct code is in */ struct scsi_tgt_cmd { /* TODO replace work with James b's code */ struct work_struct work; /* TODO fix limits of some drivers */ struct bio *bio; struct list_head hash_list; struct request *rq; u64 itn_id; u64 tag; }; #define TGT_HASH_ORDER 4 #define cmd_hashfn(tag) hash_long((unsigned long) (tag), TGT_HASH_ORDER) struct scsi_tgt_queuedata { struct Scsi_Host *shost; struct list_head cmd_hash[1 << TGT_HASH_ORDER]; spinlock_t cmd_hash_lock; }; /* * Function: scsi_host_get_command() * * Purpose: Allocate and setup a scsi command block and blk request * * Arguments: shost - scsi host * data_dir - dma data dir * 
gfp_mask- allocator flags * * Returns: The allocated scsi command structure. * * This should be called by target LLDs to get a command. */ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost, enum dma_data_direction data_dir, gfp_t gfp_mask) { int write = (data_dir == DMA_TO_DEVICE); struct request *rq; struct scsi_cmnd *cmd; struct scsi_tgt_cmd *tcmd; /* Bail if we can't get a reference to the device */ if (!get_device(&shost->shost_gendev)) return NULL; tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC); if (!tcmd) goto put_dev; /* * The blk helpers are used to the READ/WRITE requests * transfering data from a initiator point of view. Since * we are in target mode we want the opposite. */ rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask); if (!rq) goto free_tcmd; cmd = __scsi_get_command(shost, gfp_mask); if (!cmd) goto release_rq; cmd->sc_data_direction = data_dir; cmd->jiffies_at_alloc = jiffies; cmd->request = rq; cmd->cmnd = rq->cmd; rq->special = cmd; rq->cmd_type = REQ_TYPE_SPECIAL; rq->cmd_flags |= REQ_TYPE_BLOCK_PC; rq->end_io_data = tcmd; tcmd->rq = rq; return cmd; release_rq: blk_put_request(rq); free_tcmd: kmem_cache_free(scsi_tgt_cmd_cache, tcmd); put_dev: put_device(&shost->shost_gendev); return NULL; } EXPORT_SYMBOL_GPL(scsi_host_get_command); /* * Function: scsi_host_put_command() * * Purpose: Free a scsi command block * * Arguments: shost - scsi host * cmd - command block to free * * Returns: Nothing. * * Notes: The command must not belong to any lists. 
*/ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { struct request_queue *q = shost->uspace_req_q; struct request *rq = cmd->request; struct scsi_tgt_cmd *tcmd = rq->end_io_data; unsigned long flags; kmem_cache_free(scsi_tgt_cmd_cache, tcmd); spin_lock_irqsave(q->queue_lock, flags); __blk_put_request(q, rq); spin_unlock_irqrestore(q->queue_lock, flags); __scsi_put_command(shost, cmd, &shost->shost_gendev); } EXPORT_SYMBOL_GPL(scsi_host_put_command); static void cmd_hashlist_del(struct scsi_cmnd *cmd) { struct request_queue *q = cmd->request->q; struct scsi_tgt_queuedata *qdata = q->queuedata; unsigned long flags; struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data; spin_lock_irqsave(&qdata->cmd_hash_lock, flags); list_del(&tcmd->hash_list); spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); } static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd) { blk_rq_unmap_user(tcmd->bio); } static void scsi_tgt_cmd_destroy(struct work_struct *work) { struct scsi_tgt_cmd *tcmd = container_of(work, struct scsi_tgt_cmd, work); struct scsi_cmnd *cmd = tcmd->rq->special; dprintk("cmd %p %d %u\n", cmd, cmd->sc_data_direction, rq_data_dir(cmd->request)); scsi_unmap_user_pages(tcmd); tcmd->rq->bio = NULL; scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd); } static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd, u64 itn_id, u64 tag) { struct scsi_tgt_queuedata *qdata = rq->q->queuedata; unsigned long flags; struct list_head *head; tcmd->itn_id = itn_id; tcmd->tag = tag; tcmd->bio = NULL; INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy); spin_lock_irqsave(&qdata->cmd_hash_lock, flags); head = &qdata->cmd_hash[cmd_hashfn(tag)]; list_add(&tcmd->hash_list, head); spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); } /* * scsi_tgt_alloc_queue - setup queue used for message passing * shost: scsi host * * This should be called by the LLD after host allocation. * And will be released when the host is released. 
*/ int scsi_tgt_alloc_queue(struct Scsi_Host *shost) { struct scsi_tgt_queuedata *queuedata; struct request_queue *q; int err, i; /* * Do we need to send a netlink event or should uspace * just respond to the hotplug event? */ q = __scsi_alloc_queue(shost, NULL); if (!q) return -ENOMEM; queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL); if (!queuedata) { err = -ENOMEM; goto cleanup_queue; } queuedata->shost = shost; q->queuedata = queuedata; /* * this is a silly hack. We should probably just queue as many * command as is recvd to userspace. uspace can then make * sure we do not overload the HBA */ q->nr_requests = shost->can_queue; /* * We currently only support software LLDs so this does * not matter for now. Do we need this for the cards we support? * If so we should make it a host template value. */ blk_queue_dma_alignment(q, 0); shost->uspace_req_q = q; for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++) INIT_LIST_HEAD(&queuedata->cmd_hash[i]); spin_lock_init(&queuedata->cmd_hash_lock); return 0; cleanup_queue: blk_cleanup_queue(q); return err; } EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue); void scsi_tgt_free_queue(struct Scsi_Host *shost) { int i; unsigned long flags; struct request_queue *q = shost->uspace_req_q; struct scsi_cmnd *cmd; struct scsi_tgt_queuedata *qdata = q->queuedata; struct scsi_tgt_cmd *tcmd, *n; LIST_HEAD(cmds); spin_lock_irqsave(&qdata->cmd_hash_lock, flags); for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) { list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i], hash_list) { list_del(&tcmd->hash_list); list_add(&tcmd->hash_list, &cmds); } } spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); while (!list_empty(&cmds)) { tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list); list_del(&tcmd->hash_list); cmd = tcmd->rq->special; shost->hostt->eh_abort_handler(cmd); scsi_tgt_cmd_destroy(&tcmd->work); } } EXPORT_SYMBOL_GPL(scsi_tgt_free_queue); struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd) { struct scsi_tgt_queuedata 
*queue = cmd->request->q->queuedata; return queue->shost; } EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host); /* * scsi_tgt_queue_command - queue command for userspace processing * @cmd: scsi command * @scsilun: scsi lun * @tag: unique value to identify this command for tmf */ int scsi_tgt_queue_command(struct scsi_cmnd *cmd, u64 itn_id, struct scsi_lun *scsilun, u64 tag) { struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data; int err; init_scsi_tgt_cmd(cmd->request, tcmd, itn_id, tag); err = scsi_tgt_uspace_send_cmd(cmd, itn_id, scsilun, tag); if (err) cmd_hashlist_del(cmd); return err; } EXPORT_SYMBOL_GPL(scsi_tgt_queue_command); /* * This is run from a interrupt handler normally and the unmap * needs process context so we must queue */ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd) { struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data; dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request)); scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag); scsi_release_buffers(cmd); queue_work(scsi_tgtd, &tcmd->work); } static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd) { struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd); int err; dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request)); err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done); switch (err) { case SCSI_MLQUEUE_HOST_BUSY: case SCSI_MLQUEUE_DEVICE_BUSY: return -EAGAIN; } return 0; } /* TODO: test this crap and replace bio_map_user with new interface maybe */ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, unsigned long uaddr, unsigned int len, int rw) { struct request_queue *q = cmd->request->q; struct request *rq = cmd->request; int err; dprintk("%lx %u\n", uaddr, len); err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL); if (err) { /* * TODO: need to fixup sg_tablesize, max_segment_size, * max_sectors, etc for modern HW and software drivers * where this value is bogus. 
* * TODO2: we can alloc a reserve buffer of max size * we can handle and do the slow copy path for really large * IO. */ eprintk("Could not handle request of size %u.\n", len); return err; } tcmd->bio = rq->bio; err = scsi_init_io(cmd, GFP_KERNEL); if (err) { scsi_release_buffers(cmd); goto unmap_rq; } /* * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the * length for us. */ cmd->sdb.length = blk_rq_bytes(rq); return 0; unmap_rq: scsi_unmap_user_pages(tcmd); return err; } static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr, unsigned len) { char __user *p = (char __user *) uaddr; if (copy_from_user(cmd->sense_buffer, p, min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) { printk(KERN_ERR "Could not copy the sense buffer\n"); return -EIO; } return 0; } static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { struct scsi_tgt_cmd *tcmd; int err; err = shost->hostt->eh_abort_handler(cmd); if (err) eprintk("fail to abort %p\n", cmd); tcmd = cmd->request->end_io_data; scsi_tgt_cmd_destroy(&tcmd->work); return err; } static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag) { struct scsi_tgt_queuedata *qdata = q->queuedata; struct request *rq = NULL; struct list_head *head; struct scsi_tgt_cmd *tcmd; unsigned long flags; head = &qdata->cmd_hash[cmd_hashfn(tag)]; spin_lock_irqsave(&qdata->cmd_hash_lock, flags); list_for_each_entry(tcmd, head, hash_list) { if (tcmd->tag == tag) { rq = tcmd->rq; list_del(&tcmd->hash_list); break; } } spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); return rq; } int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag, unsigned long uaddr, u32 len, unsigned long sense_uaddr, u32 sense_len, u8 rw) { struct Scsi_Host *shost; struct scsi_cmnd *cmd; struct request *rq; struct scsi_tgt_cmd *tcmd; int err = 0; dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag, result, len, uaddr, rw); /* TODO: replace with a O(1) alg */ shost = 
scsi_host_lookup(host_no); if (!shost) { printk(KERN_ERR "Could not find host no %d\n", host_no); return -EINVAL; } if (!shost->uspace_req_q) { printk(KERN_ERR "Not target scsi host %d\n", host_no); goto done; } rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag); if (!rq) { printk(KERN_ERR "Could not find tag %llu\n", (unsigned long long) tag); err = -EINVAL; goto done; } cmd = rq->special; dprintk("cmd %p scb %x result %d len %d bufflen %u %u %x\n", cmd, cmd->cmnd[0], result, len, scsi_bufflen(cmd), rq_data_dir(rq), cmd->cmnd[0]); if (result == TASK_ABORTED) { scsi_tgt_abort_cmd(shost, cmd); goto done; } /* * store the userspace values here, the working values are * in the request_* values */ tcmd = cmd->request->end_io_data; cmd->result = result; if (cmd->result == SAM_STAT_CHECK_CONDITION) scsi_tgt_copy_sense(cmd, sense_uaddr, sense_len); if (len) { err = scsi_map_user_pages(rq->end_io_data, cmd, uaddr, len, rw); if (err) { /* * user-space daemon bugs or OOM * TODO: we can do better for OOM. */ struct scsi_tgt_queuedata *qdata; struct list_head *head; unsigned long flags; eprintk("cmd %p ret %d uaddr %lx len %d rw %d\n", cmd, err, uaddr, len, rw); qdata = shost->uspace_req_q->queuedata; head = &qdata->cmd_hash[cmd_hashfn(tcmd->tag)]; spin_lock_irqsave(&qdata->cmd_hash_lock, flags); list_add(&tcmd->hash_list, head); spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); goto done; } } err = scsi_tgt_transfer_response(cmd); done: scsi_host_put(shost); return err; } int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, u64 itn_id, int function, u64 tag, struct scsi_lun *scsilun, void *data) { int err; /* TODO: need to retry if this fails. 
*/ err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, itn_id, function, tag, scsilun, data); if (err < 0) eprintk("The task management request lost!\n"); return err; } EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request); int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result) { struct Scsi_Host *shost; int err = -EINVAL; dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid); shost = scsi_host_lookup(host_no); if (!shost) { printk(KERN_ERR "Could not find host no %d\n", host_no); return err; } if (!shost->uspace_req_q) { printk(KERN_ERR "Not target scsi host %d\n", host_no); goto done; } err = shost->transportt->tsk_mgmt_response(shost, itn_id, mid, result); done: scsi_host_put(shost); return err; } int scsi_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id, char *initiator) { int err; /* TODO: need to retry if this fails. */ err = scsi_tgt_uspace_send_it_nexus_request(shost->host_no, itn_id, 0, initiator); if (err < 0) eprintk("The i_t_neuxs request lost, %d %llx!\n", shost->host_no, (unsigned long long)itn_id); return err; } EXPORT_SYMBOL_GPL(scsi_tgt_it_nexus_create); int scsi_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id) { int err; /* TODO: need to retry if this fails. 
*/ err = scsi_tgt_uspace_send_it_nexus_request(shost->host_no, itn_id, 1, NULL); if (err < 0) eprintk("The i_t_neuxs request lost, %d %llx!\n", shost->host_no, (unsigned long long)itn_id); return err; } EXPORT_SYMBOL_GPL(scsi_tgt_it_nexus_destroy); int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result) { struct Scsi_Host *shost; int err = -EINVAL; dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id); shost = scsi_host_lookup(host_no); if (!shost) { printk(KERN_ERR "Could not find host no %d\n", host_no); return err; } if (!shost->uspace_req_q) { printk(KERN_ERR "Not target scsi host %d\n", host_no); goto done; } err = shost->transportt->it_nexus_response(shost, itn_id, result); done: scsi_host_put(shost); return err; } static int __init scsi_tgt_init(void) { int err; scsi_tgt_cmd_cache = KMEM_CACHE(scsi_tgt_cmd, 0); if (!scsi_tgt_cmd_cache) return -ENOMEM; scsi_tgtd = create_workqueue("scsi_tgtd"); if (!scsi_tgtd) { err = -ENOMEM; goto free_kmemcache; } err = scsi_tgt_if_init(); if (err) goto destroy_wq; return 0; destroy_wq: destroy_workqueue(scsi_tgtd); free_kmemcache: kmem_cache_destroy(scsi_tgt_cmd_cache); return err; } static void __exit scsi_tgt_exit(void) { destroy_workqueue(scsi_tgtd); scsi_tgt_if_exit(); kmem_cache_destroy(scsi_tgt_cmd_cache); } module_init(scsi_tgt_init); module_exit(scsi_tgt_exit); MODULE_DESCRIPTION("SCSI target core"); MODULE_LICENSE("GPL");
gpl-2.0
dan82840/Netgear-RBR40
git_home/linux.git/drivers/media/usb/au0828/au0828-dvb.c
291
14309
/* * Driver for the Auvitek USB bridge * * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/device.h> #include <linux/suspend.h> #include <media/v4l2-common.h> #include <media/tuner.h> #include "au0828.h" #include "au8522.h" #include "xc5000.h" #include "mxl5007t.h" #include "tda18271.h" static int preallocate_big_buffers; module_param_named(preallocate_big_buffers, preallocate_big_buffers, int, 0644); MODULE_PARM_DESC(preallocate_big_buffers, "Preallocate the larger transfer buffers at module load time"); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); #define _AU0828_BULKPIPE 0x83 #define _BULKPIPESIZE 0xe522 static u8 hauppauge_hvr950q_led_states[] = { 0x00, /* off */ 0x02, /* yellow */ 0x04, /* green */ }; static struct au8522_led_config hauppauge_hvr950q_led_cfg = { .gpio_output = 0x00e0, .gpio_output_enable = 0x6006, .gpio_output_disable = 0x0660, .gpio_leds = 0x00e2, .led_states = hauppauge_hvr950q_led_states, .num_led_states = sizeof(hauppauge_hvr950q_led_states), .vsb8_strong = 20 /* dB */ * 10, .qam64_strong = 25 /* dB */ * 10, .qam256_strong = 32 /* dB */ * 10, }; static struct au8522_config hauppauge_hvr950q_config = { .demod_address = 0x8e >> 1, .status_mode = AU8522_DEMODLOCKING, .qam_if = 
AU8522_IF_6MHZ, .vsb_if = AU8522_IF_6MHZ, .led_cfg = &hauppauge_hvr950q_led_cfg, }; static struct au8522_config fusionhdtv7usb_config = { .demod_address = 0x8e >> 1, .status_mode = AU8522_DEMODLOCKING, .qam_if = AU8522_IF_6MHZ, .vsb_if = AU8522_IF_6MHZ, }; static struct au8522_config hauppauge_woodbury_config = { .demod_address = 0x8e >> 1, .status_mode = AU8522_DEMODLOCKING, .qam_if = AU8522_IF_4MHZ, .vsb_if = AU8522_IF_3_25MHZ, }; static struct xc5000_config hauppauge_xc5000a_config = { .i2c_address = 0x61, .if_khz = 6000, .chip_id = XC5000A, }; static struct xc5000_config hauppauge_xc5000c_config = { .i2c_address = 0x61, .if_khz = 6000, .chip_id = XC5000C, }; static struct mxl5007t_config mxl5007t_hvr950q_config = { .xtal_freq_hz = MxL_XTAL_24_MHZ, .if_freq_hz = MxL_IF_6_MHZ, }; static struct tda18271_config hauppauge_woodbury_tunerconfig = { .gate = TDA18271_GATE_DIGITAL, }; static void au0828_restart_dvb_streaming(struct work_struct *work); /*-------------------------------------------------------------------*/ static void urb_completion(struct urb *purb) { struct au0828_dev *dev = purb->context; int ptype = usb_pipetype(purb->pipe); unsigned char *ptr; dprintk(2, "%s()\n", __func__); if (!dev) return; if (dev->urb_streaming == 0) return; if (ptype != PIPE_BULK) { printk(KERN_ERR "%s() Unsupported URB type %d\n", __func__, ptype); return; } /* See if the stream is corrupted (to work around a hardware bug where the stream gets misaligned */ ptr = purb->transfer_buffer; if (purb->actual_length > 0 && ptr[0] != 0x47) { dprintk(1, "Need to restart streaming %02x len=%d!\n", ptr[0], purb->actual_length); schedule_work(&dev->restart_streaming); return; } /* Feed the transport payload into the kernel demux */ dvb_dmx_swfilter_packets(&dev->dvb.demux, purb->transfer_buffer, purb->actual_length / 188); /* Clean the buffer before we requeue */ memset(purb->transfer_buffer, 0, URB_BUFSIZE); /* Requeue URB */ usb_submit_urb(purb, GFP_ATOMIC); } static int 
stop_urb_transfer(struct au0828_dev *dev) { int i; dprintk(2, "%s()\n", __func__); dev->urb_streaming = 0; for (i = 0; i < URB_COUNT; i++) { if (dev->urbs[i]) { usb_kill_urb(dev->urbs[i]); if (!preallocate_big_buffers) kfree(dev->urbs[i]->transfer_buffer); usb_free_urb(dev->urbs[i]); } } return 0; } static int start_urb_transfer(struct au0828_dev *dev) { struct urb *purb; int i, ret = -ENOMEM; dprintk(2, "%s()\n", __func__); if (dev->urb_streaming) { dprintk(2, "%s: bulk xfer already running!\n", __func__); return 0; } for (i = 0; i < URB_COUNT; i++) { dev->urbs[i] = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urbs[i]) goto err; purb = dev->urbs[i]; if (preallocate_big_buffers) purb->transfer_buffer = dev->dig_transfer_buffer[i]; else purb->transfer_buffer = kzalloc(URB_BUFSIZE, GFP_KERNEL); if (!purb->transfer_buffer) { usb_free_urb(purb); dev->urbs[i] = NULL; printk(KERN_ERR "%s: failed big buffer allocation, err = %d\n", __func__, ret); goto err; } purb->status = -EINPROGRESS; usb_fill_bulk_urb(purb, dev->usbdev, usb_rcvbulkpipe(dev->usbdev, _AU0828_BULKPIPE), purb->transfer_buffer, URB_BUFSIZE, urb_completion, dev); } for (i = 0; i < URB_COUNT; i++) { ret = usb_submit_urb(dev->urbs[i], GFP_ATOMIC); if (ret != 0) { stop_urb_transfer(dev); printk(KERN_ERR "%s: failed urb submission, " "err = %d\n", __func__, ret); return ret; } } dev->urb_streaming = 1; ret = 0; err: return ret; } static void au0828_start_transport(struct au0828_dev *dev) { au0828_write(dev, 0x608, 0x90); au0828_write(dev, 0x609, 0x72); au0828_write(dev, 0x60a, 0x71); au0828_write(dev, 0x60b, 0x01); } static void au0828_stop_transport(struct au0828_dev *dev, int full_stop) { if (full_stop) { au0828_write(dev, 0x608, 0x00); au0828_write(dev, 0x609, 0x00); au0828_write(dev, 0x60a, 0x00); } au0828_write(dev, 0x60b, 0x00); } static int au0828_dvb_start_feed(struct dvb_demux_feed *feed) { struct dvb_demux *demux = feed->demux; struct au0828_dev *dev = (struct au0828_dev *) demux->priv; struct au0828_dvb 
*dvb = &dev->dvb; int ret = 0; dprintk(1, "%s()\n", __func__); if (!demux->dmx.frontend) return -EINVAL; if (dvb) { mutex_lock(&dvb->lock); dvb->start_count++; dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__, dvb->start_count, dvb->stop_count); if (dvb->feeding++ == 0) { /* Start transport */ au0828_start_transport(dev); ret = start_urb_transfer(dev); if (ret < 0) { au0828_stop_transport(dev, 0); dvb->feeding--; /* We ran out of memory... */ } } mutex_unlock(&dvb->lock); } return ret; } static int au0828_dvb_stop_feed(struct dvb_demux_feed *feed) { struct dvb_demux *demux = feed->demux; struct au0828_dev *dev = (struct au0828_dev *) demux->priv; struct au0828_dvb *dvb = &dev->dvb; int ret = 0; dprintk(1, "%s()\n", __func__); if (dvb) { mutex_lock(&dvb->lock); dvb->stop_count++; dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__, dvb->start_count, dvb->stop_count); if (dvb->feeding > 0) { dvb->feeding--; if (dvb->feeding == 0) { /* Stop transport */ ret = stop_urb_transfer(dev); au0828_stop_transport(dev, 0); } } mutex_unlock(&dvb->lock); } return ret; } static void au0828_restart_dvb_streaming(struct work_struct *work) { struct au0828_dev *dev = container_of(work, struct au0828_dev, restart_streaming); struct au0828_dvb *dvb = &dev->dvb; if (dev->urb_streaming == 0) return; dprintk(1, "Restarting streaming...!\n"); mutex_lock(&dvb->lock); /* Stop transport */ stop_urb_transfer(dev); au0828_stop_transport(dev, 1); /* Start transport */ au0828_start_transport(dev); start_urb_transfer(dev); mutex_unlock(&dvb->lock); } static int dvb_register(struct au0828_dev *dev) { struct au0828_dvb *dvb = &dev->dvb; int result; dprintk(1, "%s()\n", __func__); if (preallocate_big_buffers) { int i; for (i = 0; i < URB_COUNT; i++) { dev->dig_transfer_buffer[i] = kzalloc(URB_BUFSIZE, GFP_KERNEL); if (!dev->dig_transfer_buffer[i]) { result = -ENOMEM; printk(KERN_ERR "%s: failed buffer allocation (errno = %d)\n", DRIVER_NAME, result); goto fail_adapter; } } } 
INIT_WORK(&dev->restart_streaming, au0828_restart_dvb_streaming); /* register adapter */ result = dvb_register_adapter(&dvb->adapter, DRIVER_NAME, THIS_MODULE, &dev->usbdev->dev, adapter_nr); if (result < 0) { printk(KERN_ERR "%s: dvb_register_adapter failed " "(errno = %d)\n", DRIVER_NAME, result); goto fail_adapter; } dvb->adapter.priv = dev; /* register frontend */ result = dvb_register_frontend(&dvb->adapter, dvb->frontend); if (result < 0) { printk(KERN_ERR "%s: dvb_register_frontend failed " "(errno = %d)\n", DRIVER_NAME, result); goto fail_frontend; } /* register demux stuff */ dvb->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING; dvb->demux.priv = dev; dvb->demux.filternum = 256; dvb->demux.feednum = 256; dvb->demux.start_feed = au0828_dvb_start_feed; dvb->demux.stop_feed = au0828_dvb_stop_feed; result = dvb_dmx_init(&dvb->demux); if (result < 0) { printk(KERN_ERR "%s: dvb_dmx_init failed (errno = %d)\n", DRIVER_NAME, result); goto fail_dmx; } dvb->dmxdev.filternum = 256; dvb->dmxdev.demux = &dvb->demux.dmx; dvb->dmxdev.capabilities = 0; result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter); if (result < 0) { printk(KERN_ERR "%s: dvb_dmxdev_init failed (errno = %d)\n", DRIVER_NAME, result); goto fail_dmxdev; } dvb->fe_hw.source = DMX_FRONTEND_0; result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw); if (result < 0) { printk(KERN_ERR "%s: add_frontend failed " "(DMX_FRONTEND_0, errno = %d)\n", DRIVER_NAME, result); goto fail_fe_hw; } dvb->fe_mem.source = DMX_MEMORY_FE; result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem); if (result < 0) { printk(KERN_ERR "%s: add_frontend failed " "(DMX_MEMORY_FE, errno = %d)\n", DRIVER_NAME, result); goto fail_fe_mem; } result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw); if (result < 0) { printk(KERN_ERR "%s: connect_frontend failed (errno = %d)\n", DRIVER_NAME, result); goto fail_fe_conn; } /* register network adapter */ 
dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx); dvb->start_count = 0; dvb->stop_count = 0; return 0; fail_fe_conn: dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); fail_fe_mem: dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); fail_fe_hw: dvb_dmxdev_release(&dvb->dmxdev); fail_dmxdev: dvb_dmx_release(&dvb->demux); fail_dmx: dvb_unregister_frontend(dvb->frontend); fail_frontend: dvb_frontend_detach(dvb->frontend); dvb_unregister_adapter(&dvb->adapter); fail_adapter: if (preallocate_big_buffers) { int i; for (i = 0; i < URB_COUNT; i++) kfree(dev->dig_transfer_buffer[i]); } return result; } void au0828_dvb_unregister(struct au0828_dev *dev) { struct au0828_dvb *dvb = &dev->dvb; dprintk(1, "%s()\n", __func__); if (dvb->frontend == NULL) return; dvb_net_release(&dvb->net); dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); dvb_dmxdev_release(&dvb->dmxdev); dvb_dmx_release(&dvb->demux); dvb_unregister_frontend(dvb->frontend); dvb_frontend_detach(dvb->frontend); dvb_unregister_adapter(&dvb->adapter); if (preallocate_big_buffers) { int i; for (i = 0; i < URB_COUNT; i++) kfree(dev->dig_transfer_buffer[i]); } } /* All the DVB attach calls go here, this function get's modified * for each new card. No other function in this file needs * to change. 
*/ int au0828_dvb_register(struct au0828_dev *dev) { struct au0828_dvb *dvb = &dev->dvb; int ret; dprintk(1, "%s()\n", __func__); /* init frontend */ switch (dev->boardnr) { case AU0828_BOARD_HAUPPAUGE_HVR850: case AU0828_BOARD_HAUPPAUGE_HVR950Q: dvb->frontend = dvb_attach(au8522_attach, &hauppauge_hvr950q_config, &dev->i2c_adap); if (dvb->frontend != NULL) switch (dev->board.tuner_type) { default: case TUNER_XC5000: dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap, &hauppauge_xc5000a_config); break; case TUNER_XC5000C: dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap, &hauppauge_xc5000c_config); break; } break; case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL: dvb->frontend = dvb_attach(au8522_attach, &hauppauge_hvr950q_config, &dev->i2c_adap); if (dvb->frontend != NULL) dvb_attach(mxl5007t_attach, dvb->frontend, &dev->i2c_adap, 0x60, &mxl5007t_hvr950q_config); break; case AU0828_BOARD_HAUPPAUGE_WOODBURY: dvb->frontend = dvb_attach(au8522_attach, &hauppauge_woodbury_config, &dev->i2c_adap); if (dvb->frontend != NULL) dvb_attach(tda18271_attach, dvb->frontend, 0x60, &dev->i2c_adap, &hauppauge_woodbury_tunerconfig); break; case AU0828_BOARD_DVICO_FUSIONHDTV7: dvb->frontend = dvb_attach(au8522_attach, &fusionhdtv7usb_config, &dev->i2c_adap); if (dvb->frontend != NULL) { dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap, &hauppauge_xc5000a_config); } break; default: printk(KERN_WARNING "The frontend of your DVB/ATSC card " "isn't supported yet\n"); break; } if (NULL == dvb->frontend) { printk(KERN_ERR "%s() Frontend initialization failed\n", __func__); return -1; } /* define general-purpose callback pointer */ dvb->frontend->callback = au0828_tuner_callback; /* register everything */ ret = dvb_register(dev); if (ret < 0) { if (dvb->frontend->ops.release) dvb->frontend->ops.release(dvb->frontend); return ret; } return 0; }
gpl-2.0
hisilicon/linaro-kernel
drivers/input/misc/twl6040-vibra.c
291
11134
/* * twl6040-vibra.c - TWL6040 Vibrator driver * * Author: Jorge Eduardo Candelaria <jorge.candelaria@ti.com> * Author: Misael Lopez Cruz <misael.lopez@ti.com> * * Copyright: (C) 2011 Texas Instruments, Inc. * * Based on twl4030-vibra.c by Henrik Saari <henrik.saari@nokia.com> * Felipe Balbi <felipe.balbi@nokia.com> * Jari Vanhala <ext-javi.vanhala@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/workqueue.h> #include <linux/input.h> #include <linux/mfd/twl6040.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/regulator/consumer.h> #define EFFECT_DIR_180_DEG 0x8000 /* Recommended modulation index 85% */ #define TWL6040_VIBRA_MOD 85 #define TWL6040_NUM_SUPPLIES 2 struct vibra_info { struct device *dev; struct input_dev *input_dev; struct workqueue_struct *workqueue; struct work_struct play_work; struct mutex mutex; int irq; bool enabled; int weak_speed; int strong_speed; int direction; unsigned int vibldrv_res; unsigned int vibrdrv_res; unsigned int viblmotor_res; unsigned int vibrmotor_res; struct regulator_bulk_data supplies[TWL6040_NUM_SUPPLIES]; struct twl6040 *twl6040; }; static irqreturn_t twl6040_vib_irq_handler(int irq, void *data) { struct vibra_info *info = data; struct twl6040 *twl6040 = info->twl6040; u8 status; status = twl6040_reg_read(twl6040, 
TWL6040_REG_STATUS); if (status & TWL6040_VIBLOCDET) { dev_warn(info->dev, "Left Vibrator overcurrent detected\n"); twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLL, TWL6040_VIBENA); } if (status & TWL6040_VIBROCDET) { dev_warn(info->dev, "Right Vibrator overcurrent detected\n"); twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLR, TWL6040_VIBENA); } return IRQ_HANDLED; } static void twl6040_vibra_enable(struct vibra_info *info) { struct twl6040 *twl6040 = info->twl6040; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(info->supplies), info->supplies); if (ret) { dev_err(info->dev, "failed to enable regulators %d\n", ret); return; } twl6040_power(info->twl6040, 1); if (twl6040_get_revid(twl6040) <= TWL6040_REV_ES1_1) { /* * ERRATA: Disable overcurrent protection for at least * 3ms when enabling vibrator drivers to avoid false * overcurrent detection */ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL, TWL6040_VIBENA | TWL6040_VIBCTRL); twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR, TWL6040_VIBENA | TWL6040_VIBCTRL); usleep_range(3000, 3500); } twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL, TWL6040_VIBENA); twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR, TWL6040_VIBENA); info->enabled = true; } static void twl6040_vibra_disable(struct vibra_info *info) { struct twl6040 *twl6040 = info->twl6040; twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL, 0x00); twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR, 0x00); twl6040_power(info->twl6040, 0); regulator_bulk_disable(ARRAY_SIZE(info->supplies), info->supplies); info->enabled = false; } static u8 twl6040_vibra_code(int vddvib, int vibdrv_res, int motor_res, int speed, int direction) { int vpk, max_code; u8 vibdat; /* output swing */ vpk = (vddvib * motor_res * TWL6040_VIBRA_MOD) / (100 * (vibdrv_res + motor_res)); /* 50mV per VIBDAT code step */ max_code = vpk / 50; if (max_code > TWL6040_VIBDAT_MAX) max_code = TWL6040_VIBDAT_MAX; /* scale speed to max allowed code */ vibdat = (u8)((speed * max_code) / USHRT_MAX); /* 2's complement 
for direction > 180 degrees */ vibdat *= direction; return vibdat; } static void twl6040_vibra_set_effect(struct vibra_info *info) { struct twl6040 *twl6040 = info->twl6040; u8 vibdatl, vibdatr; int volt; /* weak motor */ volt = regulator_get_voltage(info->supplies[0].consumer) / 1000; vibdatl = twl6040_vibra_code(volt, info->vibldrv_res, info->viblmotor_res, info->weak_speed, info->direction); /* strong motor */ volt = regulator_get_voltage(info->supplies[1].consumer) / 1000; vibdatr = twl6040_vibra_code(volt, info->vibrdrv_res, info->vibrmotor_res, info->strong_speed, info->direction); twl6040_reg_write(twl6040, TWL6040_REG_VIBDATL, vibdatl); twl6040_reg_write(twl6040, TWL6040_REG_VIBDATR, vibdatr); } static void vibra_play_work(struct work_struct *work) { struct vibra_info *info = container_of(work, struct vibra_info, play_work); mutex_lock(&info->mutex); if (info->weak_speed || info->strong_speed) { if (!info->enabled) twl6040_vibra_enable(info); twl6040_vibra_set_effect(info); } else if (info->enabled) twl6040_vibra_disable(info); mutex_unlock(&info->mutex); } static int vibra_play(struct input_dev *input, void *data, struct ff_effect *effect) { struct vibra_info *info = input_get_drvdata(input); int ret; /* Do not allow effect, while the routing is set to use audio */ ret = twl6040_get_vibralr_status(info->twl6040); if (ret & TWL6040_VIBSEL) { dev_info(&input->dev, "Vibra is configured for audio\n"); return -EBUSY; } info->weak_speed = effect->u.rumble.weak_magnitude; info->strong_speed = effect->u.rumble.strong_magnitude; info->direction = effect->direction < EFFECT_DIR_180_DEG ? 
1 : -1; ret = queue_work(info->workqueue, &info->play_work); if (!ret) { dev_info(&input->dev, "work is already on queue\n"); return ret; } return 0; } static void twl6040_vibra_close(struct input_dev *input) { struct vibra_info *info = input_get_drvdata(input); cancel_work_sync(&info->play_work); mutex_lock(&info->mutex); if (info->enabled) twl6040_vibra_disable(info); mutex_unlock(&info->mutex); } #ifdef CONFIG_PM_SLEEP static int twl6040_vibra_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct vibra_info *info = platform_get_drvdata(pdev); mutex_lock(&info->mutex); if (info->enabled) twl6040_vibra_disable(info); mutex_unlock(&info->mutex); return 0; } #endif static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL); static int twl6040_vibra_probe(struct platform_device *pdev) { struct device *twl6040_core_dev = pdev->dev.parent; struct device_node *twl6040_core_node; struct vibra_info *info; int vddvibl_uV = 0; int vddvibr_uV = 0; int ret; twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node, "vibra"); if (!twl6040_core_node) { dev_err(&pdev->dev, "parent of node is missing?\n"); return -EINVAL; } info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) { of_node_put(twl6040_core_node); dev_err(&pdev->dev, "couldn't allocate memory\n"); return -ENOMEM; } info->dev = &pdev->dev; info->twl6040 = dev_get_drvdata(pdev->dev.parent); of_property_read_u32(twl6040_core_node, "ti,vibldrv-res", &info->vibldrv_res); of_property_read_u32(twl6040_core_node, "ti,vibrdrv-res", &info->vibrdrv_res); of_property_read_u32(twl6040_core_node, "ti,viblmotor-res", &info->viblmotor_res); of_property_read_u32(twl6040_core_node, "ti,vibrmotor-res", &info->vibrmotor_res); of_property_read_u32(twl6040_core_node, "ti,vddvibl-uV", &vddvibl_uV); of_property_read_u32(twl6040_core_node, "ti,vddvibr-uV", &vddvibr_uV); of_node_put(twl6040_core_node); if ((!info->vibldrv_res && !info->viblmotor_res) || 
(!info->vibrdrv_res && !info->vibrmotor_res)) { dev_err(info->dev, "invalid vibra driver/motor resistance\n"); return -EINVAL; } info->irq = platform_get_irq(pdev, 0); if (info->irq < 0) { dev_err(info->dev, "invalid irq\n"); return -EINVAL; } mutex_init(&info->mutex); ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, twl6040_vib_irq_handler, 0, "twl6040_irq_vib", info); if (ret) { dev_err(info->dev, "VIB IRQ request failed: %d\n", ret); return ret; } info->supplies[0].supply = "vddvibl"; info->supplies[1].supply = "vddvibr"; /* * When booted with Device tree the regulators are attached to the * parent device (twl6040 MFD core) */ ret = regulator_bulk_get(twl6040_core_dev, ARRAY_SIZE(info->supplies), info->supplies); if (ret) { dev_err(info->dev, "couldn't get regulators %d\n", ret); return ret; } if (vddvibl_uV) { ret = regulator_set_voltage(info->supplies[0].consumer, vddvibl_uV, vddvibl_uV); if (ret) { dev_err(info->dev, "failed to set VDDVIBL volt %d\n", ret); goto err_regulator; } } if (vddvibr_uV) { ret = regulator_set_voltage(info->supplies[1].consumer, vddvibr_uV, vddvibr_uV); if (ret) { dev_err(info->dev, "failed to set VDDVIBR volt %d\n", ret); goto err_regulator; } } INIT_WORK(&info->play_work, vibra_play_work); info->input_dev = input_allocate_device(); if (info->input_dev == NULL) { dev_err(info->dev, "couldn't allocate input device\n"); ret = -ENOMEM; goto err_regulator; } input_set_drvdata(info->input_dev, info); info->input_dev->name = "twl6040:vibrator"; info->input_dev->id.version = 1; info->input_dev->dev.parent = pdev->dev.parent; info->input_dev->close = twl6040_vibra_close; __set_bit(FF_RUMBLE, info->input_dev->ffbit); ret = input_ff_create_memless(info->input_dev, NULL, vibra_play); if (ret < 0) { dev_err(info->dev, "couldn't register vibrator to FF\n"); goto err_ialloc; } ret = input_register_device(info->input_dev); if (ret < 0) { dev_err(info->dev, "couldn't register input device\n"); goto err_iff; } platform_set_drvdata(pdev, 
info); return 0; err_iff: input_ff_destroy(info->input_dev); err_ialloc: input_free_device(info->input_dev); err_regulator: regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies); return ret; } static int twl6040_vibra_remove(struct platform_device *pdev) { struct vibra_info *info = platform_get_drvdata(pdev); input_unregister_device(info->input_dev); regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies); return 0; } static struct platform_driver twl6040_vibra_driver = { .probe = twl6040_vibra_probe, .remove = twl6040_vibra_remove, .driver = { .name = "twl6040-vibra", .owner = THIS_MODULE, .pm = &twl6040_vibra_pm_ops, }, }; module_platform_driver(twl6040_vibra_driver); MODULE_ALIAS("platform:twl6040-vibra"); MODULE_DESCRIPTION("TWL6040 Vibra driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>"); MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
gpl-2.0
jstotero/Cucciolone-Rewrite
drivers/s390/net/ctcm_sysfs.c
803
5343
/* * drivers/s390/net/ctcm_sysfs.c * * Copyright IBM Corp. 2007, 2007 * Authors: Peter Tiedemann (ptiedem@de.ibm.com) * */ #undef DEBUG #undef DEBUGDATA #undef DEBUGCCW #define KMSG_COMPONENT "ctcm" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/sysfs.h> #include <linux/slab.h> #include "ctcm_main.h" /* * sysfs attributes */ static ssize_t ctcm_buffer_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; return sprintf(buf, "%d\n", priv->buffer_size); } static ssize_t ctcm_buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct net_device *ndev; int bs1; struct ctcm_priv *priv = dev_get_drvdata(dev); if (!(priv && priv->channel[READ] && (ndev = priv->channel[READ]->netdev))) { CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev"); return -ENODEV; } sscanf(buf, "%u", &bs1); if (bs1 > CTCM_BUFSIZE_LIMIT) goto einval; if (bs1 < (576 + LL_HEADER_LENGTH + 2)) goto einval; priv->buffer_size = bs1; /* just to overwrite the default */ if ((ndev->flags & IFF_RUNNING) && (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2))) goto einval; priv->channel[READ]->max_bufsize = bs1; priv->channel[WRITE]->max_bufsize = bs1; if (!(ndev->flags & IFF_RUNNING)) ndev->mtu = bs1 - LL_HEADER_LENGTH - 2; priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; CTCM_DBF_DEV(SETUP, ndev, buf); return count; einval: CTCM_DBF_DEV(SETUP, ndev, "buff_err"); return -EINVAL; } static void ctcm_print_statistics(struct ctcm_priv *priv) { char *sbuf; char *p; if (!priv) return; sbuf = kmalloc(2048, GFP_KERNEL); if (sbuf == NULL) return; p = sbuf; p += sprintf(p, " Device FSM state: %s\n", fsm_getstate_str(priv->fsm)); p += sprintf(p, " RX channel FSM state: %s\n", fsm_getstate_str(priv->channel[READ]->fsm)); p += sprintf(p, " TX channel FSM state: %s\n", fsm_getstate_str(priv->channel[WRITE]->fsm)); p += 
sprintf(p, " Max. TX buffer used: %ld\n", priv->channel[WRITE]->prof.maxmulti); p += sprintf(p, " Max. chained SKBs: %ld\n", priv->channel[WRITE]->prof.maxcqueue); p += sprintf(p, " TX single write ops: %ld\n", priv->channel[WRITE]->prof.doios_single); p += sprintf(p, " TX multi write ops: %ld\n", priv->channel[WRITE]->prof.doios_multi); p += sprintf(p, " Netto bytes written: %ld\n", priv->channel[WRITE]->prof.txlen); p += sprintf(p, " Max. TX IO-time: %ld\n", priv->channel[WRITE]->prof.tx_time); printk(KERN_INFO "Statistics for %s:\n%s", priv->channel[WRITE]->netdev->name, sbuf); kfree(sbuf); return; } static ssize_t stats_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; ctcm_print_statistics(priv); return sprintf(buf, "0\n"); } static ssize_t stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; /* Reset statistics */ memset(&priv->channel[WRITE]->prof, 0, sizeof(priv->channel[WRITE]->prof)); return count; } static ssize_t ctcm_proto_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; return sprintf(buf, "%d\n", priv->protocol); } static ssize_t ctcm_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value; struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; sscanf(buf, "%u", &value); if (!((value == CTCM_PROTO_S390) || (value == CTCM_PROTO_LINUX) || (value == CTCM_PROTO_MPC) || (value == CTCM_PROTO_OS390))) return -EINVAL; priv->protocol = value; CTCM_DBF_DEV(SETUP, dev, buf); return count; } const char *ctcm_type[] = { "not a channel", "CTC/A", "FICON channel", "ESCON channel", "unknown channel type", "unsupported channel type", }; static ssize_t ctcm_type_show(struct device *dev, struct 
device_attribute *attr, char *buf) { struct ccwgroup_device *cgdev; cgdev = to_ccwgroupdev(dev); if (!cgdev) return -ENODEV; return sprintf(buf, "%s\n", ctcm_type[cgdev->cdev[0]->id.driver_info]); } static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write); static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store); static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL); static DEVICE_ATTR(stats, 0644, stats_show, stats_write); static struct attribute *ctcm_attr[] = { &dev_attr_protocol.attr, &dev_attr_type.attr, &dev_attr_buffer.attr, NULL, }; static struct attribute_group ctcm_attr_group = { .attrs = ctcm_attr, }; int ctcm_add_attributes(struct device *dev) { int rc; rc = device_create_file(dev, &dev_attr_stats); return rc; } void ctcm_remove_attributes(struct device *dev) { device_remove_file(dev, &dev_attr_stats); } int ctcm_add_files(struct device *dev) { return sysfs_create_group(&dev->kobj, &ctcm_attr_group); } void ctcm_remove_files(struct device *dev) { sysfs_remove_group(&dev->kobj, &ctcm_attr_group); }
gpl-2.0
CyanogenMod/lge-kernel-msm7x27
drivers/xen/balloon.c
803
14919
/****************************************************************************** * balloon.c * * Xen balloon driver - enables returning/claiming memory to/from Xen. * * Copyright (c) 2003, B Dragovic * Copyright (c) 2003-2004, M Williamson, K Fraser * Copyright (c) 2005 Dan M. Smith, IBM Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/bootmem.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/sysdev.h> #include <linux/gfp.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/tlb.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include <xen/xen.h> #include <xen/interface/xen.h> #include <xen/interface/memory.h> #include <xen/xenbus.h> #include <xen/features.h> #include <xen/page.h> #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) #define BALLOON_CLASS_NAME "xen_memory" struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; /* * Drivers may alter the memory reservation independently, but they * must inform the balloon driver so we avoid hitting the hard limit. */ unsigned long driver_pages; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; }; static DEFINE_MUTEX(balloon_mutex); static struct sys_device balloon_sysdev; static int register_balloon(struct sys_device *sysdev); /* * Protects atomic reservation decrease/increase against concurrent increases. * Also protects non-atomic updates of current_pages and driver_pages, and * balloon lists. */ static DEFINE_SPINLOCK(balloon_lock); static struct balloon_stats balloon_stats; /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_HIGHMEM #define inc_totalhigh_pages() (totalhigh_pages++) #define dec_totalhigh_pages() (totalhigh_pages--) #else #define inc_totalhigh_pages() do {} while(0) #define dec_totalhigh_pages() do {} while(0) #endif /* List of ballooned pages, threaded through the mem_map array. 
*/ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *work); static DECLARE_WORK(balloon_worker, balloon_process); static struct timer_list balloon_timer; /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC) static void scrub_page(struct page *page) { #ifdef CONFIG_XEN_SCRUB_PAGES clear_highpage(page); #endif } /* balloon_append: add the given page to the balloon. */ static void balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { list_add_tail(&page->lru, &ballooned_pages); balloon_stats.balloon_high++; dec_totalhigh_pages(); } else { list_add(&page->lru, &ballooned_pages); balloon_stats.balloon_low++; } totalram_pages--; } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
*/ static struct page *balloon_retrieve(void) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; page = list_entry(ballooned_pages.next, struct page, lru); list_del(&page->lru); if (PageHighMem(page)) { balloon_stats.balloon_high--; inc_totalhigh_pages(); } else balloon_stats.balloon_low--; totalram_pages++; return page; } static struct page *balloon_first_page(void) { if (list_empty(&ballooned_pages)) return NULL; return list_entry(ballooned_pages.next, struct page, lru); } static struct page *balloon_next_page(struct page *page) { struct list_head *next = page->lru.next; if (next == &ballooned_pages) return NULL; return list_entry(next, struct page, lru); } static void balloon_alarm(unsigned long unused) { schedule_work(&balloon_worker); } static unsigned long current_target(void) { unsigned long target = balloon_stats.target_pages; target = min(target, balloon_stats.current_pages + balloon_stats.balloon_low + balloon_stats.balloon_high); return target; } static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); spin_lock_irqsave(&balloon_lock, flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); if (rc < 0) goto out; for (i = 0; i < rc; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); set_phys_to_machine(pfn, frame_list[i]); /* Link back into the page tables if not highmem. 
*/ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), mfn_pte(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } /* Relinquish the page back to the allocator. */ ClearPageReserved(page); init_page_count(page); __free_page(page); } balloon_stats.current_pages += rc; out: spin_unlock_irqrestore(&balloon_lock, flags); return rc < 0 ? rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); scrub_page(page); if (!PageHighMem(page)) { ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), __pte_ma(0), 0); BUG_ON(ret); } } /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); spin_lock_irqsave(&balloon_lock, flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(pfn_to_page(pfn)); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); balloon_stats.current_pages -= nr_pages; spin_unlock_irqrestore(&balloon_lock, flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. 
 */
/* Worker body: walk current_pages toward the target, one batch at a time. */
static void balloon_process(struct work_struct *work)
{
	int need_sleep = 0;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		/* credit > 0: inflate guest memory; credit < 0: deflate. */
		credit = current_target() - balloon_stats.current_pages;
		if (credit > 0)
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while ((credit != 0) && !need_sleep);

	/* Schedule more work if there is some still to be done. */
	if (current_target() != balloon_stats.current_pages)
		mod_timer(&balloon_timer, jiffies + HZ);

	mutex_unlock(&balloon_mutex);
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
static void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	schedule_work(&balloon_worker);
}

static struct xenbus_watch target_watch =
{
	.node = "memory/target"
};

/* React to a change in the target key */
static void watch_target(struct xenbus_watch *watch,
			 const char **vec, unsigned int len)
{
	unsigned long long new_target;
	int err;

	err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
	if (err != 1) {
		/* This is ok (for domain0 at least) - so just return */
		return;
	}

	/* The given memory/target value is in KiB, so it needs converting to
	 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
	 */
	balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
}

/* xenstore-ready notifier callback: install the memory/target watch.
 * Always returns NOTIFY_DONE, even on watch-registration failure (the
 * error is only logged).
 */
static int balloon_init_watcher(struct notifier_block *notifier,
				unsigned long event,
				void *data)
{
	int err;

	err = register_xenbus_watch(&target_watch);
	if (err)
		printk(KERN_ERR "Failed to set balloon watcher\n");

	return NOTIFY_DONE;
}

static struct notifier_block xenstore_notifier;

/* Driver init: only runs on Xen PV domains; seeds the stats from the
 * start-of-day info and hands any memory above nr_pages to the balloon.
 */
static int __init balloon_init(void)
{
	unsigned long pfn;
	struct page *page;

	if (!xen_pv_domain())
		return -ENODEV;

	pr_info("xen_balloon: Initialising balloon driver.\n");

	balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;
	balloon_stats.driver_pages  = 0UL;

	init_timer(&balloon_timer);
	balloon_timer.data = 0;
	balloon_timer.function = balloon_alarm;

	register_balloon(&balloon_sysdev);

	/* Initialise the balloon with excess memory space. */
	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
		page = pfn_to_page(pfn);
		if (!PageReserved(page))
			balloon_append(page);
	}

	target_watch.callback = watch_target;
	xenstore_notifier.notifier_call = balloon_init_watcher;

	register_xenstore_notifier(&xenstore_notifier);

	return 0;
}

subsys_initcall(balloon_init);

static void balloon_exit(void)
{
	/* XXX - release balloon here */
	return;
}

module_exit(balloon_exit);

/* Generate a read-only sysdev attribute that pretty-prints one stat. */
#define BALLOON_SHOW(name, format, args...)				\
	static ssize_t show_##name(struct sys_device *dev,		\
				   struct sysdev_attribute *attr,	\
				   char *buf)				\
	{								\
		return sprintf(buf, format, ##args);			\
	}								\
	static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)

BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));

static ssize_t show_target_kb(struct sys_device *dev,
			      struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
}

/* Writable target in KiB; base-0 parse so hex/octal are accepted. */
static ssize_t store_target_kb(struct sys_device *dev,
			       struct sysdev_attribute *attr,
			       const char *buf,
			       size_t count)
{
	char *endchar;
	unsigned long long target_bytes;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;

	balloon_set_new_target(target_bytes >> PAGE_SHIFT);

	return count;
}

static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
		   show_target_kb, store_target_kb);

static ssize_t show_target(struct sys_device *dev,
			   struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)balloon_stats.target_pages
		       << PAGE_SHIFT);
}

/* Writable target in bytes; memparse() accepts K/M/G suffixes. */
static ssize_t store_target(struct sys_device *dev,
			    struct sysdev_attribute *attr,
			    const char *buf,
			    size_t count)
{
	char *endchar;
	unsigned long long target_bytes;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	target_bytes = memparse(buf, &endchar);

	balloon_set_new_target(target_bytes >> PAGE_SHIFT);

	return count;
}

static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
		   show_target, store_target);

static struct sysdev_attribute *balloon_attrs[] = {
	&attr_target_kb,
	&attr_target,
};

static struct attribute *balloon_info_attrs[] = {
	&attr_current_kb.attr,
	&attr_low_kb.attr,
	&attr_high_kb.attr,
	&attr_driver_kb.attr,
	NULL
};

static struct attribute_group balloon_info_group = {
	.name = "info",
	.attrs = balloon_info_attrs,
};

static struct sysdev_class balloon_sysdev_class = {
	.name = BALLOON_CLASS_NAME,
};

/* Register the balloon sysdev class/device and its attribute files.
 * On any failure, unwinds everything created so far and returns the
 * error; attribute files created before the failure are removed.
 */
static int register_balloon(struct sys_device *sysdev)
{
	int i, error;

	error = sysdev_class_register(&balloon_sysdev_class);
	if (error)
		return error;

	sysdev->id = 0;
	sysdev->cls = &balloon_sysdev_class;

	error = sysdev_register(sysdev);
	if (error) {
		sysdev_class_unregister(&balloon_sysdev_class);
		return error;
	}

	for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
		error = sysdev_create_file(sysdev, balloon_attrs[i]);
		if (error)
			goto fail;
	}

	error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
	if (error)
		goto fail;

	return 0;

 fail:
	while (--i >= 0)
		sysdev_remove_file(sysdev, balloon_attrs[i]);
	sysdev_unregister(sysdev);
	sysdev_class_unregister(&balloon_sysdev_class);
	return error;
}

MODULE_LICENSE("GPL");
gpl-2.0
Perferom/android_kernel_htc_msm7x27
sound/pci/sis7019.c
803
39720
/*
 * Driver for SiS7019 Audio Accelerator
 *
 * Copyright (C) 2004-2007, David Dillow
 * Written by David Dillow <dave@thedillows.org>
 * Inspired by the Trident 4D-WaveDX/NX driver.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#include "sis7019.h"

MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_DESCRIPTION("SiS7019");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");

static int index = SNDRV_DEFAULT_IDX1;	/* Index 0-MAX */
static char *id = SNDRV_DEFAULT_STR1;	/* ID for this card */
static int enable = 1;

module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
module_param(id, charp, 0444);
MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
module_param(enable, bool, 0444);
MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");

static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, snd_sis7019_ids);

/* There are three timing modes for the voices.
 *
 * For both playback and capture, when the buffer is one or two periods long,
 * we use the hardware's built-in Mid-Loop Interrupt and End-Loop Interrupt
 * to let us know when the periods have ended.
 *
 * When performing playback with more than two periods per buffer, we set
 * the "Stop Sample Offset" and tell the hardware to interrupt us when we
 * reach it. We then update the offset and continue on until we are
 * interrupted for the next period.
 *
 * Capture channels do not have a SSO, so we allocate a playback channel to
 * use as a timer for the capture periods. We use the SSO on the playback
 * channel to clock out virtual periods, and adjust the virtual period length
 * to maintain synchronization. This algorithm came from the Trident driver.
 *
 * FIXME: It'd be nice to make use of some of the synth features in the
 * hardware, but a woeful lack of documentation is a significant roadblock.
 */
/* Per-channel state for one of the 64 hardware voices. */
struct voice {
	u16 flags;		/* VOICE_* bits below */
#define VOICE_IN_USE		1
#define VOICE_CAPTURE		2
#define VOICE_SSO_TIMING	4
#define VOICE_SYNC_TIMING	8
	u16 sync_cso;		/* expected capture CSO at next interrupt */
	u16 period_size;
	u16 buffer_size;
	u16 sync_period_size;
	u16 sync_buffer_size;
	u32 sso;		/* current Stop Sample Offset */
	u32 vperiod;		/* remaining samples in the virtual period */
	struct snd_pcm_substream *substream;
	struct voice *timing;	/* playback voice clocking this capture */
	void __iomem *ctrl_base;
	void __iomem *wave_base;
	void __iomem *sync_base;
	int num;		/* hardware voice index, 0-63 */
};

/* We need four pages to store our wave parameters during a suspend. If
 * we're not doing power management, we still need to allocate a page
 * for the silence buffer.
 */
#ifdef CONFIG_PM
#define SIS_SUSPEND_PAGES	4
#else
#define SIS_SUSPEND_PAGES	1
#endif

/* Per-card driver state. */
struct sis7019 {
	unsigned long ioport;
	void __iomem *ioaddr;
	int irq;
	int codecs_present;	/* SIS_*_CODEC_PRESENT bits below */

	struct pci_dev *pci;
	struct snd_pcm *pcm;
	struct snd_card *card;
	struct snd_ac97 *ac97[3];

	/* Protect against more than one thread hitting the AC97
	 * registers (in a more polite manner than pounding the hardware
	 * semaphore)
	 */
	struct mutex ac97_mutex;

	/* voice_lock protects allocation/freeing of the voice descriptions
	 */
	spinlock_t voice_lock;

	struct voice voices[64];
	struct voice capture_voice;

	/* Allocate pages to store the internal wave state during
	 * suspends. When we're operating, this can be used as a silence
	 * buffer for a timing channel.
	 */
	void *suspend_state[SIS_SUSPEND_PAGES];

	int silence_users;	/* refcount on the silence DMA mapping */
	dma_addr_t silence_dma_addr;
};

#define SIS_PRIMARY_CODEC_PRESENT	0x0001
#define SIS_SECONDARY_CODEC_PRESENT	0x0002
#define SIS_TERTIARY_CODEC_PRESENT	0x0004

/* The HW offset parameters (Loop End, Stop Sample, End Sample) have a
 * documented range of 8-0xfff8 samples. Given that they are 0-based,
 * that places our period/buffer range at 9-0xfff9 samples. That makes the
 * max buffer size 0xfff9 samples * 2 channels * 2 bytes per sample, and
 * max samples / min samples gives us the max periods in a buffer.
 *
 * We'll add a constraint upon open that limits the period and buffer sample
 * size to values that are legal for the hardware.
 */
static struct snd_pcm_hardware sis_playback_hw_info = {
	.info = (SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_SYNC_START |
		 SNDRV_PCM_INFO_RESUME),
	.formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
		    SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE),
	.rates = SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_CONTINUOUS,
	.rate_min = 4000,
	.rate_max = 48000,
	.channels_min = 1,
	.channels_max = 2,
	.buffer_bytes_max = (0xfff9 * 4),
	.period_bytes_min = 9,
	.period_bytes_max = (0xfff9 * 4),
	.periods_min = 1,
	.periods_max = (0xfff9 / 9),
};

static struct snd_pcm_hardware sis_capture_hw_info = {
	.info = (SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_SYNC_START |
		 SNDRV_PCM_INFO_RESUME),
	.formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
		    SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE),
	.rates = SNDRV_PCM_RATE_48000,
	.rate_min = 4000,
	.rate_max = 48000,
	.channels_min = 1,
	.channels_max = 2,
	.buffer_bytes_max = (0xfff9 * 4),
	.period_bytes_min = 9,
	.period_bytes_max = (0xfff9 * 4),
	.periods_min = 1,
	.periods_max = (0xfff9 / 9),
};

/* Advance the Stop Sample Offset by 'period' samples, wrapping within the
 * loop buffer, and program the new value into the channel's SSO register.
 */
static void sis_update_sso(struct voice *voice, u16 period)
{
	void __iomem *base = voice->ctrl_base;

	voice->sso += period;
	if (voice->sso >= voice->buffer_size)
		voice->sso -= voice->buffer_size;

	/* Enforce the documented hardware minimum offset */
	if (voice->sso < 8)
		voice->sso = 8;

	/* The SSO is in the upper 16 bits of the register. */
	writew(voice->sso & 0xffff, base + SIS_PLAY_DMA_SSO_ESO + 2);
}

/* Per-voice interrupt work: advance SSO-timed playback, or resynchronize a
 * capture channel's timing voice, then signal the period to the PCM core.
 */
static void sis_update_voice(struct voice *voice)
{
	if (voice->flags & VOICE_SSO_TIMING) {
		sis_update_sso(voice, voice->period_size);
	} else if (voice->flags & VOICE_SYNC_TIMING) {
		int sync;

		/* If we've not hit the end of the virtual period, update
		 * our records and keep going.
		 */
		if (voice->vperiod > voice->period_size) {
			voice->vperiod -= voice->period_size;
			if (voice->vperiod < voice->period_size)
				sis_update_sso(voice, voice->vperiod);
			else
				sis_update_sso(voice, voice->period_size);
			return;
		}

		/* Calculate our relative offset between the target and
		 * the actual CSO value. Since we're operating in a loop,
		 * if the value is more than half way around, we can
		 * consider ourselves wrapped.
		 */
		sync = voice->sync_cso;
		sync -= readw(voice->sync_base + SIS_CAPTURE_DMA_FORMAT_CSO);
		if (sync > (voice->sync_buffer_size / 2))
			sync -= voice->sync_buffer_size;

		/* If sync is positive, then we interrupted too early, and
		 * we'll need to come back in a few samples and try again.
		 * There's a minimum wait, as it takes some time for the DMA
		 * engine to startup, etc...
		 */
		if (sync > 0) {
			if (sync < 16)
				sync = 16;
			sis_update_sso(voice, sync);
			return;
		}

		/* Ok, we interrupted right on time, or (hopefully) just
		 * a bit late. We'll adjst our next waiting period based
		 * on how close we got.
		 *
		 * We need to stay just behind the actual channel to ensure
		 * it really is past a period when we get our interrupt --
		 * otherwise we'll fall into the early code above and have
		 * a minimum wait time, which makes us quite late here,
		 * eating into the user's time to refresh the buffer, esp.
		 * if using small periods.
		 *
		 * If we're less than 9 samples behind, we're on target.
		 */
		if (sync > -9)
			voice->vperiod = voice->sync_period_size + 1;
		else
			voice->vperiod = voice->sync_period_size - 4;

		if (voice->vperiod < voice->buffer_size) {
			sis_update_sso(voice, voice->vperiod);
			voice->vperiod = 0;
		} else
			sis_update_sso(voice, voice->period_size);

		sync = voice->sync_cso + voice->sync_period_size;
		if (sync >= voice->sync_buffer_size)
			sync -= voice->sync_buffer_size;
		voice->sync_cso = sync;
	}

	snd_pcm_period_elapsed(voice->substream);
}

/* Dispatch one bank's interrupt status bitmask to the voices it covers. */
static void sis_voice_irq(u32 status, struct voice *voice)
{
	int bit;

	while (status) {
		bit = __ffs(status);
		status >>= bit + 1;
		voice += bit;
		sis_update_voice(voice);
		voice++;
	}
}

static irqreturn_t sis_interrupt(int irq, void *dev)
{
	struct sis7019 *sis = dev;
	unsigned long io = sis->ioport;
	struct voice *voice;
	u32 intr, status;

	/* We only use the DMA interrupts, and we don't enable any other
	 * source of interrupts. But, it is possible to see an interupt
	 * status that didn't actually interrupt us, so eliminate anything
	 * we're not expecting to avoid falsely claiming an IRQ, and an
	 * ensuing endless loop.
	 */
	intr = inl(io + SIS_GISR);
	intr &= SIS_GISR_AUDIO_PLAY_DMA_IRQ_STATUS |
		SIS_GISR_AUDIO_RECORD_DMA_IRQ_STATUS;
	if (!intr)
		return IRQ_NONE;

	do {
		status = inl(io + SIS_PISR_A);
		if (status) {
			sis_voice_irq(status, sis->voices);
			outl(status, io + SIS_PISR_A);
		}

		status = inl(io + SIS_PISR_B);
		if (status) {
			sis_voice_irq(status, &sis->voices[32]);
			outl(status, io + SIS_PISR_B);
		}

		status = inl(io + SIS_RISR);
		if (status) {
			voice = &sis->capture_voice;
			if (!voice->timing)
				snd_pcm_period_elapsed(voice->substream);

			outl(status, io + SIS_RISR);
		}

		outl(intr, io + SIS_GISR);
		intr = inl(io + SIS_GISR);
		intr &= SIS_GISR_AUDIO_PLAY_DMA_IRQ_STATUS |
			SIS_GISR_AUDIO_RECORD_DMA_IRQ_STATUS;
	} while (intr);

	return IRQ_HANDLED;
}

/* Convert a sample rate to the hardware's 4.12 fixed-point pitch delta
 * (0x1000 == 48kHz).
 */
static u32 sis_rate_to_delta(unsigned int rate)
{
	u32 delta;

	/* This was copied from the trident driver, but it seems its gotten
	 * around a bit... nevertheless, it works well.
	 *
	 * We special case 44100 and 8000 since rounding with the equation
	 * does not give us an accurate enough value. For 11025 and 22050
	 * the equation gives us the best answer. All other frequencies will
	 * also use the equation. JDW
	 */
	if (rate == 44100)
		delta = 0xeb3;
	else if (rate == 8000)
		delta = 0x2ab;
	else if (rate == 48000)
		delta = 0x1000;
	else
		delta = (((rate << 12) + 24000) / 48000) & 0x0000ffff;
	return delta;
}

static void __sis_map_silence(struct sis7019 *sis)
{
	/* Helper function: must hold sis->voice_lock on entry */
	if (!sis->silence_users)
		sis->silence_dma_addr = pci_map_single(sis->pci,
						sis->suspend_state[0],
						4096, PCI_DMA_TODEVICE);
	sis->silence_users++;
}

static void __sis_unmap_silence(struct sis7019 *sis)
{
	/* Helper function: must hold sis->voice_lock on entry */
	sis->silence_users--;
	if (!sis->silence_users)
		pci_unmap_single(sis->pci, sis->silence_dma_addr, 4096,
					PCI_DMA_TODEVICE);
}

/* Release a voice (and any timing voice attached to it) back to the pool. */
static void sis_free_voice(struct sis7019 *sis, struct voice *voice)
{
	unsigned long flags;

	spin_lock_irqsave(&sis->voice_lock, flags);
	if (voice->timing) {
		__sis_unmap_silence(sis);
		voice->timing->flags &= ~(VOICE_IN_USE | VOICE_SSO_TIMING |
						VOICE_SYNC_TIMING);
		voice->timing = NULL;
	}

	voice->flags &= ~(VOICE_IN_USE | VOICE_SSO_TIMING |
				VOICE_SYNC_TIMING);
	spin_unlock_irqrestore(&sis->voice_lock, flags);
}

static struct voice *__sis_alloc_playback_voice(struct sis7019 *sis)
{
	/* Must hold the voice_lock on entry */
	struct voice *voice;
	int i;

	for (i = 0; i < 64; i++) {
		voice = &sis->voices[i];
		if (voice->flags & VOICE_IN_USE)
			continue;
		voice->flags |= VOICE_IN_USE;
		goto found_one;
	}
	voice = NULL;

found_one:
	return voice;
}

/* Locked wrapper around __sis_alloc_playback_voice(); returns NULL when
 * all 64 voices are busy.
 */
static struct voice *sis_alloc_playback_voice(struct sis7019 *sis)
{
	struct voice *voice;
	unsigned long flags;

	spin_lock_irqsave(&sis->voice_lock, flags);
	voice = __sis_alloc_playback_voice(sis);
	spin_unlock_irqrestore(&sis->voice_lock, flags);

	return voice;
}

/* Attach or detach a playback timing voice for a capture stream, based on
 * whether the requested period/buffer geometry needs virtual periods.
 */
static int sis_alloc_timing_voice(struct snd_pcm_substream *substream,
					struct snd_pcm_hw_params *hw_params)
{
	struct sis7019 *sis = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voice *voice = runtime->private_data;
	unsigned int period_size, buffer_size;
	unsigned long flags;
	int needed;

	/* If there are one or two periods per buffer, we don't need a
	 * timing voice, as we can use the capture channel's interrupts
	 * to clock out the periods.
	 */
	period_size = params_period_size(hw_params);
	buffer_size = params_buffer_size(hw_params);
	needed = (period_size != buffer_size &&
			period_size != (buffer_size / 2));

	if (needed && !voice->timing) {
		spin_lock_irqsave(&sis->voice_lock, flags);
		voice->timing = __sis_alloc_playback_voice(sis);
		if (voice->timing)
			__sis_map_silence(sis);
		spin_unlock_irqrestore(&sis->voice_lock, flags);
		if (!voice->timing)
			return -ENOMEM;
		voice->timing->substream = substream;
	} else if (!needed && voice->timing) {
		sis_free_voice(sis, voice);
		voice->timing = NULL;
	}

	return 0;
}

/* PCM playback open: grab a free voice and publish hw limits/constraints. */
static int sis_playback_open(struct snd_pcm_substream *substream)
{
	struct sis7019 *sis = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voice *voice;

	voice = sis_alloc_playback_voice(sis);
	if (!voice)
		return -EAGAIN;

	voice->substream = substream;
	runtime->private_data = voice;
	runtime->hw = sis_playback_hw_info;

	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
					9, 0xfff9);
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
					9, 0xfff9);

	snd_pcm_set_sync(substream);
	return 0;
}

/* Shared close handler for playback and capture substreams. */
static int sis_substream_close(struct snd_pcm_substream *substream)
{
	struct sis7019 *sis = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voice *voice = runtime->private_data;

	sis_free_voice(sis, voice);
	return 0;
}

static int sis_playback_hw_params(struct snd_pcm_substream *substream,
					struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(substream,
					params_buffer_bytes(hw_params));
}
static int sis_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}

/* Program a playback voice's DMA and wave registers from the runtime
 * geometry, selecting MLP/LEO/SSO interrupt timing as appropriate.
 */
static int sis_pcm_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voice *voice = runtime->private_data;
	void __iomem *ctrl_base = voice->ctrl_base;
	void __iomem *wave_base = voice->wave_base;
	u32 format, dma_addr, control, sso_eso, delta, reg;
	u16 leo;

	/* We rely on the PCM core to ensure that the parameters for this
	 * substream do not change on us while we're programming the HW.
	 */
	format = 0;
	if (snd_pcm_format_width(runtime->format) == 8)
		format |= SIS_PLAY_DMA_FORMAT_8BIT;
	if (!snd_pcm_format_signed(runtime->format))
		format |= SIS_PLAY_DMA_FORMAT_UNSIGNED;
	if (runtime->channels == 1)
		format |= SIS_PLAY_DMA_FORMAT_MONO;

	/* The baseline setup is for a single period per buffer, and
	 * we add bells and whistles as needed from there.
	 */
	dma_addr = runtime->dma_addr;
	leo = runtime->buffer_size - 1;
	control = leo | SIS_PLAY_DMA_LOOP | SIS_PLAY_DMA_INTR_AT_LEO;
	sso_eso = leo;

	if (runtime->period_size == (runtime->buffer_size / 2)) {
		control |= SIS_PLAY_DMA_INTR_AT_MLP;
	} else if (runtime->period_size != runtime->buffer_size) {
		voice->flags |= VOICE_SSO_TIMING;
		voice->sso = runtime->period_size - 1;
		voice->period_size = runtime->period_size;
		voice->buffer_size = runtime->buffer_size;

		control &= ~SIS_PLAY_DMA_INTR_AT_LEO;
		control |= SIS_PLAY_DMA_INTR_AT_SSO;
		sso_eso |= (runtime->period_size - 1) << 16;
	}

	delta = sis_rate_to_delta(runtime->rate);

	/* Ok, we're ready to go, set up the channel.
	 */
	writel(format, ctrl_base + SIS_PLAY_DMA_FORMAT_CSO);
	writel(dma_addr, ctrl_base + SIS_PLAY_DMA_BASE);
	writel(control, ctrl_base + SIS_PLAY_DMA_CONTROL);
	writel(sso_eso, ctrl_base + SIS_PLAY_DMA_SSO_ESO);

	for (reg = 0; reg < SIS_WAVE_SIZE; reg += 4)
		writel(0, wave_base + reg);

	writel(SIS_WAVE_GENERAL_WAVE_VOLUME, wave_base + SIS_WAVE_GENERAL);
	writel(delta << 16, wave_base + SIS_WAVE_GENERAL_ARTICULATION);
	writel(SIS_WAVE_CHANNEL_CONTROL_FIRST_SAMPLE |
			SIS_WAVE_CHANNEL_CONTROL_AMP_ENABLE |
			SIS_WAVE_CHANNEL_CONTROL_INTERPOLATE_ENABLE,
			wave_base + SIS_WAVE_CHANNEL_CONTROL);

	/* Force PCI writes to post. */
	readl(ctrl_base);

	return 0;
}

/* Start/stop trigger shared by playback and capture; collects every linked
 * substream's voice into per-bank bitmasks and kicks them in one IO write.
 */
static int sis_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct sis7019 *sis = snd_pcm_substream_chip(substream);
	unsigned long io = sis->ioport;
	struct snd_pcm_substream *s;
	struct voice *voice;
	void *chip;
	int starting;
	u32 record = 0;
	u32 play[2] = { 0, 0 };

	/* No locks needed, as the PCM core will hold the locks on the
	 * substreams, and the HW will only start/stop the indicated voices
	 * without changing the state of the others.
	 */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		starting = 1;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		starting = 0;
		break;
	default:
		return -EINVAL;
	}

	snd_pcm_group_for_each_entry(s, substream) {
		/* Make sure it is for us... */
		chip = snd_pcm_substream_chip(s);
		if (chip != sis)
			continue;

		voice = s->runtime->private_data;
		if (voice->flags & VOICE_CAPTURE) {
			record |= 1 << voice->num;
			voice = voice->timing;
		}

		/* voice could be NULL if this a recording stream, and it
		 * doesn't have an external timing channel.
		 */
		if (voice)
			play[voice->num / 32] |= 1 << (voice->num & 0x1f);

		snd_pcm_trigger_done(s, substream);
	}

	if (starting) {
		if (record)
			outl(record, io + SIS_RECORD_START_REG);
		if (play[0])
			outl(play[0], io + SIS_PLAY_START_A_REG);
		if (play[1])
			outl(play[1], io + SIS_PLAY_START_B_REG);
	} else {
		if (record)
			outl(record, io + SIS_RECORD_STOP_REG);
		if (play[0])
			outl(play[0], io + SIS_PLAY_STOP_A_REG);
		if (play[1])
			outl(play[1], io + SIS_PLAY_STOP_B_REG);
	}

	return 0;
}

/* Current position: the low 16 bits of the FORMAT_CSO register, in frames. */
static snd_pcm_uframes_t sis_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voice *voice = runtime->private_data;
	u32 cso;

	cso = readl(voice->ctrl_base + SIS_PLAY_DMA_FORMAT_CSO);
	cso &= 0xffff;
	return cso;
}

/* Capture open: claim the single capture voice and limit rates to what the
 * primary codec's ADC advertises.
 */
static int sis_capture_open(struct snd_pcm_substream *substream)
{
	struct sis7019 *sis = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voice *voice = &sis->capture_voice;
	unsigned long flags;

	/* FIXME: The driver only supports recording from one channel
	 * at the moment, but it could support more.
	 */
	spin_lock_irqsave(&sis->voice_lock, flags);
	if (voice->flags & VOICE_IN_USE)
		voice = NULL;
	else
		voice->flags |= VOICE_IN_USE;
	spin_unlock_irqrestore(&sis->voice_lock, flags);

	if (!voice)
		return -EAGAIN;

	voice->substream = substream;
	runtime->private_data = voice;
	runtime->hw = sis_capture_hw_info;
	runtime->hw.rates = sis->ac97[0]->rates[AC97_RATES_ADC];
	snd_pcm_limit_hw_rates(runtime);

	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
					9, 0xfff9);
	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
					9, 0xfff9);

	snd_pcm_set_sync(substream);
	return 0;
}

/* Capture hw_params: set the codec ADC rate, allocate the DMA buffer, and
 * (if the geometry needs it) attach a timing voice.
 */
static int sis_capture_hw_params(struct snd_pcm_substream *substream,
					struct snd_pcm_hw_params *hw_params)
{
	struct sis7019 *sis = snd_pcm_substream_chip(substream);
	int rc;

	rc = snd_ac97_set_rate(sis->ac97[0], AC97_PCM_LR_ADC_RATE,
						params_rate(hw_params));
	if (rc)
		goto out;

	rc = snd_pcm_lib_malloc_pages(substream,
					params_buffer_bytes(hw_params));
	if (rc < 0)
		goto out;

	rc = sis_alloc_timing_voice(substream, hw_params);

out:
	return rc;
}

/* Configure the playback voice that clocks a capture stream: it loops over
 * the silence page and interrupts at a (possibly virtual) period boundary.
 */
static void sis_prepare_timing_voice(struct voice *voice,
					struct snd_pcm_substream *substream)
{
	struct sis7019 *sis = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voice *timing = voice->timing;
	void __iomem *play_base = timing->ctrl_base;
	void __iomem *wave_base = timing->wave_base;
	u16 buffer_size, period_size;
	u32 format, control, sso_eso, delta;
	u32 vperiod, sso, reg;

	/* Set our initial buffer and period as large as we can given a
	 * single page of silence.
	 */
	buffer_size = 4096 / runtime->channels;
	buffer_size /= snd_pcm_format_size(runtime->format, 1);
	period_size = buffer_size;

	/* Initially, we want to interrupt just a bit behind the end of
	 * the period we're clocking out. 10 samples seems to give a good
	 * delay.
	 *
	 * We want to spread our interrupts throughout the virtual period,
	 * so that we don't end up with two interrupts back to back at the
	 * end -- this helps minimize the effects of any jitter. Adjust our
	 * clocking period size so that the last period is at least a fourth
	 * of a full period.
	 *
	 * This is all moot if we don't need to use virtual periods.
	 */
	vperiod = runtime->period_size + 10;
	if (vperiod > period_size) {
		u16 tail = vperiod % period_size;
		u16 quarter_period = period_size / 4;

		if (tail && tail < quarter_period) {
			u16 loops = vperiod / period_size;

			tail = quarter_period - tail;
			tail += loops - 1;
			tail /= loops;
			period_size -= tail;
		}

		sso = period_size - 1;
	} else {
		/* The initial period will fit inside the buffer, so we
		 * don't need to use virtual periods -- disable them.
		 */
		period_size = runtime->period_size;
		sso = vperiod - 1;
		vperiod = 0;
	}

	/* The interrupt handler implements the timing syncronization, so
	 * setup its state.
	 */
	timing->flags |= VOICE_SYNC_TIMING;
	timing->sync_base = voice->ctrl_base;
	timing->sync_cso = runtime->period_size - 1;
	timing->sync_period_size = runtime->period_size;
	timing->sync_buffer_size = runtime->buffer_size;
	timing->period_size = period_size;
	timing->buffer_size = buffer_size;
	timing->sso = sso;
	timing->vperiod = vperiod;

	/* Using unsigned samples with the all-zero silence buffer
	 * forces the output to the lower rail, killing playback.
	 * So ignore unsigned vs signed -- it doesn't change the timing.
	 */
	format = 0;
	if (snd_pcm_format_width(runtime->format) == 8)
		format = SIS_CAPTURE_DMA_FORMAT_8BIT;
	if (runtime->channels == 1)
		format |= SIS_CAPTURE_DMA_FORMAT_MONO;

	control = timing->buffer_size - 1;
	control |= SIS_PLAY_DMA_LOOP | SIS_PLAY_DMA_INTR_AT_SSO;
	sso_eso = timing->buffer_size - 1;
	sso_eso |= timing->sso << 16;

	delta = sis_rate_to_delta(runtime->rate);

	/* We've done the math, now configure the channel.
	 */
	writel(format, play_base + SIS_PLAY_DMA_FORMAT_CSO);
	writel(sis->silence_dma_addr, play_base + SIS_PLAY_DMA_BASE);
	writel(control, play_base + SIS_PLAY_DMA_CONTROL);
	writel(sso_eso, play_base + SIS_PLAY_DMA_SSO_ESO);

	for (reg = 0; reg < SIS_WAVE_SIZE; reg += 4)
		writel(0, wave_base + reg);

	writel(SIS_WAVE_GENERAL_WAVE_VOLUME, wave_base + SIS_WAVE_GENERAL);
	writel(delta << 16, wave_base + SIS_WAVE_GENERAL_ARTICULATION);
	writel(SIS_WAVE_CHANNEL_CONTROL_FIRST_SAMPLE |
			SIS_WAVE_CHANNEL_CONTROL_AMP_ENABLE |
			SIS_WAVE_CHANNEL_CONTROL_INTERPOLATE_ENABLE,
			wave_base + SIS_WAVE_CHANNEL_CONTROL);
}

/* Program the capture DMA channel; delegates period clocking to a timing
 * voice when one is attached.
 */
static int sis_pcm_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct voice *voice = runtime->private_data;
	void __iomem *rec_base = voice->ctrl_base;
	u32 format, dma_addr, control;
	u16 leo;

	/* We rely on the PCM core to ensure that the parameters for this
	 * substream do not change on us while we're programming the HW.
	 */
	format = 0;
	if (snd_pcm_format_width(runtime->format) == 8)
		format = SIS_CAPTURE_DMA_FORMAT_8BIT;
	if (!snd_pcm_format_signed(runtime->format))
		format |= SIS_CAPTURE_DMA_FORMAT_UNSIGNED;
	if (runtime->channels == 1)
		format |= SIS_CAPTURE_DMA_FORMAT_MONO;

	dma_addr = runtime->dma_addr;
	leo = runtime->buffer_size - 1;
	control = leo | SIS_CAPTURE_DMA_LOOP;

	/* If we've got more than two periods per buffer, then we have
	 * use a timing voice to clock out the periods. Otherwise, we can
	 * use the capture channel's interrupts.
	 */
	if (voice->timing) {
		sis_prepare_timing_voice(voice, substream);
	} else {
		control |= SIS_CAPTURE_DMA_INTR_AT_LEO;
		if (runtime->period_size != runtime->buffer_size)
			control |= SIS_CAPTURE_DMA_INTR_AT_MLP;
	}

	writel(format, rec_base + SIS_CAPTURE_DMA_FORMAT_CSO);
	writel(dma_addr, rec_base + SIS_CAPTURE_DMA_BASE);
	writel(control, rec_base + SIS_CAPTURE_DMA_CONTROL);

	/* Force the writes to post.
	 */
	readl(rec_base);

	return 0;
}

static struct snd_pcm_ops sis_playback_ops = {
	.open = sis_playback_open,
	.close = sis_substream_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = sis_playback_hw_params,
	.hw_free = sis_hw_free,
	.prepare = sis_pcm_playback_prepare,
	.trigger = sis_pcm_trigger,
	.pointer = sis_pcm_pointer,
};

static struct snd_pcm_ops sis_capture_ops = {
	.open = sis_capture_open,
	.close = sis_substream_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = sis_capture_hw_params,
	.hw_free = sis_hw_free,
	.prepare = sis_pcm_capture_prepare,
	.trigger = sis_pcm_trigger,
	.pointer = sis_pcm_pointer,
};

/* Create the PCM device: 64 playback substreams, 1 capture substream. */
static int __devinit sis_pcm_create(struct sis7019 *sis)
{
	struct snd_pcm *pcm;
	int rc;

	/* We have 64 voices, and the driver currently records from
	 * only one channel, though that could change in the future.
	 */
	rc = snd_pcm_new(sis->card, "SiS7019", 0, 64, 1, &pcm);
	if (rc)
		return rc;

	pcm->private_data = sis;
	strcpy(pcm->name, "SiS7019");
	sis->pcm = pcm;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &sis_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &sis_capture_ops);

	/* Try to preallocate some memory, but it's not the end of the
	 * world if this fails.
	 */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					snd_dma_pci_data(sis->pci),
					64*1024, 128*1024);

	return 0;
}

/* Issue one AC97 command (read or write) to the given codec, serialized by
 * the software mutex and the hardware semaphore; returns the read data for
 * read commands, or 0xffff on timeout.
 */
static unsigned short sis_ac97_rw(struct sis7019 *sis, int codec, u32 cmd)
{
	unsigned long io = sis->ioport;
	unsigned short val = 0xffff;
	u16 status;
	u16 rdy;
	int count;
	static const u16 codec_ready[3] = {
		SIS_AC97_STATUS_CODEC_READY,
		SIS_AC97_STATUS_CODEC2_READY,
		SIS_AC97_STATUS_CODEC3_READY,
	};

	rdy = codec_ready[codec];

	/* Get the AC97 semaphore -- software first, so we don't spin
	 * pounding out IO reads on the hardware semaphore...
	 */
	mutex_lock(&sis->ac97_mutex);

	count = 0xffff;
	while ((inw(io + SIS_AC97_SEMA) & SIS_AC97_SEMA_BUSY) && --count)
		udelay(1);

	if (!count)
		goto timeout;

	/* ... and wait for any outstanding commands to complete ...
	 */
	count = 0xffff;
	do {
		status = inw(io + SIS_AC97_STATUS);
		if ((status & rdy) && !(status & SIS_AC97_STATUS_BUSY))
			break;

		udelay(1);
	} while (--count);

	if (!count)
		goto timeout_sema;

	/* ... before sending our command and waiting for it to finish ...
	 */
	outl(cmd, io + SIS_AC97_CMD);
	udelay(10);

	count = 0xffff;
	while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
		udelay(1);

	/* ... and reading the results (if any).
	 */
	val = inl(io + SIS_AC97_CMD) >> 16;

timeout_sema:
	outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
timeout:
	mutex_unlock(&sis->ac97_mutex);

	if (!count) {
		printk(KERN_ERR "sis7019: ac97 codec %d timeout cmd 0x%08x\n",
					codec, cmd);
	}

	return val;
}

static void sis_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
				unsigned short val)
{
	static const u32 cmd[3] = {
		SIS_AC97_CMD_CODEC_WRITE,
		SIS_AC97_CMD_CODEC2_WRITE,
		SIS_AC97_CMD_CODEC3_WRITE,
	};
	sis_ac97_rw(ac97->private_data, ac97->num,
			(val << 16) | (reg << 8) | cmd[ac97->num]);
}

static unsigned short sis_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
{
	static const u32 cmd[3] = {
		SIS_AC97_CMD_CODEC_READ,
		SIS_AC97_CMD_CODEC2_READ,
		SIS_AC97_CMD_CODEC3_READ,
	};
	return sis_ac97_rw(ac97->private_data, ac97->num,
					(reg << 8) | cmd[ac97->num]);
}

/* Register the AC97 bus and a mixer for each codec the reset detected. */
static int __devinit sis_mixer_create(struct sis7019 *sis)
{
	struct snd_ac97_bus *bus;
	struct snd_ac97_template ac97;
	static struct snd_ac97_bus_ops ops = {
		.write = sis_ac97_write,
		.read = sis_ac97_read,
	};
	int rc;

	memset(&ac97, 0, sizeof(ac97));
	ac97.private_data = sis;

	rc = snd_ac97_bus(sis->card, 0, &ops, NULL, &bus);
	if (!rc && sis->codecs_present & SIS_PRIMARY_CODEC_PRESENT)
		rc = snd_ac97_mixer(bus, &ac97, &sis->ac97[0]);
	ac97.num = 1;
	if (!rc && (sis->codecs_present & SIS_SECONDARY_CODEC_PRESENT))
		rc = snd_ac97_mixer(bus, &ac97, &sis->ac97[1]);
	ac97.num = 2;
	if (!rc && (sis->codecs_present & SIS_TERTIARY_CODEC_PRESENT))
		rc = snd_ac97_mixer(bus, &ac97, &sis->ac97[2]);

	/* If we return an error here, then snd_card_free() should
	 * free up any ac97 codecs that got created, as well as the bus.
	 */
	return rc;
}

static void sis_free_suspend(struct sis7019 *sis)
{
	int i;

	for (i = 0; i < SIS_SUSPEND_PAGES; i++)
		kfree(sis->suspend_state[i]);
}

/* Teardown: reset/quiesce the chip, then release IRQ, mappings, PCI
 * resources, and the suspend-state pages.
 */
static int sis_chip_free(struct sis7019 *sis)
{
	/* Reset the chip, and disable all interrputs.
	 */
	outl(SIS_GCR_SOFTWARE_RESET, sis->ioport + SIS_GCR);
	udelay(10);
	outl(0, sis->ioport + SIS_GCR);
	outl(0, sis->ioport + SIS_GIER);

	/* Now, free everything we allocated.
	 */
	if (sis->irq >= 0)
		free_irq(sis->irq, sis);

	if (sis->ioaddr)
		iounmap(sis->ioaddr);

	pci_release_regions(sis->pci);
	pci_disable_device(sis->pci);

	sis_free_suspend(sis);
	return 0;
}

static int sis_dev_free(struct snd_device *dev)
{
	struct sis7019 *sis = dev->device_data;
	return sis_chip_free(sis);
}

/* Bring the chip out of reset, cold-reset the AC-link, detect attached
 * codecs, and program the global DMA/sync/mixer defaults.
 */
static int sis_chip_init(struct sis7019 *sis)
{
	unsigned long io = sis->ioport;
	void __iomem *ioaddr = sis->ioaddr;
	u16 status;
	int count;
	int i;

	/* Reset the audio controller */
	outl(SIS_GCR_SOFTWARE_RESET, io + SIS_GCR);
	udelay(10);
	outl(0, io + SIS_GCR);

	/* Get the AC-link semaphore, and reset the codecs
	 */
	count = 0xffff;
	while ((inw(io + SIS_AC97_SEMA) & SIS_AC97_SEMA_BUSY) && --count)
		udelay(1);

	if (!count)
		return -EIO;

	outl(SIS_AC97_CMD_CODEC_COLD_RESET, io + SIS_AC97_CMD);
	udelay(10);

	count = 0xffff;
	while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
		udelay(1);

	/* Now that we've finished the reset, find out what's attached.
	 */
	status = inl(io + SIS_AC97_STATUS);
	if (status & SIS_AC97_STATUS_CODEC_READY)
		sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
	if (status & SIS_AC97_STATUS_CODEC2_READY)
		sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
	if (status & SIS_AC97_STATUS_CODEC3_READY)
		sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;

	/* All done, let go of the semaphore, and check for errors
	 */
	outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
	if (!sis->codecs_present || !count)
		return -EIO;

	/* Let the hardware know that the audio driver is alive,
	 * and enable PCM slots on the AC-link for L/R playback (3 & 4) and
	 * record channels. We're going to want to use Variable Rate Audio
	 * for recording, to avoid needlessly resampling from 48kHZ.
	 */
	outl(SIS_AC97_CONF_AUDIO_ALIVE, io + SIS_AC97_CONF);
	outl(SIS_AC97_CONF_AUDIO_ALIVE | SIS_AC97_CONF_PCM_LR_ENABLE |
		SIS_AC97_CONF_PCM_CAP_MIC_ENABLE |
		SIS_AC97_CONF_PCM_CAP_LR_ENABLE |
		SIS_AC97_CONF_CODEC_VRA_ENABLE, io + SIS_AC97_CONF);

	/* All AC97 PCM slots should be sourced from sub-mixer 0.
	 */
	outl(0, io + SIS_AC97_PSR);

	/* There is only one valid DMA setup for a PCI environment.
	 */
	outl(SIS_DMA_CSR_PCI_SETTINGS, io + SIS_DMA_CSR);

	/* Reset the syncronization groups for all of the channels
	 * to be asyncronous. If we start doing SPDIF or 5.1 sound, etc.
	 * we'll need to change how we handle these. Until then, we just
	 * assign sub-mixer 0 to all playback channels, and avoid any
	 * attenuation on the audio.
	 */
	outl(0, io + SIS_PLAY_SYNC_GROUP_A);
	outl(0, io + SIS_PLAY_SYNC_GROUP_B);
	outl(0, io + SIS_PLAY_SYNC_GROUP_C);
	outl(0, io + SIS_PLAY_SYNC_GROUP_D);
	outl(0, io + SIS_MIXER_SYNC_GROUP);

	for (i = 0; i < 64; i++) {
		writel(i, SIS_MIXER_START_ADDR(ioaddr, i));
		writel(SIS_MIXER_RIGHT_NO_ATTEN | SIS_MIXER_LEFT_NO_ATTEN |
				SIS_MIXER_DEST_0, SIS_MIXER_ADDR(ioaddr, i));
	}

	/* Don't attenuate any audio set for the wave amplifier.
	 *
	 * FIXME: Maximum attenuation is set for the music amp, which will
	 * need to change if we start using the synth engine.
*/ outl(0xffff0000, io + SIS_WEVCR); /* Ensure that the wave engine is in normal operating mode. */ outl(0, io + SIS_WECCR); /* Go ahead and enable the DMA interrupts. They won't go live * until we start a channel. */ outl(SIS_GIER_AUDIO_PLAY_DMA_IRQ_ENABLE | SIS_GIER_AUDIO_RECORD_DMA_IRQ_ENABLE, io + SIS_GIER); return 0; } #ifdef CONFIG_PM static int sis_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct sis7019 *sis = card->private_data; void __iomem *ioaddr = sis->ioaddr; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(sis->pcm); if (sis->codecs_present & SIS_PRIMARY_CODEC_PRESENT) snd_ac97_suspend(sis->ac97[0]); if (sis->codecs_present & SIS_SECONDARY_CODEC_PRESENT) snd_ac97_suspend(sis->ac97[1]); if (sis->codecs_present & SIS_TERTIARY_CODEC_PRESENT) snd_ac97_suspend(sis->ac97[2]); /* snd_pcm_suspend_all() stopped all channels, so we're quiescent. */ if (sis->irq >= 0) { free_irq(sis->irq, sis); sis->irq = -1; } /* Save the internal state away */ for (i = 0; i < 4; i++) { memcpy_fromio(sis->suspend_state[i], ioaddr, 4096); ioaddr += 4096; } pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int sis_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct sis7019 *sis = card->private_data; void __iomem *ioaddr = sis->ioaddr; int i; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "sis7019: unable to re-enable device\n"); goto error; } if (sis_chip_init(sis)) { printk(KERN_ERR "sis7019: unable to re-init controller\n"); goto error; } if (request_irq(pci->irq, sis_interrupt, IRQF_DISABLED|IRQF_SHARED, card->shortname, sis)) { printk(KERN_ERR "sis7019: unable to regain IRQ %d\n", pci->irq); goto error; } /* Restore saved state, then clear out the page we use for the * silence buffer. 
*/ for (i = 0; i < 4; i++) { memcpy_toio(ioaddr, sis->suspend_state[i], 4096); ioaddr += 4096; } memset(sis->suspend_state[0], 0, 4096); sis->irq = pci->irq; pci_set_master(pci); if (sis->codecs_present & SIS_PRIMARY_CODEC_PRESENT) snd_ac97_resume(sis->ac97[0]); if (sis->codecs_present & SIS_SECONDARY_CODEC_PRESENT) snd_ac97_resume(sis->ac97[1]); if (sis->codecs_present & SIS_TERTIARY_CODEC_PRESENT) snd_ac97_resume(sis->ac97[2]); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; error: snd_card_disconnect(card); return -EIO; } #endif /* CONFIG_PM */ static int sis_alloc_suspend(struct sis7019 *sis) { int i; /* We need 16K to store the internal wave engine state during a * suspend, but we don't need it to be contiguous, so play nice * with the memory system. We'll also use this area for a silence * buffer. */ for (i = 0; i < SIS_SUSPEND_PAGES; i++) { sis->suspend_state[i] = kmalloc(4096, GFP_KERNEL); if (!sis->suspend_state[i]) return -ENOMEM; } memset(sis->suspend_state[0], 0, 4096); return 0; } static int __devinit sis_chip_create(struct snd_card *card, struct pci_dev *pci) { struct sis7019 *sis = card->private_data; struct voice *voice; static struct snd_device_ops ops = { .dev_free = sis_dev_free, }; int rc; int i; rc = pci_enable_device(pci); if (rc) goto error_out; if (pci_set_dma_mask(pci, DMA_BIT_MASK(30)) < 0) { printk(KERN_ERR "sis7019: architecture does not support " "30-bit PCI busmaster DMA"); goto error_out_enabled; } memset(sis, 0, sizeof(*sis)); mutex_init(&sis->ac97_mutex); spin_lock_init(&sis->voice_lock); sis->card = card; sis->pci = pci; sis->irq = -1; sis->ioport = pci_resource_start(pci, 0); rc = pci_request_regions(pci, "SiS7019"); if (rc) { printk(KERN_ERR "sis7019: unable request regions\n"); goto error_out_enabled; } rc = -EIO; sis->ioaddr = ioremap_nocache(pci_resource_start(pci, 1), 0x4000); if (!sis->ioaddr) { printk(KERN_ERR "sis7019: unable to remap MMIO, aborting\n"); goto error_out_cleanup; } rc = sis_alloc_suspend(sis); if 
(rc < 0) { printk(KERN_ERR "sis7019: unable to allocate state storage\n"); goto error_out_cleanup; } rc = sis_chip_init(sis); if (rc) goto error_out_cleanup; if (request_irq(pci->irq, sis_interrupt, IRQF_DISABLED|IRQF_SHARED, card->shortname, sis)) { printk(KERN_ERR "unable to allocate irq %d\n", sis->irq); goto error_out_cleanup; } sis->irq = pci->irq; pci_set_master(pci); for (i = 0; i < 64; i++) { voice = &sis->voices[i]; voice->num = i; voice->ctrl_base = SIS_PLAY_DMA_ADDR(sis->ioaddr, i); voice->wave_base = SIS_WAVE_ADDR(sis->ioaddr, i); } voice = &sis->capture_voice; voice->flags = VOICE_CAPTURE; voice->num = SIS_CAPTURE_CHAN_AC97_PCM_IN; voice->ctrl_base = SIS_CAPTURE_DMA_ADDR(sis->ioaddr, voice->num); rc = snd_device_new(card, SNDRV_DEV_LOWLEVEL, sis, &ops); if (rc) goto error_out_cleanup; snd_card_set_dev(card, &pci->dev); return 0; error_out_cleanup: sis_chip_free(sis); error_out_enabled: pci_disable_device(pci); error_out: return rc; } static int __devinit snd_sis7019_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; struct sis7019 *sis; int rc; rc = -ENOENT; if (!enable) goto error_out; rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card); if (rc < 0) goto error_out; strcpy(card->driver, "SiS7019"); strcpy(card->shortname, "SiS7019"); rc = sis_chip_create(card, pci); if (rc) goto card_error_out; sis = card->private_data; rc = sis_mixer_create(sis); if (rc) goto card_error_out; rc = sis_pcm_create(sis); if (rc) goto card_error_out; snprintf(card->longname, sizeof(card->longname), "%s Audio Accelerator with %s at 0x%lx, irq %d", card->shortname, snd_ac97_get_short_name(sis->ac97[0]), sis->ioport, sis->irq); rc = snd_card_register(card); if (rc) goto card_error_out; pci_set_drvdata(pci, card); return 0; card_error_out: snd_card_free(card); error_out: return rc; } static void __devexit snd_sis7019_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct 
pci_driver sis7019_driver = { .name = "SiS7019", .id_table = snd_sis7019_ids, .probe = snd_sis7019_probe, .remove = __devexit_p(snd_sis7019_remove), #ifdef CONFIG_PM .suspend = sis_suspend, .resume = sis_resume, #endif }; static int __init sis7019_init(void) { return pci_register_driver(&sis7019_driver); } static void __exit sis7019_exit(void) { pci_unregister_driver(&sis7019_driver); } module_init(sis7019_init); module_exit(sis7019_exit);
gpl-2.0
simone201/neak-kernel-sgs2
drivers/net/wireless/libertas/ethtool.c
803
3433
#include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/delay.h> #include "host.h" #include "decl.h" #include "defs.h" #include "dev.h" #include "wext.h" #include "cmd.h" #include "mesh.h" static void lbs_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct lbs_private *priv = dev->ml_priv; snprintf(info->fw_version, 32, "%u.%u.%u.p%u", priv->fwrelease >> 24 & 0xff, priv->fwrelease >> 16 & 0xff, priv->fwrelease >> 8 & 0xff, priv->fwrelease & 0xff); strcpy(info->driver, "libertas"); strcpy(info->version, lbs_driver_version); } /* All 8388 parts have 16KiB EEPROM size at the time of writing. * In case that changes this needs fixing. */ #define LBS_EEPROM_LEN 16384 static int lbs_ethtool_get_eeprom_len(struct net_device *dev) { return LBS_EEPROM_LEN; } static int lbs_ethtool_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 * bytes) { struct lbs_private *priv = dev->ml_priv; struct cmd_ds_802_11_eeprom_access cmd; int ret; lbs_deb_enter(LBS_DEB_ETHTOOL); if (eeprom->offset + eeprom->len > LBS_EEPROM_LEN || eeprom->len > LBS_EEPROM_READ_LEN) { ret = -EINVAL; goto out; } cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_eeprom_access) - LBS_EEPROM_READ_LEN + eeprom->len); cmd.action = cpu_to_le16(CMD_ACT_GET); cmd.offset = cpu_to_le16(eeprom->offset); cmd.len = cpu_to_le16(eeprom->len); ret = lbs_cmd_with_response(priv, CMD_802_11_EEPROM_ACCESS, &cmd); if (!ret) memcpy(bytes, cmd.value, eeprom->len); out: lbs_deb_leave_args(LBS_DEB_ETHTOOL, "ret %d", ret); return ret; } static void lbs_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct lbs_private *priv = dev->ml_priv; if (priv->wol_criteria == 0xffffffff) { /* Interface driver didn't configure wake */ wol->supported = wol->wolopts = 0; return; } wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY; if (priv->wol_criteria & EHS_WAKE_ON_UNICAST_DATA) wol->wolopts |= WAKE_UCAST; if (priv->wol_criteria & 
EHS_WAKE_ON_MULTICAST_DATA) wol->wolopts |= WAKE_MCAST; if (priv->wol_criteria & EHS_WAKE_ON_BROADCAST_DATA) wol->wolopts |= WAKE_BCAST; if (priv->wol_criteria & EHS_WAKE_ON_MAC_EVENT) wol->wolopts |= WAKE_PHY; } static int lbs_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct lbs_private *priv = dev->ml_priv; uint32_t criteria = 0; if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY)) return -EOPNOTSUPP; if (wol->wolopts & WAKE_UCAST) criteria |= EHS_WAKE_ON_UNICAST_DATA; if (wol->wolopts & WAKE_MCAST) criteria |= EHS_WAKE_ON_MULTICAST_DATA; if (wol->wolopts & WAKE_BCAST) criteria |= EHS_WAKE_ON_BROADCAST_DATA; if (wol->wolopts & WAKE_PHY) criteria |= EHS_WAKE_ON_MAC_EVENT; if (wol->wolopts == 0) criteria |= EHS_REMOVE_WAKEUP; return lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL); } const struct ethtool_ops lbs_ethtool_ops = { .get_drvinfo = lbs_ethtool_get_drvinfo, .get_eeprom = lbs_ethtool_get_eeprom, .get_eeprom_len = lbs_ethtool_get_eeprom_len, #ifdef CONFIG_LIBERTAS_MESH .get_sset_count = lbs_mesh_ethtool_get_sset_count, .get_ethtool_stats = lbs_mesh_ethtool_get_stats, .get_strings = lbs_mesh_ethtool_get_strings, #endif .get_wol = lbs_ethtool_get_wol, .set_wol = lbs_ethtool_set_wol, };
gpl-2.0
xjljian/android_kernel_huawei_msm8916
drivers/net/ethernet/sun/niu.c
2083
234892
/* niu.c: Neptune ethernet driver. * * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/mii.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/ipv6.h> #include <linux/log2.h> #include <linux/jiffies.h> #include <linux/crc32.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/of_device.h> #include "niu.h" #define DRV_MODULE_NAME "niu" #define DRV_MODULE_VERSION "1.1" #define DRV_MODULE_RELDATE "Apr 22, 2010" static char version[] = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("NIU ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); #ifndef readq static u64 readq(void __iomem *reg) { return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); } static void writeq(u64 val, void __iomem *reg) { writel(val & 0xffffffff, reg); writel(val >> 32, reg + 0x4UL); } #endif static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, {} }; MODULE_DEVICE_TABLE(pci, niu_pci_tbl); #define NIU_TX_TIMEOUT (5 * HZ) #define nr64(reg) readq(np->regs + (reg)) #define nw64(reg, val) writeq((val), np->regs + (reg)) #define nr64_mac(reg) readq(np->mac_regs + (reg)) #define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg)) #define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg)) #define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg)) #define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg)) #define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg)) #define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg)) #define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg)) #define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) static int niu_debug; static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "NIU debug level"); #define niu_lock_parent(np, flags) \ spin_lock_irqsave(&np->parent->lock, flags) #define niu_unlock_parent(np, flags) \ spin_unlock_irqrestore(&np->parent->lock, flags) static int serdes_init_10g_serdes(struct niu *np); static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, u64 bits, int limit, int delay) { while (--limit >= 0) { u64 val = nr64_mac(reg); if (!(val & bits)) break; udelay(delay); } if (limit < 0) return -ENODEV; return 0; } static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, u64 bits, int limit, int delay, const char *reg_name) { int err; nw64_mac(reg, bits); err = __niu_wait_bits_clear_mac(np, 
reg, bits, limit, delay); if (err) netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", (unsigned long long)bits, reg_name, (unsigned long long)nr64_mac(reg)); return err; } #define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ }) static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg, u64 bits, int limit, int delay) { while (--limit >= 0) { u64 val = nr64_ipp(reg); if (!(val & bits)) break; udelay(delay); } if (limit < 0) return -ENODEV; return 0; } static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, u64 bits, int limit, int delay, const char *reg_name) { int err; u64 val; val = nr64_ipp(reg); val |= bits; nw64_ipp(reg, val); err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); if (err) netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", (unsigned long long)bits, reg_name, (unsigned long long)nr64_ipp(reg)); return err; } #define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ }) static int __niu_wait_bits_clear(struct niu *np, unsigned long reg, u64 bits, int limit, int delay) { while (--limit >= 0) { u64 val = nr64(reg); if (!(val & bits)) break; udelay(delay); } if (limit < 0) return -ENODEV; return 0; } #define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \ }) static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, u64 bits, int limit, int delay, const char *reg_name) { int err; nw64(reg, bits); err = __niu_wait_bits_clear(np, reg, bits, limit, delay); if (err) netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", (unsigned long long)bits, reg_name, 
(unsigned long long)nr64(reg)); return err; } #define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ }) static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on) { u64 val = (u64) lp->timer; if (on) val |= LDG_IMGMT_ARM; nw64(LDG_IMGMT(lp->ldg_num), val); } static int niu_ldn_irq_enable(struct niu *np, int ldn, int on) { unsigned long mask_reg, bits; u64 val; if (ldn < 0 || ldn > LDN_MAX) return -EINVAL; if (ldn < 64) { mask_reg = LD_IM0(ldn); bits = LD_IM0_MASK; } else { mask_reg = LD_IM1(ldn - 64); bits = LD_IM1_MASK; } val = nr64(mask_reg); if (on) val &= ~bits; else val |= bits; nw64(mask_reg, val); return 0; } static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on) { struct niu_parent *parent = np->parent; int i; for (i = 0; i <= LDN_MAX; i++) { int err; if (parent->ldg_map[i] != lp->ldg_num) continue; err = niu_ldn_irq_enable(np, i, on); if (err) return err; } return 0; } static int niu_enable_interrupts(struct niu *np, int on) { int i; for (i = 0; i < np->num_ldg; i++) { struct niu_ldg *lp = &np->ldg[i]; int err; err = niu_enable_ldn_in_ldg(np, lp, on); if (err) return err; } for (i = 0; i < np->num_ldg; i++) niu_ldg_rearm(np, &np->ldg[i], on); return 0; } static u32 phy_encode(u32 type, int port) { return type << (port * 2); } static u32 phy_decode(u32 val, int port) { return (val >> (port * 2)) & PORT_TYPE_MASK; } static int mdio_wait(struct niu *np) { int limit = 1000; u64 val; while (--limit > 0) { val = nr64(MIF_FRAME_OUTPUT); if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) return val & MIF_FRAME_OUTPUT_DATA; udelay(10); } return -ENODEV; } static int mdio_read(struct niu *np, int port, int dev, int reg) { int err; nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); err = mdio_wait(np); if (err < 0) return err; nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); return mdio_wait(np); } static int 
mdio_write(struct niu *np, int port, int dev, int reg, int data) { int err; nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); err = mdio_wait(np); if (err < 0) return err; nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); err = mdio_wait(np); if (err < 0) return err; return 0; } static int mii_read(struct niu *np, int port, int reg) { nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); return mdio_wait(np); } static int mii_write(struct niu *np, int port, int reg, int data) { int err; nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); err = mdio_wait(np); if (err < 0) return err; return 0; } static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TX_CFG_L(channel), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TX_CFG_H(channel), val >> 16); return err; } static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_RX_CFG_L(channel), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_RX_CFG_H(channel), val >> 16); return err; } /* Mode is always 10G fiber. */ static int serdes_init_niu_10g_fiber(struct niu *np) { struct niu_link_config *lp = &np->link_config; u32 tx_cfg, rx_cfg; unsigned long i; tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | PLL_RX_CFG_EQ_LP_ADAPTIVE); if (lp->loopback_mode == LOOPBACK_PHY) { u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TEST_CFG_L, test_cfg); tx_cfg |= PLL_TX_CFG_ENTEST; rx_cfg |= PLL_RX_CFG_ENTEST; } /* Initialize all 4 lanes of the SERDES. 
*/ for (i = 0; i < 4; i++) { int err = esr2_set_tx_cfg(np, i, tx_cfg); if (err) return err; } for (i = 0; i < 4; i++) { int err = esr2_set_rx_cfg(np, i, rx_cfg); if (err) return err; } return 0; } static int serdes_init_niu_1g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; u16 pll_cfg, pll_sts; int max_retry = 100; u64 uninitialized_var(sig), mask, val; u32 tx_cfg, rx_cfg; unsigned long i; int err; tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | PLL_TX_CFG_RATE_HALF); rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | PLL_RX_CFG_RATE_HALF); if (np->port == 0) rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE; if (lp->loopback_mode == LOOPBACK_PHY) { u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TEST_CFG_L, test_cfg); tx_cfg |= PLL_TX_CFG_ENTEST; rx_cfg |= PLL_RX_CFG_ENTEST; } /* Initialize PLL for 1G */ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X); err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_CFG_L, pll_cfg); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", np->port, __func__); return err; } pll_sts = PLL_CFG_ENPLL; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_STS_L, pll_sts); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", np->port, __func__); return err; } udelay(200); /* Initialize all 4 lanes of the SERDES. 
*/ for (i = 0; i < 4; i++) { err = esr2_set_tx_cfg(np, i, tx_cfg); if (err) return err; } for (i = 0; i < 4; i++) { err = esr2_set_rx_cfg(np, i, rx_cfg); if (err) return err; } switch (np->port) { case 0: val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); mask = val; break; case 1: val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); mask = val; break; default: return -EINVAL; } while (max_retry--) { sig = nr64(ESR_INT_SIGNALS); if ((sig & mask) == val) break; mdelay(500); } if ((sig & mask) != val) { netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", np->port, (int)(sig & mask), (int)val); return -ENODEV; } return 0; } static int serdes_init_niu_10g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; int max_retry = 100; u64 uninitialized_var(sig), mask, val; unsigned long i; int err; tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | PLL_RX_CFG_EQ_LP_ADAPTIVE); if (lp->loopback_mode == LOOPBACK_PHY) { u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TEST_CFG_L, test_cfg); tx_cfg |= PLL_TX_CFG_ENTEST; rx_cfg |= PLL_RX_CFG_ENTEST; } /* Initialize PLL for 10G */ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X); err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", np->port, __func__); return err; } pll_sts = PLL_CFG_ENPLL; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_STS_L, pll_sts & 0xffff); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", np->port, __func__); return err; } udelay(200); /* Initialize all 4 lanes of the SERDES. 
*/ for (i = 0; i < 4; i++) { err = esr2_set_tx_cfg(np, i, tx_cfg); if (err) return err; } for (i = 0; i < 4; i++) { err = esr2_set_rx_cfg(np, i, rx_cfg); if (err) return err; } /* check if serdes is ready */ switch (np->port) { case 0: mask = ESR_INT_SIGNALS_P0_BITS; val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0 | ESR_INT_XSRDY_P0 | ESR_INT_XDP_P0_CH3 | ESR_INT_XDP_P0_CH2 | ESR_INT_XDP_P0_CH1 | ESR_INT_XDP_P0_CH0); break; case 1: mask = ESR_INT_SIGNALS_P1_BITS; val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1 | ESR_INT_XSRDY_P1 | ESR_INT_XDP_P1_CH3 | ESR_INT_XDP_P1_CH2 | ESR_INT_XDP_P1_CH1 | ESR_INT_XDP_P1_CH0); break; default: return -EINVAL; } while (max_retry--) { sig = nr64(ESR_INT_SIGNALS); if ((sig & mask) == val) break; mdelay(500); } if ((sig & mask) != val) { pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n", np->port, (int)(sig & mask), (int)val); /* 10G failed, try initializing at 1G */ err = serdes_init_niu_1g_serdes(np); if (!err) { np->flags &= ~NIU_FLAGS_10G; np->mac_xcvr = MAC_XCVR_PCS; } else { netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", np->port); return -ENODEV; } } return 0; } static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) { int err; err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); if (err >= 0) { *val = (err & 0xffff); err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_H(chan)); if (err >= 0) *val |= ((err & 0xffff) << 16); err = 0; } return err; } static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) { int err; err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_L(chan)); if (err >= 0) { *val = (err & 0xffff); err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_H(chan)); if (err >= 0) { *val |= ((err & 0xffff) << 16); err = 0; } } return err; } static int esr_read_reset(struct niu *np, u32 *val) { int err; err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_L); if (err >= 0) { *val = (err & 
0xffff); err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_H); if (err >= 0) { *val |= ((err & 0xffff) << 16); err = 0; } } return err; } static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_H(chan), (val >> 16)); return err; } static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_L(chan), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_H(chan), (val >> 16)); return err; } static int esr_reset(struct niu *np) { u32 uninitialized_var(reset); int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_L, 0x0000); if (err) return err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_H, 0xffff); if (err) return err; udelay(200); err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_L, 0xffff); if (err) return err; udelay(200); err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_H, 0x0000); if (err) return err; udelay(200); err = esr_read_reset(np, &reset); if (err) return err; if (reset != 0) { netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", np->port, reset); return -ENODEV; } return 0; } static int serdes_init_10g(struct niu *np) { struct niu_link_config *lp = &np->link_config; unsigned long ctrl_reg, test_cfg_reg, i; u64 ctrl_val, test_cfg_val, sig, mask, val; int err; switch (np->port) { case 0: ctrl_reg = ENET_SERDES_0_CTRL_CFG; test_cfg_reg = ENET_SERDES_0_TEST_CFG; break; case 1: ctrl_reg = ENET_SERDES_1_CTRL_CFG; test_cfg_reg = ENET_SERDES_1_TEST_CFG; break; default: return -EINVAL; } ctrl_val = (ENET_SERDES_CTRL_SDET_0 | ENET_SERDES_CTRL_SDET_1 | ENET_SERDES_CTRL_SDET_2 | ENET_SERDES_CTRL_SDET_3 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 
(0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); test_cfg_val = 0; if (lp->loopback_mode == LOOPBACK_PHY) { test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_0_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_1_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_2_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_3_SHIFT)); } nw64(ctrl_reg, ctrl_val); nw64(test_cfg_reg, test_cfg_val); /* Initialize all 4 lanes of the SERDES. */ for (i = 0; i < 4; i++) { u32 rxtx_ctrl, glue0; err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); if (err) return err; err = esr_read_glue0(np, i, &glue0); if (err) return err; rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); glue0 &= ~(ESR_GLUE_CTRL0_SRATE | ESR_GLUE_CTRL0_THCNT | ESR_GLUE_CTRL0_BLTIME); glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | (BLTIME_300_CYCLES << ESR_GLUE_CTRL0_BLTIME_SHIFT)); err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); if (err) return err; err = esr_write_glue0(np, i, glue0); if (err) return err; } err = esr_reset(np); if (err) return err; sig = nr64(ESR_INT_SIGNALS); switch (np->port) { case 0: mask = ESR_INT_SIGNALS_P0_BITS; val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0 | ESR_INT_XSRDY_P0 | ESR_INT_XDP_P0_CH3 | ESR_INT_XDP_P0_CH2 | ESR_INT_XDP_P0_CH1 | ESR_INT_XDP_P0_CH0); break; case 1: mask = ESR_INT_SIGNALS_P1_BITS; val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1 | ESR_INT_XSRDY_P1 | ESR_INT_XDP_P1_CH3 | ESR_INT_XDP_P1_CH2 | ESR_INT_XDP_P1_CH1 | ESR_INT_XDP_P1_CH0); break; default: return -EINVAL; } if ((sig & mask) != val) { if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; return 
0; } netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", np->port, (int)(sig & mask), (int)val); return -ENODEV; } if (np->flags & NIU_FLAGS_HOTPLUG_PHY) np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; return 0; } static int serdes_init_1g(struct niu *np) { u64 val; val = nr64(ENET_SERDES_1_PLL_CFG); val &= ~ENET_SERDES_PLL_FBDIV2; switch (np->port) { case 0: val |= ENET_SERDES_PLL_HRATE0; break; case 1: val |= ENET_SERDES_PLL_HRATE1; break; case 2: val |= ENET_SERDES_PLL_HRATE2; break; case 3: val |= ENET_SERDES_PLL_HRATE3; break; default: return -EINVAL; } nw64(ENET_SERDES_1_PLL_CFG, val); return 0; } static int serdes_init_1g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; u64 ctrl_val, test_cfg_val, sig, mask, val; int err; u64 reset_val, val_rd; val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 | ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 | ENET_SERDES_PLL_FBDIV0; switch (np->port) { case 0: reset_val = ENET_SERDES_RESET_0; ctrl_reg = ENET_SERDES_0_CTRL_CFG; test_cfg_reg = ENET_SERDES_0_TEST_CFG; pll_cfg = ENET_SERDES_0_PLL_CFG; break; case 1: reset_val = ENET_SERDES_RESET_1; ctrl_reg = ENET_SERDES_1_CTRL_CFG; test_cfg_reg = ENET_SERDES_1_TEST_CFG; pll_cfg = ENET_SERDES_1_PLL_CFG; break; default: return -EINVAL; } ctrl_val = (ENET_SERDES_CTRL_SDET_0 | ENET_SERDES_CTRL_SDET_1 | ENET_SERDES_CTRL_SDET_2 | ENET_SERDES_CTRL_SDET_3 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); test_cfg_val = 0; if (lp->loopback_mode == LOOPBACK_PHY) { test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_0_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_1_SHIFT) | 
(ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_2_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_3_SHIFT)); } nw64(ENET_SERDES_RESET, reset_val); mdelay(20); val_rd = nr64(ENET_SERDES_RESET); val_rd &= ~reset_val; nw64(pll_cfg, val); nw64(ctrl_reg, ctrl_val); nw64(test_cfg_reg, test_cfg_val); nw64(ENET_SERDES_RESET, val_rd); mdelay(2000); /* Initialize all 4 lanes of the SERDES. */ for (i = 0; i < 4; i++) { u32 rxtx_ctrl, glue0; err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); if (err) return err; err = esr_read_glue0(np, i, &glue0); if (err) return err; rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); glue0 &= ~(ESR_GLUE_CTRL0_SRATE | ESR_GLUE_CTRL0_THCNT | ESR_GLUE_CTRL0_BLTIME); glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | (BLTIME_300_CYCLES << ESR_GLUE_CTRL0_BLTIME_SHIFT)); err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); if (err) return err; err = esr_write_glue0(np, i, glue0); if (err) return err; } sig = nr64(ESR_INT_SIGNALS); switch (np->port) { case 0: val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); mask = val; break; case 1: val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); mask = val; break; default: return -EINVAL; } if ((sig & mask) != val) { netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", np->port, (int)(sig & mask), (int)val); return -ENODEV; } return 0; } static int link_status_1g_serdes(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; int link_up; u64 val; u16 current_speed; unsigned long flags; u8 current_duplex; link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; spin_lock_irqsave(&np->lock, flags); val = nr64_pcs(PCS_MII_STAT); if (val & PCS_MII_STAT_LINK_STATUS) { link_up = 1; current_speed = SPEED_1000; current_duplex = DUPLEX_FULL; } lp->active_speed = current_speed; lp->active_duplex = current_duplex; spin_unlock_irqrestore(&np->lock, 
flags);
	*link_up_p = link_up;
	return 0;
}

/* Poll link state for a 10G SERDES-attached port.  Falls back to the
 * 1G SERDES variant when NIU_FLAGS_10G is clear.  Takes np->lock so
 * the cached active speed/duplex are updated consistently.  Always
 * returns 0; the link state comes back through *link_up_p.
 */
static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	/* NOTE(review): 0x01000000 appears to be a link-fault bit in
	 * XMAC_INTER2 and 0x1000 an alignment/ready bit in XPCS_STATUS --
	 * confirm against the Neptune register documentation before
	 * naming these.
	 */
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

/* Read the standard MII registers and translate them into the
 * niu_link_config fields: supported/advertising masks plus the
 * negotiated (or forced) speed and duplex.  Returns 0 on success or
 * the negative error from mii_read(); link state is reported through
 * *link_up_p.  Caller holds np->lock (see link_status_1g()).
 */
static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	if (likely(bmsr & BMSR_ESTATEN)) {
		/* The gigabit registers only exist when the PHY
		 * advertises extended status.
		 */
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	/* Convert BMSR capability bits into ethtool SUPPORTED_* flags. */
	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
supported |= SUPPORTED_100baseT_Half; if (bmsr & BMSR_100FULL) supported |= SUPPORTED_100baseT_Full; if (estatus & ESTATUS_1000_THALF) supported |= SUPPORTED_1000baseT_Half; if (estatus & ESTATUS_1000_TFULL) supported |= SUPPORTED_1000baseT_Full; lp->supported = supported; advertising = mii_adv_to_ethtool_adv_t(advert); advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmcr & BMCR_ANENABLE) { int neg, neg1000; lp->active_autoneg = 1; advertising |= ADVERTISED_Autoneg; neg = advert & lpa; neg1000 = (ctrl1000 << 2) & stat1000; if (neg1000 & (LPA_1000FULL | LPA_1000HALF)) active_speed = SPEED_1000; else if (neg & LPA_100) active_speed = SPEED_100; else if (neg & (LPA_10HALF | LPA_10FULL)) active_speed = SPEED_10; else active_speed = SPEED_INVALID; if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX)) active_duplex = DUPLEX_FULL; else if (active_speed != SPEED_INVALID) active_duplex = DUPLEX_HALF; else active_duplex = DUPLEX_INVALID; } else { lp->active_autoneg = 0; if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100)) active_speed = SPEED_1000; else if (bmcr & BMCR_SPEED100) active_speed = SPEED_100; else active_speed = SPEED_10; if (bmcr & BMCR_FULLDPLX) active_duplex = DUPLEX_FULL; else active_duplex = DUPLEX_HALF; } lp->active_advertising = advertising; lp->active_speed = active_speed; lp->active_duplex = active_duplex; *link_up_p = !!(bmsr & BMSR_LSTATUS); return 0; } static int link_status_1g_rgmii(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; u16 current_speed, bmsr; unsigned long flags; u8 current_duplex; int err, link_up; link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; spin_lock_irqsave(&np->lock, flags); err = -EINVAL; err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) goto out; bmsr = err; if (bmsr & BMSR_LSTATUS) { u16 adv, lpa; err = mii_read(np, np->phy_addr, MII_ADVERTISE); if (err < 0) goto out; adv = err; err = mii_read(np, np->phy_addr, MII_LPA); if (err < 0) goto out; lpa 
= err;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		/* NOTE(review): the MII_ESTATUS value read above is
		 * discarded; presumably only the read itself matters
		 * (register settling) -- confirm before removing.
		 */

		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return err;
}

/* 1G copper link poll: delegate to link_status_mii() under np->lock
 * and additionally tag the port as twisted-pair for ethtool
 * reporting.
 */
static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);
	err = link_status_mii(np, link_up_p);
	lp->supported |= SUPPORTED_TP;
	lp->active_advertising |= ADVERTISED_TP;
	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

/* Soft-reset the BCM8704 PHY through the PHYXS device BMCR and poll
 * (up to 1000 mdio reads, no extra delay between them) for the
 * self-clearing BMCR_RESET bit.  A BMCR read of 0xffff means nothing
 * is answering on the MDIO bus, so bail out early with that value.
 * Returns 0 on success, a negative error otherwise.
 */
static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0 || err == 0xffff)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
			   np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
*/
/* Read @reg in the BCM8704 USER device-3 space twice, discarding the
 * values; only the double read-back itself matters (see the comment
 * above).  Returns 0 or a negative mdio error.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

/* One-time USER device-3 setup for the BCM8706: program the GPIO mode
 * field of the optical digital control register (also setting the
 * reserved RESV2 bit) and give the PHY a long settle delay.
 */
static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	/* NOTE(review): a full second of busy delay in an init path --
	 * presumably required by the PHY; confirm against the BCM8706
	 * datasheet before shortening.
	 */
	mdelay(1000);

	return 0;
}

/* One-time USER device-3 setup for the BCM8704: program alarm/LED
 * output polarity levels, the PMD TX DAC clocking, then the optical
 * digital control GPIO bits.  The control registers written here get
 * the double read-back via bcm8704_user_dev3_readback().
 */
static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

/* Read-modify-write the Marvell 88X2011 LED 8-11 control register,
 * setting the activity LED control field to @val.
 */
static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

/* Read-modify-write the 88X2011 LED blink control register, setting
 * the blink-rate field to @rate.  Returns the last mdio result
 * (negative on error).
 */
static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		/* NOTE(review): 4 is presumably the blink-rate field
		 * shift within this register -- a named _SHIFT constant
		 * would be clearer; confirm against the datasheet.
		 */
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}
	return err;
}

/* Bring up a Marvell 88X2011 10G transceiver: configure the LEDs,
 * enable the XFP reference clock, set or clear MAC loopback per the
 * current link_config, then enable the PMD transmitter.  Returns 0 on
 * success or a negative mdio error.
 */
static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}

/* Diagnostic dump for the BCM870x: print a few PHY registers (the
 * "#if 1" block reads like leftover debug output), then double-read
 * the analog status and TX alarm registers and warn about a
 * disconnected cable or a bad/missing optical module.
 */
static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
return err; pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err); err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, MII_NWAYTEST); if (err < 0) return err; pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err); #endif /* XXX dig this out it might not be so useful XXX */ err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_ANALOG_STATUS0); if (err < 0) return err; err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_ANALOG_STATUS0); if (err < 0) return err; analog_stat0 = err; err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_TX_ALARM_STATUS); if (err < 0) return err; err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, BCM8704_USER_TX_ALARM_STATUS); if (err < 0) return err; tx_alarm_status = err; if (analog_stat0 != 0x03fc) { if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { pr_info("Port %u cable not connected or bad cable\n", np->port); } else if (analog_stat0 == 0x639c) { pr_info("Port %u optical module is bad or missing\n", np->port); } } return 0; } static int xcvr_10g_set_lb_bcm870x(struct niu *np) { struct niu_link_config *lp = &np->link_config; int err; err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, MII_BMCR); if (err < 0) return err; err &= ~BMCR_LOOPBACK; if (lp->loopback_mode == LOOPBACK_MAC) err |= BMCR_LOOPBACK; err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, MII_BMCR, err); if (err) return err; return 0; } static int xcvr_init_10g_bcm8706(struct niu *np) { int err = 0; u64 val; if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) && (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0) return err; val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_LED_POLARITY; val |= XMAC_CONFIG_FORCE_LED_ON; nw64_mac(XMAC_CONFIG, val); val = nr64(MIF_CONFIG); val |= MIF_CONFIG_INDIRECT_MODE; nw64(MIF_CONFIG, val); err = bcm8704_reset(np); if (err) return err; err = xcvr_10g_set_lb_bcm870x(np); if (err) return err; err = bcm8706_init_user_dev3(np); if (err) return err; err = 
xcvr_diag_bcm870x(np); if (err) return err; return 0; } static int xcvr_init_10g_bcm8704(struct niu *np) { int err; err = bcm8704_reset(np); if (err) return err; err = bcm8704_init_user_dev3(np); if (err) return err; err = xcvr_10g_set_lb_bcm870x(np); if (err) return err; err = xcvr_diag_bcm870x(np); if (err) return err; return 0; } static int xcvr_init_10g(struct niu *np) { int phy_id, err; u64 val; val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_LED_POLARITY; val |= XMAC_CONFIG_FORCE_LED_ON; nw64_mac(XMAC_CONFIG, val); /* XXX shared resource, lock parent XXX */ val = nr64(MIF_CONFIG); val |= MIF_CONFIG_INDIRECT_MODE; nw64(MIF_CONFIG, val); phy_id = phy_decode(np->parent->port_phy, np->port); phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; /* handle different phy types */ switch (phy_id & NIU_PHY_ID_MASK) { case NIU_PHY_ID_MRVL88X2011: err = xcvr_init_10g_mrvl88x2011(np); break; default: /* bcom 8704 */ err = xcvr_init_10g_bcm8704(np); break; } return err; } static int mii_reset(struct niu *np) { int limit, err; err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); if (err) return err; limit = 1000; while (--limit >= 0) { udelay(500); err = mii_read(np, np->phy_addr, MII_BMCR); if (err < 0) return err; if (!(err & BMCR_RESET)) break; } if (limit < 0) { netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n", np->port, err); return -ENODEV; } return 0; } static int xcvr_init_1g_rgmii(struct niu *np) { int err; u64 val; u16 bmcr, bmsr, estat; val = nr64(MIF_CONFIG); val &= ~MIF_CONFIG_INDIRECT_MODE; nw64(MIF_CONFIG, val); err = mii_reset(np); if (err) return err; err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) return err; bmsr = err; estat = 0; if (bmsr & BMSR_ESTATEN) { err = mii_read(np, np->phy_addr, MII_ESTATUS); if (err < 0) return err; estat = err; } bmcr = 0; err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); if (err) return err; if (bmsr & BMSR_ESTATEN) { u16 ctrl1000 = 0; if (estat & ESTATUS_1000_TFULL) ctrl1000 |= 
ADVERTISE_1000FULL; err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); if (err) return err; } bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX); err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); if (err) return err; err = mii_read(np, np->phy_addr, MII_BMCR); if (err < 0) return err; bmcr = mii_read(np, np->phy_addr, MII_BMCR); err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) return err; return 0; } static int mii_init_common(struct niu *np) { struct niu_link_config *lp = &np->link_config; u16 bmcr, bmsr, adv, estat; int err; err = mii_reset(np); if (err) return err; err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) return err; bmsr = err; estat = 0; if (bmsr & BMSR_ESTATEN) { err = mii_read(np, np->phy_addr, MII_ESTATUS); if (err < 0) return err; estat = err; } bmcr = 0; err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); if (err) return err; if (lp->loopback_mode == LOOPBACK_MAC) { bmcr |= BMCR_LOOPBACK; if (lp->active_speed == SPEED_1000) bmcr |= BMCR_SPEED1000; if (lp->active_duplex == DUPLEX_FULL) bmcr |= BMCR_FULLDPLX; } if (lp->loopback_mode == LOOPBACK_PHY) { u16 aux; aux = (BCM5464R_AUX_CTL_EXT_LB | BCM5464R_AUX_CTL_WRITE_1); err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); if (err) return err; } if (lp->autoneg) { u16 ctrl1000; adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; if ((bmsr & BMSR_10HALF) && (lp->advertising & ADVERTISED_10baseT_Half)) adv |= ADVERTISE_10HALF; if ((bmsr & BMSR_10FULL) && (lp->advertising & ADVERTISED_10baseT_Full)) adv |= ADVERTISE_10FULL; if ((bmsr & BMSR_100HALF) && (lp->advertising & ADVERTISED_100baseT_Half)) adv |= ADVERTISE_100HALF; if ((bmsr & BMSR_100FULL) && (lp->advertising & ADVERTISED_100baseT_Full)) adv |= ADVERTISE_100FULL; err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); if (err) return err; if (likely(bmsr & BMSR_ESTATEN)) { ctrl1000 = 0; if ((estat & ESTATUS_1000_THALF) && (lp->advertising & ADVERTISED_1000baseT_Half)) ctrl1000 |= ADVERTISE_1000HALF; if ((estat & ESTATUS_1000_TFULL) && 
(lp->advertising & ADVERTISED_1000baseT_Full)) ctrl1000 |= ADVERTISE_1000FULL; err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); if (err) return err; } bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); } else { /* !lp->autoneg */ int fulldpx; if (lp->duplex == DUPLEX_FULL) { bmcr |= BMCR_FULLDPLX; fulldpx = 1; } else if (lp->duplex == DUPLEX_HALF) fulldpx = 0; else return -EINVAL; if (lp->speed == SPEED_1000) { /* if X-full requested while not supported, or X-half requested while not supported... */ if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) || (!fulldpx && !(estat & ESTATUS_1000_THALF))) return -EINVAL; bmcr |= BMCR_SPEED1000; } else if (lp->speed == SPEED_100) { if ((fulldpx && !(bmsr & BMSR_100FULL)) || (!fulldpx && !(bmsr & BMSR_100HALF))) return -EINVAL; bmcr |= BMCR_SPEED100; } else if (lp->speed == SPEED_10) { if ((fulldpx && !(bmsr & BMSR_10FULL)) || (!fulldpx && !(bmsr & BMSR_10HALF))) return -EINVAL; } else return -EINVAL; } err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); if (err) return err; #if 0 err = mii_read(np, np->phy_addr, MII_BMCR); if (err < 0) return err; bmcr = err; err = mii_read(np, np->phy_addr, MII_BMSR); if (err < 0) return err; bmsr = err; pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n", np->port, bmcr, bmsr); #endif return 0; } static int xcvr_init_1g(struct niu *np) { u64 val; /* XXX shared resource, lock parent XXX */ val = nr64(MIF_CONFIG); val &= ~MIF_CONFIG_INDIRECT_MODE; nw64(MIF_CONFIG, val); return mii_init_common(np); } static int niu_xcvr_init(struct niu *np) { const struct niu_phy_ops *ops = np->phy_ops; int err; err = 0; if (ops->xcvr_init) err = ops->xcvr_init(np); return err; } static int niu_serdes_init(struct niu *np) { const struct niu_phy_ops *ops = np->phy_ops; int err; err = 0; if (ops->serdes_init) err = ops->serdes_init(np); return err; } static void niu_init_xif(struct niu *); static void niu_handle_led(struct niu *, int status); static int niu_link_status_common(struct niu *np, int link_up) { 
struct niu_link_config *lp = &np->link_config; struct net_device *dev = np->dev; unsigned long flags; if (!netif_carrier_ok(dev) && link_up) { netif_info(np, link, dev, "Link is up at %s, %s duplex\n", lp->active_speed == SPEED_10000 ? "10Gb/sec" : lp->active_speed == SPEED_1000 ? "1Gb/sec" : lp->active_speed == SPEED_100 ? "100Mbit/sec" : "10Mbit/sec", lp->active_duplex == DUPLEX_FULL ? "full" : "half"); spin_lock_irqsave(&np->lock, flags); niu_init_xif(np); niu_handle_led(np, 1); spin_unlock_irqrestore(&np->lock, flags); netif_carrier_on(dev); } else if (netif_carrier_ok(dev) && !link_up) { netif_warn(np, link, dev, "Link is down\n"); spin_lock_irqsave(&np->lock, flags); niu_handle_led(np, 0); spin_unlock_irqrestore(&np->lock, flags); netif_carrier_off(dev); } return 0; } static int link_status_10g_mrvl(struct niu *np, int *link_up_p) { int err, link_up, pma_status, pcs_status; link_up = 0; err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, MRVL88X2011_10G_PMD_STATUS_2); if (err < 0) goto out; /* Check PMA/PMD Register: 1.0001.2 == 1 */ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, MRVL88X2011_PMA_PMD_STATUS_1); if (err < 0) goto out; pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); /* Check PMC Register : 3.0001.2 == 1: read twice */ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, MRVL88X2011_PMA_PMD_STATUS_1); if (err < 0) goto out; err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, MRVL88X2011_PMA_PMD_STATUS_1); if (err < 0) goto out; pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); /* Check XGXS Register : 4.0018.[0-3,12] */ err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, MRVL88X2011_10G_XGXS_LANE_STAT); if (err < 0) goto out; if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 | PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC | 0x800)) link_up = (pma_status && pcs_status) ? 
1 : 0; np->link_config.active_speed = SPEED_10000; np->link_config.active_duplex = DUPLEX_FULL; err = 0; out: mrvl88x2011_act_led(np, (link_up ? MRVL88X2011_LED_CTL_PCS_ACT : MRVL88X2011_LED_CTL_OFF)); *link_up_p = link_up; return err; } static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) { int err, link_up; link_up = 0; err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, BCM8704_PMD_RCV_SIGDET); if (err < 0 || err == 0xffff) goto out; if (!(err & PMD_RCV_SIGDET_GLOBAL)) { err = 0; goto out; } err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, BCM8704_PCS_10G_R_STATUS); if (err < 0) goto out; if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { err = 0; goto out; } err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, BCM8704_PHYXS_XGXS_LANE_STAT); if (err < 0) goto out; if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_MAGIC | PHYXS_XGXS_LANE_STAT_PATTEST | PHYXS_XGXS_LANE_STAT_LANE3 | PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | PHYXS_XGXS_LANE_STAT_LANE0)) { err = 0; np->link_config.active_speed = SPEED_INVALID; np->link_config.active_duplex = DUPLEX_INVALID; goto out; } link_up = 1; np->link_config.active_speed = SPEED_10000; np->link_config.active_duplex = DUPLEX_FULL; err = 0; out: *link_up_p = link_up; return err; } static int link_status_10g_bcom(struct niu *np, int *link_up_p) { int err, link_up; link_up = 0; err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, BCM8704_PMD_RCV_SIGDET); if (err < 0) goto out; if (!(err & PMD_RCV_SIGDET_GLOBAL)) { err = 0; goto out; } err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, BCM8704_PCS_10G_R_STATUS); if (err < 0) goto out; if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { err = 0; goto out; } err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, BCM8704_PHYXS_XGXS_LANE_STAT); if (err < 0) goto out; if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_MAGIC | PHYXS_XGXS_LANE_STAT_LANE3 | PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | 
PHYXS_XGXS_LANE_STAT_LANE0)) { err = 0; goto out; } link_up = 1; np->link_config.active_speed = SPEED_10000; np->link_config.active_duplex = DUPLEX_FULL; err = 0; out: *link_up_p = link_up; return err; } static int link_status_10g(struct niu *np, int *link_up_p) { unsigned long flags; int err = -EINVAL; spin_lock_irqsave(&np->lock, flags); if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { int phy_id; phy_id = phy_decode(np->parent->port_phy, np->port); phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; /* handle different phy types */ switch (phy_id & NIU_PHY_ID_MASK) { case NIU_PHY_ID_MRVL88X2011: err = link_status_10g_mrvl(np, link_up_p); break; default: /* bcom 8704 */ err = link_status_10g_bcom(np, link_up_p); break; } } spin_unlock_irqrestore(&np->lock, flags); return err; } static int niu_10g_phy_present(struct niu *np) { u64 sig, mask, val; sig = nr64(ESR_INT_SIGNALS); switch (np->port) { case 0: mask = ESR_INT_SIGNALS_P0_BITS; val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0 | ESR_INT_XSRDY_P0 | ESR_INT_XDP_P0_CH3 | ESR_INT_XDP_P0_CH2 | ESR_INT_XDP_P0_CH1 | ESR_INT_XDP_P0_CH0); break; case 1: mask = ESR_INT_SIGNALS_P1_BITS; val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1 | ESR_INT_XSRDY_P1 | ESR_INT_XDP_P1_CH3 | ESR_INT_XDP_P1_CH2 | ESR_INT_XDP_P1_CH1 | ESR_INT_XDP_P1_CH0); break; default: return 0; } if ((sig & mask) != val) return 0; return 1; } static int link_status_10g_hotplug(struct niu *np, int *link_up_p) { unsigned long flags; int err = 0; int phy_present; int phy_present_prev; spin_lock_irqsave(&np->lock, flags); if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ? 
1 : 0; phy_present = niu_10g_phy_present(np); if (phy_present != phy_present_prev) { /* state change */ if (phy_present) { /* A NEM was just plugged in */ np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; if (np->phy_ops->xcvr_init) err = np->phy_ops->xcvr_init(np); if (err) { err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, MII_BMCR); if (err == 0xffff) { /* No mdio, back-to-back XAUI */ goto out; } /* debounce */ np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; } } else { np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; *link_up_p = 0; netif_warn(np, link, np->dev, "Hotplug PHY Removed\n"); } } out: if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) { err = link_status_10g_bcm8706(np, link_up_p); if (err == 0xffff) { /* No mdio, back-to-back XAUI: it is C10NEM */ *link_up_p = 1; np->link_config.active_speed = SPEED_10000; np->link_config.active_duplex = DUPLEX_FULL; } } } spin_unlock_irqrestore(&np->lock, flags); return 0; } static int niu_link_status(struct niu *np, int *link_up_p) { const struct niu_phy_ops *ops = np->phy_ops; int err; err = 0; if (ops->link_status) err = ops->link_status(np, link_up_p); return err; } static void niu_timer(unsigned long __opaque) { struct niu *np = (struct niu *) __opaque; unsigned long off; int err, link_up; err = niu_link_status(np, &link_up); if (!err) niu_link_status_common(np, link_up); if (netif_carrier_ok(np->dev)) off = 5 * HZ; else off = 1 * HZ; np->timer.expires = jiffies + off; add_timer(&np->timer); } static const struct niu_phy_ops phy_ops_10g_serdes = { .serdes_init = serdes_init_10g_serdes, .link_status = link_status_10g_serdes, }; static const struct niu_phy_ops phy_ops_10g_serdes_niu = { .serdes_init = serdes_init_niu_10g_serdes, .link_status = link_status_10g_serdes, }; static const struct niu_phy_ops phy_ops_1g_serdes_niu = { .serdes_init = serdes_init_niu_1g_serdes, .link_status = link_status_1g_serdes, }; static const struct niu_phy_ops phy_ops_1g_rgmii = { .xcvr_init = xcvr_init_1g_rgmii, .link_status = 
link_status_1g_rgmii, }; static const struct niu_phy_ops phy_ops_10g_fiber_niu = { .serdes_init = serdes_init_niu_10g_fiber, .xcvr_init = xcvr_init_10g, .link_status = link_status_10g, }; static const struct niu_phy_ops phy_ops_10g_fiber = { .serdes_init = serdes_init_10g, .xcvr_init = xcvr_init_10g, .link_status = link_status_10g, }; static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = { .serdes_init = serdes_init_10g, .xcvr_init = xcvr_init_10g_bcm8706, .link_status = link_status_10g_hotplug, }; static const struct niu_phy_ops phy_ops_niu_10g_hotplug = { .serdes_init = serdes_init_niu_10g_fiber, .xcvr_init = xcvr_init_10g_bcm8706, .link_status = link_status_10g_hotplug, }; static const struct niu_phy_ops phy_ops_10g_copper = { .serdes_init = serdes_init_10g, .link_status = link_status_10g, /* XXX */ }; static const struct niu_phy_ops phy_ops_1g_fiber = { .serdes_init = serdes_init_1g, .xcvr_init = xcvr_init_1g, .link_status = link_status_1g, }; static const struct niu_phy_ops phy_ops_1g_copper = { .xcvr_init = xcvr_init_1g, .link_status = link_status_1g, }; struct niu_phy_template { const struct niu_phy_ops *ops; u32 phy_addr_base; }; static const struct niu_phy_template phy_template_niu_10g_fiber = { .ops = &phy_ops_10g_fiber_niu, .phy_addr_base = 16, }; static const struct niu_phy_template phy_template_niu_10g_serdes = { .ops = &phy_ops_10g_serdes_niu, .phy_addr_base = 0, }; static const struct niu_phy_template phy_template_niu_1g_serdes = { .ops = &phy_ops_1g_serdes_niu, .phy_addr_base = 0, }; static const struct niu_phy_template phy_template_10g_fiber = { .ops = &phy_ops_10g_fiber, .phy_addr_base = 8, }; static const struct niu_phy_template phy_template_10g_fiber_hotplug = { .ops = &phy_ops_10g_fiber_hotplug, .phy_addr_base = 8, }; static const struct niu_phy_template phy_template_niu_10g_hotplug = { .ops = &phy_ops_niu_10g_hotplug, .phy_addr_base = 8, }; static const struct niu_phy_template phy_template_10g_copper = { .ops = &phy_ops_10g_copper, 
.phy_addr_base = 10, }; static const struct niu_phy_template phy_template_1g_fiber = { .ops = &phy_ops_1g_fiber, .phy_addr_base = 0, }; static const struct niu_phy_template phy_template_1g_copper = { .ops = &phy_ops_1g_copper, .phy_addr_base = 0, }; static const struct niu_phy_template phy_template_1g_rgmii = { .ops = &phy_ops_1g_rgmii, .phy_addr_base = 0, }; static const struct niu_phy_template phy_template_10g_serdes = { .ops = &phy_ops_10g_serdes, .phy_addr_base = 0, }; static int niu_atca_port_num[4] = { 0, 0, 11, 10 }; static int serdes_init_10g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; u64 ctrl_val, test_cfg_val, sig, mask, val; switch (np->port) { case 0: ctrl_reg = ENET_SERDES_0_CTRL_CFG; test_cfg_reg = ENET_SERDES_0_TEST_CFG; pll_cfg = ENET_SERDES_0_PLL_CFG; break; case 1: ctrl_reg = ENET_SERDES_1_CTRL_CFG; test_cfg_reg = ENET_SERDES_1_TEST_CFG; pll_cfg = ENET_SERDES_1_PLL_CFG; break; default: return -EINVAL; } ctrl_val = (ENET_SERDES_CTRL_SDET_0 | ENET_SERDES_CTRL_SDET_1 | ENET_SERDES_CTRL_SDET_2 | ENET_SERDES_CTRL_SDET_3 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); test_cfg_val = 0; if (lp->loopback_mode == LOOPBACK_PHY) { test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_0_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_1_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_2_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_3_SHIFT)); } esr_reset(np); nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2); nw64(ctrl_reg, ctrl_val); nw64(test_cfg_reg, test_cfg_val); /* Initialize all 4 lanes of the SERDES. 
*/ for (i = 0; i < 4; i++) { u32 rxtx_ctrl, glue0; int err; err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); if (err) return err; err = esr_read_glue0(np, i, &glue0); if (err) return err; rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); glue0 &= ~(ESR_GLUE_CTRL0_SRATE | ESR_GLUE_CTRL0_THCNT | ESR_GLUE_CTRL0_BLTIME); glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | (BLTIME_300_CYCLES << ESR_GLUE_CTRL0_BLTIME_SHIFT)); err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); if (err) return err; err = esr_write_glue0(np, i, glue0); if (err) return err; } sig = nr64(ESR_INT_SIGNALS); switch (np->port) { case 0: mask = ESR_INT_SIGNALS_P0_BITS; val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0 | ESR_INT_XSRDY_P0 | ESR_INT_XDP_P0_CH3 | ESR_INT_XDP_P0_CH2 | ESR_INT_XDP_P0_CH1 | ESR_INT_XDP_P0_CH0); break; case 1: mask = ESR_INT_SIGNALS_P1_BITS; val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1 | ESR_INT_XSRDY_P1 | ESR_INT_XDP_P1_CH3 | ESR_INT_XDP_P1_CH2 | ESR_INT_XDP_P1_CH1 | ESR_INT_XDP_P1_CH0); break; default: return -EINVAL; } if ((sig & mask) != val) { int err; err = serdes_init_1g_serdes(np); if (!err) { np->flags &= ~NIU_FLAGS_10G; np->mac_xcvr = MAC_XCVR_PCS; } else { netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", np->port); return -ENODEV; } } return 0; } static int niu_determine_phy_disposition(struct niu *np) { struct niu_parent *parent = np->parent; u8 plat_type = parent->plat_type; const struct niu_phy_template *tp; u32 phy_addr_off = 0; if (plat_type == PLAT_TYPE_NIU) { switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_XCVR_SERDES)) { case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: /* 10G Serdes */ tp = &phy_template_niu_10g_serdes; break; case NIU_FLAGS_XCVR_SERDES: /* 1G Serdes */ tp = &phy_template_niu_1g_serdes; break; case NIU_FLAGS_10G | NIU_FLAGS_FIBER: /* 10G Fiber */ default: if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { tp = 
&phy_template_niu_10g_hotplug; if (np->port == 0) phy_addr_off = 8; if (np->port == 1) phy_addr_off = 12; } else { tp = &phy_template_niu_10g_fiber; phy_addr_off += np->port; } break; } } else { switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_XCVR_SERDES)) { case 0: /* 1G copper */ tp = &phy_template_1g_copper; if (plat_type == PLAT_TYPE_VF_P0) phy_addr_off = 10; else if (plat_type == PLAT_TYPE_VF_P1) phy_addr_off = 26; phy_addr_off += (np->port ^ 0x3); break; case NIU_FLAGS_10G: /* 10G copper */ tp = &phy_template_10g_copper; break; case NIU_FLAGS_FIBER: /* 1G fiber */ tp = &phy_template_1g_fiber; break; case NIU_FLAGS_10G | NIU_FLAGS_FIBER: /* 10G fiber */ tp = &phy_template_10g_fiber; if (plat_type == PLAT_TYPE_VF_P0 || plat_type == PLAT_TYPE_VF_P1) phy_addr_off = 8; phy_addr_off += np->port; if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { tp = &phy_template_10g_fiber_hotplug; if (np->port == 0) phy_addr_off = 8; if (np->port == 1) phy_addr_off = 12; } break; case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: case NIU_FLAGS_XCVR_SERDES: switch(np->port) { case 0: case 1: tp = &phy_template_10g_serdes; break; case 2: case 3: tp = &phy_template_1g_rgmii; break; default: return -EINVAL; break; } phy_addr_off = niu_atca_port_num[np->port]; break; default: return -EINVAL; } } np->phy_ops = tp->ops; np->phy_addr = tp->phy_addr_base + phy_addr_off; return 0; } static int niu_init_link(struct niu *np) { struct niu_parent *parent = np->parent; int err, ignore; if (parent->plat_type == PLAT_TYPE_NIU) { err = niu_xcvr_init(np); if (err) return err; msleep(200); } err = niu_serdes_init(np); if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) return err; msleep(200); err = niu_xcvr_init(np); if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) niu_link_status(np, &ignore); return 0; } static void niu_set_primary_mac(struct niu *np, unsigned char *addr) { u16 reg0 = addr[4] << 8 | addr[5]; u16 reg1 = addr[2] << 8 | addr[3]; u16 reg2 = 
addr[0] << 8 | addr[1];

	/* Program the station MAC address into the active MAC block
	 * (XMAC or BMAC, selected by NIU_FLAGS_XMAC).  The address is
	 * written as three 16-bit register halves, reg2 holding the
	 * most-significant bytes.
	 */
	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ADDR0, reg0);
		nw64_mac(XMAC_ADDR1, reg1);
		nw64_mac(XMAC_ADDR2, reg2);
	} else {
		nw64_mac(BMAC_ADDR0, reg0);
		nw64_mac(BMAC_ADDR1, reg1);
		nw64_mac(BMAC_ADDR2, reg2);
	}
}

/* Number of alternate (filter) MAC address slots provided by the
 * active MAC block.
 */
static int niu_num_alt_addr(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return XMAC_NUM_ALT_ADDR;
	else
		return BMAC_NUM_ALT_ADDR;
}

/* Write @addr into alternate MAC address slot @index.
 * Returns -EINVAL if @index is out of range for the active MAC.
 */
static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
	} else {
		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
	}

	return 0;
}

/* Enable (@on != 0) or disable comparison against alternate MAC
 * address slot @index via the address-compare-enable register.
 * Note the BMAC bit layout is offset by one relative to the XMAC.
 */
static int niu_enable_alt_mac(struct niu *np, int index, int on)
{
	unsigned long reg;
	u64 val, mask;

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		reg = XMAC_ADDR_CMPEN;
		mask = 1 << index;
	} else {
		reg = BMAC_ADDR_CMPEN;
		mask = 1 << (index + 1);
	}

	val = nr64_mac(reg);
	if (on)
		val |= mask;
	else
		val &= ~mask;
	nw64_mac(reg, val);

	return 0;
}

/* Low-level helper: program RDC table number @num (and the MAC
 * preference bit when @mac_pref) into host-info register @reg.
 */
static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
				   int num, int mac_pref)
{
	u64 val = nr64_mac(reg);
	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
	val |= num;
	if (mac_pref)
		val |= HOST_INFO_MPR;
	nw64_mac(reg, val);
}

/* Select the host-info slot for the active MAC block (XMAC vs BMAC
 * use different index spaces) and program the RDC table number.
 * Returns -EINVAL if @rdc_table_num does not fit in the field.
 */
static int __set_rdc_table_num(struct niu *np,
			       int xmac_index, int bmac_index,
			       int rdc_table_num, int mac_pref)
{
	unsigned long reg;

	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
		return -EINVAL;
	if (np->flags & NIU_FLAGS_XMAC)
		reg = XMAC_HOST_INFO(xmac_index);
	else
		reg = BMAC_HOST_INFO(bmac_index);
	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
	return 0;
}

/* Bind the primary MAC address to RDC table @table_num.
 * Slot 17 (XMAC) / 0 (BMAC) is the primary-address host-info entry.
 */
static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
					 int mac_pref)
{
	return
__set_rdc_table_num(np, 17, 0, table_num, mac_pref); } static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, int mac_pref) { return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); } static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, int table_num, int mac_pref) { if (idx >= niu_num_alt_addr(np)) return -EINVAL; return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); } static u64 vlan_entry_set_parity(u64 reg_val) { u64 port01_mask; u64 port23_mask; port01_mask = 0x00ff; port23_mask = 0xff00; if (hweight64(reg_val & port01_mask) & 1) reg_val |= ENET_VLAN_TBL_PARITY0; else reg_val &= ~ENET_VLAN_TBL_PARITY0; if (hweight64(reg_val & port23_mask) & 1) reg_val |= ENET_VLAN_TBL_PARITY1; else reg_val &= ~ENET_VLAN_TBL_PARITY1; return reg_val; } static void vlan_tbl_write(struct niu *np, unsigned long index, int port, int vpr, int rdc_table) { u64 reg_val = nr64(ENET_VLAN_TBL(index)); reg_val &= ~((ENET_VLAN_TBL_VPR | ENET_VLAN_TBL_VLANRDCTBLN) << ENET_VLAN_TBL_SHIFT(port)); if (vpr) reg_val |= (ENET_VLAN_TBL_VPR << ENET_VLAN_TBL_SHIFT(port)); reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); reg_val = vlan_entry_set_parity(reg_val); nw64(ENET_VLAN_TBL(index), reg_val); } static void vlan_tbl_clear(struct niu *np) { int i; for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) nw64(ENET_VLAN_TBL(i), 0); } static int tcam_wait_bit(struct niu *np, u64 bit) { int limit = 1000; while (--limit > 0) { if (nr64(TCAM_CTL) & bit) break; udelay(1); } if (limit <= 0) return -ENODEV; return 0; } static int tcam_flush(struct niu *np, int index) { nw64(TCAM_KEY_0, 0x00); nw64(TCAM_KEY_MASK_0, 0xff); nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); return tcam_wait_bit(np, TCAM_CTL_STAT); } #if 0 static int tcam_read(struct niu *np, int index, u64 *key, u64 *mask) { int err; nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); err = tcam_wait_bit(np, TCAM_CTL_STAT); if (!err) { key[0] = nr64(TCAM_KEY_0); key[1] = nr64(TCAM_KEY_1); key[2] = 
nr64(TCAM_KEY_2); key[3] = nr64(TCAM_KEY_3); mask[0] = nr64(TCAM_KEY_MASK_0); mask[1] = nr64(TCAM_KEY_MASK_1); mask[2] = nr64(TCAM_KEY_MASK_2); mask[3] = nr64(TCAM_KEY_MASK_3); } return err; } #endif static int tcam_write(struct niu *np, int index, u64 *key, u64 *mask) { nw64(TCAM_KEY_0, key[0]); nw64(TCAM_KEY_1, key[1]); nw64(TCAM_KEY_2, key[2]); nw64(TCAM_KEY_3, key[3]); nw64(TCAM_KEY_MASK_0, mask[0]); nw64(TCAM_KEY_MASK_1, mask[1]); nw64(TCAM_KEY_MASK_2, mask[2]); nw64(TCAM_KEY_MASK_3, mask[3]); nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); return tcam_wait_bit(np, TCAM_CTL_STAT); } #if 0 static int tcam_assoc_read(struct niu *np, int index, u64 *data) { int err; nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); err = tcam_wait_bit(np, TCAM_CTL_STAT); if (!err) *data = nr64(TCAM_KEY_1); return err; } #endif static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) { nw64(TCAM_KEY_1, assoc_data); nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); return tcam_wait_bit(np, TCAM_CTL_STAT); } static void tcam_enable(struct niu *np, int on) { u64 val = nr64(FFLP_CFG_1); if (on) val &= ~FFLP_CFG_1_TCAM_DIS; else val |= FFLP_CFG_1_TCAM_DIS; nw64(FFLP_CFG_1, val); } static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) { u64 val = nr64(FFLP_CFG_1); val &= ~(FFLP_CFG_1_FFLPINITDONE | FFLP_CFG_1_CAMLAT | FFLP_CFG_1_CAMRATIO); val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); nw64(FFLP_CFG_1, val); val = nr64(FFLP_CFG_1); val |= FFLP_CFG_1_FFLPINITDONE; nw64(FFLP_CFG_1, val); } static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, int on) { unsigned long reg; u64 val; if (class < CLASS_CODE_ETHERTYPE1 || class > CLASS_CODE_ETHERTYPE2) return -EINVAL; reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); val = nr64(reg); if (on) val |= L2_CLS_VLD; else val &= ~L2_CLS_VLD; nw64(reg, val); return 0; } #if 0 static int tcam_user_eth_class_set(struct niu *np, unsigned long class, u64 
ether_type) { unsigned long reg; u64 val; if (class < CLASS_CODE_ETHERTYPE1 || class > CLASS_CODE_ETHERTYPE2 || (ether_type & ~(u64)0xffff) != 0) return -EINVAL; reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); val = nr64(reg); val &= ~L2_CLS_ETYPE; val |= (ether_type << L2_CLS_ETYPE_SHIFT); nw64(reg, val); return 0; } #endif static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, int on) { unsigned long reg; u64 val; if (class < CLASS_CODE_USER_PROG1 || class > CLASS_CODE_USER_PROG4) return -EINVAL; reg = L3_CLS(class - CLASS_CODE_USER_PROG1); val = nr64(reg); if (on) val |= L3_CLS_VALID; else val &= ~L3_CLS_VALID; nw64(reg, val); return 0; } static int tcam_user_ip_class_set(struct niu *np, unsigned long class, int ipv6, u64 protocol_id, u64 tos_mask, u64 tos_val) { unsigned long reg; u64 val; if (class < CLASS_CODE_USER_PROG1 || class > CLASS_CODE_USER_PROG4 || (protocol_id & ~(u64)0xff) != 0 || (tos_mask & ~(u64)0xff) != 0 || (tos_val & ~(u64)0xff) != 0) return -EINVAL; reg = L3_CLS(class - CLASS_CODE_USER_PROG1); val = nr64(reg); val &= ~(L3_CLS_IPVER | L3_CLS_PID | L3_CLS_TOSMASK | L3_CLS_TOS); if (ipv6) val |= L3_CLS_IPVER; val |= (protocol_id << L3_CLS_PID_SHIFT); val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); val |= (tos_val << L3_CLS_TOS_SHIFT); nw64(reg, val); return 0; } static int tcam_early_init(struct niu *np) { unsigned long i; int err; tcam_enable(np, 0); tcam_set_lat_and_ratio(np, DEFAULT_TCAM_LATENCY, DEFAULT_TCAM_ACCESS_RATIO); for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { err = tcam_user_eth_class_enable(np, i, 0); if (err) return err; } for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { err = tcam_user_ip_class_enable(np, i, 0); if (err) return err; } return 0; } static int tcam_flush_all(struct niu *np) { unsigned long i; for (i = 0; i < np->parent->tcam_num_entries; i++) { int err = tcam_flush(np, i); if (err) return err; } return 0; } static u64 hash_addr_regval(unsigned long index, unsigned 
long num_entries) { return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0); } #if 0 static int hash_read(struct niu *np, unsigned long partition, unsigned long index, unsigned long num_entries, u64 *data) { u64 val = hash_addr_regval(index, num_entries); unsigned long i; if (partition >= FCRAM_NUM_PARTITIONS || index + num_entries > FCRAM_SIZE) return -EINVAL; nw64(HASH_TBL_ADDR(partition), val); for (i = 0; i < num_entries; i++) data[i] = nr64(HASH_TBL_DATA(partition)); return 0; } #endif static int hash_write(struct niu *np, unsigned long partition, unsigned long index, unsigned long num_entries, u64 *data) { u64 val = hash_addr_regval(index, num_entries); unsigned long i; if (partition >= FCRAM_NUM_PARTITIONS || index + (num_entries * 8) > FCRAM_SIZE) return -EINVAL; nw64(HASH_TBL_ADDR(partition), val); for (i = 0; i < num_entries; i++) nw64(HASH_TBL_DATA(partition), data[i]); return 0; } static void fflp_reset(struct niu *np) { u64 val; nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); udelay(10); nw64(FFLP_CFG_1, 0); val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; nw64(FFLP_CFG_1, val); } static void fflp_set_timings(struct niu *np) { u64 val = nr64(FFLP_CFG_1); val &= ~FFLP_CFG_1_FFLPINITDONE; val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); nw64(FFLP_CFG_1, val); val = nr64(FFLP_CFG_1); val |= FFLP_CFG_1_FFLPINITDONE; nw64(FFLP_CFG_1, val); val = nr64(FCRAM_REF_TMR); val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); nw64(FCRAM_REF_TMR, val); } static int fflp_set_partition(struct niu *np, u64 partition, u64 mask, u64 base, int enable) { unsigned long reg; u64 val; if (partition >= FCRAM_NUM_PARTITIONS || (mask & ~(u64)0x1f) != 0 || (base & ~(u64)0x1f) != 0) return -EINVAL; reg = FLW_PRT_SEL(partition); val = nr64(reg); val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); val |= (mask << 
FLW_PRT_SEL_MASK_SHIFT); val |= (base << FLW_PRT_SEL_BASE_SHIFT); if (enable) val |= FLW_PRT_SEL_EXT; nw64(reg, val); return 0; } static int fflp_disable_all_partitions(struct niu *np) { unsigned long i; for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) { int err = fflp_set_partition(np, 0, 0, 0, 0); if (err) return err; } return 0; } static void fflp_llcsnap_enable(struct niu *np, int on) { u64 val = nr64(FFLP_CFG_1); if (on) val |= FFLP_CFG_1_LLCSNAP; else val &= ~FFLP_CFG_1_LLCSNAP; nw64(FFLP_CFG_1, val); } static void fflp_errors_enable(struct niu *np, int on) { u64 val = nr64(FFLP_CFG_1); if (on) val &= ~FFLP_CFG_1_ERRORDIS; else val |= FFLP_CFG_1_ERRORDIS; nw64(FFLP_CFG_1, val); } static int fflp_hash_clear(struct niu *np) { struct fcram_hash_ipv4 ent; unsigned long i; /* IPV4 hash entry with valid bit clear, rest is don't care. */ memset(&ent, 0, sizeof(ent)); ent.header = HASH_HEADER_EXT; for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { int err = hash_write(np, 0, i, 1, (u64 *) &ent); if (err) return err; } return 0; } static int fflp_early_init(struct niu *np) { struct niu_parent *parent; unsigned long flags; int err; niu_lock_parent(np, flags); parent = np->parent; err = 0; if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { if (np->parent->plat_type != PLAT_TYPE_NIU) { fflp_reset(np); fflp_set_timings(np); err = fflp_disable_all_partitions(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "fflp_disable_all_partitions failed, err=%d\n", err); goto out; } } err = tcam_early_init(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "tcam_early_init failed, err=%d\n", err); goto out; } fflp_llcsnap_enable(np, 1); fflp_errors_enable(np, 0); nw64(H1POLY, 0); nw64(H2POLY, 0); err = tcam_flush_all(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "tcam_flush_all failed, err=%d\n", err); goto out; } if (np->parent->plat_type != PLAT_TYPE_NIU) { err = fflp_hash_clear(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "fflp_hash_clear 
failed, err=%d\n", err); goto out; } } vlan_tbl_clear(np); parent->flags |= PARENT_FLGS_CLS_HWINIT; } out: niu_unlock_parent(np, flags); return err; } static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) { if (class_code < CLASS_CODE_USER_PROG1 || class_code > CLASS_CODE_SCTP_IPV6) return -EINVAL; nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); return 0; } static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) { if (class_code < CLASS_CODE_USER_PROG1 || class_code > CLASS_CODE_SCTP_IPV6) return -EINVAL; nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); return 0; } /* Entries for the ports are interleaved in the TCAM */ static u16 tcam_get_index(struct niu *np, u16 idx) { /* One entry reserved for IP fragment rule */ if (idx >= (np->clas.tcam_sz - 1)) idx = 0; return np->clas.tcam_top + ((idx+1) * np->parent->num_ports); } static u16 tcam_get_size(struct niu *np) { /* One entry reserved for IP fragment rule */ return np->clas.tcam_sz - 1; } static u16 tcam_get_valid_entry_cnt(struct niu *np) { /* One entry reserved for IP fragment rule */ return np->clas.tcam_valid_entries - 1; } static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, u32 offset, u32 size, u32 truesize) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size); skb->len += size; skb->data_len += size; skb->truesize += truesize; } static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) { a >>= PAGE_SHIFT; a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); return a & (MAX_RBR_RING_SIZE - 1); } static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, struct page ***link) { unsigned int h = niu_hash_rxaddr(rp, addr); struct page *p, **pp; addr &= PAGE_MASK; pp = &rp->rxhash[h]; for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { if (p->index == addr) { *link = pp; goto found; } } BUG(); found: return p; } static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) { 
unsigned int h = niu_hash_rxaddr(rp, base); page->index = base; page->mapping = (struct address_space *) rp->rxhash[h]; rp->rxhash[h] = page; } static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, gfp_t mask, int start_index) { struct page *page; u64 addr; int i; page = alloc_page(mask); if (!page) return -ENOMEM; addr = np->ops->map_page(np->device, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (!addr) { __free_page(page); return -ENOMEM; } niu_hash_page(rp, page, addr); if (rp->rbr_blocks_per_page > 1) atomic_add(rp->rbr_blocks_per_page - 1, &compound_head(page)->_count); for (i = 0; i < rp->rbr_blocks_per_page; i++) { __le32 *rbr = &rp->rbr[start_index + i]; *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); addr += rp->rbr_block_size; } return 0; } static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) { int index = rp->rbr_index; rp->rbr_pending++; if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { int err = niu_rbr_add_page(np, rp, mask, index); if (unlikely(err)) { rp->rbr_pending--; return; } rp->rbr_index += rp->rbr_blocks_per_page; BUG_ON(rp->rbr_index > rp->rbr_table_size); if (rp->rbr_index == rp->rbr_table_size) rp->rbr_index = 0; if (rp->rbr_pending >= rp->rbr_kick_thresh) { nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); rp->rbr_pending = 0; } } } static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) { unsigned int index = rp->rcr_index; int num_rcr = 0; rp->rx_dropped++; while (1) { struct page *page, **link; u64 addr, val; u32 rcr_size; num_rcr++; val = le64_to_cpup(&rp->rcr[index]); addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT; page = niu_find_rxpage(rp, addr, &link); rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> RCR_ENTRY_PKTBUFSZ_SHIFT]; if ((page->index + PAGE_SIZE) - rcr_size == addr) { *link = (struct page *) page->mapping; np->ops->unmap_page(np->device, page->index, PAGE_SIZE, DMA_FROM_DEVICE); page->index = 0; page->mapping = NULL; __free_page(page); 
rp->rbr_refill_pending++; } index = NEXT_RCR(rp, index); if (!(val & RCR_ENTRY_MULTI)) break; } rp->rcr_index = index; return num_rcr; } static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, struct rx_ring_info *rp) { unsigned int index = rp->rcr_index; struct rx_pkt_hdr1 *rh; struct sk_buff *skb; int len, num_rcr; skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); if (unlikely(!skb)) return niu_rx_pkt_ignore(np, rp); num_rcr = 0; while (1) { struct page *page, **link; u32 rcr_size, append_size; u64 addr, val, off; num_rcr++; val = le64_to_cpup(&rp->rcr[index]); len = (val & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT; len -= ETH_FCS_LEN; addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT; page = niu_find_rxpage(rp, addr, &link); rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> RCR_ENTRY_PKTBUFSZ_SHIFT]; off = addr & ~PAGE_MASK; append_size = rcr_size; if (num_rcr == 1) { int ptype; ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); if ((ptype == RCR_PKT_TYPE_TCP || ptype == RCR_PKT_TYPE_UDP) && !(val & (RCR_ENTRY_NOPORT | RCR_ENTRY_ERROR))) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); } else if (!(val & RCR_ENTRY_MULTI)) append_size = len - skb->len; niu_rx_skb_append(skb, page, off, append_size, rcr_size); if ((page->index + rp->rbr_block_size) - rcr_size == addr) { *link = (struct page *) page->mapping; np->ops->unmap_page(np->device, page->index, PAGE_SIZE, DMA_FROM_DEVICE); page->index = 0; page->mapping = NULL; rp->rbr_refill_pending++; } else get_page(page); index = NEXT_RCR(rp, index); if (!(val & RCR_ENTRY_MULTI)) break; } rp->rcr_index = index; len += sizeof(*rh); len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN); __pskb_pull_tail(skb, len); rh = (struct rx_pkt_hdr1 *) skb->data; if (np->dev->features & NETIF_F_RXHASH) skb->rxhash = ((u32)rh->hashval2_0 << 24 | (u32)rh->hashval2_1 << 16 | (u32)rh->hashval1_1 << 8 | (u32)rh->hashval1_2 << 0); skb_pull(skb, sizeof(*rh)); rp->rx_packets++; 
rp->rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, np->dev); skb_record_rx_queue(skb, rp->rx_channel); napi_gro_receive(napi, skb); return num_rcr; } static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) { int blocks_per_page = rp->rbr_blocks_per_page; int err, index = rp->rbr_index; err = 0; while (index < (rp->rbr_table_size - blocks_per_page)) { err = niu_rbr_add_page(np, rp, mask, index); if (unlikely(err)) break; index += blocks_per_page; } rp->rbr_index = index; return err; } static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) { int i; for (i = 0; i < MAX_RBR_RING_SIZE; i++) { struct page *page; page = rp->rxhash[i]; while (page) { struct page *next = (struct page *) page->mapping; u64 base = page->index; np->ops->unmap_page(np->device, base, PAGE_SIZE, DMA_FROM_DEVICE); page->index = 0; page->mapping = NULL; __free_page(page); page = next; } } for (i = 0; i < rp->rbr_table_size; i++) rp->rbr[i] = cpu_to_le32(0); rp->rbr_index = 0; } static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) { struct tx_buff_info *tb = &rp->tx_buffs[idx]; struct sk_buff *skb = tb->skb; struct tx_pkt_hdr *tp; u64 tx_flags; int i, len; tp = (struct tx_pkt_hdr *) skb->data; tx_flags = le64_to_cpup(&tp->flags); rp->tx_packets++; rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - ((tx_flags & TXHDR_PAD) / 2)); len = skb_headlen(skb); np->ops->unmap_single(np->device, tb->mapping, len, DMA_TO_DEVICE); if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) rp->mark_pending--; tb->skb = NULL; do { idx = NEXT_TX(rp, idx); len -= MAX_TX_DESC_LEN; } while (len > 0); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { tb = &rp->tx_buffs[idx]; BUG_ON(tb->skb != NULL); np->ops->unmap_page(np->device, tb->mapping, skb_frag_size(&skb_shinfo(skb)->frags[i]), DMA_TO_DEVICE); idx = NEXT_TX(rp, idx); } dev_kfree_skb(skb); return idx; } #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) static void niu_tx_work(struct niu 
*np, struct tx_ring_info *rp) { struct netdev_queue *txq; u16 pkt_cnt, tmp; int cons, index; u64 cs; index = (rp - np->tx_rings); txq = netdev_get_tx_queue(np->dev, index); cs = rp->tx_cs; if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) goto out; tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); rp->last_pkt_cnt = tmp; cons = rp->cons; netif_printk(np, tx_done, KERN_DEBUG, np->dev, "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); while (pkt_cnt--) cons = release_tx_packet(np, rp, cons); rp->cons = cons; smp_mb(); out: if (unlikely(netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { __netif_tx_lock(txq, smp_processor_id()); if (netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) netif_tx_wake_queue(txq); __netif_tx_unlock(txq); } } static inline void niu_sync_rx_discard_stats(struct niu *np, struct rx_ring_info *rp, const int limit) { /* This elaborate scheme is needed for reading the RX discard * counters, as they are only 16-bit and can overflow quickly, * and because the overflow indication bit is not usable as * the counter value does not wrap, but remains at max value * 0xFFFF. * * In theory and in practice counters can be lost in between * reading nr64() and clearing the counter nw64(). For this * reason, the number of counter clearings nw64() is * limited/reduced though the limit parameter. */ int rx_channel = rp->rx_channel; u32 misc, wred; /* RXMISC (Receive Miscellaneous Discard Count), covers the * following discard events: IPP (Input Port Process), * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive * Block Ring) prefetch buffer is empty. 
*/ misc = nr64(RXMISC(rx_channel)); if (unlikely((misc & RXMISC_COUNT) > limit)) { nw64(RXMISC(rx_channel), 0); rp->rx_errors += misc & RXMISC_COUNT; if (unlikely(misc & RXMISC_OFLOW)) dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", rx_channel); netif_printk(np, rx_err, KERN_DEBUG, np->dev, "rx-%d: MISC drop=%u over=%u\n", rx_channel, misc, misc-limit); } /* WRED (Weighted Random Early Discard) by hardware */ wred = nr64(RED_DIS_CNT(rx_channel)); if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) { nw64(RED_DIS_CNT(rx_channel), 0); rp->rx_dropped += wred & RED_DIS_CNT_COUNT; if (unlikely(wred & RED_DIS_CNT_OFLOW)) dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); netif_printk(np, rx_err, KERN_DEBUG, np->dev, "rx-%d: WRED drop=%u over=%u\n", rx_channel, wred, wred-limit); } } static int niu_rx_work(struct napi_struct *napi, struct niu *np, struct rx_ring_info *rp, int budget) { int qlen, rcr_done = 0, work_done = 0; struct rxdma_mailbox *mbox = rp->mbox; u64 stat; #if 1 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; #else stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); #endif mbox->rx_dma_ctl_stat = 0; mbox->rcrstat_a = 0; netif_printk(np, rx_status, KERN_DEBUG, np->dev, "%s(chan[%d]), stat[%llx] qlen=%d\n", __func__, rp->rx_channel, (unsigned long long)stat, qlen); rcr_done = work_done = 0; qlen = min(qlen, budget); while (work_done < qlen) { rcr_done += niu_process_rx_pkt(napi, np, rp); work_done++; } if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { unsigned int i; for (i = 0; i < rp->rbr_refill_pending; i++) niu_rbr_refill(np, rp, GFP_ATOMIC); rp->rbr_refill_pending = 0; } stat = (RX_DMA_CTL_STAT_MEX | ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); /* Only sync discards stats when qlen indicate potential for 
drops */ if (qlen > 10) niu_sync_rx_discard_stats(np, rp, 0x7FFF); return work_done; } static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) { u64 v0 = lp->v0; u32 tx_vec = (v0 >> 32); u32 rx_vec = (v0 & 0xffffffff); int i, work_done = 0; netif_printk(np, intr, KERN_DEBUG, np->dev, "%s() v0[%016llx]\n", __func__, (unsigned long long)v0); for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; if (tx_vec & (1 << rp->tx_channel)) niu_tx_work(np, rp); nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); } for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; if (rx_vec & (1 << rp->rx_channel)) { int this_work_done; this_work_done = niu_rx_work(&lp->napi, np, rp, budget); budget -= this_work_done; work_done += this_work_done; } nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); } return work_done; } static int niu_poll(struct napi_struct *napi, int budget) { struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); struct niu *np = lp->np; int work_done; work_done = niu_poll_core(np, lp, budget); if (work_done < budget) { napi_complete(napi); niu_ldg_rearm(np, lp, 1); } return work_done; } static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, u64 stat) { netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) pr_cont("RBR_TMOUT "); if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) pr_cont("RSP_CNT "); if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) pr_cont("BYTE_EN_BUS "); if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) pr_cont("RSP_DAT "); if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) pr_cont("RCR_ACK "); if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) pr_cont("RCR_SHA_PAR "); if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) pr_cont("RBR_PRE_PAR "); if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) pr_cont("CONFIG "); if (stat & RX_DMA_CTL_STAT_RCRINCON) pr_cont("RCRINCON "); if (stat & RX_DMA_CTL_STAT_RCRFULL) pr_cont("RCRFULL "); if (stat & RX_DMA_CTL_STAT_RBRFULL) pr_cont("RBRFULL "); if 
(stat & RX_DMA_CTL_STAT_RBRLOGPAGE) pr_cont("RBRLOGPAGE "); if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) pr_cont("CFIGLOGPAGE "); if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) pr_cont("DC_FIDO "); pr_cont(")\n"); } static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) { u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); int err = 0; if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | RX_DMA_CTL_STAT_PORT_FATAL)) err = -EINVAL; if (err) { netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", rp->rx_channel, (unsigned long long) stat); niu_log_rxchan_errors(np, rp, stat); } nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); return err; } static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, u64 cs) { netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); if (cs & TX_CS_MBOX_ERR) pr_cont("MBOX "); if (cs & TX_CS_PKT_SIZE_ERR) pr_cont("PKT_SIZE "); if (cs & TX_CS_TX_RING_OFLOW) pr_cont("TX_RING_OFLOW "); if (cs & TX_CS_PREF_BUF_PAR_ERR) pr_cont("PREF_BUF_PAR "); if (cs & TX_CS_NACK_PREF) pr_cont("NACK_PREF "); if (cs & TX_CS_NACK_PKT_RD) pr_cont("NACK_PKT_RD "); if (cs & TX_CS_CONF_PART_ERR) pr_cont("CONF_PART "); if (cs & TX_CS_PKT_PRT_ERR) pr_cont("PKT_PTR "); pr_cont(")\n"); } static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) { u64 cs, logh, logl; cs = nr64(TX_CS(rp->tx_channel)); logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n", rp->tx_channel, (unsigned long long)cs, (unsigned long long)logh, (unsigned long long)logl); niu_log_txchan_errors(np, rp, cs); return -ENODEV; } static int niu_mif_interrupt(struct niu *np) { u64 mif_status = nr64(MIF_STATUS); int phy_mdint = 0; if (np->flags & NIU_FLAGS_XMAC) { u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) phy_mdint = 1; } netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", (unsigned long 
long)mif_status, phy_mdint); return -ENODEV; } static void niu_xmac_interrupt(struct niu *np) { struct niu_xmac_stats *mp = &np->mac_stats.xmac; u64 val; val = nr64_mac(XTXMAC_STATUS); if (val & XTXMAC_STATUS_FRAME_CNT_EXP) mp->tx_frames += TXMAC_FRM_CNT_COUNT; if (val & XTXMAC_STATUS_BYTE_CNT_EXP) mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) mp->tx_fifo_errors++; if (val & XTXMAC_STATUS_TXMAC_OFLOW) mp->tx_overflow_errors++; if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) mp->tx_max_pkt_size_errors++; if (val & XTXMAC_STATUS_TXMAC_UFLOW) mp->tx_underflow_errors++; val = nr64_mac(XRXMAC_STATUS); if (val & XRXMAC_STATUS_LCL_FLT_STATUS) mp->rx_local_faults++; if (val & XRXMAC_STATUS_RFLT_DET) mp->rx_remote_faults++; if (val & XRXMAC_STATUS_LFLT_CNT_EXP) mp->rx_link_faults += LINK_FAULT_CNT_COUNT; if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) mp->rx_frags += RXMAC_FRAG_CNT_COUNT; if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP) mp->rx_octets += RXMAC_BT_CNT_COUNT; if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) mp->rx_code_violations += 
RXMAC_CD_VIO_CNT_COUNT;
	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
	if (val & XRXMAC_STATUS_RXUFLOW)
		mp->rx_underflows++;
	if (val & XRXMAC_STATUS_RXOFLOW)
		mp->rx_overflows++;

	/* Flow-control (pause frame) event accounting. */
	val = nr64_mac(XMAC_FC_STAT);
	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
		mp->pause_off_state++;
	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
		mp->pause_on_state++;
	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
		mp->pause_received++;
}

/* Service a BMAC statistics interrupt: fold expired hardware
 * counters into the software mac_stats.bmac accumulators.
 */
static void niu_bmac_interrupt(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
	u64 val;

	val = nr64_mac(BTXMAC_STATUS);
	if (val & BTXMAC_STATUS_UNDERRUN)
		mp->tx_underflow_errors++;
	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
		mp->tx_max_pkt_size_errors++;
	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;

	val = nr64_mac(BRXMAC_STATUS);
	if (val & BRXMAC_STATUS_OVERFLOW)
		mp->rx_overflows++;
	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
	/* NOTE(review): CRC expiry adds the ALIGN counter width and
	 * LEN expiry adds the CODE_VIOL counter width — this looks
	 * like a copy-paste; verify against the BMAC register spec
	 * before changing, as the intended macros may not exist.
	 */
	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;

	/* Flow-control (pause frame) event accounting. */
	val = nr64_mac(BMAC_CTRL_STATUS);
	if (val & BMAC_CTRL_STATUS_NOPAUSE)
		mp->pause_off_state++;
	if (val & BMAC_CTRL_STATUS_PAUSE)
		mp->pause_on_state++;
	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
		mp->pause_received++;
}

/* Dispatch a MAC interrupt to the XMAC or BMAC handler. */
static int niu_mac_interrupt(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_xmac_interrupt(np);
	else
		niu_bmac_interrupt(np);

	return 0;
}

/* Decode the SYS_ERR_STAT bitmask into a human-readable log line. */
static void niu_log_device_error(struct niu *np, u64 stat)
{
	netdev_err(np->dev, "Core device errors ( ");

	if (stat & SYS_ERR_MASK_META2)
		pr_cont("META2 ");
	if (stat & SYS_ERR_MASK_META1)
		pr_cont("META1 ");
	if (stat & SYS_ERR_MASK_PEU)
		pr_cont("PEU ");
	if (stat &
SYS_ERR_MASK_TXC) pr_cont("TXC "); if (stat & SYS_ERR_MASK_RDMC) pr_cont("RDMC "); if (stat & SYS_ERR_MASK_TDMC) pr_cont("TDMC "); if (stat & SYS_ERR_MASK_ZCP) pr_cont("ZCP "); if (stat & SYS_ERR_MASK_FFLP) pr_cont("FFLP "); if (stat & SYS_ERR_MASK_IPP) pr_cont("IPP "); if (stat & SYS_ERR_MASK_MAC) pr_cont("MAC "); if (stat & SYS_ERR_MASK_SMX) pr_cont("SMX "); pr_cont(")\n"); } static int niu_device_error(struct niu *np) { u64 stat = nr64(SYS_ERR_STAT); netdev_err(np->dev, "Core device error, stat[%llx]\n", (unsigned long long)stat); niu_log_device_error(np, stat); return -ENODEV; } static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, u64 v0, u64 v1, u64 v2) { int i, err = 0; lp->v0 = v0; lp->v1 = v1; lp->v2 = v2; if (v1 & 0x00000000ffffffffULL) { u32 rx_vec = (v1 & 0xffffffff); for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; if (rx_vec & (1 << rp->rx_channel)) { int r = niu_rx_error(np, rp); if (r) { err = r; } else { if (!v0) nw64(RX_DMA_CTL_STAT(rp->rx_channel), RX_DMA_CTL_STAT_MEX); } } } } if (v1 & 0x7fffffff00000000ULL) { u32 tx_vec = (v1 >> 32) & 0x7fffffff; for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; if (tx_vec & (1 << rp->tx_channel)) { int r = niu_tx_error(np, rp); if (r) err = r; } } } if ((v0 | v1) & 0x8000000000000000ULL) { int r = niu_mif_interrupt(np); if (r) err = r; } if (v2) { if (v2 & 0x01ef) { int r = niu_mac_interrupt(np); if (r) err = r; } if (v2 & 0x0210) { int r = niu_device_error(np); if (r) err = r; } } if (err) niu_enable_interrupts(np, 0); return err; } static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, int ldn) { struct rxdma_mailbox *mbox = rp->mbox; u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); stat_write = (RX_DMA_CTL_STAT_RCRTHRES | RX_DMA_CTL_STAT_RCRTO); nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); netif_printk(np, intr, KERN_DEBUG, np->dev, "%s() stat[%llx]\n", __func__, (unsigned long long)stat); 
} static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, int ldn) { rp->tx_cs = nr64(TX_CS(rp->tx_channel)); netif_printk(np, intr, KERN_DEBUG, np->dev, "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); } static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) { struct niu_parent *parent = np->parent; u32 rx_vec, tx_vec; int i; tx_vec = (v0 >> 32); rx_vec = (v0 & 0xffffffff); for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; int ldn = LDN_RXDMA(rp->rx_channel); if (parent->ldg_map[ldn] != ldg) continue; nw64(LD_IM0(ldn), LD_IM0_MASK); if (rx_vec & (1 << rp->rx_channel)) niu_rxchan_intr(np, rp, ldn); } for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; int ldn = LDN_TXDMA(rp->tx_channel); if (parent->ldg_map[ldn] != ldg) continue; nw64(LD_IM0(ldn), LD_IM0_MASK); if (tx_vec & (1 << rp->tx_channel)) niu_txchan_intr(np, rp, ldn); } } static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, u64 v0, u64 v1, u64 v2) { if (likely(napi_schedule_prep(&lp->napi))) { lp->v0 = v0; lp->v1 = v1; lp->v2 = v2; __niu_fastpath_interrupt(np, lp->ldg_num, v0); __napi_schedule(&lp->napi); } } static irqreturn_t niu_interrupt(int irq, void *dev_id) { struct niu_ldg *lp = dev_id; struct niu *np = lp->np; int ldg = lp->ldg_num; unsigned long flags; u64 v0, v1, v2; if (netif_msg_intr(np)) printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)", __func__, lp, ldg); spin_lock_irqsave(&np->lock, flags); v0 = nr64(LDSV0(ldg)); v1 = nr64(LDSV1(ldg)); v2 = nr64(LDSV2(ldg)); if (netif_msg_intr(np)) pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n", (unsigned long long) v0, (unsigned long long) v1, (unsigned long long) v2); if (unlikely(!v0 && !v1 && !v2)) { spin_unlock_irqrestore(&np->lock, flags); return IRQ_NONE; } if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); if (err) goto out; } if (likely(v0 & ~((u64)1 << LDN_MIF))) 
niu_schedule_napi(np, lp, v0, v1, v2); else niu_ldg_rearm(np, lp, 1); out: spin_unlock_irqrestore(&np->lock, flags); return IRQ_HANDLED; } static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) { if (rp->mbox) { np->ops->free_coherent(np->device, sizeof(struct rxdma_mailbox), rp->mbox, rp->mbox_dma); rp->mbox = NULL; } if (rp->rcr) { np->ops->free_coherent(np->device, MAX_RCR_RING_SIZE * sizeof(__le64), rp->rcr, rp->rcr_dma); rp->rcr = NULL; rp->rcr_table_size = 0; rp->rcr_index = 0; } if (rp->rbr) { niu_rbr_free(np, rp); np->ops->free_coherent(np->device, MAX_RBR_RING_SIZE * sizeof(__le32), rp->rbr, rp->rbr_dma); rp->rbr = NULL; rp->rbr_table_size = 0; rp->rbr_index = 0; } kfree(rp->rxhash); rp->rxhash = NULL; } static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) { if (rp->mbox) { np->ops->free_coherent(np->device, sizeof(struct txdma_mailbox), rp->mbox, rp->mbox_dma); rp->mbox = NULL; } if (rp->descr) { int i; for (i = 0; i < MAX_TX_RING_SIZE; i++) { if (rp->tx_buffs[i].skb) (void) release_tx_packet(np, rp, i); } np->ops->free_coherent(np->device, MAX_TX_RING_SIZE * sizeof(__le64), rp->descr, rp->descr_dma); rp->descr = NULL; rp->pending = 0; rp->prod = 0; rp->cons = 0; rp->wrap_bit = 0; } } static void niu_free_channels(struct niu *np) { int i; if (np->rx_rings) { for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; niu_free_rx_ring_info(np, rp); } kfree(np->rx_rings); np->rx_rings = NULL; np->num_rx_rings = 0; } if (np->tx_rings) { for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; niu_free_tx_ring_info(np, rp); } kfree(np->tx_rings); np->tx_rings = NULL; np->num_tx_rings = 0; } } static int niu_alloc_rx_ring_info(struct niu *np, struct rx_ring_info *rp) { BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *), GFP_KERNEL); if (!rp->rxhash) return -ENOMEM; rp->mbox = 
np->ops->alloc_coherent(np->device, sizeof(struct rxdma_mailbox), &rp->mbox_dma, GFP_KERNEL); if (!rp->mbox) return -ENOMEM; if ((unsigned long)rp->mbox & (64UL - 1)) { netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", rp->mbox); return -EINVAL; } rp->rcr = np->ops->alloc_coherent(np->device, MAX_RCR_RING_SIZE * sizeof(__le64), &rp->rcr_dma, GFP_KERNEL); if (!rp->rcr) return -ENOMEM; if ((unsigned long)rp->rcr & (64UL - 1)) { netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", rp->rcr); return -EINVAL; } rp->rcr_table_size = MAX_RCR_RING_SIZE; rp->rcr_index = 0; rp->rbr = np->ops->alloc_coherent(np->device, MAX_RBR_RING_SIZE * sizeof(__le32), &rp->rbr_dma, GFP_KERNEL); if (!rp->rbr) return -ENOMEM; if ((unsigned long)rp->rbr & (64UL - 1)) { netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", rp->rbr); return -EINVAL; } rp->rbr_table_size = MAX_RBR_RING_SIZE; rp->rbr_index = 0; rp->rbr_pending = 0; return 0; } static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) { int mtu = np->dev->mtu; /* These values are recommended by the HW designers for fair * utilization of DRR amongst the rings. 
*/ rp->max_burst = mtu + 32; if (rp->max_burst > 4096) rp->max_burst = 4096; } static int niu_alloc_tx_ring_info(struct niu *np, struct tx_ring_info *rp) { BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); rp->mbox = np->ops->alloc_coherent(np->device, sizeof(struct txdma_mailbox), &rp->mbox_dma, GFP_KERNEL); if (!rp->mbox) return -ENOMEM; if ((unsigned long)rp->mbox & (64UL - 1)) { netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", rp->mbox); return -EINVAL; } rp->descr = np->ops->alloc_coherent(np->device, MAX_TX_RING_SIZE * sizeof(__le64), &rp->descr_dma, GFP_KERNEL); if (!rp->descr) return -ENOMEM; if ((unsigned long)rp->descr & (64UL - 1)) { netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", rp->descr); return -EINVAL; } rp->pending = MAX_TX_RING_SIZE; rp->prod = 0; rp->cons = 0; rp->wrap_bit = 0; /* XXX make these configurable... XXX */ rp->mark_freq = rp->pending / 4; niu_set_max_burst(np, rp); return 0; } static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) { u16 bss; bss = min(PAGE_SHIFT, 15); rp->rbr_block_size = 1 << bss; rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); rp->rbr_sizes[0] = 256; rp->rbr_sizes[1] = 1024; if (np->dev->mtu > ETH_DATA_LEN) { switch (PAGE_SIZE) { case 4 * 1024: rp->rbr_sizes[2] = 4096; break; default: rp->rbr_sizes[2] = 8192; break; } } else { rp->rbr_sizes[2] = 2048; } rp->rbr_sizes[3] = rp->rbr_block_size; } static int niu_alloc_channels(struct niu *np) { struct niu_parent *parent = np->parent; int first_rx_channel, first_tx_channel; int num_rx_rings, num_tx_rings; struct rx_ring_info *rx_rings; struct tx_ring_info *tx_rings; int i, port, err; port = np->port; first_rx_channel = first_tx_channel = 0; for (i = 0; i < port; i++) { first_rx_channel += parent->rxchan_per_port[i]; first_tx_channel += parent->txchan_per_port[i]; } num_rx_rings = parent->rxchan_per_port[port]; num_tx_rings = parent->txchan_per_port[port]; rx_rings = kcalloc(num_rx_rings, sizeof(struct 
rx_ring_info), GFP_KERNEL); err = -ENOMEM; if (!rx_rings) goto out_err; np->num_rx_rings = num_rx_rings; smp_wmb(); np->rx_rings = rx_rings; netif_set_real_num_rx_queues(np->dev, num_rx_rings); for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; rp->np = np; rp->rx_channel = first_rx_channel + i; err = niu_alloc_rx_ring_info(np, rp); if (err) goto out_err; niu_size_rbr(np, rp); /* XXX better defaults, configurable, etc... XXX */ rp->nonsyn_window = 64; rp->nonsyn_threshold = rp->rcr_table_size - 64; rp->syn_window = 64; rp->syn_threshold = rp->rcr_table_size - 64; rp->rcr_pkt_threshold = 16; rp->rcr_timeout = 8; rp->rbr_kick_thresh = RBR_REFILL_MIN; if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) rp->rbr_kick_thresh = rp->rbr_blocks_per_page; err = niu_rbr_fill(np, rp, GFP_KERNEL); if (err) return err; } tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), GFP_KERNEL); err = -ENOMEM; if (!tx_rings) goto out_err; np->num_tx_rings = num_tx_rings; smp_wmb(); np->tx_rings = tx_rings; netif_set_real_num_tx_queues(np->dev, num_tx_rings); for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; rp->np = np; rp->tx_channel = first_tx_channel + i; err = niu_alloc_tx_ring_info(np, rp); if (err) goto out_err; } return 0; out_err: niu_free_channels(np); return err; } static int niu_tx_cs_sng_poll(struct niu *np, int channel) { int limit = 1000; while (--limit > 0) { u64 val = nr64(TX_CS(channel)); if (val & TX_CS_SNG_STATE) return 0; } return -ENODEV; } static int niu_tx_channel_stop(struct niu *np, int channel) { u64 val = nr64(TX_CS(channel)); val |= TX_CS_STOP_N_GO; nw64(TX_CS(channel), val); return niu_tx_cs_sng_poll(np, channel); } static int niu_tx_cs_reset_poll(struct niu *np, int channel) { int limit = 1000; while (--limit > 0) { u64 val = nr64(TX_CS(channel)); if (!(val & TX_CS_RST)) return 0; } return -ENODEV; } static int niu_tx_channel_reset(struct niu *np, int channel) { u64 val = 
nr64(TX_CS(channel)); int err; val |= TX_CS_RST; nw64(TX_CS(channel), val); err = niu_tx_cs_reset_poll(np, channel); if (!err) nw64(TX_RING_KICK(channel), 0); return err; } static int niu_tx_channel_lpage_init(struct niu *np, int channel) { u64 val; nw64(TX_LOG_MASK1(channel), 0); nw64(TX_LOG_VAL1(channel), 0); nw64(TX_LOG_MASK2(channel), 0); nw64(TX_LOG_VAL2(channel), 0); nw64(TX_LOG_PAGE_RELO1(channel), 0); nw64(TX_LOG_PAGE_RELO2(channel), 0); nw64(TX_LOG_PAGE_HDL(channel), 0); val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); nw64(TX_LOG_PAGE_VLD(channel), val); /* XXX TXDMA 32bit mode? XXX */ return 0; } static void niu_txc_enable_port(struct niu *np, int on) { unsigned long flags; u64 val, mask; niu_lock_parent(np, flags); val = nr64(TXC_CONTROL); mask = (u64)1 << np->port; if (on) { val |= TXC_CONTROL_ENABLE | mask; } else { val &= ~mask; if ((val & ~TXC_CONTROL_ENABLE) == 0) val &= ~TXC_CONTROL_ENABLE; } nw64(TXC_CONTROL, val); niu_unlock_parent(np, flags); } static void niu_txc_set_imask(struct niu *np, u64 imask) { unsigned long flags; u64 val; niu_lock_parent(np, flags); val = nr64(TXC_INT_MASK); val &= ~TXC_INT_MASK_VAL(np->port); val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); niu_unlock_parent(np, flags); } static void niu_txc_port_dma_enable(struct niu *np, int on) { u64 val = 0; if (on) { int i; for (i = 0; i < np->num_tx_rings; i++) val |= (1 << np->tx_rings[i].tx_channel); } nw64(TXC_PORT_DMA(np->port), val); } static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) { int err, channel = rp->tx_channel; u64 val, ring_len; err = niu_tx_channel_stop(np, channel); if (err) return err; err = niu_tx_channel_reset(np, channel); if (err) return err; err = niu_tx_channel_lpage_init(np, channel); if (err) return err; nw64(TXC_DMA_MAX(channel), rp->max_burst); nw64(TX_ENT_MSK(channel), 0); if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | TX_RNG_CFIG_STADDR)) { netdev_err(np->dev, 
"TX ring channel %d DMA addr (%llx) is not aligned\n", channel, (unsigned long long)rp->descr_dma); return -EINVAL; } /* The length field in TX_RNG_CFIG is measured in 64-byte * blocks. rp->pending is the number of TX descriptors in * our ring, 8 bytes each, thus we divide by 8 bytes more * to get the proper value the chip wants. */ ring_len = (rp->pending / 8); val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | rp->descr_dma); nw64(TX_RNG_CFIG(channel), val); if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n", channel, (unsigned long long)rp->mbox_dma); return -EINVAL; } nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); nw64(TX_CS(channel), 0); rp->last_pkt_cnt = 0; return 0; } static void niu_init_rdc_groups(struct niu *np) { struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; int i, first_table_num = tp->first_table_num; for (i = 0; i < tp->num_tables; i++) { struct rdc_table *tbl = &tp->tables[i]; int this_table = first_table_num + i; int slot; for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) nw64(RDC_TBL(this_table, slot), tbl->rxdma_channel[slot]); } nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); } static void niu_init_drr_weight(struct niu *np) { int type = phy_decode(np->parent->port_phy, np->port); u64 val; switch (type) { case PORT_TYPE_10G: val = PT_DRR_WEIGHT_DEFAULT_10G; break; case PORT_TYPE_1G: default: val = PT_DRR_WEIGHT_DEFAULT_1G; break; } nw64(PT_DRR_WT(np->port), val); } static int niu_init_hostinfo(struct niu *np) { struct niu_parent *parent = np->parent; struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; int i, err, num_alt = niu_num_alt_addr(np); int first_rdc_table = tp->first_table_num; err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); if (err) return err; err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); if 
(err) return err; for (i = 0; i < num_alt; i++) { err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); if (err) return err; } return 0; } static int niu_rx_channel_reset(struct niu *np, int channel) { return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), RXDMA_CFIG1_RST, 1000, 10, "RXDMA_CFIG1"); } static int niu_rx_channel_lpage_init(struct niu *np, int channel) { u64 val; nw64(RX_LOG_MASK1(channel), 0); nw64(RX_LOG_VAL1(channel), 0); nw64(RX_LOG_MASK2(channel), 0); nw64(RX_LOG_VAL2(channel), 0); nw64(RX_LOG_PAGE_RELO1(channel), 0); nw64(RX_LOG_PAGE_RELO2(channel), 0); nw64(RX_LOG_PAGE_HDL(channel), 0); val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); nw64(RX_LOG_PAGE_VLD(channel), val); return 0; } static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) { u64 val; val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); nw64(RDC_RED_PARA(rp->rx_channel), val); } static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) { u64 val = 0; *ret = 0; switch (rp->rbr_block_size) { case 4 * 1024: val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); break; case 8 * 1024: val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); break; case 16 * 1024: val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); break; case 32 * 1024: val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); break; default: return -EINVAL; } val |= RBR_CFIG_B_VLD2; switch (rp->rbr_sizes[2]) { case 2 * 1024: val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); break; case 4 * 1024: val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); break; case 8 * 1024: val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); break; case 16 * 1024: val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); break; default: return -EINVAL; } val |= RBR_CFIG_B_VLD1; switch 
(rp->rbr_sizes[1]) { case 1 * 1024: val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); break; case 2 * 1024: val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); break; case 4 * 1024: val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); break; case 8 * 1024: val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); break; default: return -EINVAL; } val |= RBR_CFIG_B_VLD0; switch (rp->rbr_sizes[0]) { case 256: val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); break; case 512: val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); break; case 1 * 1024: val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); break; case 2 * 1024: val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); break; default: return -EINVAL; } *ret = val; return 0; } static int niu_enable_rx_channel(struct niu *np, int channel, int on) { u64 val = nr64(RXDMA_CFIG1(channel)); int limit; if (on) val |= RXDMA_CFIG1_EN; else val &= ~RXDMA_CFIG1_EN; nw64(RXDMA_CFIG1(channel), val); limit = 1000; while (--limit > 0) { if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) break; udelay(10); } if (limit <= 0) return -ENODEV; return 0; } static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) { int err, channel = rp->rx_channel; u64 val; err = niu_rx_channel_reset(np, channel); if (err) return err; err = niu_rx_channel_lpage_init(np, channel); if (err) return err; niu_rx_channel_wred_init(np, rp); nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); nw64(RX_DMA_CTL_STAT(channel), (RX_DMA_CTL_STAT_MEX | RX_DMA_CTL_STAT_RCRTHRES | RX_DMA_CTL_STAT_RCRTO | RX_DMA_CTL_STAT_RBR_EMPTY)); nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); nw64(RXDMA_CFIG2(channel), ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) | RXDMA_CFIG2_FULL_HDR)); nw64(RBR_CFIG_A(channel), ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); err = niu_compute_rbr_cfig_b(rp, &val); if (err) return err; nw64(RBR_CFIG_B(channel), val); nw64(RCRCFIG_A(channel), ((u64)rp->rcr_table_size << 
RCRCFIG_A_LEN_SHIFT) | (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); nw64(RCRCFIG_B(channel), ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | RCRCFIG_B_ENTOUT | ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); err = niu_enable_rx_channel(np, channel, 1); if (err) return err; nw64(RBR_KICK(channel), rp->rbr_index); val = nr64(RX_DMA_CTL_STAT(channel)); val |= RX_DMA_CTL_STAT_RBR_EMPTY; nw64(RX_DMA_CTL_STAT(channel), val); return 0; } static int niu_init_rx_channels(struct niu *np) { unsigned long flags; u64 seed = jiffies_64; int err, i; niu_lock_parent(np, flags); nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); niu_unlock_parent(np, flags); /* XXX RXDMA 32bit mode? XXX */ niu_init_rdc_groups(np); niu_init_drr_weight(np); err = niu_init_hostinfo(np); if (err) return err; for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; err = niu_init_one_rx_channel(np, rp); if (err) return err; } return 0; } static int niu_set_ip_frag_rule(struct niu *np) { struct niu_parent *parent = np->parent; struct niu_classifier *cp = &np->clas; struct niu_tcam_entry *tp; int index, err; index = cp->tcam_top; tp = &parent->tcam[index]; /* Note that the noport bit is the same in both ipv4 and * ipv6 format TCAM entries. 
*/ memset(tp, 0, sizeof(*tp)); tp->key[1] = TCAM_V4KEY1_NOPORT; tp->key_mask[1] = TCAM_V4KEY1_NOPORT; tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); err = tcam_write(np, index, tp->key, tp->key_mask); if (err) return err; err = tcam_assoc_write(np, index, tp->assoc_data); if (err) return err; tp->valid = 1; cp->tcam_valid_entries++; return 0; } static int niu_init_classifier_hw(struct niu *np) { struct niu_parent *parent = np->parent; struct niu_classifier *cp = &np->clas; int i, err; nw64(H1POLY, cp->h1_init); nw64(H2POLY, cp->h2_init); err = niu_init_hostinfo(np); if (err) return err; for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; vlan_tbl_write(np, i, np->port, vp->vlan_pref, vp->rdc_num); } for (i = 0; i < cp->num_alt_mac_mappings; i++) { struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, ap->rdc_num, ap->mac_pref); if (err) return err; } for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { int index = i - CLASS_CODE_USER_PROG1; err = niu_set_tcam_key(np, i, parent->tcam_key[index]); if (err) return err; err = niu_set_flow_key(np, i, parent->flow_key[index]); if (err) return err; } err = niu_set_ip_frag_rule(np); if (err) return err; tcam_enable(np, 1); return 0; } static int niu_zcp_write(struct niu *np, int index, u64 *data) { nw64(ZCP_RAM_DATA0, data[0]); nw64(ZCP_RAM_DATA1, data[1]); nw64(ZCP_RAM_DATA2, data[2]); nw64(ZCP_RAM_DATA3, data[3]); nw64(ZCP_RAM_DATA4, data[4]); nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); nw64(ZCP_RAM_ACC, (ZCP_RAM_ACC_WRITE | (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 1000, 100); } static int niu_zcp_read(struct niu *np, int index, u64 *data) { int err; err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 1000, 100); if (err) { netdev_err(np->dev, "ZCP 
read busy won't clear, ZCP_RAM_ACC[%llx]\n", (unsigned long long)nr64(ZCP_RAM_ACC)); return err; } nw64(ZCP_RAM_ACC, (ZCP_RAM_ACC_READ | (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 1000, 100); if (err) { netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n", (unsigned long long)nr64(ZCP_RAM_ACC)); return err; } data[0] = nr64(ZCP_RAM_DATA0); data[1] = nr64(ZCP_RAM_DATA1); data[2] = nr64(ZCP_RAM_DATA2); data[3] = nr64(ZCP_RAM_DATA3); data[4] = nr64(ZCP_RAM_DATA4); return 0; } static void niu_zcp_cfifo_reset(struct niu *np) { u64 val = nr64(RESET_CFIFO); val |= RESET_CFIFO_RST(np->port); nw64(RESET_CFIFO, val); udelay(10); val &= ~RESET_CFIFO_RST(np->port); nw64(RESET_CFIFO, val); } static int niu_init_zcp(struct niu *np) { u64 data[5], rbuf[5]; int i, max, err; if (np->parent->plat_type != PLAT_TYPE_NIU) { if (np->port == 0 || np->port == 1) max = ATLAS_P0_P1_CFIFO_ENTRIES; else max = ATLAS_P2_P3_CFIFO_ENTRIES; } else max = NIU_CFIFO_ENTRIES; data[0] = 0; data[1] = 0; data[2] = 0; data[3] = 0; data[4] = 0; for (i = 0; i < max; i++) { err = niu_zcp_write(np, i, data); if (err) return err; err = niu_zcp_read(np, i, rbuf); if (err) return err; } niu_zcp_cfifo_reset(np); nw64(CFIFO_ECC(np->port), 0); nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); (void) nr64(ZCP_INT_STAT); nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); return 0; } static void niu_ipp_write(struct niu *np, int index, u64 *data) { u64 val = nr64_ipp(IPP_CFIG); nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); nw64_ipp(IPP_DFIFO_WR_PTR, index); nw64_ipp(IPP_DFIFO_WR0, data[0]); nw64_ipp(IPP_DFIFO_WR1, data[1]); nw64_ipp(IPP_DFIFO_WR2, data[2]); nw64_ipp(IPP_DFIFO_WR3, data[3]); nw64_ipp(IPP_DFIFO_WR4, data[4]); nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); } static void niu_ipp_read(struct niu *np, int index, u64 *data) { nw64_ipp(IPP_DFIFO_RD_PTR, index); data[0] = nr64_ipp(IPP_DFIFO_RD0); 
data[1] = nr64_ipp(IPP_DFIFO_RD1); data[2] = nr64_ipp(IPP_DFIFO_RD2); data[3] = nr64_ipp(IPP_DFIFO_RD3); data[4] = nr64_ipp(IPP_DFIFO_RD4); } static int niu_ipp_reset(struct niu *np) { return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, 1000, 100, "IPP_CFIG"); } static int niu_init_ipp(struct niu *np) { u64 data[5], rbuf[5], val; int i, max, err; if (np->parent->plat_type != PLAT_TYPE_NIU) { if (np->port == 0 || np->port == 1) max = ATLAS_P0_P1_DFIFO_ENTRIES; else max = ATLAS_P2_P3_DFIFO_ENTRIES; } else max = NIU_DFIFO_ENTRIES; data[0] = 0; data[1] = 0; data[2] = 0; data[3] = 0; data[4] = 0; for (i = 0; i < max; i++) { niu_ipp_write(np, i, data); niu_ipp_read(np, i, rbuf); } (void) nr64_ipp(IPP_INT_STAT); (void) nr64_ipp(IPP_INT_STAT); err = niu_ipp_reset(np); if (err) return err; (void) nr64_ipp(IPP_PKT_DIS); (void) nr64_ipp(IPP_BAD_CS_CNT); (void) nr64_ipp(IPP_ECC); (void) nr64_ipp(IPP_INT_STAT); nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); val = nr64_ipp(IPP_CFIG); val &= ~IPP_CFIG_IP_MAX_PKT; val |= (IPP_CFIG_IPP_ENABLE | IPP_CFIG_DFIFO_ECC_EN | IPP_CFIG_DROP_BAD_CRC | IPP_CFIG_CKSUM_EN | (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); nw64_ipp(IPP_CFIG, val); return 0; } static void niu_handle_led(struct niu *np, int status) { u64 val; val = nr64_mac(XMAC_CONFIG); if ((np->flags & NIU_FLAGS_10G) != 0 && (np->flags & NIU_FLAGS_FIBER) != 0) { if (status) { val |= XMAC_CONFIG_LED_POLARITY; val &= ~XMAC_CONFIG_FORCE_LED_ON; } else { val |= XMAC_CONFIG_FORCE_LED_ON; val &= ~XMAC_CONFIG_LED_POLARITY; } } nw64_mac(XMAC_CONFIG, val); } static void niu_init_xif_xmac(struct niu *np) { struct niu_link_config *lp = &np->link_config; u64 val; if (np->flags & NIU_FLAGS_XCVR_SERDES) { val = nr64(MIF_CONFIG); val |= MIF_CONFIG_ATCA_GE; nw64(MIF_CONFIG, val); } val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; val |= XMAC_CONFIG_TX_OUTPUT_EN; if (lp->loopback_mode == LOOPBACK_MAC) { val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; val |= XMAC_CONFIG_LOOPBACK; } else { val &= 
~XMAC_CONFIG_LOOPBACK; } if (np->flags & NIU_FLAGS_10G) { val &= ~XMAC_CONFIG_LFS_DISABLE; } else { val |= XMAC_CONFIG_LFS_DISABLE; if (!(np->flags & NIU_FLAGS_FIBER) && !(np->flags & NIU_FLAGS_XCVR_SERDES)) val |= XMAC_CONFIG_1G_PCS_BYPASS; else val &= ~XMAC_CONFIG_1G_PCS_BYPASS; } val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; if (lp->active_speed == SPEED_100) val |= XMAC_CONFIG_SEL_CLK_25MHZ; else val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; nw64_mac(XMAC_CONFIG, val); val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_MODE_MASK; if (np->flags & NIU_FLAGS_10G) { val |= XMAC_CONFIG_MODE_XGMII; } else { if (lp->active_speed == SPEED_1000) val |= XMAC_CONFIG_MODE_GMII; else val |= XMAC_CONFIG_MODE_MII; } nw64_mac(XMAC_CONFIG, val); } static void niu_init_xif_bmac(struct niu *np) { struct niu_link_config *lp = &np->link_config; u64 val; val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; if (lp->loopback_mode == LOOPBACK_MAC) val |= BMAC_XIF_CONFIG_MII_LOOPBACK; else val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; if (lp->active_speed == SPEED_1000) val |= BMAC_XIF_CONFIG_GMII_MODE; else val &= ~BMAC_XIF_CONFIG_GMII_MODE; val &= ~(BMAC_XIF_CONFIG_LINK_LED | BMAC_XIF_CONFIG_LED_POLARITY); if (!(np->flags & NIU_FLAGS_10G) && !(np->flags & NIU_FLAGS_FIBER) && lp->active_speed == SPEED_100) val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; else val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; nw64_mac(BMAC_XIF_CONFIG, val); } static void niu_init_xif(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) niu_init_xif_xmac(np); else niu_init_xif_bmac(np); } static void niu_pcs_mii_reset(struct niu *np) { int limit = 1000; u64 val = nr64_pcs(PCS_MII_CTL); val |= PCS_MII_CTL_RST; nw64_pcs(PCS_MII_CTL, val); while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { udelay(100); val = nr64_pcs(PCS_MII_CTL); } } static void niu_xpcs_reset(struct niu *np) { int limit = 1000; u64 val = nr64_xpcs(XPCS_CONTROL1); val |= XPCS_CONTROL1_RESET; nw64_xpcs(XPCS_CONTROL1, val); while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { udelay(100); val = 
nr64_xpcs(XPCS_CONTROL1); } } static int niu_init_pcs(struct niu *np) { struct niu_link_config *lp = &np->link_config; u64 val; switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_XCVR_SERDES)) { case NIU_FLAGS_FIBER: /* 1G fiber */ nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); nw64_pcs(PCS_DPATH_MODE, 0); niu_pcs_mii_reset(np); break; case NIU_FLAGS_10G: case NIU_FLAGS_10G | NIU_FLAGS_FIBER: case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: /* 10G SERDES */ if (!(np->flags & NIU_FLAGS_XMAC)) return -EINVAL; /* 10G copper or fiber */ val = nr64_mac(XMAC_CONFIG); val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; nw64_mac(XMAC_CONFIG, val); niu_xpcs_reset(np); val = nr64_xpcs(XPCS_CONTROL1); if (lp->loopback_mode == LOOPBACK_PHY) val |= XPCS_CONTROL1_LOOPBACK; else val &= ~XPCS_CONTROL1_LOOPBACK; nw64_xpcs(XPCS_CONTROL1, val); nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); (void) nr64_xpcs(XPCS_SYMERR_CNT01); (void) nr64_xpcs(XPCS_SYMERR_CNT23); break; case NIU_FLAGS_XCVR_SERDES: /* 1G SERDES */ niu_pcs_mii_reset(np); nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); nw64_pcs(PCS_DPATH_MODE, 0); break; case 0: /* 1G copper */ case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: /* 1G RGMII FIBER */ nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); niu_pcs_mii_reset(np); break; default: return -EINVAL; } return 0; } static int niu_reset_tx_xmac(struct niu *np) { return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, (XTXMAC_SW_RST_REG_RS | XTXMAC_SW_RST_SOFT_RST), 1000, 100, "XTXMAC_SW_RST"); } static int niu_reset_tx_bmac(struct niu *np) { int limit; nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); limit = 1000; while (--limit >= 0) { if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) break; udelay(100); } if (limit < 0) { dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n", np->port, (unsigned long long) nr64_mac(BTXMAC_SW_RST)); return -ENODEV; } return 0; } static int niu_reset_tx_mac(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) return 
niu_reset_tx_xmac(np); else return niu_reset_tx_bmac(np); } static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) { u64 val; val = nr64_mac(XMAC_MIN); val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | XMAC_MIN_RX_MIN_PKT_SIZE); val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); nw64_mac(XMAC_MIN, val); nw64_mac(XMAC_MAX, max); nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); val = nr64_mac(XMAC_IPG); if (np->flags & NIU_FLAGS_10G) { val &= ~XMAC_IPG_IPG_XGMII; val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); } else { val &= ~XMAC_IPG_IPG_MII_GMII; val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); } nw64_mac(XMAC_IPG, val); val = nr64_mac(XMAC_CONFIG); val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | XMAC_CONFIG_STRETCH_MODE | XMAC_CONFIG_VAR_MIN_IPG_EN | XMAC_CONFIG_TX_ENABLE); nw64_mac(XMAC_CONFIG, val); nw64_mac(TXMAC_FRM_CNT, 0); nw64_mac(TXMAC_BYTE_CNT, 0); } static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) { u64 val; nw64_mac(BMAC_MIN_FRAME, min); nw64_mac(BMAC_MAX_FRAME, max); nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); nw64_mac(BMAC_CTRL_TYPE, 0x8808); nw64_mac(BMAC_PREAMBLE_SIZE, 7); val = nr64_mac(BTXMAC_CONFIG); val &= ~(BTXMAC_CONFIG_FCS_DISABLE | BTXMAC_CONFIG_ENABLE); nw64_mac(BTXMAC_CONFIG, val); } static void niu_init_tx_mac(struct niu *np) { u64 min, max; min = 64; if (np->dev->mtu > ETH_DATA_LEN) max = 9216; else max = 1522; /* The XMAC_MIN register only accepts values for TX min which * have the low 3 bits cleared. 
*/ BUG_ON(min & 0x7); if (np->flags & NIU_FLAGS_XMAC) niu_init_tx_xmac(np, min, max); else niu_init_tx_bmac(np, min, max); } static int niu_reset_rx_xmac(struct niu *np) { int limit; nw64_mac(XRXMAC_SW_RST, XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); limit = 1000; while (--limit >= 0) { if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST))) break; udelay(100); } if (limit < 0) { dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n", np->port, (unsigned long long) nr64_mac(XRXMAC_SW_RST)); return -ENODEV; } return 0; } static int niu_reset_rx_bmac(struct niu *np) { int limit; nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); limit = 1000; while (--limit >= 0) { if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) break; udelay(100); } if (limit < 0) { dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n", np->port, (unsigned long long) nr64_mac(BRXMAC_SW_RST)); return -ENODEV; } return 0; } static int niu_reset_rx_mac(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) return niu_reset_rx_xmac(np); else return niu_reset_rx_bmac(np); } static void niu_init_rx_xmac(struct niu *np) { struct niu_parent *parent = np->parent; struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; int first_rdc_table = tp->first_table_num; unsigned long i; u64 val; nw64_mac(XMAC_ADD_FILT0, 0); nw64_mac(XMAC_ADD_FILT1, 0); nw64_mac(XMAC_ADD_FILT2, 0); nw64_mac(XMAC_ADD_FILT12_MASK, 0); nw64_mac(XMAC_ADD_FILT00_MASK, 0); for (i = 0; i < MAC_NUM_HASH; i++) nw64_mac(XMAC_HASH_TBL(i), 0); nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); val = nr64_mac(XMAC_CONFIG); val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | XMAC_CONFIG_PROMISCUOUS | XMAC_CONFIG_PROMISC_GROUP | XMAC_CONFIG_ERR_CHK_DIS | XMAC_CONFIG_RX_CRC_CHK_DIS | XMAC_CONFIG_RESERVED_MULTICAST | XMAC_CONFIG_RX_CODEV_CHK_DIS | XMAC_CONFIG_ADDR_FILTER_EN | 
		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
		 XMAC_CONFIG_STRIP_CRC |
		 XMAC_CONFIG_PASS_FLOW_CTRL |
		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
	val |= (XMAC_CONFIG_HASH_FILTER_EN);
	nw64_mac(XMAC_CONFIG, val);

	/* Clear all RX MAC statistics counters. */
	nw64_mac(RXMAC_BT_CNT, 0);
	nw64_mac(RXMAC_BC_FRM_CNT, 0);
	nw64_mac(RXMAC_MC_FRM_CNT, 0);
	nw64_mac(RXMAC_FRAG_CNT, 0);
	nw64_mac(RXMAC_HIST_CNT1, 0);
	nw64_mac(RXMAC_HIST_CNT2, 0);
	nw64_mac(RXMAC_HIST_CNT3, 0);
	nw64_mac(RXMAC_HIST_CNT4, 0);
	nw64_mac(RXMAC_HIST_CNT5, 0);
	nw64_mac(RXMAC_HIST_CNT6, 0);
	nw64_mac(RXMAC_HIST_CNT7, 0);
	nw64_mac(RXMAC_MPSZER_CNT, 0);
	nw64_mac(RXMAC_CRC_ER_CNT, 0);
	nw64_mac(RXMAC_CD_VIO_CNT, 0);
	nw64_mac(LINK_FAULT_CNT, 0);
}

/* BMAC variant of RX init: clear filters/hash table, bind RDC tables,
 * configure RX (left disabled, hash filtering on), and enable compare
 * on the primary MAC address slot.
 */
static void niu_init_rx_bmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(BMAC_ADD_FILT0, 0);
	nw64_mac(BMAC_ADD_FILT1, 0);
	nw64_mac(BMAC_ADD_FILT2, 0);
	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(BMAC_HASH_TBL(i), 0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);

	val = nr64_mac(BRXMAC_CONFIG);
	val &= ~(BRXMAC_CONFIG_ENABLE |
		 BRXMAC_CONFIG_STRIP_PAD |
		 BRXMAC_CONFIG_STRIP_FCS |
		 BRXMAC_CONFIG_PROMISC |
		 BRXMAC_CONFIG_PROMISC_GRP |
		 BRXMAC_CONFIG_ADDR_FILT_EN |
		 BRXMAC_CONFIG_DISCARD_DIS);
	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
	nw64_mac(BRXMAC_CONFIG, val);

	val = nr64_mac(BMAC_ADDR_CMPEN);
	val |= BMAC_ADDR_CMPEN_EN0;
	nw64_mac(BMAC_ADDR_CMPEN, val);
}

/* Program the primary MAC address and initialize whichever RX MAC
 * flavor this port has.
 */
static void niu_init_rx_mac(struct niu *np)
{
	niu_set_primary_mac(np, np->dev->dev_addr);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_rx_xmac(np);
	else
		niu_init_rx_bmac(np);
}

/* Set or clear the XMAC TX enable bit. */
static void niu_enable_tx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	if (on)
		val |= XMAC_CONFIG_TX_ENABLE;
	else
		val &= ~XMAC_CONFIG_TX_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

/* Set or clear the BMAC TX enable bit. */
static void niu_enable_tx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BTXMAC_CONFIG);

	if (on)
		val |= BTXMAC_CONFIG_ENABLE;
	else
		val &= ~BTXMAC_CONFIG_ENABLE;
	nw64_mac(BTXMAC_CONFIG, val);
}

/* Dispatch TX enable/disable to the XMAC or BMAC implementation. */
static void niu_enable_tx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_tx_xmac(np, on);
	else
		niu_enable_tx_bmac(np, on);
}

/* Enable/disable XMAC RX, refreshing the hash-filter and promiscuous
 * bits from the software NIU_FLAGS_* each time.
 */
static void niu_enable_rx_xmac(struct niu *np, int on)
{
	u64 val = nr64_mac(XMAC_CONFIG);

	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
		 XMAC_CONFIG_PROMISCUOUS);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= XMAC_CONFIG_HASH_FILTER_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= XMAC_CONFIG_PROMISCUOUS;

	if (on)
		val |= XMAC_CONFIG_RX_MAC_ENABLE;
	else
		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
	nw64_mac(XMAC_CONFIG, val);
}

/* BMAC counterpart of niu_enable_rx_xmac(). */
static void niu_enable_rx_bmac(struct niu *np, int on)
{
	u64 val = nr64_mac(BRXMAC_CONFIG);

	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
		 BRXMAC_CONFIG_PROMISC);

	if (np->flags & NIU_FLAGS_MCAST)
		val |= BRXMAC_CONFIG_HASH_FILT_EN;
	if (np->flags & NIU_FLAGS_PROMISC)
		val |= BRXMAC_CONFIG_PROMISC;

	if (on)
		val |= BRXMAC_CONFIG_ENABLE;
	else
		val &= ~BRXMAC_CONFIG_ENABLE;
	nw64_mac(BRXMAC_CONFIG, val);
}

/* Dispatch RX enable/disable to the XMAC or BMAC implementation. */
static void niu_enable_rx_mac(struct niu *np, int on)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_enable_rx_xmac(np, on);
	else
		niu_enable_rx_bmac(np, on);
}

/* Full MAC bring-up: XIF, PCS, TX reset + init, RX reset + init,
 * finishing with both directions enabled.  Returns 0 or a negative
 * errno from the PCS/reset steps.
 */
static int niu_init_mac(struct niu *np)
{
	int err;

	niu_init_xif(np);
	err = niu_init_pcs(np);
	if (err)
		return err;

	err = niu_reset_tx_mac(np);
	if (err)
		return err;
	niu_init_tx_mac(np);
	err = niu_reset_rx_mac(np);
	if (err)
		return err;
	niu_init_rx_mac(np);

	/* This looks hookey but the RX MAC reset we just did will
	 * undo some of the state we setup in niu_init_tx_mac() so we
	 * have to call it again.  In particular, the RX MAC reset will
	 * set the XMAC_MAX register back to it's default value.
	 */
	niu_init_tx_mac(np);
	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);

	return 0;
}

/* Stop one TX channel; the result is deliberately ignored. */
static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_stop(np, rp->tx_channel);
}

/* Stop every TX channel on this port. */
static void niu_stop_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_stop_one_tx_channel(np, rp);
	}
}

/* Reset one TX channel; the result is deliberately ignored. */
static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_reset(np, rp->tx_channel);
}

/* Reset every TX channel on this port. */
static void niu_reset_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_reset_one_tx_channel(np, rp);
	}
}

/* Disable one RX channel; the result is deliberately ignored. */
static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
}

/* Disable every RX channel on this port. */
static void niu_stop_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_stop_one_rx_channel(np, rp);
	}
}

/* Fully reset one RX channel: hardware reset, mask all its events,
 * clear its control/status register, and leave it disabled.
 */
static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int channel = rp->rx_channel;

	(void) niu_rx_channel_reset(np, channel);
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
	nw64(RX_DMA_CTL_STAT(channel), 0);
	(void) niu_enable_rx_channel(np, channel, 0);
}

/* Reset every RX channel on this port. */
static void niu_reset_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_reset_one_rx_channel(np, rp);
	}
}

/* Quiesce and disable the IPP block: poll (bounded) for the DFIFO
 * read/write pointers to converge, warn if they never do, then clear
 * the enable bits and reset the unit.
 */
static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &=
	       ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}

/* Bring up the whole datapath in dependency order: TXC, TX channels,
 * RX channels, classifier, ZCP, IPP, MAC.  On failure, stages already
 * initialized are torn down in reverse via the goto ladder.
 * NOTE(review): a failing niu_init_one_tx_channel() returns directly
 * without unwinding earlier TX channels -- confirm intended.
 */
static int niu_init_hw(struct niu *np)
{
	int i, err;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
	niu_disable_ipp(np);

out_uninit_rx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}

/* Quiesce the hardware: interrupts off, RX MAC off, IPP disabled,
 * then stop and reset all TX and RX channels.
 */
static void niu_stop_hw(struct niu *np)
{
	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
	niu_enable_interrupts(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
	niu_enable_rx_mac(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
	niu_disable_ipp(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
	niu_stop_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
	niu_stop_rx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
	niu_reset_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
	niu_reset_rx_channels(np);
}

/* Build per-LDG IRQ names.  Port 0 also owns the MAC/MIF/SYSERR
 * vectors; remaining vectors are named after their RX or TX ring.
 */
static void niu_set_irq_name(struct niu *np)
{
	int port = np->port;
	int i, j = 1;

	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);

	if (port == 0) {
		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
		j = 3;
	}

	for (i = 0; i < np->num_ldg - j; i++) {
		if (i < np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-rx-%d",
				np->dev->name, i);
		else if (i < np->num_tx_rings + np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
				i - np->num_rx_rings);
	}
}

/* Request one shared IRQ per logical device group, unwinding the
 * already-acquired ones on failure.
 */
static int niu_request_irq(struct niu *np)
{
	int i, j, err;

	niu_set_irq_name(np);

	err = 0;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
				  np->irq_name[i], lp);
		if (err)
			goto out_free_irqs;

	}

	return 0;

out_free_irqs:
	for (j = 0; j < i; j++) {
		struct niu_ldg *lp = &np->ldg[j];

		free_irq(lp->irq, lp);
	}
	return err;
}

/* Release every per-LDG IRQ. */
static void niu_free_irq(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		free_irq(lp->irq, lp);
	}
}

/* Enable NAPI polling on every logical device group. */
static void niu_enable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_enable(&np->ldg[i].napi);
}

/* Disable NAPI polling on every logical device group. */
static void niu_disable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_disable(&np->ldg[i].napi);
}

/* ndo_open: allocate rings, hook up IRQs and NAPI, initialize the
 * hardware under np->lock, then start the TX queues and the 1 Hz
 * maintenance timer.
 */
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_disable_napi(np);
		goto out_free_irq;
	}

	netif_tx_start_all_queues(dev);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_irq:
	niu_free_irq(np);

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}

/* Common shutdown path for close and MTU change: cancel the reset
 * worker, stop NAPI/queues/timer, and quiesce hardware under the lock.
 */
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}

/* ndo_stop: full shutdown plus IRQ/channel teardown and LED off. */
static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);

	niu_free_irq(np);

	niu_free_channels(np);

	niu_handle_led(np, 0);

	return 0;
}

/* Accumulate the XMAC hardware counters into the software copy.
 * NOTE(review): accumulation with += assumes the hardware counters
 * are cleared on read (or reset elsewhere) -- confirm against the
 * register spec.
 */
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}

/* Accumulate the BMAC hardware counters into the software copy. */
static void
niu_sync_bmac_stats(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;

	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);

	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	/* NOTE(review): rx_crc_errors also reads BRXMAC_ALIGN_ERR_CNT;
	 * a CRC-error counter register looks intended -- confirm.
	 */
	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
}

/* Sync whichever MAC flavor's counters this port has. */
static void niu_sync_mac_stats(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_sync_xmac_stats(np);
	else
		niu_sync_bmac_stats(np);
}

/* Sum RX counters across all rings into @stats.  The ring array may
 * be torn down concurrently, hence the ACCESS_ONCE snapshot and NULL
 * check before iterating.
 */
static void niu_get_rx_stats(struct niu *np,
			     struct rtnl_link_stats64 *stats)
{
	u64 pkts, dropped, errors, bytes;
	struct rx_ring_info *rx_rings;
	int i;

	pkts = dropped = errors = bytes = 0;

	rx_rings = ACCESS_ONCE(np->rx_rings);
	if (!rx_rings)
		goto no_rings;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &rx_rings[i];

		niu_sync_rx_discard_stats(np, rp, 0);

		pkts += rp->rx_packets;
		bytes += rp->rx_bytes;
		dropped += rp->rx_dropped;
		errors += rp->rx_errors;
	}

no_rings:
	stats->rx_packets = pkts;
	stats->rx_bytes = bytes;
	stats->rx_dropped = dropped;
	stats->rx_errors = errors;
}

/* Sum TX counters across all rings into @stats; same snapshot scheme
 * as niu_get_rx_stats().
 */
static void niu_get_tx_stats(struct niu *np,
			     struct rtnl_link_stats64 *stats)
{
	u64 pkts, errors, bytes;
	struct tx_ring_info *tx_rings;
	int i;

	pkts = errors = bytes = 0;

	tx_rings = ACCESS_ONCE(np->tx_rings);
	if (!tx_rings)
		goto no_rings;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &tx_rings[i];

		pkts += rp->tx_packets;
		bytes += rp->tx_bytes;
		errors += rp->tx_errors;
	}

no_rings:
	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_errors = errors;
}

/* ndo_get_stats64: fill @stats only while the interface is running. */
static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *stats)
{
	struct niu *np = netdev_priv(dev);

	if (netif_running(dev)) {
		niu_get_rx_stats(np, stats);
		niu_get_tx_stats(np, stats);
	}

	return stats;
}

/* Write the 16-word multicast hash table to the XMAC. */
static void niu_load_hash_xmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
}

/* Write the 16-word multicast hash table to the BMAC. */
static void niu_load_hash_bmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
}

/* Dispatch the hash-table load to the XMAC or BMAC implementation. */
static void niu_load_hash(struct niu *np, u16 *hash)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_load_hash_xmac(np, hash);
	else
		niu_load_hash_bmac(np, hash);
}

/* ndo_set_rx_mode: recompute the promiscuous/multicast flags, program
 * the alternate unicast MAC slots (falling back to promiscuous mode
 * when there are more unicast addresses than slots), build the
 * 16-word multicast hash, and re-enable RX with the new settings.
 */
static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct netdev_hw_addr *ha;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
		np->flags |= NIU_FLAGS_MCAST;

	alt_cnt = netdev_uc_count(dev);
	if (alt_cnt > niu_num_alt_addr(np)) {
		alt_cnt = 0;
		np->flags |= NIU_FLAGS_PROMISC;
	}

	if (alt_cnt) {
		int index = 0;

		netdev_for_each_uc_addr(ha, dev) {
			err = niu_set_alt_mac(np, index, ha->addr);
			if (err)
				netdev_warn(dev, "Error %d adding alt mac %d\n",
					    err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				netdev_warn(dev, "Error %d enabling alt mac %d\n",
					    err, index);

			index++;
		}
	} else {
		int alt_start;
		if (np->flags & NIU_FLAGS_XMAC)
			alt_start = 0;
		else
			alt_start = 1;
		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				netdev_warn(dev, "Error %d disabling alt mac %d\n",
					    err, i);
		}
	}
	if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
	} else if (!netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev) {
			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);

			/* Top 8 CRC bits select one bit of the
			 * 16x16 hash table.
			 */
			crc >>= 24;
			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}

/* ndo_set_mac_address: validate and store the address; if the device
 * is running, push it to hardware with RX briefly disabled.
 */
static int niu_set_mac_addr(struct net_device *dev, void *p)
{
	struct niu *np = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev))
		return 0;

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

/* ndo_do_ioctl: no private ioctls supported. */
static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

/* Halt packet processing: refresh trans_start so the watchdog does
 * not fire, then turn off NAPI and the TX queues.
 */
static void niu_netif_stop(struct niu *np)
{
	np->dev->trans_start = jiffies;	/* prevent tx timeout */

	niu_disable_napi(np);

	netif_tx_disable(np->dev);
}

/* Restart packet processing after a reset. */
static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}

/* Rebuild ring state after a reset: re-link still-allocated RX pages
 * back into the RBR, top the RBR up with fresh pages, and release any
 * TX skbs that were in flight.
 */
static void niu_reset_buffers(struct niu *np)
{
	int i, j, k, err;

	if (np->rx_rings) {
		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
				struct page *page;

				page = rp->rxhash[j];
				while (page) {
					struct page *next =
						(struct page *) page->mapping;
					u64 base = page->index;

					base = base >> RBR_DESCR_ADDR_SHIFT;
					rp->rbr[k++] = cpu_to_le32(base);
					page = next;
				}
			}
			for (; k < MAX_RBR_RING_SIZE; k++) {
				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
				if (unlikely(err))
					break;
			}

			rp->rbr_index = rp->rbr_table_size - 1;
			rp->rcr_index = 0;
			rp->rbr_pending = 0;
			rp->rbr_refill_pending = 0;
		}
	}
	if (np->tx_rings) {
		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
				if (rp->tx_buffs[j].skb)
					(void) release_tx_packet(np, rp, j);
			}

			rp->pending = MAX_TX_RING_SIZE;
			rp->prod = 0;
			rp->cons = 0;
			rp->wrap_bit = 0;
		}
	}
}

/* Deferred reset worker: stop the NIC, rebuild the ring buffers, and
 * re-run the full hardware init.  Bails out early if the interface
 * was brought down in the meantime.
 */
static void niu_reset_task(struct work_struct *work)
{
	struct niu *np = container_of(work, struct niu, reset_task);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);
	if (!netif_running(np->dev)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	del_timer_sync(&np->timer);

	niu_netif_stop(np);

	spin_lock_irqsave(&np->lock, flags);

	niu_stop_hw(np);

	spin_unlock_irqrestore(&np->lock, flags);

	/* Ring rebuild must happen with the hardware stopped but
	 * outside the spinlock (page allocation may sleep/atomic-alloc).
	 */
	niu_reset_buffers(np);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);
}

/* ndo_tx_timeout: log the stall and schedule the reset worker. */
static void niu_tx_timeout(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	dev_err(np->device, "%s: Transmit timed out, resetting\n",
		dev->name);

	schedule_work(&np->reset_task);
}

/* Fill one TX descriptor: SOP/mark bits, descriptor count, transfer
 * length, and DMA address.
 */
static void niu_set_txd(struct tx_ring_info *rp, int index,
			u64 mapping, u64 len, u64 mark,
			u64 n_frags)
{
	__le64 *desc = &rp->descr[index];

	*desc = cpu_to_le64(mark |
			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
			    (len << TX_DESC_TR_LEN_SHIFT) |
			    (mapping & TX_DESC_SAD));
}

/* Build the 64-bit NIU TX packet-header flags word: pad/length
 * fields, L3/L4 half-word offsets, checksum-offload bits, and the
 * LLC/VLAN/IPv6 markers.  Offsets are relative to the start of the
 * payload, i.e. after the pad bytes and the tx_pkt_hdr itself.
 */
static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
				u64 pad_bytes, u64 len)
{
	u16 eth_proto, eth_proto_inner;
	u64 csum_bits, l3off, ihl, ret;
	u8 ip_proto;
	int ipv6;

	eth_proto = be16_to_cpu(ehdr->h_proto);
	eth_proto_inner = eth_proto;
	if (eth_proto == ETH_P_8021Q) {
		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
		__be16 val = vp->h_vlan_encapsulated_proto;

		eth_proto_inner = be16_to_cpu(val);
	}

	ipv6 = ihl = 0;
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		ihl = ip_hdr(skb)->ihl;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		/* IPv6 has a fixed 40-byte header; IHL is in words. */
		ihl = (40 >> 2);
		ipv6 = 1;
		break;
	default:
		ip_proto = ihl = 0;
		break;
	}

	csum_bits = TXHDR_CSUM_NONE;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u64 start, stuff;

		csum_bits = (ip_proto == IPPROTO_TCP ?
			     TXHDR_CSUM_TCP :
			     (ip_proto == IPPROTO_UDP ?
			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));

		start = skb_checksum_start_offset(skb) -
			(pad_bytes + sizeof(struct tx_pkt_hdr));
		stuff = start + skb->csum_offset;

		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
	}

	l3off = skb_network_offset(skb) -
		(pad_bytes + sizeof(struct tx_pkt_hdr));

	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
	       (len << TXHDR_LEN_SHIFT) |
	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
	       (ihl << TXHDR_IHL_SHIFT) |
	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
	       (ipv6 ? TXHDR_IP_VER : 0) |
	       csum_bits);

	return ret;
}

/* ndo_start_xmit: pad short frames, make headroom for the NIU packet
 * header, DMA-map the head and the page fragments, write the TX
 * descriptors, and kick the ring.  Stops the queue when descriptor
 * space runs low (re-waking if enough freed up in the meantime).
 */
static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	unsigned long align, headroom;
	struct netdev_queue *txq;
	struct tx_ring_info *rp;
	struct tx_pkt_hdr *tp;
	unsigned int len, nfg;
	struct ethhdr *ehdr;
	int prod, i, tlen;
	u64 mapping, mrk;

	i = skb_get_queue_mapping(skb);
	rp = &np->tx_rings[i];
	txq = netdev_get_tx_queue(dev, i);

	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_tx_stop_queue(txq);
		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
		rp->tx_errors++;
		return NETDEV_TX_BUSY;
	}

	if (skb->len < ETH_ZLEN) {
		unsigned int pad_bytes = ETH_ZLEN - skb->len;

		if (skb_pad(skb, pad_bytes))
			goto out;
		skb_put(skb, pad_bytes);
	}

	/* Worst-case headroom: packet header plus up to 15 alignment
	 * pad bytes.
	 */
	len = sizeof(struct tx_pkt_hdr) + 15;
	if (skb_headroom(skb) < len) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, len);
		if (!skb_new) {
			rp->tx_errors++;
			goto out_drop;
		}
		kfree_skb(skb);
		skb = skb_new;
	} else
		skb_orphan(skb);

	align = ((unsigned long) skb->data & (16 - 1));
	headroom = align + sizeof(struct tx_pkt_hdr);

	ehdr = (struct ethhdr *) skb->data;
	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);

	len = skb->len - sizeof(struct tx_pkt_hdr);
	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
	tp->resv = 0;

	len = skb_headlen(skb);
	mapping = np->ops->map_single(np->device, skb->data,
				      len, DMA_TO_DEVICE);

	prod = rp->prod;

	rp->tx_buffs[prod].skb = skb;
	rp->tx_buffs[prod].mapping = mapping;

	mrk = TX_DESC_SOP;
	if (++rp->mark_counter == rp->mark_freq) {
		rp->mark_counter = 0;
		mrk |= TX_DESC_MARK;
		rp->mark_pending++;
	}

	/* Total descriptor count: one per fragment plus one per
	 * MAX_TX_DESC_LEN chunk of the linear head.
	 */
	tlen = len;
	nfg = skb_shinfo(skb)->nr_frags;
	while (tlen > 0) {
		tlen -= MAX_TX_DESC_LEN;
		nfg++;
	}

	while (len > 0) {
		unsigned int this_len = len;

		if (this_len > MAX_TX_DESC_LEN)
			this_len = MAX_TX_DESC_LEN;

		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
		mrk = nfg = 0;

		prod = NEXT_TX(rp, prod);
		mapping += this_len;
		len -= this_len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
					    frag->page_offset, len,
					    DMA_TO_DEVICE);

		rp->tx_buffs[prod].skb = NULL;
		rp->tx_buffs[prod].mapping = mapping;

		niu_set_txd(rp, prod, mapping, len, 0, 0);

		prod = NEXT_TX(rp, prod);
	}

	if (prod < rp->prod)
		rp->wrap_bit ^= TX_RING_KICK_WRAP;
	rp->prod = prod;

	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));

	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);
		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
			netif_tx_wake_queue(txq);
	}

out:
	return NETDEV_TX_OK;

out_drop:
	rp->tx_errors++;
	kfree_skb(skb);
	goto out;
}

/* ndo_change_mtu: reject out-of-range values; when crossing the jumbo
 * threshold on a running interface, fully restart the device so the
 * ring buffers get re-sized.
 */
static int niu_change_mtu(struct net_device *dev, int new_mtu)
{
	struct niu *np = netdev_priv(dev);
	int err, orig_jumbo, new_jumbo;

	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
		return -EINVAL;

	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
	new_jumbo = (new_mtu > ETH_DATA_LEN);

	dev->mtu = new_mtu;

	if (!netif_running(dev) ||
	    (orig_jumbo == new_jumbo))
		return 0;

	niu_full_shutdown(np, dev);

	niu_free_channels(np);

	niu_enable_napi(np);

	err = niu_alloc_channels(np);
	if (err)
		return err;

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (!err) {
		netif_tx_start_all_queues(dev);
		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
			netif_carrier_on(dev);

		add_timer(&np->timer);
	}

	return err;
}

/* ethtool get_drvinfo: driver/version strings, VPD firmware revision,
 * and the PCI bus id (not on PLAT_TYPE_NIU, which has no pdev here).
 */
static void niu_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct niu *np = netdev_priv(dev);
	struct niu_vpd *vpd = &np->vpd;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
		vpd->fcode_major, vpd->fcode_minor);
	if (np->parent->plat_type != PLAT_TYPE_NIU)
		strlcpy(info->bus_info, pci_name(np->pdev),
			sizeof(info->bus_info));
}

/* ethtool get_settings: report the active link configuration. */
static int niu_get_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp;

	lp = &np->link_config;

	memset(cmd, 0, sizeof(*cmd));
	cmd->phy_address = np->phy_addr;
	cmd->supported = lp->supported;
	cmd->advertising = lp->active_advertising;
	cmd->autoneg = lp->active_autoneg;
	ethtool_cmd_speed_set(cmd, lp->active_speed);
	cmd->duplex =
		lp->active_duplex;
	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
		XCVR_EXTERNAL : XCVR_INTERNAL;

	return 0;
}

/* ethtool set_settings: store the requested link parameters and
 * re-run link initialization.
 */
static int niu_set_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = cmd->advertising;
	lp->speed = ethtool_cmd_speed(cmd);
	lp->duplex = cmd->duplex;
	lp->autoneg = cmd->autoneg;
	return niu_init_link(np);
}

/* ethtool get_msglevel. */
static u32 niu_get_msglevel(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	return np->msg_enable;
}

/* ethtool set_msglevel. */
static void niu_set_msglevel(struct net_device *dev, u32 value)
{
	struct niu *np = netdev_priv(dev);
	np->msg_enable = value;
}

/* ethtool nway_reset: restart the link only when autoneg is on. */
static int niu_nway_reset(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	if (np->link_config.autoneg)
		return niu_init_link(np);

	return 0;
}

/* ethtool get_eeprom_len. */
static int niu_get_eeprom_len(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	return np->eeprom_len;
}

/* ethtool get_eeprom: copy the requested window out of the 32-bit
 * ESPC registers, handling an unaligned leading chunk, whole words,
 * and a trailing partial word.  The requested length is clamped to
 * the EEPROM size (and written back into eeprom->len).
 */
static int niu_get_eeprom(struct net_device *dev,
			  struct ethtool_eeprom *eeprom, u8 *data)
{
	struct niu *np = netdev_priv(dev);
	u32 offset, len, val;

	offset = eeprom->offset;
	len = eeprom->len;

	/* Reject offset+len wrap-around. */
	if (offset + len < offset)
		return -EINVAL;
	if (offset >= np->eeprom_len)
		return -EINVAL;
	if (offset + len > np->eeprom_len)
		len = eeprom->len = np->eeprom_len - offset;

	if (offset & 3) {
		u32 b_offset, b_count;

		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len)
			b_count = len;

		val = nr64(ESPC_NCR((offset - b_offset) / 4));
		memcpy(data, ((char *)&val) + b_offset, b_count);
		data += b_count;
		len -= b_count;
		offset += b_count;
	}
	while (len >= 4) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, 4);
		data += 4;
		len -= 4;
		offset += 4;
	}
	if (len) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, len);
	}
	return 0;
}

/* Map an ethtool flow type to its IP protocol number (0 if none). */
static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		*pid = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		*pid = IPPROTO_UDP;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		*pid = IPPROTO_SCTP;
		break;
	case AH_V4_FLOW:
	case AH_V6_FLOW:
		*pid = IPPROTO_AH;
		break;
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		*pid = IPPROTO_ESP;
		break;
	default:
		*pid = 0;
		break;
	}
}

/* Map a NIU class code to an ethtool flow type.  Returns 1 on
 * success, 0 for an unknown class.
 */
static int niu_class_to_ethflow(u64 class, int *flow_type)
{
	switch (class) {
	case CLASS_CODE_TCP_IPV4:
		*flow_type = TCP_V4_FLOW;
		break;
	case CLASS_CODE_UDP_IPV4:
		*flow_type = UDP_V4_FLOW;
		break;
	case CLASS_CODE_AH_ESP_IPV4:
		*flow_type = AH_V4_FLOW;
		break;
	case CLASS_CODE_SCTP_IPV4:
		*flow_type = SCTP_V4_FLOW;
		break;
	case CLASS_CODE_TCP_IPV6:
		*flow_type = TCP_V6_FLOW;
		break;
	case CLASS_CODE_UDP_IPV6:
		*flow_type = UDP_V6_FLOW;
		break;
	case CLASS_CODE_AH_ESP_IPV6:
		*flow_type = AH_V6_FLOW;
		break;
	case CLASS_CODE_SCTP_IPV6:
		*flow_type = SCTP_V6_FLOW;
		break;
	case CLASS_CODE_USER_PROG1:
	case CLASS_CODE_USER_PROG2:
	case CLASS_CODE_USER_PROG3:
	case CLASS_CODE_USER_PROG4:
		*flow_type = IP_USER_FLOW;
		break;
	default:
		return 0;
	}

	return 1;
}

/* Inverse of niu_class_to_ethflow().  Returns 1 on success, 0 for an
 * unsupported flow type.
 */
static int niu_ethflow_to_class(int flow_type, u64 *class)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*class = CLASS_CODE_TCP_IPV4;
		break;
	case UDP_V4_FLOW:
		*class = CLASS_CODE_UDP_IPV4;
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		*class = CLASS_CODE_AH_ESP_IPV4;
		break;
	case SCTP_V4_FLOW:
		*class = CLASS_CODE_SCTP_IPV4;
		break;
	case TCP_V6_FLOW:
		*class = CLASS_CODE_TCP_IPV6;
		break;
	case UDP_V6_FLOW:
		*class = CLASS_CODE_UDP_IPV6;
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		*class = CLASS_CODE_AH_ESP_IPV6;
		break;
	case SCTP_V6_FLOW:
		*class = CLASS_CODE_SCTP_IPV6;
		break;
	default:
		return 0;
	}

	return 1;
}

/* Translate hardware flow-key bits into ethtool RXH_* hash fields. */
static u64 niu_flowkey_to_ethflow(u64 flow_key)
{
	u64 ethflow = 0;

	if (flow_key & FLOW_KEY_L2DA)
		ethflow |= RXH_L2DA;
	if (flow_key & FLOW_KEY_VLAN)
		ethflow |= RXH_VLAN;
	if (flow_key & FLOW_KEY_IPSA)
		ethflow |= RXH_IP_SRC;
	if (flow_key & FLOW_KEY_IPDA)
		ethflow |= RXH_IP_DST;
	if (flow_key & FLOW_KEY_PROTO)
		ethflow |= RXH_L3_PROTO;
	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
		ethflow |= RXH_L4_B_0_1;
	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
		ethflow |= RXH_L4_B_2_3;

	return ethflow;

}

/* Translate ethtool RXH_* hash fields into hardware flow-key bits;
 * always succeeds (returns 1).
 */
static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
{
	u64 key = 0;

	if (ethflow & RXH_L2DA)
		key |= FLOW_KEY_L2DA;
	if (ethflow & RXH_VLAN)
		key |= FLOW_KEY_VLAN;
	if (ethflow & RXH_IP_SRC)
		key |= FLOW_KEY_IPSA;
	if (ethflow & RXH_IP_DST)
		key |= FLOW_KEY_IPDA;
	if (ethflow & RXH_L3_PROTO)
		key |= FLOW_KEY_PROTO;
	if (ethflow & RXH_L4_B_0_1)
		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
	if (ethflow & RXH_L4_B_2_3)
		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);

	*flow_key = key;

	return 1;

}

/* ETHTOOL_GRXFH: report the RX hash fields (or RXH_DISCARD) for the
 * class corresponding to nfc->flow_type.
 */
static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
{
	u64 class;

	nfc->data = 0;

	if (!niu_ethflow_to_class(nfc->flow_type, &class))
		return -EINVAL;

	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
	    TCAM_KEY_DISC)
		nfc->data = RXH_DISCARD;
	else
		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
						      CLASS_CODE_USER_PROG1]);
	return 0;
}

/* Decode a TCAM IPv4 entry into an ethtool rx_flow_spec: addresses,
 * TOS, and per-flow-type ports / SPI / raw L4 bytes, with matching
 * masks from key_mask[].
 */
static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
					struct ethtool_rx_flow_spec *fsp)
{
	u32 tmp;
	u16 prt;

	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);

	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);

	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);

	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);

	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
		TCAM_V4KEY2_TOS_SHIFT;
	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
		TCAM_V4KEY2_TOS_SHIFT;

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		/* Source port is the high 16 bits of the PORT_SPI
		 * field, destination port the low 16 bits.
		 */
		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
/* Service ETHTOOL_GRXCLSRULE: translate the TCAM entry at nfc->fs.location
 * back into an ethtool_rx_flow_spec so userspace can read the rule.
 *
 * Returns 0 on success, -EINVAL if the entry is invalid, belongs to a
 * flow class we cannot express, or is an IPv6 class (not implemented).
 */
static int niu_get_ethtool_tcam_entry(struct niu *np,
				      struct ethtool_rxnfc *nfc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	u16 idx;
	u64 class;
	int ret = 0;

	/* Map the user-visible rule location onto the per-port TCAM slot. */
	idx = tcam_get_index(np, (u16)nfc->fs.location);

	tp = &parent->tcam[idx];
	if (!tp->valid) {
		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
			    parent->index, (u16)nfc->fs.location, idx);
		return -EINVAL;
	}

	/* fill the flow spec entry */
	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
		TCAM_V4KEY0_CLASS_CODE_SHIFT;
	ret = niu_class_to_ethflow(class, &fsp->flow_type);
	if (ret < 0) {
		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
			    parent->index);
		ret = -EINVAL;
		goto out;
	}

	/* AH and ESP share one hardware class; disambiguate by the IP
	 * protocol byte stored in key word 2.
	 */
	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;
		if (proto == IPPROTO_ESP) {
			if (fsp->flow_type == AH_V4_FLOW)
				fsp->flow_type = ESP_V4_FLOW;
			else
				fsp->flow_type = ESP_V6_FLOW;
		}
	}

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		niu_get_ip4fs_from_tcam_key(tp, fsp);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		/* Not yet implemented */
		ret = -EINVAL;
		break;
	case IP_USER_FLOW:
		niu_get_ip4fs_from_tcam_key(tp, fsp);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret < 0)
		goto out;

	/* Association data encodes either "discard" or a target RDC offset. */
	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
			TCAM_ASSOCDATA_OFFSET_SHIFT;

	/* put the tcam size here */
	nfc->data = tcam_get_size(np);
out:
	return ret;
}
/* Service ETHTOOL_SRXFH: program the RX hash (flow key) options for one
 * flow class.  RXH_DISCARD toggles the per-class TCAM discard bit; any
 * other request rewrites the class's FLOW_KEY register.
 *
 * Returns 0 on success, -EINVAL for an unknown flow type, out-of-range
 * class, or an un-mappable hash field set.
 */
static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
{
	u64 class;
	u64 flow_key = 0;
	unsigned long flags;

	if (!niu_ethflow_to_class(nfc->flow_type, &class))
		return -EINVAL;

	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	if (nfc->data & RXH_DISCARD) {
		/* Set the discard bit in the class's TCAM key and mirror
		 * the new value in the software shadow copy.
		 */
		niu_lock_parent(np, flags);
		flow_key = np->parent->tcam_key[class -
						CLASS_CODE_USER_PROG1];
		flow_key |= TCAM_KEY_DISC;
		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
		niu_unlock_parent(np, flags);
		return 0;
	} else {
		/* Discard was set before, but is not set now */
		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
		    TCAM_KEY_DISC) {
			niu_lock_parent(np, flags);
			flow_key = np->parent->tcam_key[class -
							CLASS_CODE_USER_PROG1];
			flow_key &= ~TCAM_KEY_DISC;
			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
			     flow_key);
			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
				flow_key;
			niu_unlock_parent(np, flags);
		}
	}

	/* Translate the requested RXH_* field mask into hardware flow-key
	 * bits and program the class's FLOW_KEY register.
	 */
	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
		return -EINVAL;

	niu_lock_parent(np, flags);
	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
	niu_unlock_parent(np, flags);

	return 0;
}
/* Service ETHTOOL_SRXCLSRLINS: build and install one TCAM classification
 * rule at nfc->fs.location.
 *
 * For IP_USER_FLOW rules a user-programmable IP class (USER_PROG1..4) is
 * looked up by protocol, or a free slot is programmed and enabled; the
 * slot's refcount tracks how many rules share it.  The key/mask and the
 * association data (discard, or target RX ring offset) are then written
 * to the hardware TCAM.
 *
 * Returns 0 on success, -EINVAL on bad location, unsupported flow type
 * (IPv6 is not implemented), invalid ring, or hardware write failure.
 */
static int niu_add_ethtool_tcam_entry(struct niu *np,
				      struct ethtool_rxnfc *nfc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
	int l2_rdc_table = rdc_table->first_table_num;
	u16 idx;
	u64 class;
	unsigned long flags;
	int err, ret;

	ret = 0;

	idx = nfc->fs.location;
	if (idx >= tcam_get_size(np))
		return -EINVAL;

	if (fsp->flow_type == IP_USER_FLOW) {
		int i;
		int add_usr_cls = 0;
		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;

		if (uspec->ip_ver != ETH_RX_NFC_IP4)
			return -EINVAL;

		niu_lock_parent(np, flags);

		/* Reuse an existing user class with the same protocol,
		 * otherwise program the first free one.
		 */
		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
			if (parent->l3_cls[i]) {
				if (uspec->proto == parent->l3_cls_pid[i]) {
					class = parent->l3_cls[i];
					parent->l3_cls_refcnt[i]++;
					add_usr_cls = 1;
					break;
				}
			} else {
				/* Program new user IP class */
				switch (i) {
				case 0:
					class = CLASS_CODE_USER_PROG1;
					break;
				case 1:
					class = CLASS_CODE_USER_PROG2;
					break;
				case 2:
					class = CLASS_CODE_USER_PROG3;
					break;
				case 3:
					class = CLASS_CODE_USER_PROG4;
					break;
				default:
					break;
				}
				ret = tcam_user_ip_class_set(np, class, 0,
							     uspec->proto,
							     uspec->tos,
							     umask->tos);
				if (ret)
					goto out;

				ret = tcam_user_ip_class_enable(np, class, 1);
				if (ret)
					goto out;
				parent->l3_cls[i] = class;
				parent->l3_cls_pid[i] = uspec->proto;
				parent->l3_cls_refcnt[i]++;
				add_usr_cls = 1;
				break;
			}
		}
		if (!add_usr_cls) {
			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
				    parent->index, __func__, uspec->proto);
			ret = -EINVAL;
			goto out;
		}
		niu_unlock_parent(np, flags);
	} else {
		if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
			return -EINVAL;
		}
	}

	niu_lock_parent(np, flags);

	idx = tcam_get_index(np, idx);
	tp = &parent->tcam[idx];

	memset(tp, 0, sizeof(*tp));

	/* fill in the tcam key and mask */
	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		/* Not yet implemented */
		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
			    parent->index, __func__, fsp->flow_type);
		ret = -EINVAL;
		goto out;
	case IP_USER_FLOW:
		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
		break;
	default:
		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
			    parent->index, __func__, fsp->flow_type);
		ret = -EINVAL;
		goto out;
	}

	/* fill in the assoc data */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		tp->assoc_data = TCAM_ASSOCDATA_DISC;
	} else {
		if (fsp->ring_cookie >= np->num_rx_rings) {
			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
				    parent->index, __func__,
				    (long long)fsp->ring_cookie);
			ret = -EINVAL;
			goto out;
		}
		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
				  (fsp->ring_cookie <<
				   TCAM_ASSOCDATA_OFFSET_SHIFT));
	}

	/* Commit key/mask and association data to the hardware. */
	err = tcam_write(np, idx, tp->key, tp->key_mask);
	if (err) {
		ret = -EINVAL;
		goto out;
	}
	err = tcam_assoc_write(np, idx, tp->assoc_data);
	if (err) {
		ret = -EINVAL;
		goto out;
	}

	/* validate the entry */
	tp->valid = 1;
	np->clas.tcam_valid_entries++;
out:
	niu_unlock_parent(np, flags);

	return ret;
}
cmd->fs.location); break; default: ret = -EINVAL; break; } return ret; } static const struct { const char string[ETH_GSTRING_LEN]; } niu_xmac_stat_keys[] = { { "tx_frames" }, { "tx_bytes" }, { "tx_fifo_errors" }, { "tx_overflow_errors" }, { "tx_max_pkt_size_errors" }, { "tx_underflow_errors" }, { "rx_local_faults" }, { "rx_remote_faults" }, { "rx_link_faults" }, { "rx_align_errors" }, { "rx_frags" }, { "rx_mcasts" }, { "rx_bcasts" }, { "rx_hist_cnt1" }, { "rx_hist_cnt2" }, { "rx_hist_cnt3" }, { "rx_hist_cnt4" }, { "rx_hist_cnt5" }, { "rx_hist_cnt6" }, { "rx_hist_cnt7" }, { "rx_octets" }, { "rx_code_violations" }, { "rx_len_errors" }, { "rx_crc_errors" }, { "rx_underflows" }, { "rx_overflows" }, { "pause_off_state" }, { "pause_on_state" }, { "pause_received" }, }; #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) static const struct { const char string[ETH_GSTRING_LEN]; } niu_bmac_stat_keys[] = { { "tx_underflow_errors" }, { "tx_max_pkt_size_errors" }, { "tx_bytes" }, { "tx_frames" }, { "rx_overflows" }, { "rx_frames" }, { "rx_align_errors" }, { "rx_crc_errors" }, { "rx_len_errors" }, { "pause_off_state" }, { "pause_on_state" }, { "pause_received" }, }; #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) static const struct { const char string[ETH_GSTRING_LEN]; } niu_rxchan_stat_keys[] = { { "rx_channel" }, { "rx_packets" }, { "rx_bytes" }, { "rx_dropped" }, { "rx_errors" }, }; #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) static const struct { const char string[ETH_GSTRING_LEN]; } niu_txchan_stat_keys[] = { { "tx_channel" }, { "tx_packets" }, { "tx_bytes" }, { "tx_errors" }, }; #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct niu *np = netdev_priv(dev); int i; if (stringset != ETH_SS_STATS) return; if (np->flags & NIU_FLAGS_XMAC) { memcpy(data, niu_xmac_stat_keys, sizeof(niu_xmac_stat_keys)); data += sizeof(niu_xmac_stat_keys); } else { 
/* ethtool .get_ethtool_stats: dump MAC statistics followed by per-RX-ring
 * and per-TX-ring counters into @data.  The layout must match the string
 * tables emitted by niu_get_strings() and the count from
 * niu_get_sset_count(): MAC block, then 5 u64s per RX ring, then 4 u64s
 * per TX ring.
 */
static void niu_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct niu *np = netdev_priv(dev);
	int i;

	niu_sync_mac_stats(np);
	/* XMAC and BMAC have different stat blocks; copy whichever this
	 * port uses and advance data by its size in u64 units.
	 */
	if (np->flags & NIU_FLAGS_XMAC) {
		memcpy(data, &np->mac_stats.xmac,
		       sizeof(struct niu_xmac_stats));
		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
	} else {
		memcpy(data, &np->mac_stats.bmac,
		       sizeof(struct niu_bmac_stats));
		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
	}
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_sync_rx_discard_stats(np, rp, 0);

		data[0] = rp->rx_channel;
		data[1] = rp->rx_packets;
		data[2] = rp->rx_bytes;
		data[3] = rp->rx_dropped;
		data[4] = rp->rx_errors;
		data += 5;
	}
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		data[0] = rp->tx_channel;
		data[1] = rp->tx_packets;
		data[2] = rp->tx_bytes;
		data[3] = rp->tx_errors;
		data += 4;
	}
}
u64 val, reg, bit; if (np->flags & NIU_FLAGS_XMAC) { reg = XMAC_CONFIG; bit = XMAC_CONFIG_FORCE_LED_ON; } else { reg = BMAC_XIF_CONFIG; bit = BMAC_XIF_CONFIG_LINK_LED; } val = nr64_mac(reg); if (on) val |= bit; else val &= ~bit; nw64_mac(reg, val); } static int niu_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { struct niu *np = netdev_priv(dev); if (!netif_running(dev)) return -EAGAIN; switch (state) { case ETHTOOL_ID_ACTIVE: np->orig_led_state = niu_led_state_save(np); return 1; /* cycle on/off once per second */ case ETHTOOL_ID_ON: niu_force_led(np, 1); break; case ETHTOOL_ID_OFF: niu_force_led(np, 0); break; case ETHTOOL_ID_INACTIVE: niu_led_state_restore(np, np->orig_led_state); } return 0; } static const struct ethtool_ops niu_ethtool_ops = { .get_drvinfo = niu_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = niu_get_msglevel, .set_msglevel = niu_set_msglevel, .nway_reset = niu_nway_reset, .get_eeprom_len = niu_get_eeprom_len, .get_eeprom = niu_get_eeprom, .get_settings = niu_get_settings, .set_settings = niu_set_settings, .get_strings = niu_get_strings, .get_sset_count = niu_get_sset_count, .get_ethtool_stats = niu_get_ethtool_stats, .set_phys_id = niu_set_phys_id, .get_rxnfc = niu_get_nfc, .set_rxnfc = niu_set_nfc, }; static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, int ldg, int ldn) { if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) return -EINVAL; if (ldn < 0 || ldn > LDN_MAX) return -EINVAL; parent->ldg_map[ldn] = ldg; if (np->parent->plat_type == PLAT_TYPE_NIU) { /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by * the firmware, and we're not supposed to change them. * Validate the mapping, because if it's wrong we probably * won't get any interrupts and that's painful to debug. 
/* Read one byte from the SPROM/EEPROM via the ESPC PIO interface.
 * Caller must have ESPC_PIO_EN_ENABLE set.
 *
 * Returns the byte value (0-255) on success, -EINVAL for an
 * out-of-range address, or -ENODEV on polling timeout.
 */
static int niu_pci_eeprom_read(struct niu *np, u32 addr)
{
	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
	int limit;

	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
		return -EINVAL;

	/* Issue the read and poll up to 64 * 5us for READ_END.
	 * NOTE(review): the read is intentionally issued twice below;
	 * presumably the ESPC hardware requires a second cycle before the
	 * data field is valid -- confirm against the Neptune PRM before
	 * simplifying.
	 */
	frame = frame_base;
	nw64(ESPC_PIO_STAT, frame);
	limit = 64;
	do {
		udelay(5);
		frame = nr64(ESPC_PIO_STAT);
		if (frame & ESPC_PIO_STAT_READ_END)
			break;
	} while (limit--);
	if (!(frame & ESPC_PIO_STAT_READ_END)) {
		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
			(unsigned long long) frame);
		return -ENODEV;
	}

	/* Second issue of the same read cycle (see note above). */
	frame = frame_base;
	nw64(ESPC_PIO_STAT, frame);
	limit = 64;
	do {
		udelay(5);
		frame = nr64(ESPC_PIO_STAT);
		if (frame & ESPC_PIO_STAT_READ_END)
			break;
	} while (limit--);
	if (!(frame & ESPC_PIO_STAT_READ_END)) {
		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
			(unsigned long long) frame);
		return -ENODEV;
	}

	/* Data byte lives in the DATA field of the status register. */
	frame = nr64(ESPC_PIO_STAT);
	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
}
0) return err; val = (err & 0xff); err = niu_pci_eeprom_read(np, off + 1); if (err < 0) return err; val |= (err & 0xff) << 8; return val; } static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf, int namebuf_len) { int i; for (i = 0; i < namebuf_len; i++) { int err = niu_pci_eeprom_read(np, off + i); if (err < 0) return err; *namebuf++ = err; if (!err) break; } if (i >= namebuf_len) return -EINVAL; return i + 1; } static void niu_vpd_parse_version(struct niu *np) { struct niu_vpd *vpd = &np->vpd; int len = strlen(vpd->version) + 1; const char *s = vpd->version; int i; for (i = 0; i < len - 5; i++) { if (!strncmp(s + i, "FCode ", 6)) break; } if (i >= len - 5) return; s += i + 5; sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); netif_printk(np, probe, KERN_DEBUG, np->dev, "VPD_SCAN: FCODE major(%d) minor(%d)\n", vpd->fcode_major, vpd->fcode_minor); if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || (vpd->fcode_major == NIU_VPD_MIN_MAJOR && vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) np->flags |= NIU_FLAGS_VPD_VALID; } /* ESPC_PIO_EN_ENABLE must be set */ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) { unsigned int found_mask = 0; #define FOUND_MASK_MODEL 0x00000001 #define FOUND_MASK_BMODEL 0x00000002 #define FOUND_MASK_VERS 0x00000004 #define FOUND_MASK_MAC 0x00000008 #define FOUND_MASK_NMAC 0x00000010 #define FOUND_MASK_PHY 0x00000020 #define FOUND_MASK_ALL 0x0000003f netif_printk(np, probe, KERN_DEBUG, np->dev, "VPD_SCAN: start[%x] end[%x]\n", start, end); while (start < end) { int len, err, prop_len; char namebuf[64]; u8 *prop_buf; int max_len; if (found_mask == FOUND_MASK_ALL) { niu_vpd_parse_version(np); return 1; } err = niu_pci_eeprom_read(np, start + 2); if (err < 0) return err; len = err; start += 3; prop_len = niu_pci_eeprom_read(np, start + 4); err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); if (err < 0) return err; prop_buf = NULL; max_len = 0; if (!strcmp(namebuf, "model")) { prop_buf = 
/* ESPC_PIO_EN_ENABLE must be set */
/* Walk the VPD area starting at @start, feeding each 0x90-tagged data
 * block to niu_pci_vpd_scan_props().  Stops at the end of the EEPROM,
 * on any malformed/unexpected tag, on a read error, or once the scanner
 * reports it found everything (err == 1).
 */
static void niu_pci_vpd_fetch(struct niu *np, u32 start)
{
	u32 offset;
	int err;

	/* 16-bit byte-swapped length at start+1 locates the first block. */
	err = niu_pci_eeprom_read16_swp(np, start + 1);
	if (err < 0)
		return;

	offset = err + 3;

	while (start + offset < ESPC_EEPROM_SIZE) {
		u32 here = start + offset;
		u32 end;

		/* Each VPD data block must begin with tag byte 0x90. */
		err = niu_pci_eeprom_read(np, here);
		if (err != 0x90)
			return;

		/* Block length follows the tag, byte-swapped. */
		err = niu_pci_eeprom_read16_swp(np, here + 1);
		if (err < 0)
			return;

		here = start + offset + 3;
		end = start + offset + err;

		offset += err;
		/* err == 1 means all wanted properties were found. */
		err = niu_pci_vpd_scan_props(np, here, end);
		if (err < 0 || err == 1)
			return;
	}
}
*/ err = niu_pci_eeprom_read16(np, start + 0); if (err != 0x55aa) return 0; /* Apply offset to PCI data structure. */ err = niu_pci_eeprom_read16(np, start + 23); if (err < 0) return 0; start += err; /* Check for "PCIR" signature. */ err = niu_pci_eeprom_read16(np, start + 0); if (err != 0x5043) return 0; err = niu_pci_eeprom_read16(np, start + 2); if (err != 0x4952) return 0; /* Check for OBP image type. */ err = niu_pci_eeprom_read(np, start + 20); if (err < 0) return 0; if (err != 0x01) { err = niu_pci_eeprom_read(np, ret + 2); if (err < 0) return 0; start = ret + (err * 512); continue; } err = niu_pci_eeprom_read16_swp(np, start + 8); if (err < 0) return err; ret += err; err = niu_pci_eeprom_read(np, ret + 0); if (err != 0x82) return 0; return ret; } return 0; } static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop) { if (!strcmp(phy_prop, "mif")) { /* 1G copper, MII */ np->flags &= ~(NIU_FLAGS_FIBER | NIU_FLAGS_10G); np->mac_xcvr = MAC_XCVR_MII; } else if (!strcmp(phy_prop, "xgf")) { /* 10G fiber, XPCS */ np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER); np->mac_xcvr = MAC_XCVR_XPCS; } else if (!strcmp(phy_prop, "pcs")) { /* 1G fiber, PCS */ np->flags &= ~NIU_FLAGS_10G; np->flags |= NIU_FLAGS_FIBER; np->mac_xcvr = MAC_XCVR_PCS; } else if (!strcmp(phy_prop, "xgc")) { /* 10G copper, XPCS */ np->flags |= NIU_FLAGS_10G; np->flags &= ~NIU_FLAGS_FIBER; np->mac_xcvr = MAC_XCVR_XPCS; } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) { /* 10G Serdes or 1G Serdes, default to 10G */ np->flags |= NIU_FLAGS_10G; np->flags &= ~NIU_FLAGS_FIBER; np->flags |= NIU_FLAGS_XCVR_SERDES; np->mac_xcvr = MAC_XCVR_XPCS; } else { return -EINVAL; } return 0; } static int niu_pci_vpd_get_nports(struct niu *np) { int ports = 0; if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || (!strcmp(np->vpd.model, 
/* Validate the fetched VPD and derive link-type flags and the device
 * MAC address from it.  On a bad MAC or unrecognized phy-type string,
 * NIU_FLAGS_VPD_VALID is cleared so probing falls back to the SPROM.
 */
static void niu_pci_vpd_validate(struct niu *np)
{
	struct net_device *dev = np->dev;
	struct niu_vpd *vpd = &np->vpd;
	u8 val8;

	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	/* Certain board models (Alonso/Kimi, Foxxy) need their link-type
	 * flags set from the model string rather than the phy-type prop.
	 */
	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_PCS;
		/* On these boards ports 2+ are 1G fiber instead of 10G. */
		if (np->port > 1) {
			np->flags |= NIU_FLAGS_FIBER;
			np->flags &= ~NIU_FLAGS_10G;
		}
		if (np->flags & NIU_FLAGS_10G)
			np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, "Illegal phy string [%s]\n",
			np->vpd.phy_type);
		dev_err(np->device, "Falling back to SPROM\n");
		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);

	/* Per-port MAC: add the port number to the last byte, carrying
	 * into byte 4 on overflow.
	 */
	val8 = dev->dev_addr[5];
	dev->dev_addr[5] += np->port;
	if (dev->dev_addr[5] < val8)
		dev->dev_addr[4]++;
}
%x\n", (int)(sum & 0xff)); if ((sum & 0xff) != 0xab) { dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); return -EINVAL; } val = nr64(ESPC_PHY_TYPE); switch (np->port) { case 0: val8 = (val & ESPC_PHY_TYPE_PORT0) >> ESPC_PHY_TYPE_PORT0_SHIFT; break; case 1: val8 = (val & ESPC_PHY_TYPE_PORT1) >> ESPC_PHY_TYPE_PORT1_SHIFT; break; case 2: val8 = (val & ESPC_PHY_TYPE_PORT2) >> ESPC_PHY_TYPE_PORT2_SHIFT; break; case 3: val8 = (val & ESPC_PHY_TYPE_PORT3) >> ESPC_PHY_TYPE_PORT3_SHIFT; break; default: dev_err(np->device, "Bogus port number %u\n", np->port); return -EINVAL; } netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: PHY type %x\n", val8); switch (val8) { case ESPC_PHY_TYPE_1G_COPPER: /* 1G copper, MII */ np->flags &= ~(NIU_FLAGS_FIBER | NIU_FLAGS_10G); np->mac_xcvr = MAC_XCVR_MII; break; case ESPC_PHY_TYPE_1G_FIBER: /* 1G fiber, PCS */ np->flags &= ~NIU_FLAGS_10G; np->flags |= NIU_FLAGS_FIBER; np->mac_xcvr = MAC_XCVR_PCS; break; case ESPC_PHY_TYPE_10G_COPPER: /* 10G copper, XPCS */ np->flags |= NIU_FLAGS_10G; np->flags &= ~NIU_FLAGS_FIBER; np->mac_xcvr = MAC_XCVR_XPCS; break; case ESPC_PHY_TYPE_10G_FIBER: /* 10G fiber, XPCS */ np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER); np->mac_xcvr = MAC_XCVR_XPCS; break; default: dev_err(np->device, "Bogus SPROM phy type %u\n", val8); return -EINVAL; } val = nr64(ESPC_MAC_ADDR0); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); dev->dev_addr[0] = (val >> 0) & 0xff; dev->dev_addr[1] = (val >> 8) & 0xff; dev->dev_addr[2] = (val >> 16) & 0xff; dev->dev_addr[3] = (val >> 24) & 0xff; val = nr64(ESPC_MAC_ADDR1); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); dev->dev_addr[4] = (val >> 0) & 0xff; dev->dev_addr[5] = (val >> 8) & 0xff; if (!is_valid_ether_addr(&dev->dev_addr[0])) { dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", dev->dev_addr); return -EINVAL; } val8 = 
dev->dev_addr[5]; dev->dev_addr[5] += np->port; if (dev->dev_addr[5] < val8) dev->dev_addr[4]++; val = nr64(ESPC_MOD_STR_LEN); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val); if (val >= 8 * 4) return -EINVAL; for (i = 0; i < val; i += 4) { u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); np->vpd.model[i + 3] = (tmp >> 0) & 0xff; np->vpd.model[i + 2] = (tmp >> 8) & 0xff; np->vpd.model[i + 1] = (tmp >> 16) & 0xff; np->vpd.model[i + 0] = (tmp >> 24) & 0xff; } np->vpd.model[val] = '\0'; val = nr64(ESPC_BD_MOD_STR_LEN); netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val); if (val >= 4 * 4) return -EINVAL; for (i = 0; i < val; i += 4) { u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; } np->vpd.board_model[val] = '\0'; np->vpd.mac_num = nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; netif_printk(np, probe, KERN_DEBUG, np->dev, "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); return 0; } static int niu_get_and_validate_port(struct niu *np) { struct niu_parent *parent = np->parent; if (np->port <= 1) np->flags |= NIU_FLAGS_XMAC; if (!parent->num_ports) { if (parent->plat_type == PLAT_TYPE_NIU) { parent->num_ports = 2; } else { parent->num_ports = niu_pci_vpd_get_nports(np); if (!parent->num_ports) { /* Fall back to SPROM as last resort. * This will fail on most cards. */ parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; /* All of the current probing methods fail on * Maramba on-board parts. 
/* Record one discovered PHY in the probe table @p, keyed by @type
 * (PMA/PMD, PCS, or MII).  PHYs whose masked ID is not one of the
 * supported parts (BCM8704/8706, MRVL88X2011 for 10G; BCM5464R for 1G)
 * are silently ignored, as are invalid (negative) ID reads.
 *
 * Returns 0 on success or ignore, -EINVAL if the per-type table is full.
 */
static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
{
	u32 id = (dev_id_1 << 16) | dev_id_2;
	u8 idx;

	/* Negative IDs indicate a failed MDIO/MII read; skip the port. */
	if (dev_id_1 < 0 || dev_id_2 < 0)
		return 0;
	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
		 * test covers the 8706 as well.
		 */
		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
			return 0;
	} else {
		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
			return 0;
	}

	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
		parent->index, id,
		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
		type == PHY_TYPE_PCS ? "PCS" : "MII", phy_port);

	if (p->cur[type] >= NIU_MAX_PORTS) {
		pr_err("Too many PHY ports\n");
		return -EINVAL;
	}
	/* Append id/port to this type's list and bump its count. */
	idx = p->cur[type];
	p->phy_id[type][idx] = id;
	p->phy_port[type][idx] = phy_port;
	p->cur[type] = idx + 1;

	return 0;
}
/* Distribute the chip's RX/TX DMA channels across the ports.  With a
 * homogeneous port mix (all-10G or all-1G) channels are split evenly;
 * with a mixed population each 1G port gets a fixed small share and the
 * 10G ports divide the remainder.  Overcommit is clamped to one channel
 * per port; undercommit is only warned about.
 */
static void niu_divide_channels(struct niu_parent *parent,
				int num_10g, int num_1g)
{
	int num_ports = parent->num_ports;
	int rx_chans_per_10g, rx_chans_per_1g;
	int tx_chans_per_10g, tx_chans_per_1g;
	int i, tot_rx, tot_tx;

	if (!num_10g || !num_1g) {
		/* Homogeneous: split all channels evenly per port. */
		rx_chans_per_10g = rx_chans_per_1g =
			(NIU_NUM_RXCHAN / num_ports);
		tx_chans_per_10g = tx_chans_per_1g =
			(NIU_NUM_TXCHAN / num_ports);
	} else {
		/* Mixed: small fixed share per 1G port, 10G ports share
		 * the remainder.
		 */
		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
		rx_chans_per_10g = (NIU_NUM_RXCHAN -
				    (rx_chans_per_1g * num_1g)) /
			num_10g;

		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
		tx_chans_per_10g = (NIU_NUM_TXCHAN -
				    (tx_chans_per_1g * num_1g)) /
			num_10g;
	}

	tot_rx = tot_tx = 0;
	for (i = 0; i < num_ports; i++) {
		int type = phy_decode(parent->port_phy, i);

		if (type == PORT_TYPE_10G) {
			parent->rxchan_per_port[i] = rx_chans_per_10g;
			parent->txchan_per_port[i] = tx_chans_per_10g;
		} else {
			parent->rxchan_per_port[i] = rx_chans_per_1g;
			parent->txchan_per_port[i] = tx_chans_per_1g;
		}
		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
		tot_rx += parent->rxchan_per_port[i];
		tot_tx += parent->txchan_per_port[i];
	}

	/* Never assign more channels than exist: fall back to 1/port. */
	if (tot_rx > NIU_NUM_RXCHAN) {
		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
		       parent->index, tot_rx);
		for (i = 0; i < num_ports; i++)
			parent->rxchan_per_port[i] = 1;
	}
	if (tot_tx > NIU_NUM_TXCHAN) {
		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
		       parent->index, tot_tx);
		for (i = 0; i < num_ports; i++)
			parent->txchan_per_port[i] = 1;
	}
	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
		pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
			   parent->index, tot_rx, tot_tx);
	}
}
= 0; rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; rdc_channel_base = 0; for (i = 0; i < num_ports; i++) { struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; int grp, num_channels = parent->rxchan_per_port[i]; int this_channel_offset; tp->first_table_num = rdc_group; tp->num_tables = rdc_groups_per_port; this_channel_offset = 0; for (grp = 0; grp < tp->num_tables; grp++) { struct rdc_table *rt = &tp->tables[grp]; int slot; pr_info("niu%d: Port %d RDC tbl(%d) [ ", parent->index, i, tp->first_table_num + grp); for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { rt->rxdma_channel[slot] = rdc_channel_base + this_channel_offset; pr_cont("%d ", rt->rxdma_channel[slot]); if (++this_channel_offset == num_channels) this_channel_offset = 0; } pr_cont("]\n"); } parent->rdc_default[i] = rdc_channel_base; rdc_channel_base += num_channels; rdc_group += rdc_groups_per_port; } } static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent, struct phy_probe_info *info) { unsigned long flags; int port, err; memset(info, 0, sizeof(*info)); /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. 
*/ niu_lock_parent(np, flags); err = 0; for (port = 8; port < 32; port++) { int dev_id_1, dev_id_2; dev_id_1 = mdio_read(np, port, NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); dev_id_2 = mdio_read(np, port, NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); err = phy_record(parent, info, dev_id_1, dev_id_2, port, PHY_TYPE_PMA_PMD); if (err) break; dev_id_1 = mdio_read(np, port, NIU_PCS_DEV_ADDR, MII_PHYSID1); dev_id_2 = mdio_read(np, port, NIU_PCS_DEV_ADDR, MII_PHYSID2); err = phy_record(parent, info, dev_id_1, dev_id_2, port, PHY_TYPE_PCS); if (err) break; dev_id_1 = mii_read(np, port, MII_PHYSID1); dev_id_2 = mii_read(np, port, MII_PHYSID2); err = phy_record(parent, info, dev_id_1, dev_id_2, port, PHY_TYPE_MII); if (err) break; } niu_unlock_parent(np, flags); return err; } static int walk_phys(struct niu *np, struct niu_parent *parent) { struct phy_probe_info *info = &parent->phy_probe_info; int lowest_10g, lowest_1g; int num_10g, num_1g; u32 val; int err; num_10g = num_1g = 0; if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { num_10g = 0; num_1g = 2; parent->plat_type = PLAT_TYPE_ATCA_CP3220; parent->num_ports = 4; val = (phy_encode(PORT_TYPE_1G, 0) | phy_encode(PORT_TYPE_1G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { num_10g = 2; num_1g = 0; parent->num_ports = 2; val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1)); } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && (parent->plat_type == PLAT_TYPE_NIU)) { /* this is the Monza case */ if (np->flags & NIU_FLAGS_10G) { val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1)); } else { val = (phy_encode(PORT_TYPE_1G, 0) | phy_encode(PORT_TYPE_1G, 1)); } } else { err = fill_phy_probe_info(np, parent, info); if (err) return err; num_10g = count_10g_ports(info, &lowest_10g); num_1g = count_1g_ports(info, &lowest_1g); switch ((num_10g << 4) | num_1g) { case 0x24: if (lowest_1g == 10) 
parent->plat_type = PLAT_TYPE_VF_P0; else if (lowest_1g == 26) parent->plat_type = PLAT_TYPE_VF_P1; else goto unknown_vg_1g_port; /* fallthru */ case 0x22: val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); break; case 0x20: val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1)); break; case 0x10: val = phy_encode(PORT_TYPE_10G, np->port); break; case 0x14: if (lowest_1g == 10) parent->plat_type = PLAT_TYPE_VF_P0; else if (lowest_1g == 26) parent->plat_type = PLAT_TYPE_VF_P1; else goto unknown_vg_1g_port; /* fallthru */ case 0x13: if ((lowest_10g & 0x7) == 0) val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_1G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); else val = (phy_encode(PORT_TYPE_1G, 0) | phy_encode(PORT_TYPE_10G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); break; case 0x04: if (lowest_1g == 10) parent->plat_type = PLAT_TYPE_VF_P0; else if (lowest_1g == 26) parent->plat_type = PLAT_TYPE_VF_P1; else goto unknown_vg_1g_port; val = (phy_encode(PORT_TYPE_1G, 0) | phy_encode(PORT_TYPE_1G, 1) | phy_encode(PORT_TYPE_1G, 2) | phy_encode(PORT_TYPE_1G, 3)); break; default: pr_err("Unsupported port config 10G[%d] 1G[%d]\n", num_10g, num_1g); return -EINVAL; } } parent->port_phy = val; if (parent->plat_type == PLAT_TYPE_NIU) niu_n2_divide_channels(parent); else niu_divide_channels(parent, num_10g, num_1g); niu_divide_rdc_groups(parent, num_10g, num_1g); return 0; unknown_vg_1g_port: pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g); return -EINVAL; } static int niu_probe_ports(struct niu *np) { struct niu_parent *parent = np->parent; int err, i; if (parent->port_phy == PORT_PHY_UNKNOWN) { err = walk_phys(np, parent); if (err) return err; niu_set_ldg_timer_res(np, 2); for (i = 0; i <= LDN_MAX; i++) niu_ldn_irq_enable(np, i, 0); } if (parent->port_phy == PORT_PHY_INVALID) return -EINVAL; return 0; } static int 
niu_classifier_swstate_init(struct niu *np) { struct niu_classifier *cp = &np->clas; cp->tcam_top = (u16) np->port; cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; cp->h1_init = 0xffffffff; cp->h2_init = 0xffff; return fflp_early_init(np); } static void niu_link_config_init(struct niu *np) { struct niu_link_config *lp = &np->link_config; lp->advertising = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg); lp->speed = lp->active_speed = SPEED_INVALID; lp->duplex = DUPLEX_FULL; lp->active_duplex = DUPLEX_INVALID; lp->autoneg = 1; #if 0 lp->loopback_mode = LOOPBACK_MAC; lp->active_speed = SPEED_10000; lp->active_duplex = DUPLEX_FULL; #else lp->loopback_mode = LOOPBACK_DISABLED; #endif } static int niu_init_mac_ipp_pcs_base(struct niu *np) { switch (np->port) { case 0: np->mac_regs = np->regs + XMAC_PORT0_OFF; np->ipp_off = 0x00000; np->pcs_off = 0x04000; np->xpcs_off = 0x02000; break; case 1: np->mac_regs = np->regs + XMAC_PORT1_OFF; np->ipp_off = 0x08000; np->pcs_off = 0x0a000; np->xpcs_off = 0x08000; break; case 2: np->mac_regs = np->regs + BMAC_PORT2_OFF; np->ipp_off = 0x04000; np->pcs_off = 0x0e000; np->xpcs_off = ~0UL; break; case 3: np->mac_regs = np->regs + BMAC_PORT3_OFF; np->ipp_off = 0x0c000; np->pcs_off = 0x12000; np->xpcs_off = ~0UL; break; default: dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port); return -EINVAL; } return 0; } static void niu_try_msix(struct niu *np, u8 *ldg_num_map) { struct msix_entry msi_vec[NIU_NUM_LDG]; struct niu_parent *parent = np->parent; struct pci_dev *pdev = np->pdev; int i, num_irqs, err; u8 first_ldg; first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) ldg_num_map[i] = first_ldg + i; num_irqs = (parent->rxchan_per_port[np->port] + 
parent->txchan_per_port[np->port] + (np->port == 0 ? 3 : 1)); BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); retry: for (i = 0; i < num_irqs; i++) { msi_vec[i].vector = 0; msi_vec[i].entry = i; } err = pci_enable_msix(pdev, msi_vec, num_irqs); if (err < 0) { np->flags &= ~NIU_FLAGS_MSIX; return; } if (err > 0) { num_irqs = err; goto retry; } np->flags |= NIU_FLAGS_MSIX; for (i = 0; i < num_irqs; i++) np->ldg[i].irq = msi_vec[i].vector; np->num_ldg = num_irqs; } static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) { #ifdef CONFIG_SPARC64 struct platform_device *op = np->op; const u32 *int_prop; int i; int_prop = of_get_property(op->dev.of_node, "interrupts", NULL); if (!int_prop) return -ENODEV; for (i = 0; i < op->archdata.num_irqs; i++) { ldg_num_map[i] = int_prop[i]; np->ldg[i].irq = op->archdata.irqs[i]; } np->num_ldg = op->archdata.num_irqs; return 0; #else return -EINVAL; #endif } static int niu_ldg_init(struct niu *np) { struct niu_parent *parent = np->parent; u8 ldg_num_map[NIU_NUM_LDG]; int first_chan, num_chan; int i, err, ldg_rotor; u8 port; np->num_ldg = 1; np->ldg[0].irq = np->dev->irq; if (parent->plat_type == PLAT_TYPE_NIU) { err = niu_n2_irq_init(np, ldg_num_map); if (err) return err; } else niu_try_msix(np, ldg_num_map); port = np->port; for (i = 0; i < np->num_ldg; i++) { struct niu_ldg *lp = &np->ldg[i]; netif_napi_add(np->dev, &lp->napi, niu_poll, 64); lp->np = np; lp->ldg_num = ldg_num_map[i]; lp->timer = 2; /* XXX */ /* On N2 NIU the firmware has setup the SID mappings so they go * to the correct values that will route the LDG to the proper * interrupt in the NCU interrupt table. */ if (np->parent->plat_type != PLAT_TYPE_NIU) { err = niu_set_ldg_sid(np, lp->ldg_num, port, i); if (err) return err; } } /* We adopt the LDG assignment ordering used by the N2 NIU * 'interrupt' properties because that simplifies a lot of * things. 
This ordering is: * * MAC * MIF (if port zero) * SYSERR (if port zero) * RX channels * TX channels */ ldg_rotor = 0; err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], LDN_MAC(port)); if (err) return err; ldg_rotor++; if (ldg_rotor == np->num_ldg) ldg_rotor = 0; if (port == 0) { err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], LDN_MIF); if (err) return err; ldg_rotor++; if (ldg_rotor == np->num_ldg) ldg_rotor = 0; err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], LDN_DEVICE_ERROR); if (err) return err; ldg_rotor++; if (ldg_rotor == np->num_ldg) ldg_rotor = 0; } first_chan = 0; for (i = 0; i < port; i++) first_chan += parent->rxchan_per_port[i]; num_chan = parent->rxchan_per_port[port]; for (i = first_chan; i < (first_chan + num_chan); i++) { err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], LDN_RXDMA(i)); if (err) return err; ldg_rotor++; if (ldg_rotor == np->num_ldg) ldg_rotor = 0; } first_chan = 0; for (i = 0; i < port; i++) first_chan += parent->txchan_per_port[i]; num_chan = parent->txchan_per_port[port]; for (i = first_chan; i < (first_chan + num_chan); i++) { err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], LDN_TXDMA(i)); if (err) return err; ldg_rotor++; if (ldg_rotor == np->num_ldg) ldg_rotor = 0; } return 0; } static void niu_ldg_free(struct niu *np) { if (np->flags & NIU_FLAGS_MSIX) pci_disable_msix(np->pdev); } static int niu_get_of_props(struct niu *np) { #ifdef CONFIG_SPARC64 struct net_device *dev = np->dev; struct device_node *dp; const char *phy_type; const u8 *mac_addr; const char *model; int prop_len; if (np->parent->plat_type == PLAT_TYPE_NIU) dp = np->op->dev.of_node; else dp = pci_device_to_OF_node(np->pdev); phy_type = of_get_property(dp, "phy-type", &prop_len); if (!phy_type) { netdev_err(dev, "%s: OF node lacks phy-type property\n", dp->full_name); return -EINVAL; } if (!strcmp(phy_type, "none")) return -ENODEV; strcpy(np->vpd.phy_type, phy_type); if (niu_phy_type_prop_decode(np, 
np->vpd.phy_type)) { netdev_err(dev, "%s: Illegal phy string [%s]\n", dp->full_name, np->vpd.phy_type); return -EINVAL; } mac_addr = of_get_property(dp, "local-mac-address", &prop_len); if (!mac_addr) { netdev_err(dev, "%s: OF node lacks local-mac-address property\n", dp->full_name); return -EINVAL; } if (prop_len != dev->addr_len) { netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n", dp->full_name, prop_len); } memcpy(dev->dev_addr, mac_addr, dev->addr_len); if (!is_valid_ether_addr(&dev->dev_addr[0])) { netdev_err(dev, "%s: OF MAC address is invalid\n", dp->full_name); netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr); return -EINVAL; } model = of_get_property(dp, "model", &prop_len); if (model) strcpy(np->vpd.model, model); if (of_find_property(dp, "hot-swappable-phy", &prop_len)) { np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_HOTPLUG_PHY); } return 0; #else return -EINVAL; #endif } static int niu_get_invariants(struct niu *np) { int err, have_props; u32 offset; err = niu_get_of_props(np); if (err == -ENODEV) return err; have_props = !err; err = niu_init_mac_ipp_pcs_base(np); if (err) return err; if (have_props) { err = niu_get_and_validate_port(np); if (err) return err; } else { if (np->parent->plat_type == PLAT_TYPE_NIU) return -EINVAL; nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); offset = niu_pci_vpd_offset(np); netif_printk(np, probe, KERN_DEBUG, np->dev, "%s() VPD offset [%08x]\n", __func__, offset); if (offset) niu_pci_vpd_fetch(np, offset); nw64(ESPC_PIO_EN, 0); if (np->flags & NIU_FLAGS_VPD_VALID) { niu_pci_vpd_validate(np); err = niu_get_and_validate_port(np); if (err) return err; } if (!(np->flags & NIU_FLAGS_VPD_VALID)) { err = niu_get_and_validate_port(np); if (err) return err; err = niu_pci_probe_sprom(np); if (err) return err; } } err = niu_probe_ports(np); if (err) return err; niu_ldg_init(np); niu_classifier_swstate_init(np); niu_link_config_init(np); err = niu_determine_phy_disposition(np); if (!err) err = 
niu_init_link(np);

	return err;
}

/* Global registry of NIU parent (board-level) structures, the lock
 * protecting it, and the monotonically increasing board index.
 */
static LIST_HEAD(niu_parent_list);
static DEFINE_MUTEX(niu_parent_lock);
static int niu_parent_index;

/* sysfs show: emit one "10G"/"1G" token per port, decoded from the
 * parent's packed port_phy word.  Empty output until probing has
 * determined (or failed to determine) the PHY layout.
 */
static ssize_t show_port_phy(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	u32 port_phy = p->port_phy;
	char *orig_buf = buf;
	int i;

	/* Nothing sensible to report before/after a failed probe. */
	if (port_phy == PORT_PHY_UNKNOWN ||
	    port_phy == PORT_PHY_INVALID)
		return 0;

	for (i = 0; i < p->num_ports; i++) {
		const char *type_str;
		int type;

		type = phy_decode(port_phy, i);
		if (type == PORT_TYPE_10G)
			type_str = "10G";
		else
			type_str = "1G";

		/* Space-separate all tokens after the first. */
		buf += sprintf(buf, (i == 0) ? "%s" : " %s", type_str);
	}
	buf += sprintf(buf, "\n");
	return buf - orig_buf;
}

/* sysfs show: human-readable platform type of this parent board. */
static ssize_t show_plat_type(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	const char *type_str;

	switch (p->plat_type) {
	case PLAT_TYPE_ATLAS:
		type_str = "atlas";
		break;
	case PLAT_TYPE_NIU:
		type_str = "niu";
		break;
	case PLAT_TYPE_VF_P0:
		type_str = "vf_p0";
		break;
	case PLAT_TYPE_VF_P1:
		type_str = "vf_p1";
		break;
	default:
		type_str = "unknown";
		break;
	}

	return sprintf(buf, "%s\n", type_str);
}

/* Common helper for the {rx,tx}chan_per_port attributes: print one
 * space-separated DMA channel count per port.  @rx selects whether
 * the RX (non-zero) or TX (zero) table is reported.
 */
static ssize_t __show_chan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf,
				    int rx)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	char *orig_buf = buf;
	u8 *arr;
	int i;

	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf, (i == 0) ?
"%d" : " %d", arr[i]); } buf += sprintf(buf, "\n"); return buf - orig_buf; } static ssize_t show_rxchan_per_port(struct device *dev, struct device_attribute *attr, char *buf) { return __show_chan_per_port(dev, attr, buf, 1); } static ssize_t show_txchan_per_port(struct device *dev, struct device_attribute *attr, char *buf) { return __show_chan_per_port(dev, attr, buf, 1); } static ssize_t show_num_ports(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *plat_dev = to_platform_device(dev); struct niu_parent *p = plat_dev->dev.platform_data; return sprintf(buf, "%d\n", p->num_ports); } static struct device_attribute niu_parent_attributes[] = { __ATTR(port_phy, S_IRUGO, show_port_phy, NULL), __ATTR(plat_type, S_IRUGO, show_plat_type, NULL), __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL), __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL), __ATTR(num_ports, S_IRUGO, show_num_ports, NULL), {} }; static struct niu_parent *niu_new_parent(struct niu *np, union niu_parent_id *id, u8 ptype) { struct platform_device *plat_dev; struct niu_parent *p; int i; plat_dev = platform_device_register_simple("niu-board", niu_parent_index, NULL, 0); if (IS_ERR(plat_dev)) return NULL; for (i = 0; attr_name(niu_parent_attributes[i]); i++) { int err = device_create_file(&plat_dev->dev, &niu_parent_attributes[i]); if (err) goto fail_unregister; } p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) goto fail_unregister; p->index = niu_parent_index++; plat_dev->dev.platform_data = p; p->plat_dev = plat_dev; memcpy(&p->id, id, sizeof(*id)); p->plat_type = ptype; INIT_LIST_HEAD(&p->list); atomic_set(&p->refcnt, 0); list_add(&p->list, &niu_parent_list); spin_lock_init(&p->lock); p->rxdma_clock_divider = 7500; p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES; if (p->plat_type == PLAT_TYPE_NIU) p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES; for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { int index = i - CLASS_CODE_USER_PROG1; 
p->tcam_key[index] = TCAM_KEY_TSEL; p->flow_key[index] = (FLOW_KEY_IPSA | FLOW_KEY_IPDA | FLOW_KEY_PROTO | (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT) | (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)); } for (i = 0; i < LDN_MAX + 1; i++) p->ldg_map[i] = LDG_INVALID; return p; fail_unregister: platform_device_unregister(plat_dev); return NULL; } static struct niu_parent *niu_get_parent(struct niu *np, union niu_parent_id *id, u8 ptype) { struct niu_parent *p, *tmp; int port = np->port; mutex_lock(&niu_parent_lock); p = NULL; list_for_each_entry(tmp, &niu_parent_list, list) { if (!memcmp(id, &tmp->id, sizeof(*id))) { p = tmp; break; } } if (!p) p = niu_new_parent(np, id, ptype); if (p) { char port_name[6]; int err; sprintf(port_name, "port%d", port); err = sysfs_create_link(&p->plat_dev->dev.kobj, &np->device->kobj, port_name); if (!err) { p->ports[port] = np; atomic_inc(&p->refcnt); } } mutex_unlock(&niu_parent_lock); return p; } static void niu_put_parent(struct niu *np) { struct niu_parent *p = np->parent; u8 port = np->port; char port_name[6]; BUG_ON(!p || p->ports[port] != np); netif_printk(np, probe, KERN_DEBUG, np->dev, "%s() port[%u]\n", __func__, port); sprintf(port_name, "port%d", port); mutex_lock(&niu_parent_lock); sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); p->ports[port] = NULL; np->parent = NULL; if (atomic_dec_and_test(&p->refcnt)) { list_del(&p->list); platform_device_unregister(p->plat_dev); } mutex_unlock(&niu_parent_lock); } static void *niu_pci_alloc_coherent(struct device *dev, size_t size, u64 *handle, gfp_t flag) { dma_addr_t dh; void *ret; ret = dma_alloc_coherent(dev, size, &dh, flag); if (ret) *handle = dh; return ret; } static void niu_pci_free_coherent(struct device *dev, size_t size, void *cpu_addr, u64 handle) { dma_free_coherent(dev, size, cpu_addr, handle); } static u64 niu_pci_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { return dma_map_page(dev, page, 
offset, size, direction); } static void niu_pci_unmap_page(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction) { dma_unmap_page(dev, dma_address, size, direction); } static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { return dma_map_single(dev, cpu_addr, size, direction); } static void niu_pci_unmap_single(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction) { dma_unmap_single(dev, dma_address, size, direction); } static const struct niu_ops niu_pci_ops = { .alloc_coherent = niu_pci_alloc_coherent, .free_coherent = niu_pci_free_coherent, .map_page = niu_pci_map_page, .unmap_page = niu_pci_unmap_page, .map_single = niu_pci_map_single, .unmap_single = niu_pci_unmap_single, }; static void niu_driver_version(void) { static int niu_version_printed; if (niu_version_printed++ == 0) pr_info("%s", version); } static struct net_device *niu_alloc_and_init(struct device *gen_dev, struct pci_dev *pdev, struct platform_device *op, const struct niu_ops *ops, u8 port) { struct net_device *dev; struct niu *np; dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); if (!dev) return NULL; SET_NETDEV_DEV(dev, gen_dev); np = netdev_priv(dev); np->dev = dev; np->pdev = pdev; np->op = op; np->device = gen_dev; np->ops = ops; np->msg_enable = niu_debug; spin_lock_init(&np->lock); INIT_WORK(&np->reset_task, niu_reset_task); np->port = port; return dev; } static const struct net_device_ops niu_netdev_ops = { .ndo_open = niu_open, .ndo_stop = niu_close, .ndo_start_xmit = niu_start_xmit, .ndo_get_stats64 = niu_get_stats, .ndo_set_rx_mode = niu_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = niu_set_mac_addr, .ndo_do_ioctl = niu_ioctl, .ndo_tx_timeout = niu_tx_timeout, .ndo_change_mtu = niu_change_mtu, }; static void niu_assign_netdev_ops(struct net_device *dev) { dev->netdev_ops = &niu_netdev_ops; dev->ethtool_ops = 
&niu_ethtool_ops; dev->watchdog_timeo = NIU_TX_TIMEOUT; } static void niu_device_announce(struct niu *np) { struct net_device *dev = np->dev; pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr); if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", dev->name, (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"), (np->mac_xcvr == MAC_XCVR_MII ? "MII" : (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), np->vpd.phy_type); } else { pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", dev->name, (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), (np->flags & NIU_FLAGS_FIBER ? "FIBER" : (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" : "COPPER")), (np->mac_xcvr == MAC_XCVR_MII ? "MII" : (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), np->vpd.phy_type); } } static void niu_set_basic_features(struct net_device *dev) { dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH; dev->features |= dev->hw_features | NETIF_F_RXCSUM; } static int niu_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { union niu_parent_id parent_id; struct net_device *dev; struct niu *np; int err; u64 dma_mask; niu_driver_version(); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; } if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n"); err = -ENODEV; goto err_out_disable_pdev; } err = pci_request_regions(pdev, DRV_MODULE_NAME); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); goto err_out_disable_pdev; } if (!pci_is_pcie(pdev)) { dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); err = -ENODEV; goto err_out_free_res; } dev = 
niu_alloc_and_init(&pdev->dev, pdev, NULL, &niu_pci_ops, PCI_FUNC(pdev->devfn)); if (!dev) { err = -ENOMEM; goto err_out_free_res; } np = netdev_priv(dev); memset(&parent_id, 0, sizeof(parent_id)); parent_id.pci.domain = pci_domain_nr(pdev->bus); parent_id.pci.bus = pdev->bus->number; parent_id.pci.device = PCI_SLOT(pdev->devfn); np->parent = niu_get_parent(np, &parent_id, PLAT_TYPE_ATLAS); if (!np->parent) { err = -ENOMEM; goto err_out_free_dev; } pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_NOSNOOP_EN, PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_RELAX_EN); dma_mask = DMA_BIT_MASK(44); err = pci_set_dma_mask(pdev, dma_mask); if (!err) { dev->features |= NETIF_F_HIGHDMA; err = pci_set_consistent_dma_mask(pdev, dma_mask); if (err) { dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n"); goto err_out_release_parent; } } if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_release_parent; } } niu_set_basic_features(dev); dev->priv_flags |= IFF_UNICAST_FLT; np->regs = pci_ioremap_bar(pdev, 0); if (!np->regs) { dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); err = -ENOMEM; goto err_out_release_parent; } pci_set_master(pdev); pci_save_state(pdev); dev->irq = pdev->irq; niu_assign_netdev_ops(dev); err = niu_get_invariants(np); if (err) { if (err != -ENODEV) dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); goto err_out_iounmap; } err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting\n"); goto err_out_iounmap; } pci_set_drvdata(pdev, dev); niu_device_announce(np); return 0; err_out_iounmap: if (np->regs) { iounmap(np->regs); np->regs = NULL; } err_out_release_parent: niu_put_parent(np); err_out_free_dev: free_netdev(dev); err_out_free_res: pci_release_regions(pdev); 
err_out_disable_pdev: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; } static void niu_pci_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct niu *np = netdev_priv(dev); unregister_netdev(dev); if (np->regs) { iounmap(np->regs); np->regs = NULL; } niu_ldg_free(np); niu_put_parent(np); free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } } static int niu_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); struct niu *np = netdev_priv(dev); unsigned long flags; if (!netif_running(dev)) return 0; flush_work(&np->reset_task); niu_netif_stop(np); del_timer_sync(&np->timer); spin_lock_irqsave(&np->lock, flags); niu_enable_interrupts(np, 0); spin_unlock_irqrestore(&np->lock, flags); netif_device_detach(dev); spin_lock_irqsave(&np->lock, flags); niu_stop_hw(np); spin_unlock_irqrestore(&np->lock, flags); pci_save_state(pdev); return 0; } static int niu_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct niu *np = netdev_priv(dev); unsigned long flags; int err; if (!netif_running(dev)) return 0; pci_restore_state(pdev); netif_device_attach(dev); spin_lock_irqsave(&np->lock, flags); err = niu_init_hw(np); if (!err) { np->timer.expires = jiffies + HZ; add_timer(&np->timer); niu_netif_start(np); } spin_unlock_irqrestore(&np->lock, flags); return err; } static struct pci_driver niu_pci_driver = { .name = DRV_MODULE_NAME, .id_table = niu_pci_tbl, .probe = niu_pci_init_one, .remove = niu_pci_remove_one, .suspend = niu_suspend, .resume = niu_resume, }; #ifdef CONFIG_SPARC64 static void *niu_phys_alloc_coherent(struct device *dev, size_t size, u64 *dma_addr, gfp_t flag) { unsigned long order = get_order(size); unsigned long page = __get_free_pages(flag, order); if (page == 0UL) return NULL; memset((char *)page, 0, PAGE_SIZE << order); *dma_addr = __pa(page); return (void *) page; } 
static void niu_phys_free_coherent(struct device *dev, size_t size, void *cpu_addr, u64 handle) { unsigned long order = get_order(size); free_pages((unsigned long) cpu_addr, order); } static u64 niu_phys_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { return page_to_phys(page) + offset; } static void niu_phys_unmap_page(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction) { /* Nothing to do. */ } static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { return __pa(cpu_addr); } static void niu_phys_unmap_single(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction) { /* Nothing to do. */ } static const struct niu_ops niu_phys_ops = { .alloc_coherent = niu_phys_alloc_coherent, .free_coherent = niu_phys_free_coherent, .map_page = niu_phys_map_page, .unmap_page = niu_phys_unmap_page, .map_single = niu_phys_map_single, .unmap_single = niu_phys_unmap_single, }; static int niu_of_probe(struct platform_device *op) { union niu_parent_id parent_id; struct net_device *dev; struct niu *np; const u32 *reg; int err; niu_driver_version(); reg = of_get_property(op->dev.of_node, "reg", NULL); if (!reg) { dev_err(&op->dev, "%s: No 'reg' property, aborting\n", op->dev.of_node->full_name); return -ENODEV; } dev = niu_alloc_and_init(&op->dev, NULL, op, &niu_phys_ops, reg[0] & 0x1); if (!dev) { err = -ENOMEM; goto err_out; } np = netdev_priv(dev); memset(&parent_id, 0, sizeof(parent_id)); parent_id.of = of_get_parent(op->dev.of_node); np->parent = niu_get_parent(np, &parent_id, PLAT_TYPE_NIU); if (!np->parent) { err = -ENOMEM; goto err_out_free_dev; } niu_set_basic_features(dev); np->regs = of_ioremap(&op->resource[1], 0, resource_size(&op->resource[1]), "niu regs"); if (!np->regs) { dev_err(&op->dev, "Cannot map device registers, aborting\n"); err = -ENOMEM; goto err_out_release_parent; } 
np->vir_regs_1 = of_ioremap(&op->resource[2], 0, resource_size(&op->resource[2]), "niu vregs-1"); if (!np->vir_regs_1) { dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } np->vir_regs_2 = of_ioremap(&op->resource[3], 0, resource_size(&op->resource[3]), "niu vregs-2"); if (!np->vir_regs_2) { dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } niu_assign_netdev_ops(dev); err = niu_get_invariants(np); if (err) { if (err != -ENODEV) dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n"); goto err_out_iounmap; } err = register_netdev(dev); if (err) { dev_err(&op->dev, "Cannot register net device, aborting\n"); goto err_out_iounmap; } dev_set_drvdata(&op->dev, dev); niu_device_announce(np); return 0; err_out_iounmap: if (np->vir_regs_1) { of_iounmap(&op->resource[2], np->vir_regs_1, resource_size(&op->resource[2])); np->vir_regs_1 = NULL; } if (np->vir_regs_2) { of_iounmap(&op->resource[3], np->vir_regs_2, resource_size(&op->resource[3])); np->vir_regs_2 = NULL; } if (np->regs) { of_iounmap(&op->resource[1], np->regs, resource_size(&op->resource[1])); np->regs = NULL; } err_out_release_parent: niu_put_parent(np); err_out_free_dev: free_netdev(dev); err_out: return err; } static int niu_of_remove(struct platform_device *op) { struct net_device *dev = dev_get_drvdata(&op->dev); if (dev) { struct niu *np = netdev_priv(dev); unregister_netdev(dev); if (np->vir_regs_1) { of_iounmap(&op->resource[2], np->vir_regs_1, resource_size(&op->resource[2])); np->vir_regs_1 = NULL; } if (np->vir_regs_2) { of_iounmap(&op->resource[3], np->vir_regs_2, resource_size(&op->resource[3])); np->vir_regs_2 = NULL; } if (np->regs) { of_iounmap(&op->resource[1], np->regs, resource_size(&op->resource[1])); np->regs = NULL; } niu_ldg_free(np); niu_put_parent(np); free_netdev(dev); dev_set_drvdata(&op->dev, NULL); } return 0; } static const struct of_device_id niu_match[] = { 
{ .name = "network",
	  .compatible = "SUNW,niusl",
	},
	{},
};

MODULE_DEVICE_TABLE(of, niu_match);

static struct platform_driver niu_of_driver = {
	.driver = {
		.name = "niu",
		.owner = THIS_MODULE,
		.of_match_table = niu_match,
	},
	.probe = niu_of_probe,
	.remove = niu_of_remove,
};

#endif /* CONFIG_SPARC64 */

/* Module init: register the OF platform driver first (sparc64 only),
 * then the PCI driver; unwind the OF registration if PCI registration
 * fails so the module never loads half-registered.
 */
static int __init niu_init(void)
{
	int err = 0;

	/* The driver's buffer layout assumes pages of at least 4K. */
	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
	err = platform_driver_register(&niu_of_driver);
#endif

	if (!err) {
		err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
		if (err)
			platform_driver_unregister(&niu_of_driver);
#endif
	}

	return err;
}

/* Module exit: unregister both drivers (OF one on sparc64 only). */
static void __exit niu_exit(void)
{
	pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
	platform_driver_unregister(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);
gpl-2.0
intervigilium/android_kernel_htc_msm8660
sound/pci/fm801.c
2339
42061
/* * The driver for the ForteMedia FM801 based soundcards * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * Support FM only card by Andy Shevchenko <andy@smile.org.ua> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/tlv.h> #include <sound/ac97_codec.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/initval.h> #include <asm/io.h> #ifdef CONFIG_SND_FM801_TEA575X_BOOL #include <sound/tea575x-tuner.h> #endif MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("ForteMedia FM801"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ForteMedia,FM801}," "{Genius,SoundMaker Live 5.1}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ /* * Enable TEA575x tuner * 1 = MediaForte 256-PCS * 2 = MediaForte 256-PCP * 3 = MediaForte 64-PCR * 16 = setup tuner only (this is additional bit), i.e. 
SF64-PCR FM card * High 16-bits are video (radio) device number + 1 */ static int tea575x_tuner[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the FM801 soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the FM801 soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable FM801 soundcard."); module_param_array(tea575x_tuner, int, NULL, 0444); MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2=SF256-PCP, 3=SF64-PCR, 8=disable, +16=tuner-only)."); #define TUNER_DISABLED (1<<3) #define TUNER_ONLY (1<<4) #define TUNER_TYPE_MASK (~TUNER_ONLY & 0xFFFF) /* * Direct registers */ #define FM801_REG(chip, reg) (chip->port + FM801_##reg) #define FM801_PCM_VOL 0x00 /* PCM Output Volume */ #define FM801_FM_VOL 0x02 /* FM Output Volume */ #define FM801_I2S_VOL 0x04 /* I2S Volume */ #define FM801_REC_SRC 0x06 /* Record Source */ #define FM801_PLY_CTRL 0x08 /* Playback Control */ #define FM801_PLY_COUNT 0x0a /* Playback Count */ #define FM801_PLY_BUF1 0x0c /* Playback Bufer I */ #define FM801_PLY_BUF2 0x10 /* Playback Buffer II */ #define FM801_CAP_CTRL 0x14 /* Capture Control */ #define FM801_CAP_COUNT 0x16 /* Capture Count */ #define FM801_CAP_BUF1 0x18 /* Capture Buffer I */ #define FM801_CAP_BUF2 0x1c /* Capture Buffer II */ #define FM801_CODEC_CTRL 0x22 /* Codec Control */ #define FM801_I2S_MODE 0x24 /* I2S Mode Control */ #define FM801_VOLUME 0x26 /* Volume Up/Down/Mute Status */ #define FM801_I2C_CTRL 0x29 /* I2C Control */ #define FM801_AC97_CMD 0x2a /* AC'97 Command */ #define FM801_AC97_DATA 0x2c /* AC'97 Data */ #define FM801_MPU401_DATA 0x30 /* MPU401 Data */ #define FM801_MPU401_CMD 0x31 /* MPU401 Command */ #define FM801_GPIO_CTRL 0x52 /* General Purpose I/O Control */ #define FM801_GEN_CTRL 0x54 /* General Control */ #define FM801_IRQ_MASK 0x56 /* Interrupt Mask */ #define FM801_IRQ_STATUS 0x5a /* 
Interrupt Status */ #define FM801_OPL3_BANK0 0x68 /* OPL3 Status Read / Bank 0 Write */ #define FM801_OPL3_DATA0 0x69 /* OPL3 Data 0 Write */ #define FM801_OPL3_BANK1 0x6a /* OPL3 Bank 1 Write */ #define FM801_OPL3_DATA1 0x6b /* OPL3 Bank 1 Write */ #define FM801_POWERDOWN 0x70 /* Blocks Power Down Control */ /* codec access */ #define FM801_AC97_READ (1<<7) /* read=1, write=0 */ #define FM801_AC97_VALID (1<<8) /* port valid=1 */ #define FM801_AC97_BUSY (1<<9) /* busy=1 */ #define FM801_AC97_ADDR_SHIFT 10 /* codec id (2bit) */ /* playback and record control register bits */ #define FM801_BUF1_LAST (1<<1) #define FM801_BUF2_LAST (1<<2) #define FM801_START (1<<5) #define FM801_PAUSE (1<<6) #define FM801_IMMED_STOP (1<<7) #define FM801_RATE_SHIFT 8 #define FM801_RATE_MASK (15 << FM801_RATE_SHIFT) #define FM801_CHANNELS_4 (1<<12) /* playback only */ #define FM801_CHANNELS_6 (2<<12) /* playback only */ #define FM801_CHANNELS_6MS (3<<12) /* playback only */ #define FM801_CHANNELS_MASK (3<<12) #define FM801_16BIT (1<<14) #define FM801_STEREO (1<<15) /* IRQ status bits */ #define FM801_IRQ_PLAYBACK (1<<8) #define FM801_IRQ_CAPTURE (1<<9) #define FM801_IRQ_VOLUME (1<<14) #define FM801_IRQ_MPU (1<<15) /* GPIO control register */ #define FM801_GPIO_GP0 (1<<0) /* read/write */ #define FM801_GPIO_GP1 (1<<1) #define FM801_GPIO_GP2 (1<<2) #define FM801_GPIO_GP3 (1<<3) #define FM801_GPIO_GP(x) (1<<(0+(x))) #define FM801_GPIO_GD0 (1<<8) /* directions: 1 = input, 0 = output*/ #define FM801_GPIO_GD1 (1<<9) #define FM801_GPIO_GD2 (1<<10) #define FM801_GPIO_GD3 (1<<11) #define FM801_GPIO_GD(x) (1<<(8+(x))) #define FM801_GPIO_GS0 (1<<12) /* function select: */ #define FM801_GPIO_GS1 (1<<13) /* 1 = GPIO */ #define FM801_GPIO_GS2 (1<<14) /* 0 = other (S/PDIF, VOL) */ #define FM801_GPIO_GS3 (1<<15) #define FM801_GPIO_GS(x) (1<<(12+(x))) /* */ struct fm801 { int irq; unsigned long port; /* I/O port number */ unsigned int multichannel: 1, /* multichannel support */ secondary: 1; /* secondary 
codec */ unsigned char secondary_addr; /* address of the secondary codec */ unsigned int tea575x_tuner; /* tuner access method & flags */ unsigned short ply_ctrl; /* playback control */ unsigned short cap_ctrl; /* capture control */ unsigned long ply_buffer; unsigned int ply_buf; unsigned int ply_count; unsigned int ply_size; unsigned int ply_pos; unsigned long cap_buffer; unsigned int cap_buf; unsigned int cap_count; unsigned int cap_size; unsigned int cap_pos; struct snd_ac97_bus *ac97_bus; struct snd_ac97 *ac97; struct snd_ac97 *ac97_sec; struct pci_dev *pci; struct snd_card *card; struct snd_pcm *pcm; struct snd_rawmidi *rmidi; struct snd_pcm_substream *playback_substream; struct snd_pcm_substream *capture_substream; unsigned int p_dma_size; unsigned int c_dma_size; spinlock_t reg_lock; struct snd_info_entry *proc_entry; #ifdef CONFIG_SND_FM801_TEA575X_BOOL struct snd_tea575x tea; #endif #ifdef CONFIG_PM u16 saved_regs[0x20]; #endif }; static DEFINE_PCI_DEVICE_TABLE(snd_fm801_ids) = { { 0x1319, 0x0801, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0, }, /* FM801 */ { 0x5213, 0x0510, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0, }, /* Gallant Odyssey Sound 4 */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_fm801_ids); /* * common I/O routines */ static int snd_fm801_update_bits(struct fm801 *chip, unsigned short reg, unsigned short mask, unsigned short value) { int change; unsigned long flags; unsigned short old, new; spin_lock_irqsave(&chip->reg_lock, flags); old = inw(chip->port + reg); new = (old & ~mask) | value; change = old != new; if (change) outw(new, chip->port + reg); spin_unlock_irqrestore(&chip->reg_lock, flags); return change; } static void snd_fm801_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct fm801 *chip = ac97->private_data; int idx; /* * Wait until the codec interface is not ready.. 
*/ for (idx = 0; idx < 100; idx++) { if (!(inw(FM801_REG(chip, AC97_CMD)) & FM801_AC97_BUSY)) goto ok1; udelay(10); } snd_printk(KERN_ERR "AC'97 interface is busy (1)\n"); return; ok1: /* write data and address */ outw(val, FM801_REG(chip, AC97_DATA)); outw(reg | (ac97->addr << FM801_AC97_ADDR_SHIFT), FM801_REG(chip, AC97_CMD)); /* * Wait until the write command is not completed.. */ for (idx = 0; idx < 1000; idx++) { if (!(inw(FM801_REG(chip, AC97_CMD)) & FM801_AC97_BUSY)) return; udelay(10); } snd_printk(KERN_ERR "AC'97 interface #%d is busy (2)\n", ac97->num); } static unsigned short snd_fm801_codec_read(struct snd_ac97 *ac97, unsigned short reg) { struct fm801 *chip = ac97->private_data; int idx; /* * Wait until the codec interface is not ready.. */ for (idx = 0; idx < 100; idx++) { if (!(inw(FM801_REG(chip, AC97_CMD)) & FM801_AC97_BUSY)) goto ok1; udelay(10); } snd_printk(KERN_ERR "AC'97 interface is busy (1)\n"); return 0; ok1: /* read command */ outw(reg | (ac97->addr << FM801_AC97_ADDR_SHIFT) | FM801_AC97_READ, FM801_REG(chip, AC97_CMD)); for (idx = 0; idx < 100; idx++) { if (!(inw(FM801_REG(chip, AC97_CMD)) & FM801_AC97_BUSY)) goto ok2; udelay(10); } snd_printk(KERN_ERR "AC'97 interface #%d is busy (2)\n", ac97->num); return 0; ok2: for (idx = 0; idx < 1000; idx++) { if (inw(FM801_REG(chip, AC97_CMD)) & FM801_AC97_VALID) goto ok3; udelay(10); } snd_printk(KERN_ERR "AC'97 interface #%d is not valid (2)\n", ac97->num); return 0; ok3: return inw(FM801_REG(chip, AC97_DATA)); } static unsigned int rates[] = { 5500, 8000, 9600, 11025, 16000, 19200, 22050, 32000, 38400, 44100, 48000 }; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; static unsigned int channels[] = { 2, 4, 6 }; static struct snd_pcm_hw_constraint_list hw_constraints_channels = { .count = ARRAY_SIZE(channels), .list = channels, .mask = 0, }; /* * Sample rate routines */ static unsigned short snd_fm801_rate_bits(unsigned int 
rate) { unsigned int idx; for (idx = 0; idx < ARRAY_SIZE(rates); idx++) if (rates[idx] == rate) return idx; snd_BUG(); return ARRAY_SIZE(rates) - 1; } /* * PCM part */ static int snd_fm801_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct fm801 *chip = snd_pcm_substream_chip(substream); spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: chip->ply_ctrl &= ~(FM801_BUF1_LAST | FM801_BUF2_LAST | FM801_PAUSE); chip->ply_ctrl |= FM801_START | FM801_IMMED_STOP; break; case SNDRV_PCM_TRIGGER_STOP: chip->ply_ctrl &= ~(FM801_START | FM801_PAUSE); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: chip->ply_ctrl |= FM801_PAUSE; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: chip->ply_ctrl &= ~FM801_PAUSE; break; default: spin_unlock(&chip->reg_lock); snd_BUG(); return -EINVAL; } outw(chip->ply_ctrl, FM801_REG(chip, PLY_CTRL)); spin_unlock(&chip->reg_lock); return 0; } static int snd_fm801_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct fm801 *chip = snd_pcm_substream_chip(substream); spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: chip->cap_ctrl &= ~(FM801_BUF1_LAST | FM801_BUF2_LAST | FM801_PAUSE); chip->cap_ctrl |= FM801_START | FM801_IMMED_STOP; break; case SNDRV_PCM_TRIGGER_STOP: chip->cap_ctrl &= ~(FM801_START | FM801_PAUSE); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: chip->cap_ctrl |= FM801_PAUSE; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: chip->cap_ctrl &= ~FM801_PAUSE; break; default: spin_unlock(&chip->reg_lock); snd_BUG(); return -EINVAL; } outw(chip->cap_ctrl, FM801_REG(chip, CAP_CTRL)); spin_unlock(&chip->reg_lock); return 0; } static int snd_fm801_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_fm801_hw_free(struct 
snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static int snd_fm801_playback_prepare(struct snd_pcm_substream *substream) { struct fm801 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; chip->ply_size = snd_pcm_lib_buffer_bytes(substream); chip->ply_count = snd_pcm_lib_period_bytes(substream); spin_lock_irq(&chip->reg_lock); chip->ply_ctrl &= ~(FM801_START | FM801_16BIT | FM801_STEREO | FM801_RATE_MASK | FM801_CHANNELS_MASK); if (snd_pcm_format_width(runtime->format) == 16) chip->ply_ctrl |= FM801_16BIT; if (runtime->channels > 1) { chip->ply_ctrl |= FM801_STEREO; if (runtime->channels == 4) chip->ply_ctrl |= FM801_CHANNELS_4; else if (runtime->channels == 6) chip->ply_ctrl |= FM801_CHANNELS_6; } chip->ply_ctrl |= snd_fm801_rate_bits(runtime->rate) << FM801_RATE_SHIFT; chip->ply_buf = 0; outw(chip->ply_ctrl, FM801_REG(chip, PLY_CTRL)); outw(chip->ply_count - 1, FM801_REG(chip, PLY_COUNT)); chip->ply_buffer = runtime->dma_addr; chip->ply_pos = 0; outl(chip->ply_buffer, FM801_REG(chip, PLY_BUF1)); outl(chip->ply_buffer + (chip->ply_count % chip->ply_size), FM801_REG(chip, PLY_BUF2)); spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_fm801_capture_prepare(struct snd_pcm_substream *substream) { struct fm801 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; chip->cap_size = snd_pcm_lib_buffer_bytes(substream); chip->cap_count = snd_pcm_lib_period_bytes(substream); spin_lock_irq(&chip->reg_lock); chip->cap_ctrl &= ~(FM801_START | FM801_16BIT | FM801_STEREO | FM801_RATE_MASK); if (snd_pcm_format_width(runtime->format) == 16) chip->cap_ctrl |= FM801_16BIT; if (runtime->channels > 1) chip->cap_ctrl |= FM801_STEREO; chip->cap_ctrl |= snd_fm801_rate_bits(runtime->rate) << FM801_RATE_SHIFT; chip->cap_buf = 0; outw(chip->cap_ctrl, FM801_REG(chip, CAP_CTRL)); outw(chip->cap_count - 1, FM801_REG(chip, CAP_COUNT)); chip->cap_buffer = 
runtime->dma_addr; chip->cap_pos = 0; outl(chip->cap_buffer, FM801_REG(chip, CAP_BUF1)); outl(chip->cap_buffer + (chip->cap_count % chip->cap_size), FM801_REG(chip, CAP_BUF2)); spin_unlock_irq(&chip->reg_lock); return 0; } static snd_pcm_uframes_t snd_fm801_playback_pointer(struct snd_pcm_substream *substream) { struct fm801 *chip = snd_pcm_substream_chip(substream); size_t ptr; if (!(chip->ply_ctrl & FM801_START)) return 0; spin_lock(&chip->reg_lock); ptr = chip->ply_pos + (chip->ply_count - 1) - inw(FM801_REG(chip, PLY_COUNT)); if (inw(FM801_REG(chip, IRQ_STATUS)) & FM801_IRQ_PLAYBACK) { ptr += chip->ply_count; ptr %= chip->ply_size; } spin_unlock(&chip->reg_lock); return bytes_to_frames(substream->runtime, ptr); } static snd_pcm_uframes_t snd_fm801_capture_pointer(struct snd_pcm_substream *substream) { struct fm801 *chip = snd_pcm_substream_chip(substream); size_t ptr; if (!(chip->cap_ctrl & FM801_START)) return 0; spin_lock(&chip->reg_lock); ptr = chip->cap_pos + (chip->cap_count - 1) - inw(FM801_REG(chip, CAP_COUNT)); if (inw(FM801_REG(chip, IRQ_STATUS)) & FM801_IRQ_CAPTURE) { ptr += chip->cap_count; ptr %= chip->cap_size; } spin_unlock(&chip->reg_lock); return bytes_to_frames(substream->runtime, ptr); } static irqreturn_t snd_fm801_interrupt(int irq, void *dev_id) { struct fm801 *chip = dev_id; unsigned short status; unsigned int tmp; status = inw(FM801_REG(chip, IRQ_STATUS)); status &= FM801_IRQ_PLAYBACK|FM801_IRQ_CAPTURE|FM801_IRQ_MPU|FM801_IRQ_VOLUME; if (! status) return IRQ_NONE; /* ack first */ outw(status, FM801_REG(chip, IRQ_STATUS)); if (chip->pcm && (status & FM801_IRQ_PLAYBACK) && chip->playback_substream) { spin_lock(&chip->reg_lock); chip->ply_buf++; chip->ply_pos += chip->ply_count; chip->ply_pos %= chip->ply_size; tmp = chip->ply_pos + chip->ply_count; tmp %= chip->ply_size; outl(chip->ply_buffer + tmp, (chip->ply_buf & 1) ? 
FM801_REG(chip, PLY_BUF1) : FM801_REG(chip, PLY_BUF2)); spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(chip->playback_substream); } if (chip->pcm && (status & FM801_IRQ_CAPTURE) && chip->capture_substream) { spin_lock(&chip->reg_lock); chip->cap_buf++; chip->cap_pos += chip->cap_count; chip->cap_pos %= chip->cap_size; tmp = chip->cap_pos + chip->cap_count; tmp %= chip->cap_size; outl(chip->cap_buffer + tmp, (chip->cap_buf & 1) ? FM801_REG(chip, CAP_BUF1) : FM801_REG(chip, CAP_BUF2)); spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(chip->capture_substream); } if (chip->rmidi && (status & FM801_IRQ_MPU)) snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data); if (status & FM801_IRQ_VOLUME) ;/* TODO */ return IRQ_HANDLED; } static struct snd_pcm_hardware snd_fm801_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000, .rate_min = 5500, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 64, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_fm801_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000, .rate_min = 5500, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 64, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static int snd_fm801_playback_open(struct snd_pcm_substream *substream) { struct fm801 *chip = snd_pcm_substream_chip(substream); struct 
snd_pcm_runtime *runtime = substream->runtime; int err; chip->playback_substream = substream; runtime->hw = snd_fm801_playback; snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); if (chip->multichannel) { runtime->hw.channels_max = 6; snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, &hw_constraints_channels); } if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0) return err; return 0; } static int snd_fm801_capture_open(struct snd_pcm_substream *substream) { struct fm801 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; chip->capture_substream = substream; runtime->hw = snd_fm801_capture; snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0) return err; return 0; } static int snd_fm801_playback_close(struct snd_pcm_substream *substream) { struct fm801 *chip = snd_pcm_substream_chip(substream); chip->playback_substream = NULL; return 0; } static int snd_fm801_capture_close(struct snd_pcm_substream *substream) { struct fm801 *chip = snd_pcm_substream_chip(substream); chip->capture_substream = NULL; return 0; } static struct snd_pcm_ops snd_fm801_playback_ops = { .open = snd_fm801_playback_open, .close = snd_fm801_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_fm801_hw_params, .hw_free = snd_fm801_hw_free, .prepare = snd_fm801_playback_prepare, .trigger = snd_fm801_playback_trigger, .pointer = snd_fm801_playback_pointer, }; static struct snd_pcm_ops snd_fm801_capture_ops = { .open = snd_fm801_capture_open, .close = snd_fm801_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_fm801_hw_params, .hw_free = snd_fm801_hw_free, .prepare = snd_fm801_capture_prepare, .trigger = snd_fm801_capture_trigger, .pointer = snd_fm801_capture_pointer, }; static int __devinit snd_fm801_pcm(struct fm801 *chip, 
int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "FM801", device, 1, 1, &pcm)) < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_fm801_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_fm801_capture_ops); pcm->private_data = chip; pcm->info_flags = 0; strcpy(pcm->name, "FM801"); chip->pcm = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), chip->multichannel ? 128*1024 : 64*1024, 128*1024); if (rpcm) *rpcm = pcm; return 0; } /* * TEA5757 radio */ #ifdef CONFIG_SND_FM801_TEA575X_BOOL /* GPIO to TEA575x maps */ struct snd_fm801_tea575x_gpio { u8 data, clk, wren, most; char *name; }; static struct snd_fm801_tea575x_gpio snd_fm801_tea575x_gpios[] = { { .data = 1, .clk = 3, .wren = 2, .most = 0, .name = "SF256-PCS" }, { .data = 1, .clk = 0, .wren = 2, .most = 3, .name = "SF256-PCP" }, { .data = 2, .clk = 0, .wren = 1, .most = 3, .name = "SF64-PCR" }, }; static void snd_fm801_tea575x_set_pins(struct snd_tea575x *tea, u8 pins) { struct fm801 *chip = tea->private_data; unsigned short reg = inw(FM801_REG(chip, GPIO_CTRL)); struct snd_fm801_tea575x_gpio gpio = snd_fm801_tea575x_gpios[(chip->tea575x_tuner & TUNER_TYPE_MASK) - 1]; reg &= ~(FM801_GPIO_GP(gpio.data) | FM801_GPIO_GP(gpio.clk) | FM801_GPIO_GP(gpio.wren)); reg |= (pins & TEA575X_DATA) ? FM801_GPIO_GP(gpio.data) : 0; reg |= (pins & TEA575X_CLK) ? FM801_GPIO_GP(gpio.clk) : 0; /* WRITE_ENABLE is inverted */ reg |= (pins & TEA575X_WREN) ? 0 : FM801_GPIO_GP(gpio.wren); outw(reg, FM801_REG(chip, GPIO_CTRL)); } static u8 snd_fm801_tea575x_get_pins(struct snd_tea575x *tea) { struct fm801 *chip = tea->private_data; unsigned short reg = inw(FM801_REG(chip, GPIO_CTRL)); struct snd_fm801_tea575x_gpio gpio = snd_fm801_tea575x_gpios[(chip->tea575x_tuner & TUNER_TYPE_MASK) - 1]; return (reg & FM801_GPIO_GP(gpio.data)) ? 
TEA575X_DATA : 0 | (reg & FM801_GPIO_GP(gpio.most)) ? TEA575X_MOST : 0; } static void snd_fm801_tea575x_set_direction(struct snd_tea575x *tea, bool output) { struct fm801 *chip = tea->private_data; unsigned short reg = inw(FM801_REG(chip, GPIO_CTRL)); struct snd_fm801_tea575x_gpio gpio = snd_fm801_tea575x_gpios[(chip->tea575x_tuner & TUNER_TYPE_MASK) - 1]; /* use GPIO lines and set write enable bit */ reg |= FM801_GPIO_GS(gpio.data) | FM801_GPIO_GS(gpio.wren) | FM801_GPIO_GS(gpio.clk) | FM801_GPIO_GS(gpio.most); if (output) { /* all of lines are in the write direction */ /* clear data and clock lines */ reg &= ~(FM801_GPIO_GD(gpio.data) | FM801_GPIO_GD(gpio.wren) | FM801_GPIO_GD(gpio.clk) | FM801_GPIO_GP(gpio.data) | FM801_GPIO_GP(gpio.clk) | FM801_GPIO_GP(gpio.wren)); } else { /* use GPIO lines, set data direction to input */ reg |= FM801_GPIO_GD(gpio.data) | FM801_GPIO_GD(gpio.most) | FM801_GPIO_GP(gpio.data) | FM801_GPIO_GP(gpio.most) | FM801_GPIO_GP(gpio.wren); /* all of lines are in the write direction, except data */ /* clear data, write enable and clock lines */ reg &= ~(FM801_GPIO_GD(gpio.wren) | FM801_GPIO_GD(gpio.clk) | FM801_GPIO_GP(gpio.clk)); } outw(reg, FM801_REG(chip, GPIO_CTRL)); } static struct snd_tea575x_ops snd_fm801_tea_ops = { .set_pins = snd_fm801_tea575x_set_pins, .get_pins = snd_fm801_tea575x_get_pins, .set_direction = snd_fm801_tea575x_set_direction, }; #endif /* * Mixer routines */ #define FM801_SINGLE(xname, reg, shift, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .info = snd_fm801_info_single, \ .get = snd_fm801_get_single, .put = snd_fm801_put_single, \ .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) } static int snd_fm801_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 16) & 0xff; uinfo->type = mask == 1 ? 
SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_fm801_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct fm801 *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; ucontrol->value.integer.value[0] = (inw(chip->port + reg) >> shift) & mask; if (invert) ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; return 0; } static int snd_fm801_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct fm801 *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; unsigned short val; val = (ucontrol->value.integer.value[0] & mask); if (invert) val = mask - val; return snd_fm801_update_bits(chip, reg, mask << shift, val << shift); } #define FM801_DOUBLE(xname, reg, shift_left, shift_right, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .info = snd_fm801_info_double, \ .get = snd_fm801_get_double, .put = snd_fm801_put_double, \ .private_value = reg | (shift_left << 8) | (shift_right << 12) | (mask << 16) | (invert << 24) } #define FM801_DOUBLE_TLV(xname, reg, shift_left, shift_right, mask, invert, xtlv) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \ .name = xname, .info = snd_fm801_info_double, \ .get = snd_fm801_get_double, .put = snd_fm801_put_double, \ .private_value = reg | (shift_left << 8) | (shift_right << 12) | (mask << 16) | (invert << 24), \ .tlv = { .p = (xtlv) } } static int snd_fm801_info_double(struct 
snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 16) & 0xff; uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_fm801_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct fm801 *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift_left = (kcontrol->private_value >> 8) & 0x0f; int shift_right = (kcontrol->private_value >> 12) & 0x0f; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; spin_lock_irq(&chip->reg_lock); ucontrol->value.integer.value[0] = (inw(chip->port + reg) >> shift_left) & mask; ucontrol->value.integer.value[1] = (inw(chip->port + reg) >> shift_right) & mask; spin_unlock_irq(&chip->reg_lock); if (invert) { ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1]; } return 0; } static int snd_fm801_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct fm801 *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift_left = (kcontrol->private_value >> 8) & 0x0f; int shift_right = (kcontrol->private_value >> 12) & 0x0f; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; unsigned short val1, val2; val1 = ucontrol->value.integer.value[0] & mask; val2 = ucontrol->value.integer.value[1] & mask; if (invert) { val1 = mask - val1; val2 = mask - val2; } return snd_fm801_update_bits(chip, reg, (mask << shift_left) | (mask << shift_right), (val1 << shift_left ) | (val2 << shift_right)); } static int snd_fm801_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[5] = { "AC97 Primary", "FM", "I2S", "PCM", "AC97 
Secondary" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 5; if (uinfo->value.enumerated.item > 4) uinfo->value.enumerated.item = 4; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_fm801_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct fm801 *chip = snd_kcontrol_chip(kcontrol); unsigned short val; val = inw(FM801_REG(chip, REC_SRC)) & 7; if (val > 4) val = 4; ucontrol->value.enumerated.item[0] = val; return 0; } static int snd_fm801_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct fm801 *chip = snd_kcontrol_chip(kcontrol); unsigned short val; if ((val = ucontrol->value.enumerated.item[0]) > 4) return -EINVAL; return snd_fm801_update_bits(chip, FM801_REC_SRC, 7, val); } static const DECLARE_TLV_DB_SCALE(db_scale_dsp, -3450, 150, 0); #define FM801_CONTROLS ARRAY_SIZE(snd_fm801_controls) static struct snd_kcontrol_new snd_fm801_controls[] __devinitdata = { FM801_DOUBLE_TLV("Wave Playback Volume", FM801_PCM_VOL, 0, 8, 31, 1, db_scale_dsp), FM801_SINGLE("Wave Playback Switch", FM801_PCM_VOL, 15, 1, 1), FM801_DOUBLE_TLV("I2S Playback Volume", FM801_I2S_VOL, 0, 8, 31, 1, db_scale_dsp), FM801_SINGLE("I2S Playback Switch", FM801_I2S_VOL, 15, 1, 1), FM801_DOUBLE_TLV("FM Playback Volume", FM801_FM_VOL, 0, 8, 31, 1, db_scale_dsp), FM801_SINGLE("FM Playback Switch", FM801_FM_VOL, 15, 1, 1), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Digital Capture Source", .info = snd_fm801_info_mux, .get = snd_fm801_get_mux, .put = snd_fm801_put_mux, } }; #define FM801_CONTROLS_MULTI ARRAY_SIZE(snd_fm801_controls_multi) static struct snd_kcontrol_new snd_fm801_controls_multi[] __devinitdata = { FM801_SINGLE("AC97 2ch->4ch Copy Switch", FM801_CODEC_CTRL, 7, 1, 0), FM801_SINGLE("AC97 18-bit Switch", FM801_CODEC_CTRL, 10, 1, 0), FM801_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), FM801_I2S_MODE, 8, 1, 0), 
FM801_SINGLE(SNDRV_CTL_NAME_IEC958("Raw Data ",PLAYBACK,SWITCH), FM801_I2S_MODE, 9, 1, 0), FM801_SINGLE(SNDRV_CTL_NAME_IEC958("Raw Data ",CAPTURE,SWITCH), FM801_I2S_MODE, 10, 1, 0), FM801_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), FM801_GEN_CTRL, 2, 1, 0), }; static void snd_fm801_mixer_free_ac97_bus(struct snd_ac97_bus *bus) { struct fm801 *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_fm801_mixer_free_ac97(struct snd_ac97 *ac97) { struct fm801 *chip = ac97->private_data; if (ac97->num == 0) { chip->ac97 = NULL; } else { chip->ac97_sec = NULL; } } static int __devinit snd_fm801_mixer(struct fm801 *chip) { struct snd_ac97_template ac97; unsigned int i; int err; static struct snd_ac97_bus_ops ops = { .write = snd_fm801_codec_write, .read = snd_fm801_codec_read, }; if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0) return err; chip->ac97_bus->private_free = snd_fm801_mixer_free_ac97_bus; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_fm801_mixer_free_ac97; if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0) return err; if (chip->secondary) { ac97.num = 1; ac97.addr = chip->secondary_addr; if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec)) < 0) return err; } for (i = 0; i < FM801_CONTROLS; i++) snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls[i], chip)); if (chip->multichannel) { for (i = 0; i < FM801_CONTROLS_MULTI; i++) snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls_multi[i], chip)); } return 0; } /* * initialization routines */ static int wait_for_codec(struct fm801 *chip, unsigned int codec_id, unsigned short reg, unsigned long waits) { unsigned long timeout = jiffies + waits; outw(FM801_AC97_READ | (codec_id << FM801_AC97_ADDR_SHIFT) | reg, FM801_REG(chip, AC97_CMD)); udelay(5); do { if ((inw(FM801_REG(chip, AC97_CMD)) & (FM801_AC97_VALID|FM801_AC97_BUSY)) == FM801_AC97_VALID) return 0; schedule_timeout_uninterruptible(1); } 
while (time_after(timeout, jiffies)); return -EIO; } static int snd_fm801_chip_init(struct fm801 *chip, int resume) { unsigned short cmdw; if (chip->tea575x_tuner & TUNER_ONLY) goto __ac97_ok; /* codec cold reset + AC'97 warm reset */ outw((1<<5) | (1<<6), FM801_REG(chip, CODEC_CTRL)); inw(FM801_REG(chip, CODEC_CTRL)); /* flush posting data */ udelay(100); outw(0, FM801_REG(chip, CODEC_CTRL)); if (wait_for_codec(chip, 0, AC97_RESET, msecs_to_jiffies(750)) < 0) if (!resume) { snd_printk(KERN_INFO "Primary AC'97 codec not found, " "assume SF64-PCR (tuner-only)\n"); chip->tea575x_tuner = 3 | TUNER_ONLY; goto __ac97_ok; } if (chip->multichannel) { if (chip->secondary_addr) { wait_for_codec(chip, chip->secondary_addr, AC97_VENDOR_ID1, msecs_to_jiffies(50)); } else { /* my card has the secondary codec */ /* at address #3, so the loop is inverted */ int i; for (i = 3; i > 0; i--) { if (!wait_for_codec(chip, i, AC97_VENDOR_ID1, msecs_to_jiffies(50))) { cmdw = inw(FM801_REG(chip, AC97_DATA)); if (cmdw != 0xffff && cmdw != 0) { chip->secondary = 1; chip->secondary_addr = i; break; } } } } /* the recovery phase, it seems that probing for non-existing codec might */ /* cause timeout problems */ wait_for_codec(chip, 0, AC97_VENDOR_ID1, msecs_to_jiffies(750)); } __ac97_ok: /* init volume */ outw(0x0808, FM801_REG(chip, PCM_VOL)); outw(0x9f1f, FM801_REG(chip, FM_VOL)); outw(0x8808, FM801_REG(chip, I2S_VOL)); /* I2S control - I2S mode */ outw(0x0003, FM801_REG(chip, I2S_MODE)); /* interrupt setup */ cmdw = inw(FM801_REG(chip, IRQ_MASK)); if (chip->irq < 0) cmdw |= 0x00c3; /* mask everything, no PCM nor MPU */ else cmdw &= ~0x0083; /* unmask MPU, PLAYBACK & CAPTURE */ outw(cmdw, FM801_REG(chip, IRQ_MASK)); /* interrupt clear */ outw(FM801_IRQ_PLAYBACK|FM801_IRQ_CAPTURE|FM801_IRQ_MPU, FM801_REG(chip, IRQ_STATUS)); return 0; } static int snd_fm801_free(struct fm801 *chip) { unsigned short cmdw; if (chip->irq < 0) goto __end_hw; /* interrupt setup - mask everything */ cmdw = 
inw(FM801_REG(chip, IRQ_MASK)); cmdw |= 0x00c3; outw(cmdw, FM801_REG(chip, IRQ_MASK)); __end_hw: #ifdef CONFIG_SND_FM801_TEA575X_BOOL if (!(chip->tea575x_tuner & TUNER_DISABLED)) snd_tea575x_exit(&chip->tea); #endif if (chip->irq >= 0) free_irq(chip->irq, chip); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_fm801_dev_free(struct snd_device *device) { struct fm801 *chip = device->device_data; return snd_fm801_free(chip); } static int __devinit snd_fm801_create(struct snd_card *card, struct pci_dev * pci, int tea575x_tuner, struct fm801 ** rchip) { struct fm801 *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_fm801_dev_free, }; *rchip = NULL; if ((err = pci_enable_device(pci)) < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); chip->card = card; chip->pci = pci; chip->irq = -1; chip->tea575x_tuner = tea575x_tuner; if ((err = pci_request_regions(pci, "FM801")) < 0) { kfree(chip); pci_disable_device(pci); return err; } chip->port = pci_resource_start(pci, 0); if ((tea575x_tuner & TUNER_ONLY) == 0) { if (request_irq(pci->irq, snd_fm801_interrupt, IRQF_SHARED, "FM801", chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", chip->irq); snd_fm801_free(chip); return -EBUSY; } chip->irq = pci->irq; pci_set_master(pci); } if (pci->revision >= 0xb1) /* FM801-AU */ chip->multichannel = 1; snd_fm801_chip_init(chip, 0); /* init might set tuner access method */ tea575x_tuner = chip->tea575x_tuner; if (chip->irq >= 0 && (tea575x_tuner & TUNER_ONLY)) { pci_clear_master(pci); free_irq(chip->irq, chip); chip->irq = -1; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_fm801_free(chip); return err; } snd_card_set_dev(card, &pci->dev); #ifdef CONFIG_SND_FM801_TEA575X_BOOL chip->tea.private_data = chip; chip->tea.ops = &snd_fm801_tea_ops; sprintf(chip->tea.bus_info, "PCI:%s", 
pci_name(pci)); if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 && (tea575x_tuner & TUNER_TYPE_MASK) < 4) { if (snd_tea575x_init(&chip->tea)) { snd_printk(KERN_ERR "TEA575x radio not found\n"); return -ENODEV; } } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) { /* autodetect tuner connection */ for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) { chip->tea575x_tuner = tea575x_tuner; if (!snd_tea575x_init(&chip->tea)) { snd_printk(KERN_INFO "detected TEA575x radio type %s\n", snd_fm801_tea575x_gpios[tea575x_tuner - 1].name); break; } } if (tea575x_tuner == 4) { snd_printk(KERN_ERR "TEA575x radio not found\n"); chip->tea575x_tuner = TUNER_DISABLED; } } if (!(chip->tea575x_tuner & TUNER_DISABLED)) { strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card)); } #endif *rchip = chip; return 0; } static int __devinit snd_card_fm801_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct fm801 *chip; struct snd_opl3 *opl3; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; if ((err = snd_fm801_create(card, pci, tea575x_tuner[dev], &chip)) < 0) { snd_card_free(card); return err; } card->private_data = chip; strcpy(card->driver, "FM801"); strcpy(card->shortname, "ForteMedia FM801-"); strcat(card->shortname, chip->multichannel ? 
"AU" : "AS"); sprintf(card->longname, "%s at 0x%lx, irq %i", card->shortname, chip->port, chip->irq); if (chip->tea575x_tuner & TUNER_ONLY) goto __fm801_tuner_only; if ((err = snd_fm801_pcm(chip, 0, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_fm801_mixer(chip)) < 0) { snd_card_free(card); return err; } if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_FM801, FM801_REG(chip, MPU401_DATA), MPU401_INFO_INTEGRATED, chip->irq, 0, &chip->rmidi)) < 0) { snd_card_free(card); return err; } if ((err = snd_opl3_create(card, FM801_REG(chip, OPL3_BANK0), FM801_REG(chip, OPL3_BANK1), OPL3_HW_OPL3_FM801, 1, &opl3)) < 0) { snd_card_free(card); return err; } if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { snd_card_free(card); return err; } __fm801_tuner_only: if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_card_fm801_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM static unsigned char saved_regs[] = { FM801_PCM_VOL, FM801_I2S_VOL, FM801_FM_VOL, FM801_REC_SRC, FM801_PLY_CTRL, FM801_PLY_COUNT, FM801_PLY_BUF1, FM801_PLY_BUF2, FM801_CAP_CTRL, FM801_CAP_COUNT, FM801_CAP_BUF1, FM801_CAP_BUF2, FM801_CODEC_CTRL, FM801_I2S_MODE, FM801_VOLUME, FM801_GEN_CTRL, }; static int snd_fm801_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct fm801 *chip = card->private_data; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); snd_ac97_suspend(chip->ac97); snd_ac97_suspend(chip->ac97_sec); for (i = 0; i < ARRAY_SIZE(saved_regs); i++) chip->saved_regs[i] = inw(chip->port + saved_regs[i]); /* FIXME: tea575x suspend */ pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int snd_fm801_resume(struct pci_dev *pci) { struct snd_card *card = 
pci_get_drvdata(pci); struct fm801 *chip = card->private_data; int i; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "fm801: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); snd_fm801_chip_init(chip, 1); snd_ac97_resume(chip->ac97); snd_ac97_resume(chip->ac97_sec); for (i = 0; i < ARRAY_SIZE(saved_regs); i++) outw(chip->saved_regs[i], chip->port + saved_regs[i]); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct pci_driver driver = { .name = "FM801", .id_table = snd_fm801_ids, .probe = snd_card_fm801_probe, .remove = __devexit_p(snd_card_fm801_remove), #ifdef CONFIG_PM .suspend = snd_fm801_suspend, .resume = snd_fm801_resume, #endif }; static int __init alsa_card_fm801_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_fm801_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_fm801_init) module_exit(alsa_card_fm801_exit)
gpl-2.0
ace8957/SeniorDesignKernel
crypto/anubis.c
2339
28430
/* * Cryptographic API. * * Anubis Algorithm * * The Anubis algorithm was developed by Paulo S. L. M. Barreto and * Vincent Rijmen. * * See * * P.S.L.M. Barreto, V. Rijmen, * ``The Anubis block cipher,'' * NESSIE submission, 2000. * * This software implements the "tweaked" version of Anubis. * Only the S-box and (consequently) the rounds constants have been * changed. * * The original authors have disclaimed all copyright interest in this * code and thus put it in the public domain. The subsequent authors * have put this under the GNU General Public License. * * By Aaron Grothe ajgrothe@yahoo.com, October 28, 2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <linux/crypto.h> #include <linux/types.h> #define ANUBIS_MIN_KEY_SIZE 16 #define ANUBIS_MAX_KEY_SIZE 40 #define ANUBIS_BLOCK_SIZE 16 #define ANUBIS_MAX_N 10 #define ANUBIS_MAX_ROUNDS (8 + ANUBIS_MAX_N) struct anubis_ctx { int key_len; // in bits int R; u32 E[ANUBIS_MAX_ROUNDS + 1][4]; u32 D[ANUBIS_MAX_ROUNDS + 1][4]; }; static const u32 T0[256] = { 0xba69d2bbU, 0x54a84de5U, 0x2f5ebce2U, 0x74e8cd25U, 0x53a651f7U, 0xd3bb6bd0U, 0xd2b96fd6U, 0x4d9a29b3U, 0x50a05dfdU, 0xac458acfU, 0x8d070e09U, 0xbf63c6a5U, 0x70e0dd3dU, 0x52a455f1U, 0x9a29527bU, 0x4c982db5U, 0xeac98f46U, 0xd5b773c4U, 0x97336655U, 0xd1bf63dcU, 0x3366ccaaU, 0x51a259fbU, 0x5bb671c7U, 0xa651a2f3U, 0xdea15ffeU, 0x48903dadU, 0xa84d9ad7U, 0x992f5e71U, 0xdbab4be0U, 0x3264c8acU, 0xb773e695U, 0xfce5d732U, 0xe3dbab70U, 0x9e214263U, 0x913f7e41U, 0x9b2b567dU, 0xe2d9af76U, 0xbb6bd6bdU, 0x4182199bU, 0x6edca579U, 0xa557aef9U, 0xcb8b0b80U, 0x6bd6b167U, 0x95376e59U, 0xa15fbee1U, 0xf3fbeb10U, 0xb17ffe81U, 0x0204080cU, 0xcc851792U, 0xc49537a2U, 0x1d3a744eU, 
0x14285078U, 0xc39b2bb0U, 0x63c69157U, 0xdaa94fe6U, 0x5dba69d3U, 0x5fbe61dfU, 0xdca557f2U, 0x7dfae913U, 0xcd871394U, 0x7ffee11fU, 0x5ab475c1U, 0x6cd8ad75U, 0x5cb86dd5U, 0xf7f3fb08U, 0x264c98d4U, 0xffe3db38U, 0xedc79354U, 0xe8cd874aU, 0x9d274e69U, 0x6fdea17fU, 0x8e010203U, 0x19326456U, 0xa05dbae7U, 0xf0fde71aU, 0x890f1e11U, 0x0f1e3c22U, 0x070e1c12U, 0xaf4386c5U, 0xfbebcb20U, 0x08102030U, 0x152a547eU, 0x0d1a342eU, 0x04081018U, 0x01020406U, 0x64c88d45U, 0xdfa35bf8U, 0x76ecc529U, 0x79f2f90bU, 0xdda753f4U, 0x3d7af48eU, 0x162c5874U, 0x3f7efc82U, 0x376edcb2U, 0x6ddaa973U, 0x3870e090U, 0xb96fdeb1U, 0x73e6d137U, 0xe9cf834cU, 0x356ad4beU, 0x55aa49e3U, 0x71e2d93bU, 0x7bf6f107U, 0x8c050a0fU, 0x72e4d531U, 0x880d1a17U, 0xf6f1ff0eU, 0x2a54a8fcU, 0x3e7cf884U, 0x5ebc65d9U, 0x274e9cd2U, 0x468c0589U, 0x0c183028U, 0x65ca8943U, 0x68d0bd6dU, 0x61c2995bU, 0x03060c0aU, 0xc19f23bcU, 0x57ae41efU, 0xd6b17fceU, 0xd9af43ecU, 0x58b07dcdU, 0xd8ad47eaU, 0x66cc8549U, 0xd7b37bc8U, 0x3a74e89cU, 0xc88d078aU, 0x3c78f088U, 0xfae9cf26U, 0x96316253U, 0xa753a6f5U, 0x982d5a77U, 0xecc59752U, 0xb86ddab7U, 0xc7933ba8U, 0xae4182c3U, 0x69d2b96bU, 0x4b9631a7U, 0xab4b96ddU, 0xa94f9ed1U, 0x67ce814fU, 0x0a14283cU, 0x478e018fU, 0xf2f9ef16U, 0xb577ee99U, 0x224488ccU, 0xe5d7b364U, 0xeec19f5eU, 0xbe61c2a3U, 0x2b56acfaU, 0x811f3e21U, 0x1224486cU, 0x831b362dU, 0x1b366c5aU, 0x0e1c3824U, 0x23468ccaU, 0xf5f7f304U, 0x458a0983U, 0x214284c6U, 0xce811f9eU, 0x499239abU, 0x2c58b0e8U, 0xf9efc32cU, 0xe6d1bf6eU, 0xb671e293U, 0x2850a0f0U, 0x172e5c72U, 0x8219322bU, 0x1a34685cU, 0x8b0b161dU, 0xfee1df3eU, 0x8a09121bU, 0x09122436U, 0xc98f038cU, 0x87132635U, 0x4e9c25b9U, 0xe1dfa37cU, 0x2e5cb8e4U, 0xe4d5b762U, 0xe0dda77aU, 0xebcb8b40U, 0x903d7a47U, 0xa455aaffU, 0x1e3c7844U, 0x85172e39U, 0x60c09d5dU, 0x00000000U, 0x254a94deU, 0xf4f5f702U, 0xf1ffe31cU, 0x94356a5fU, 0x0b162c3aU, 0xe7d3bb68U, 0x75eac923U, 0xefc39b58U, 0x3468d0b8U, 0x3162c4a6U, 0xd4b577c2U, 0xd0bd67daU, 0x86112233U, 0x7efce519U, 0xad478ec9U, 0xfde7d334U, 0x2952a4f6U, 
0x3060c0a0U, 0x3b76ec9aU, 0x9f234665U, 0xf8edc72aU, 0xc6913faeU, 0x13264c6aU, 0x060c1814U, 0x050a141eU, 0xc59733a4U, 0x11224466U, 0x77eec12fU, 0x7cf8ed15U, 0x7af4f501U, 0x78f0fd0dU, 0x366cd8b4U, 0x1c387048U, 0x3972e496U, 0x59b279cbU, 0x18306050U, 0x56ac45e9U, 0xb37bf68dU, 0xb07dfa87U, 0x244890d8U, 0x204080c0U, 0xb279f28bU, 0x9239724bU, 0xa35bb6edU, 0xc09d27baU, 0x44880d85U, 0x62c49551U, 0x10204060U, 0xb475ea9fU, 0x84152a3fU, 0x43861197U, 0x933b764dU, 0xc2992fb6U, 0x4a9435a1U, 0xbd67cea9U, 0x8f030605U, 0x2d5ab4eeU, 0xbc65caafU, 0x9c254a6fU, 0x6ad4b561U, 0x40801d9dU, 0xcf831b98U, 0xa259b2ebU, 0x801d3a27U, 0x4f9e21bfU, 0x1f3e7c42U, 0xca890f86U, 0xaa4992dbU, 0x42841591U, }; static const u32 T1[256] = { 0x69babbd2U, 0xa854e54dU, 0x5e2fe2bcU, 0xe87425cdU, 0xa653f751U, 0xbbd3d06bU, 0xb9d2d66fU, 0x9a4db329U, 0xa050fd5dU, 0x45accf8aU, 0x078d090eU, 0x63bfa5c6U, 0xe0703dddU, 0xa452f155U, 0x299a7b52U, 0x984cb52dU, 0xc9ea468fU, 0xb7d5c473U, 0x33975566U, 0xbfd1dc63U, 0x6633aaccU, 0xa251fb59U, 0xb65bc771U, 0x51a6f3a2U, 0xa1defe5fU, 0x9048ad3dU, 0x4da8d79aU, 0x2f99715eU, 0xabdbe04bU, 0x6432acc8U, 0x73b795e6U, 0xe5fc32d7U, 0xdbe370abU, 0x219e6342U, 0x3f91417eU, 0x2b9b7d56U, 0xd9e276afU, 0x6bbbbdd6U, 0x82419b19U, 0xdc6e79a5U, 0x57a5f9aeU, 0x8bcb800bU, 0xd66b67b1U, 0x3795596eU, 0x5fa1e1beU, 0xfbf310ebU, 0x7fb181feU, 0x04020c08U, 0x85cc9217U, 0x95c4a237U, 0x3a1d4e74U, 0x28147850U, 0x9bc3b02bU, 0xc6635791U, 0xa9dae64fU, 0xba5dd369U, 0xbe5fdf61U, 0xa5dcf257U, 0xfa7d13e9U, 0x87cd9413U, 0xfe7f1fe1U, 0xb45ac175U, 0xd86c75adU, 0xb85cd56dU, 0xf3f708fbU, 0x4c26d498U, 0xe3ff38dbU, 0xc7ed5493U, 0xcde84a87U, 0x279d694eU, 0xde6f7fa1U, 0x018e0302U, 0x32195664U, 0x5da0e7baU, 0xfdf01ae7U, 0x0f89111eU, 0x1e0f223cU, 0x0e07121cU, 0x43afc586U, 0xebfb20cbU, 0x10083020U, 0x2a157e54U, 0x1a0d2e34U, 0x08041810U, 0x02010604U, 0xc864458dU, 0xa3dff85bU, 0xec7629c5U, 0xf2790bf9U, 0xa7ddf453U, 0x7a3d8ef4U, 0x2c167458U, 0x7e3f82fcU, 0x6e37b2dcU, 0xda6d73a9U, 0x703890e0U, 0x6fb9b1deU, 0xe67337d1U, 0xcfe94c83U, 
0x6a35bed4U, 0xaa55e349U, 0xe2713bd9U, 0xf67b07f1U, 0x058c0f0aU, 0xe47231d5U, 0x0d88171aU, 0xf1f60effU, 0x542afca8U, 0x7c3e84f8U, 0xbc5ed965U, 0x4e27d29cU, 0x8c468905U, 0x180c2830U, 0xca654389U, 0xd0686dbdU, 0xc2615b99U, 0x06030a0cU, 0x9fc1bc23U, 0xae57ef41U, 0xb1d6ce7fU, 0xafd9ec43U, 0xb058cd7dU, 0xadd8ea47U, 0xcc664985U, 0xb3d7c87bU, 0x743a9ce8U, 0x8dc88a07U, 0x783c88f0U, 0xe9fa26cfU, 0x31965362U, 0x53a7f5a6U, 0x2d98775aU, 0xc5ec5297U, 0x6db8b7daU, 0x93c7a83bU, 0x41aec382U, 0xd2696bb9U, 0x964ba731U, 0x4babdd96U, 0x4fa9d19eU, 0xce674f81U, 0x140a3c28U, 0x8e478f01U, 0xf9f216efU, 0x77b599eeU, 0x4422cc88U, 0xd7e564b3U, 0xc1ee5e9fU, 0x61bea3c2U, 0x562bfaacU, 0x1f81213eU, 0x24126c48U, 0x1b832d36U, 0x361b5a6cU, 0x1c0e2438U, 0x4623ca8cU, 0xf7f504f3U, 0x8a458309U, 0x4221c684U, 0x81ce9e1fU, 0x9249ab39U, 0x582ce8b0U, 0xeff92cc3U, 0xd1e66ebfU, 0x71b693e2U, 0x5028f0a0U, 0x2e17725cU, 0x19822b32U, 0x341a5c68U, 0x0b8b1d16U, 0xe1fe3edfU, 0x098a1b12U, 0x12093624U, 0x8fc98c03U, 0x13873526U, 0x9c4eb925U, 0xdfe17ca3U, 0x5c2ee4b8U, 0xd5e462b7U, 0xdde07aa7U, 0xcbeb408bU, 0x3d90477aU, 0x55a4ffaaU, 0x3c1e4478U, 0x1785392eU, 0xc0605d9dU, 0x00000000U, 0x4a25de94U, 0xf5f402f7U, 0xfff11ce3U, 0x35945f6aU, 0x160b3a2cU, 0xd3e768bbU, 0xea7523c9U, 0xc3ef589bU, 0x6834b8d0U, 0x6231a6c4U, 0xb5d4c277U, 0xbdd0da67U, 0x11863322U, 0xfc7e19e5U, 0x47adc98eU, 0xe7fd34d3U, 0x5229f6a4U, 0x6030a0c0U, 0x763b9aecU, 0x239f6546U, 0xedf82ac7U, 0x91c6ae3fU, 0x26136a4cU, 0x0c061418U, 0x0a051e14U, 0x97c5a433U, 0x22116644U, 0xee772fc1U, 0xf87c15edU, 0xf47a01f5U, 0xf0780dfdU, 0x6c36b4d8U, 0x381c4870U, 0x723996e4U, 0xb259cb79U, 0x30185060U, 0xac56e945U, 0x7bb38df6U, 0x7db087faU, 0x4824d890U, 0x4020c080U, 0x79b28bf2U, 0x39924b72U, 0x5ba3edb6U, 0x9dc0ba27U, 0x8844850dU, 0xc4625195U, 0x20106040U, 0x75b49feaU, 0x15843f2aU, 0x86439711U, 0x3b934d76U, 0x99c2b62fU, 0x944aa135U, 0x67bda9ceU, 0x038f0506U, 0x5a2deeb4U, 0x65bcafcaU, 0x259c6f4aU, 0xd46a61b5U, 0x80409d1dU, 0x83cf981bU, 0x59a2ebb2U, 0x1d80273aU, 0x9e4fbf21U, 
0x3e1f427cU, 0x89ca860fU, 0x49aadb92U, 0x84429115U, }; static const u32 T2[256] = { 0xd2bbba69U, 0x4de554a8U, 0xbce22f5eU, 0xcd2574e8U, 0x51f753a6U, 0x6bd0d3bbU, 0x6fd6d2b9U, 0x29b34d9aU, 0x5dfd50a0U, 0x8acfac45U, 0x0e098d07U, 0xc6a5bf63U, 0xdd3d70e0U, 0x55f152a4U, 0x527b9a29U, 0x2db54c98U, 0x8f46eac9U, 0x73c4d5b7U, 0x66559733U, 0x63dcd1bfU, 0xccaa3366U, 0x59fb51a2U, 0x71c75bb6U, 0xa2f3a651U, 0x5ffedea1U, 0x3dad4890U, 0x9ad7a84dU, 0x5e71992fU, 0x4be0dbabU, 0xc8ac3264U, 0xe695b773U, 0xd732fce5U, 0xab70e3dbU, 0x42639e21U, 0x7e41913fU, 0x567d9b2bU, 0xaf76e2d9U, 0xd6bdbb6bU, 0x199b4182U, 0xa5796edcU, 0xaef9a557U, 0x0b80cb8bU, 0xb1676bd6U, 0x6e599537U, 0xbee1a15fU, 0xeb10f3fbU, 0xfe81b17fU, 0x080c0204U, 0x1792cc85U, 0x37a2c495U, 0x744e1d3aU, 0x50781428U, 0x2bb0c39bU, 0x915763c6U, 0x4fe6daa9U, 0x69d35dbaU, 0x61df5fbeU, 0x57f2dca5U, 0xe9137dfaU, 0x1394cd87U, 0xe11f7ffeU, 0x75c15ab4U, 0xad756cd8U, 0x6dd55cb8U, 0xfb08f7f3U, 0x98d4264cU, 0xdb38ffe3U, 0x9354edc7U, 0x874ae8cdU, 0x4e699d27U, 0xa17f6fdeU, 0x02038e01U, 0x64561932U, 0xbae7a05dU, 0xe71af0fdU, 0x1e11890fU, 0x3c220f1eU, 0x1c12070eU, 0x86c5af43U, 0xcb20fbebU, 0x20300810U, 0x547e152aU, 0x342e0d1aU, 0x10180408U, 0x04060102U, 0x8d4564c8U, 0x5bf8dfa3U, 0xc52976ecU, 0xf90b79f2U, 0x53f4dda7U, 0xf48e3d7aU, 0x5874162cU, 0xfc823f7eU, 0xdcb2376eU, 0xa9736ddaU, 0xe0903870U, 0xdeb1b96fU, 0xd13773e6U, 0x834ce9cfU, 0xd4be356aU, 0x49e355aaU, 0xd93b71e2U, 0xf1077bf6U, 0x0a0f8c05U, 0xd53172e4U, 0x1a17880dU, 0xff0ef6f1U, 0xa8fc2a54U, 0xf8843e7cU, 0x65d95ebcU, 0x9cd2274eU, 0x0589468cU, 0x30280c18U, 0x894365caU, 0xbd6d68d0U, 0x995b61c2U, 0x0c0a0306U, 0x23bcc19fU, 0x41ef57aeU, 0x7fced6b1U, 0x43ecd9afU, 0x7dcd58b0U, 0x47ead8adU, 0x854966ccU, 0x7bc8d7b3U, 0xe89c3a74U, 0x078ac88dU, 0xf0883c78U, 0xcf26fae9U, 0x62539631U, 0xa6f5a753U, 0x5a77982dU, 0x9752ecc5U, 0xdab7b86dU, 0x3ba8c793U, 0x82c3ae41U, 0xb96b69d2U, 0x31a74b96U, 0x96ddab4bU, 0x9ed1a94fU, 0x814f67ceU, 0x283c0a14U, 0x018f478eU, 0xef16f2f9U, 0xee99b577U, 0x88cc2244U, 0xb364e5d7U, 
0x9f5eeec1U, 0xc2a3be61U, 0xacfa2b56U, 0x3e21811fU, 0x486c1224U, 0x362d831bU, 0x6c5a1b36U, 0x38240e1cU, 0x8cca2346U, 0xf304f5f7U, 0x0983458aU, 0x84c62142U, 0x1f9ece81U, 0x39ab4992U, 0xb0e82c58U, 0xc32cf9efU, 0xbf6ee6d1U, 0xe293b671U, 0xa0f02850U, 0x5c72172eU, 0x322b8219U, 0x685c1a34U, 0x161d8b0bU, 0xdf3efee1U, 0x121b8a09U, 0x24360912U, 0x038cc98fU, 0x26358713U, 0x25b94e9cU, 0xa37ce1dfU, 0xb8e42e5cU, 0xb762e4d5U, 0xa77ae0ddU, 0x8b40ebcbU, 0x7a47903dU, 0xaaffa455U, 0x78441e3cU, 0x2e398517U, 0x9d5d60c0U, 0x00000000U, 0x94de254aU, 0xf702f4f5U, 0xe31cf1ffU, 0x6a5f9435U, 0x2c3a0b16U, 0xbb68e7d3U, 0xc92375eaU, 0x9b58efc3U, 0xd0b83468U, 0xc4a63162U, 0x77c2d4b5U, 0x67dad0bdU, 0x22338611U, 0xe5197efcU, 0x8ec9ad47U, 0xd334fde7U, 0xa4f62952U, 0xc0a03060U, 0xec9a3b76U, 0x46659f23U, 0xc72af8edU, 0x3faec691U, 0x4c6a1326U, 0x1814060cU, 0x141e050aU, 0x33a4c597U, 0x44661122U, 0xc12f77eeU, 0xed157cf8U, 0xf5017af4U, 0xfd0d78f0U, 0xd8b4366cU, 0x70481c38U, 0xe4963972U, 0x79cb59b2U, 0x60501830U, 0x45e956acU, 0xf68db37bU, 0xfa87b07dU, 0x90d82448U, 0x80c02040U, 0xf28bb279U, 0x724b9239U, 0xb6eda35bU, 0x27bac09dU, 0x0d854488U, 0x955162c4U, 0x40601020U, 0xea9fb475U, 0x2a3f8415U, 0x11974386U, 0x764d933bU, 0x2fb6c299U, 0x35a14a94U, 0xcea9bd67U, 0x06058f03U, 0xb4ee2d5aU, 0xcaafbc65U, 0x4a6f9c25U, 0xb5616ad4U, 0x1d9d4080U, 0x1b98cf83U, 0xb2eba259U, 0x3a27801dU, 0x21bf4f9eU, 0x7c421f3eU, 0x0f86ca89U, 0x92dbaa49U, 0x15914284U, }; static const u32 T3[256] = { 0xbbd269baU, 0xe54da854U, 0xe2bc5e2fU, 0x25cde874U, 0xf751a653U, 0xd06bbbd3U, 0xd66fb9d2U, 0xb3299a4dU, 0xfd5da050U, 0xcf8a45acU, 0x090e078dU, 0xa5c663bfU, 0x3ddde070U, 0xf155a452U, 0x7b52299aU, 0xb52d984cU, 0x468fc9eaU, 0xc473b7d5U, 0x55663397U, 0xdc63bfd1U, 0xaacc6633U, 0xfb59a251U, 0xc771b65bU, 0xf3a251a6U, 0xfe5fa1deU, 0xad3d9048U, 0xd79a4da8U, 0x715e2f99U, 0xe04babdbU, 0xacc86432U, 0x95e673b7U, 0x32d7e5fcU, 0x70abdbe3U, 0x6342219eU, 0x417e3f91U, 0x7d562b9bU, 0x76afd9e2U, 0xbdd66bbbU, 0x9b198241U, 0x79a5dc6eU, 0xf9ae57a5U, 0x800b8bcbU, 
0x67b1d66bU, 0x596e3795U, 0xe1be5fa1U, 0x10ebfbf3U, 0x81fe7fb1U, 0x0c080402U, 0x921785ccU, 0xa23795c4U, 0x4e743a1dU, 0x78502814U, 0xb02b9bc3U, 0x5791c663U, 0xe64fa9daU, 0xd369ba5dU, 0xdf61be5fU, 0xf257a5dcU, 0x13e9fa7dU, 0x941387cdU, 0x1fe1fe7fU, 0xc175b45aU, 0x75add86cU, 0xd56db85cU, 0x08fbf3f7U, 0xd4984c26U, 0x38dbe3ffU, 0x5493c7edU, 0x4a87cde8U, 0x694e279dU, 0x7fa1de6fU, 0x0302018eU, 0x56643219U, 0xe7ba5da0U, 0x1ae7fdf0U, 0x111e0f89U, 0x223c1e0fU, 0x121c0e07U, 0xc58643afU, 0x20cbebfbU, 0x30201008U, 0x7e542a15U, 0x2e341a0dU, 0x18100804U, 0x06040201U, 0x458dc864U, 0xf85ba3dfU, 0x29c5ec76U, 0x0bf9f279U, 0xf453a7ddU, 0x8ef47a3dU, 0x74582c16U, 0x82fc7e3fU, 0xb2dc6e37U, 0x73a9da6dU, 0x90e07038U, 0xb1de6fb9U, 0x37d1e673U, 0x4c83cfe9U, 0xbed46a35U, 0xe349aa55U, 0x3bd9e271U, 0x07f1f67bU, 0x0f0a058cU, 0x31d5e472U, 0x171a0d88U, 0x0efff1f6U, 0xfca8542aU, 0x84f87c3eU, 0xd965bc5eU, 0xd29c4e27U, 0x89058c46U, 0x2830180cU, 0x4389ca65U, 0x6dbdd068U, 0x5b99c261U, 0x0a0c0603U, 0xbc239fc1U, 0xef41ae57U, 0xce7fb1d6U, 0xec43afd9U, 0xcd7db058U, 0xea47add8U, 0x4985cc66U, 0xc87bb3d7U, 0x9ce8743aU, 0x8a078dc8U, 0x88f0783cU, 0x26cfe9faU, 0x53623196U, 0xf5a653a7U, 0x775a2d98U, 0x5297c5ecU, 0xb7da6db8U, 0xa83b93c7U, 0xc38241aeU, 0x6bb9d269U, 0xa731964bU, 0xdd964babU, 0xd19e4fa9U, 0x4f81ce67U, 0x3c28140aU, 0x8f018e47U, 0x16eff9f2U, 0x99ee77b5U, 0xcc884422U, 0x64b3d7e5U, 0x5e9fc1eeU, 0xa3c261beU, 0xfaac562bU, 0x213e1f81U, 0x6c482412U, 0x2d361b83U, 0x5a6c361bU, 0x24381c0eU, 0xca8c4623U, 0x04f3f7f5U, 0x83098a45U, 0xc6844221U, 0x9e1f81ceU, 0xab399249U, 0xe8b0582cU, 0x2cc3eff9U, 0x6ebfd1e6U, 0x93e271b6U, 0xf0a05028U, 0x725c2e17U, 0x2b321982U, 0x5c68341aU, 0x1d160b8bU, 0x3edfe1feU, 0x1b12098aU, 0x36241209U, 0x8c038fc9U, 0x35261387U, 0xb9259c4eU, 0x7ca3dfe1U, 0xe4b85c2eU, 0x62b7d5e4U, 0x7aa7dde0U, 0x408bcbebU, 0x477a3d90U, 0xffaa55a4U, 0x44783c1eU, 0x392e1785U, 0x5d9dc060U, 0x00000000U, 0xde944a25U, 0x02f7f5f4U, 0x1ce3fff1U, 0x5f6a3594U, 0x3a2c160bU, 0x68bbd3e7U, 0x23c9ea75U, 0x589bc3efU, 
0xb8d06834U, 0xa6c46231U, 0xc277b5d4U, 0xda67bdd0U, 0x33221186U, 0x19e5fc7eU, 0xc98e47adU, 0x34d3e7fdU, 0xf6a45229U, 0xa0c06030U, 0x9aec763bU, 0x6546239fU, 0x2ac7edf8U, 0xae3f91c6U, 0x6a4c2613U, 0x14180c06U, 0x1e140a05U, 0xa43397c5U, 0x66442211U, 0x2fc1ee77U, 0x15edf87cU, 0x01f5f47aU, 0x0dfdf078U, 0xb4d86c36U, 0x4870381cU, 0x96e47239U, 0xcb79b259U, 0x50603018U, 0xe945ac56U, 0x8df67bb3U, 0x87fa7db0U, 0xd8904824U, 0xc0804020U, 0x8bf279b2U, 0x4b723992U, 0xedb65ba3U, 0xba279dc0U, 0x850d8844U, 0x5195c462U, 0x60402010U, 0x9fea75b4U, 0x3f2a1584U, 0x97118643U, 0x4d763b93U, 0xb62f99c2U, 0xa135944aU, 0xa9ce67bdU, 0x0506038fU, 0xeeb45a2dU, 0xafca65bcU, 0x6f4a259cU, 0x61b5d46aU, 0x9d1d8040U, 0x981b83cfU, 0xebb259a2U, 0x273a1d80U, 0xbf219e4fU, 0x427c3e1fU, 0x860f89caU, 0xdb9249aaU, 0x91158442U, }; static const u32 T4[256] = { 0xbabababaU, 0x54545454U, 0x2f2f2f2fU, 0x74747474U, 0x53535353U, 0xd3d3d3d3U, 0xd2d2d2d2U, 0x4d4d4d4dU, 0x50505050U, 0xacacacacU, 0x8d8d8d8dU, 0xbfbfbfbfU, 0x70707070U, 0x52525252U, 0x9a9a9a9aU, 0x4c4c4c4cU, 0xeaeaeaeaU, 0xd5d5d5d5U, 0x97979797U, 0xd1d1d1d1U, 0x33333333U, 0x51515151U, 0x5b5b5b5bU, 0xa6a6a6a6U, 0xdedededeU, 0x48484848U, 0xa8a8a8a8U, 0x99999999U, 0xdbdbdbdbU, 0x32323232U, 0xb7b7b7b7U, 0xfcfcfcfcU, 0xe3e3e3e3U, 0x9e9e9e9eU, 0x91919191U, 0x9b9b9b9bU, 0xe2e2e2e2U, 0xbbbbbbbbU, 0x41414141U, 0x6e6e6e6eU, 0xa5a5a5a5U, 0xcbcbcbcbU, 0x6b6b6b6bU, 0x95959595U, 0xa1a1a1a1U, 0xf3f3f3f3U, 0xb1b1b1b1U, 0x02020202U, 0xccccccccU, 0xc4c4c4c4U, 0x1d1d1d1dU, 0x14141414U, 0xc3c3c3c3U, 0x63636363U, 0xdadadadaU, 0x5d5d5d5dU, 0x5f5f5f5fU, 0xdcdcdcdcU, 0x7d7d7d7dU, 0xcdcdcdcdU, 0x7f7f7f7fU, 0x5a5a5a5aU, 0x6c6c6c6cU, 0x5c5c5c5cU, 0xf7f7f7f7U, 0x26262626U, 0xffffffffU, 0xededededU, 0xe8e8e8e8U, 0x9d9d9d9dU, 0x6f6f6f6fU, 0x8e8e8e8eU, 0x19191919U, 0xa0a0a0a0U, 0xf0f0f0f0U, 0x89898989U, 0x0f0f0f0fU, 0x07070707U, 0xafafafafU, 0xfbfbfbfbU, 0x08080808U, 0x15151515U, 0x0d0d0d0dU, 0x04040404U, 0x01010101U, 0x64646464U, 0xdfdfdfdfU, 0x76767676U, 0x79797979U, 0xddddddddU, 
0x3d3d3d3dU, 0x16161616U, 0x3f3f3f3fU, 0x37373737U, 0x6d6d6d6dU, 0x38383838U, 0xb9b9b9b9U, 0x73737373U, 0xe9e9e9e9U, 0x35353535U, 0x55555555U, 0x71717171U, 0x7b7b7b7bU, 0x8c8c8c8cU, 0x72727272U, 0x88888888U, 0xf6f6f6f6U, 0x2a2a2a2aU, 0x3e3e3e3eU, 0x5e5e5e5eU, 0x27272727U, 0x46464646U, 0x0c0c0c0cU, 0x65656565U, 0x68686868U, 0x61616161U, 0x03030303U, 0xc1c1c1c1U, 0x57575757U, 0xd6d6d6d6U, 0xd9d9d9d9U, 0x58585858U, 0xd8d8d8d8U, 0x66666666U, 0xd7d7d7d7U, 0x3a3a3a3aU, 0xc8c8c8c8U, 0x3c3c3c3cU, 0xfafafafaU, 0x96969696U, 0xa7a7a7a7U, 0x98989898U, 0xececececU, 0xb8b8b8b8U, 0xc7c7c7c7U, 0xaeaeaeaeU, 0x69696969U, 0x4b4b4b4bU, 0xababababU, 0xa9a9a9a9U, 0x67676767U, 0x0a0a0a0aU, 0x47474747U, 0xf2f2f2f2U, 0xb5b5b5b5U, 0x22222222U, 0xe5e5e5e5U, 0xeeeeeeeeU, 0xbebebebeU, 0x2b2b2b2bU, 0x81818181U, 0x12121212U, 0x83838383U, 0x1b1b1b1bU, 0x0e0e0e0eU, 0x23232323U, 0xf5f5f5f5U, 0x45454545U, 0x21212121U, 0xcecececeU, 0x49494949U, 0x2c2c2c2cU, 0xf9f9f9f9U, 0xe6e6e6e6U, 0xb6b6b6b6U, 0x28282828U, 0x17171717U, 0x82828282U, 0x1a1a1a1aU, 0x8b8b8b8bU, 0xfefefefeU, 0x8a8a8a8aU, 0x09090909U, 0xc9c9c9c9U, 0x87878787U, 0x4e4e4e4eU, 0xe1e1e1e1U, 0x2e2e2e2eU, 0xe4e4e4e4U, 0xe0e0e0e0U, 0xebebebebU, 0x90909090U, 0xa4a4a4a4U, 0x1e1e1e1eU, 0x85858585U, 0x60606060U, 0x00000000U, 0x25252525U, 0xf4f4f4f4U, 0xf1f1f1f1U, 0x94949494U, 0x0b0b0b0bU, 0xe7e7e7e7U, 0x75757575U, 0xefefefefU, 0x34343434U, 0x31313131U, 0xd4d4d4d4U, 0xd0d0d0d0U, 0x86868686U, 0x7e7e7e7eU, 0xadadadadU, 0xfdfdfdfdU, 0x29292929U, 0x30303030U, 0x3b3b3b3bU, 0x9f9f9f9fU, 0xf8f8f8f8U, 0xc6c6c6c6U, 0x13131313U, 0x06060606U, 0x05050505U, 0xc5c5c5c5U, 0x11111111U, 0x77777777U, 0x7c7c7c7cU, 0x7a7a7a7aU, 0x78787878U, 0x36363636U, 0x1c1c1c1cU, 0x39393939U, 0x59595959U, 0x18181818U, 0x56565656U, 0xb3b3b3b3U, 0xb0b0b0b0U, 0x24242424U, 0x20202020U, 0xb2b2b2b2U, 0x92929292U, 0xa3a3a3a3U, 0xc0c0c0c0U, 0x44444444U, 0x62626262U, 0x10101010U, 0xb4b4b4b4U, 0x84848484U, 0x43434343U, 0x93939393U, 0xc2c2c2c2U, 0x4a4a4a4aU, 0xbdbdbdbdU, 0x8f8f8f8fU, 
0x2d2d2d2dU, 0xbcbcbcbcU, 0x9c9c9c9cU, 0x6a6a6a6aU, 0x40404040U, 0xcfcfcfcfU, 0xa2a2a2a2U, 0x80808080U, 0x4f4f4f4fU, 0x1f1f1f1fU, 0xcacacacaU, 0xaaaaaaaaU, 0x42424242U, }; static const u32 T5[256] = { 0x00000000U, 0x01020608U, 0x02040c10U, 0x03060a18U, 0x04081820U, 0x050a1e28U, 0x060c1430U, 0x070e1238U, 0x08103040U, 0x09123648U, 0x0a143c50U, 0x0b163a58U, 0x0c182860U, 0x0d1a2e68U, 0x0e1c2470U, 0x0f1e2278U, 0x10206080U, 0x11226688U, 0x12246c90U, 0x13266a98U, 0x142878a0U, 0x152a7ea8U, 0x162c74b0U, 0x172e72b8U, 0x183050c0U, 0x193256c8U, 0x1a345cd0U, 0x1b365ad8U, 0x1c3848e0U, 0x1d3a4ee8U, 0x1e3c44f0U, 0x1f3e42f8U, 0x2040c01dU, 0x2142c615U, 0x2244cc0dU, 0x2346ca05U, 0x2448d83dU, 0x254ade35U, 0x264cd42dU, 0x274ed225U, 0x2850f05dU, 0x2952f655U, 0x2a54fc4dU, 0x2b56fa45U, 0x2c58e87dU, 0x2d5aee75U, 0x2e5ce46dU, 0x2f5ee265U, 0x3060a09dU, 0x3162a695U, 0x3264ac8dU, 0x3366aa85U, 0x3468b8bdU, 0x356abeb5U, 0x366cb4adU, 0x376eb2a5U, 0x387090ddU, 0x397296d5U, 0x3a749ccdU, 0x3b769ac5U, 0x3c7888fdU, 0x3d7a8ef5U, 0x3e7c84edU, 0x3f7e82e5U, 0x40809d3aU, 0x41829b32U, 0x4284912aU, 0x43869722U, 0x4488851aU, 0x458a8312U, 0x468c890aU, 0x478e8f02U, 0x4890ad7aU, 0x4992ab72U, 0x4a94a16aU, 0x4b96a762U, 0x4c98b55aU, 0x4d9ab352U, 0x4e9cb94aU, 0x4f9ebf42U, 0x50a0fdbaU, 0x51a2fbb2U, 0x52a4f1aaU, 0x53a6f7a2U, 0x54a8e59aU, 0x55aae392U, 0x56ace98aU, 0x57aeef82U, 0x58b0cdfaU, 0x59b2cbf2U, 0x5ab4c1eaU, 0x5bb6c7e2U, 0x5cb8d5daU, 0x5dbad3d2U, 0x5ebcd9caU, 0x5fbedfc2U, 0x60c05d27U, 0x61c25b2fU, 0x62c45137U, 0x63c6573fU, 0x64c84507U, 0x65ca430fU, 0x66cc4917U, 0x67ce4f1fU, 0x68d06d67U, 0x69d26b6fU, 0x6ad46177U, 0x6bd6677fU, 0x6cd87547U, 0x6dda734fU, 0x6edc7957U, 0x6fde7f5fU, 0x70e03da7U, 0x71e23bafU, 0x72e431b7U, 0x73e637bfU, 0x74e82587U, 0x75ea238fU, 0x76ec2997U, 0x77ee2f9fU, 0x78f00de7U, 0x79f20befU, 0x7af401f7U, 0x7bf607ffU, 0x7cf815c7U, 0x7dfa13cfU, 0x7efc19d7U, 0x7ffe1fdfU, 0x801d2774U, 0x811f217cU, 0x82192b64U, 0x831b2d6cU, 0x84153f54U, 0x8517395cU, 0x86113344U, 0x8713354cU, 0x880d1734U, 0x890f113cU, 
0x8a091b24U, 0x8b0b1d2cU, 0x8c050f14U, 0x8d07091cU, 0x8e010304U, 0x8f03050cU, 0x903d47f4U, 0x913f41fcU, 0x92394be4U, 0x933b4decU, 0x94355fd4U, 0x953759dcU, 0x963153c4U, 0x973355ccU, 0x982d77b4U, 0x992f71bcU, 0x9a297ba4U, 0x9b2b7dacU, 0x9c256f94U, 0x9d27699cU, 0x9e216384U, 0x9f23658cU, 0xa05de769U, 0xa15fe161U, 0xa259eb79U, 0xa35bed71U, 0xa455ff49U, 0xa557f941U, 0xa651f359U, 0xa753f551U, 0xa84dd729U, 0xa94fd121U, 0xaa49db39U, 0xab4bdd31U, 0xac45cf09U, 0xad47c901U, 0xae41c319U, 0xaf43c511U, 0xb07d87e9U, 0xb17f81e1U, 0xb2798bf9U, 0xb37b8df1U, 0xb4759fc9U, 0xb57799c1U, 0xb67193d9U, 0xb77395d1U, 0xb86db7a9U, 0xb96fb1a1U, 0xba69bbb9U, 0xbb6bbdb1U, 0xbc65af89U, 0xbd67a981U, 0xbe61a399U, 0xbf63a591U, 0xc09dba4eU, 0xc19fbc46U, 0xc299b65eU, 0xc39bb056U, 0xc495a26eU, 0xc597a466U, 0xc691ae7eU, 0xc793a876U, 0xc88d8a0eU, 0xc98f8c06U, 0xca89861eU, 0xcb8b8016U, 0xcc85922eU, 0xcd879426U, 0xce819e3eU, 0xcf839836U, 0xd0bddaceU, 0xd1bfdcc6U, 0xd2b9d6deU, 0xd3bbd0d6U, 0xd4b5c2eeU, 0xd5b7c4e6U, 0xd6b1cefeU, 0xd7b3c8f6U, 0xd8adea8eU, 0xd9afec86U, 0xdaa9e69eU, 0xdbabe096U, 0xdca5f2aeU, 0xdda7f4a6U, 0xdea1febeU, 0xdfa3f8b6U, 0xe0dd7a53U, 0xe1df7c5bU, 0xe2d97643U, 0xe3db704bU, 0xe4d56273U, 0xe5d7647bU, 0xe6d16e63U, 0xe7d3686bU, 0xe8cd4a13U, 0xe9cf4c1bU, 0xeac94603U, 0xebcb400bU, 0xecc55233U, 0xedc7543bU, 0xeec15e23U, 0xefc3582bU, 0xf0fd1ad3U, 0xf1ff1cdbU, 0xf2f916c3U, 0xf3fb10cbU, 0xf4f502f3U, 0xf5f704fbU, 0xf6f10ee3U, 0xf7f308ebU, 0xf8ed2a93U, 0xf9ef2c9bU, 0xfae92683U, 0xfbeb208bU, 0xfce532b3U, 0xfde734bbU, 0xfee13ea3U, 0xffe338abU, }; static const u32 rc[] = { 0xba542f74U, 0x53d3d24dU, 0x50ac8dbfU, 0x70529a4cU, 0xead597d1U, 0x33515ba6U, 0xde48a899U, 0xdb32b7fcU, 0xe39e919bU, 0xe2bb416eU, 0xa5cb6b95U, 0xa1f3b102U, 0xccc41d14U, 0xc363da5dU, 0x5fdc7dcdU, 0x7f5a6c5cU, 0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U, }; static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *key = (const __be32 *)in_key; u32 
*flags = &tfm->crt_flags; int N, R, i, r; u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; switch (key_len) { case 16: case 20: case 24: case 28: case 32: case 36: case 40: break; default: *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } ctx->key_len = key_len * 8; N = ctx->key_len >> 5; ctx->R = R = 8 + N; /* * map cipher key to initial key state (mu): */ for (i = 0; i < N; i++) kappa[i] = be32_to_cpu(key[i]); /* * generate R + 1 round keys: */ for (r = 0; r <= R; r++) { u32 K0, K1, K2, K3; /* * generate r-th round key K^r: */ K0 = T4[(kappa[N - 1] >> 24) ]; K1 = T4[(kappa[N - 1] >> 16) & 0xff]; K2 = T4[(kappa[N - 1] >> 8) & 0xff]; K3 = T4[(kappa[N - 1] ) & 0xff]; for (i = N - 2; i >= 0; i--) { K0 = T4[(kappa[i] >> 24) ] ^ (T5[(K0 >> 24) ] & 0xff000000U) ^ (T5[(K0 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K0 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K0 ) & 0xff] & 0x000000ffU); K1 = T4[(kappa[i] >> 16) & 0xff] ^ (T5[(K1 >> 24) ] & 0xff000000U) ^ (T5[(K1 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K1 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K1 ) & 0xff] & 0x000000ffU); K2 = T4[(kappa[i] >> 8) & 0xff] ^ (T5[(K2 >> 24) ] & 0xff000000U) ^ (T5[(K2 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K2 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K2 ) & 0xff] & 0x000000ffU); K3 = T4[(kappa[i] ) & 0xff] ^ (T5[(K3 >> 24) ] & 0xff000000U) ^ (T5[(K3 >> 16) & 0xff] & 0x00ff0000U) ^ (T5[(K3 >> 8) & 0xff] & 0x0000ff00U) ^ (T5[(K3 ) & 0xff] & 0x000000ffU); } ctx->E[r][0] = K0; ctx->E[r][1] = K1; ctx->E[r][2] = K2; ctx->E[r][3] = K3; /* * compute kappa^{r+1} from kappa^r: */ if (r == R) break; for (i = 0; i < N; i++) { int j = i; inter[i] = T0[(kappa[j--] >> 24) ]; if (j < 0) j = N - 1; inter[i] ^= T1[(kappa[j--] >> 16) & 0xff]; if (j < 0) j = N - 1; inter[i] ^= T2[(kappa[j--] >> 8) & 0xff]; if (j < 0) j = N - 1; inter[i] ^= T3[(kappa[j ] ) & 0xff]; } kappa[0] = inter[0] ^ rc[r]; for (i = 1; i < N; i++) kappa[i] = inter[i]; } /* * generate inverse key schedule: K'^0 = K^R, K'^R = * K^0, K'^r = theta(K^{R-r}): */ for 
(i = 0; i < 4; i++) { ctx->D[0][i] = ctx->E[R][i]; ctx->D[R][i] = ctx->E[0][i]; } for (r = 1; r < R; r++) { for (i = 0; i < 4; i++) { u32 v = ctx->E[R - r][i]; ctx->D[r][i] = T0[T4[(v >> 24) ] & 0xff] ^ T1[T4[(v >> 16) & 0xff] & 0xff] ^ T2[T4[(v >> 8) & 0xff] & 0xff] ^ T3[T4[(v ) & 0xff] & 0xff]; } } return 0; } static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], u8 *ciphertext, const u8 *plaintext, const int R) { const __be32 *src = (const __be32 *)plaintext; __be32 *dst = (__be32 *)ciphertext; int i, r; u32 state[4]; u32 inter[4]; /* * map plaintext block to cipher state (mu) * and add initial round key (sigma[K^0]): */ for (i = 0; i < 4; i++) state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i]; /* * R - 1 full rounds: */ for (r = 1; r < R; r++) { inter[0] = T0[(state[0] >> 24) ] ^ T1[(state[1] >> 24) ] ^ T2[(state[2] >> 24) ] ^ T3[(state[3] >> 24) ] ^ roundKey[r][0]; inter[1] = T0[(state[0] >> 16) & 0xff] ^ T1[(state[1] >> 16) & 0xff] ^ T2[(state[2] >> 16) & 0xff] ^ T3[(state[3] >> 16) & 0xff] ^ roundKey[r][1]; inter[2] = T0[(state[0] >> 8) & 0xff] ^ T1[(state[1] >> 8) & 0xff] ^ T2[(state[2] >> 8) & 0xff] ^ T3[(state[3] >> 8) & 0xff] ^ roundKey[r][2]; inter[3] = T0[(state[0] ) & 0xff] ^ T1[(state[1] ) & 0xff] ^ T2[(state[2] ) & 0xff] ^ T3[(state[3] ) & 0xff] ^ roundKey[r][3]; state[0] = inter[0]; state[1] = inter[1]; state[2] = inter[2]; state[3] = inter[3]; } /* * last round: */ inter[0] = (T0[(state[0] >> 24) ] & 0xff000000U) ^ (T1[(state[1] >> 24) ] & 0x00ff0000U) ^ (T2[(state[2] >> 24) ] & 0x0000ff00U) ^ (T3[(state[3] >> 24) ] & 0x000000ffU) ^ roundKey[R][0]; inter[1] = (T0[(state[0] >> 16) & 0xff] & 0xff000000U) ^ (T1[(state[1] >> 16) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] >> 16) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] >> 16) & 0xff] & 0x000000ffU) ^ roundKey[R][1]; inter[2] = (T0[(state[0] >> 8) & 0xff] & 0xff000000U) ^ (T1[(state[1] >> 8) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] >> 8) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] >> 8) & 0xff] & 
0x000000ffU) ^ roundKey[R][2]; inter[3] = (T0[(state[0] ) & 0xff] & 0xff000000U) ^ (T1[(state[1] ) & 0xff] & 0x00ff0000U) ^ (T2[(state[2] ) & 0xff] & 0x0000ff00U) ^ (T3[(state[3] ) & 0xff] & 0x000000ffU) ^ roundKey[R][3]; /* * map cipher state to ciphertext block (mu^{-1}): */ for (i = 0; i < 4; i++) dst[i] = cpu_to_be32(inter[i]); } static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); anubis_crypt(ctx->E, dst, src, ctx->R); } static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); anubis_crypt(ctx->D, dst, src, ctx->R); } static struct crypto_alg anubis_alg = { .cra_name = "anubis", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ANUBIS_BLOCK_SIZE, .cra_ctxsize = sizeof (struct anubis_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = ANUBIS_MIN_KEY_SIZE, .cia_max_keysize = ANUBIS_MAX_KEY_SIZE, .cia_setkey = anubis_setkey, .cia_encrypt = anubis_encrypt, .cia_decrypt = anubis_decrypt } } }; static int __init anubis_mod_init(void) { int ret = 0; ret = crypto_register_alg(&anubis_alg); return ret; } static void __exit anubis_mod_fini(void) { crypto_unregister_alg(&anubis_alg); } module_init(anubis_mod_init); module_exit(anubis_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
gpl-2.0