repo_name
string
path
string
copies
string
size
string
content
string
license
string
nimon/m8_kernel
drivers/mtd/maps/dbox2-flash.c
8181
2716
/* * D-Box 2 flash driver */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/errno.h> /* partition_info gives details on the logical partitions that the split the * single flash device into. If the size if zero we use up to the end of the * device. */ static struct mtd_partition partition_info[]= { { .name = "BR bootloader", .size = 128 * 1024, .offset = 0, .mask_flags = MTD_WRITEABLE }, { .name = "FLFS (U-Boot)", .size = 128 * 1024, .offset = MTDPART_OFS_APPEND, .mask_flags = 0 }, { .name = "Root (SquashFS)", .size = 7040 * 1024, .offset = MTDPART_OFS_APPEND, .mask_flags = 0 }, { .name = "var (JFFS2)", .size = 896 * 1024, .offset = MTDPART_OFS_APPEND, .mask_flags = 0 }, { .name = "Flash without bootloader", .size = MTDPART_SIZ_FULL, .offset = 128 * 1024, .mask_flags = 0 }, { .name = "Complete Flash", .size = MTDPART_SIZ_FULL, .offset = 0, .mask_flags = MTD_WRITEABLE } }; #define NUM_PARTITIONS ARRAY_SIZE(partition_info) #define WINDOW_ADDR 0x10000000 #define WINDOW_SIZE 0x800000 static struct mtd_info *mymtd; struct map_info dbox2_flash_map = { .name = "D-Box 2 flash memory", .size = WINDOW_SIZE, .bankwidth = 4, .phys = WINDOW_ADDR, }; static int __init init_dbox2_flash(void) { printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR); dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); if (!dbox2_flash_map.virt) { printk("Failed to ioremap\n"); return -EIO; } simple_map_init(&dbox2_flash_map); // Probe for dual Intel 28F320 or dual AMD mymtd = do_map_probe("cfi_probe", &dbox2_flash_map); if (!mymtd) { // Probe for single Intel 28F640 dbox2_flash_map.bankwidth = 2; mymtd = do_map_probe("cfi_probe", &dbox2_flash_map); } if (mymtd) { mymtd->owner = THIS_MODULE; /* Create MTD devices for each partition. 
*/ mtd_device_register(mymtd, partition_info, NUM_PARTITIONS); return 0; } iounmap((void *)dbox2_flash_map.virt); return -ENXIO; } static void __exit cleanup_dbox2_flash(void) { if (mymtd) { mtd_device_unregister(mymtd); map_destroy(mymtd); } if (dbox2_flash_map.virt) { iounmap((void *)dbox2_flash_map.virt); dbox2_flash_map.virt = 0; } } module_init(init_dbox2_flash); module_exit(cleanup_dbox2_flash); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>, Bastian Blank <waldi@tuxbox.org>, Alexander Wild <wild@te-elektronik.com>"); MODULE_DESCRIPTION("MTD map driver for D-Box 2 board");
gpl-2.0
anders3408/kernel_oppo_find5-old
drivers/isdn/i4l/isdn_concap.c
9461
2987
/* $Id: isdn_concap.c,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $
 *
 * Linux ISDN subsystem, protocol encapsulation
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

/* Glue between the generic concap_proto layer and isdn4linux.  Only the
 * isdn4linux-specific pieces live here; anything that depends purely on
 * the concap protocol itself belongs in a separate, protocol-specific
 * source file.
 */

#include <linux/isdn.h>
#include "isdn_x25iface.h"
#include "isdn_net.h"
#include <linux/concap.h>
#include "isdn_concap.h"

/* The device service operations below serve encapsulation protocols that
   require reliable datalink semantics.  That means:

   - before any data may be submitted, the connection must be set up
     explicitly;
   - once successful set-up has been signalled, the connection is
     considered reliably up.

   Auto-dialing is not compatible with those requirements, so it is
   bypassed completely.  A (non-standardized) datalink protocol providing
   a reliable service on top of some auto-dialing mechanism would need an
   auxiliary channel (i.e. user-user signaling on the D-channel) while
   the B-channel is down.
*/

static int isdn_concap_dl_data_req(struct concap_proto *concap, struct sk_buff *skb)
{
	struct net_device *dev = concap->net_dev;
	isdn_net_dev *idev = ((isdn_net_local *) netdev_priv(dev))->netdev;
	isdn_net_local *local = isdn_net_get_locked_lp(idev);

	IX25DEBUG("isdn_concap_dl_data_req: %s \n", concap->net_dev->name);

	/* No transmit-ready channel available: report failure (1). */
	if (!local) {
		IX25DEBUG("isdn_concap_dl_data_req: %s : isdn_net_send_skb returned %d\n", concap->net_dev->name, 1);
		return 1;
	}

	local->huptimer = 0;
	isdn_net_writebuf_skb(local, skb);
	/* isdn_net_get_locked_lp() returned with xmit_lock held; drop it. */
	spin_unlock_bh(&local->xmit_lock);

	IX25DEBUG("isdn_concap_dl_data_req: %s : isdn_net_send_skb returned %d\n", concap->net_dev->name, 0);
	return 0;
}

static int isdn_concap_dl_connect_req(struct concap_proto *concap)
{
	struct net_device *dev = concap->net_dev;
	isdn_net_local *local = netdev_priv(dev);
	int rc;

	IX25DEBUG("isdn_concap_dl_connect_req: %s \n", dev->name);

	/* dial ... */
	rc = isdn_net_dial_req(local);
	if (rc)
		IX25DEBUG("dialing failed\n");
	return rc;
}

static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
{
	IX25DEBUG("isdn_concap_dl_disconn_req: %s \n", concap->net_dev->name);

	isdn_net_hangup(concap->net_dev);
	return 0;
}

struct concap_device_ops isdn_concap_reliable_dl_dops = {
	&isdn_concap_dl_data_req,
	&isdn_concap_dl_connect_req,
	&isdn_concap_dl_disconn_req
};

/* The following should better go into a dedicated source file such that
   this source file does not need to include any protocol-specific header
   files.  For now: */
struct concap_proto *isdn_concap_new(int encap)
{
	if (encap == ISDN_NET_ENCAP_X25IFACE)
		return isdn_x25iface_proto_new();

	return NULL;
}
gpl-2.0
jepler/odroid-linux
arch/sh/mm/cache-sh2.c
10997
2289
/*
 * arch/sh/mm/cache-sh2.c
 *
 * Copyright (C) 2002 Paul Mundt
 * Copyright (C) 2008 Yoshinori Sato
 *
 * Released under the terms of the GNU GPL v2.0.
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

/*
 * Write back (without invalidating) every cache line covering
 * [start, start + size): for each set, scan all 4 ways and clear the
 * "updated" (dirty) bit on lines whose tag matches the address.
 */
static void sh2__flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	/* Round the region out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0);
		int way;

		for (way = 0; way < 4; way++) {
			unsigned long data = __raw_readl(addr | (way << 12));

			if ((data & CACHE_PHYSADDR_MASK) ==
			    (v & CACHE_PHYSADDR_MASK)) {
				data &= ~SH_CACHE_UPDATED;
				__raw_writel(data, addr | (way << 12));
			}
		}
	}
}

/*
 * Write back and invalidate every cache line covering
 * [start, start + size) via the associative purge area
 * (address-array write with bit 3 set).
 */
static void sh2__flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	for (v = begin; v < end; v += L1_CACHE_BYTES)
		__raw_writel((v & CACHE_PHYSADDR_MASK),
			     CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
}

static void sh2__flush_invalidate_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
	/*
	 * SH-2 does not support individual line invalidation, only a
	 * global invalidate.
	 */
	unsigned long ccr;
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();

	ccr = __raw_readl(CCR);
	ccr |= CCR_CACHE_INVALIDATE;
	__raw_writel(ccr, CCR);

	back_to_cached();
	local_irq_restore(flags);
#else
	/*
	 * FIX: in write-through mode the per-line invalidate is identical
	 * to a purge, so reuse sh2__flush_purge_region() instead of
	 * duplicating its loop verbatim.
	 */
	sh2__flush_purge_region(start, size);
#endif
}

/* Install the SH-2 implementations of the generic cache-flush hooks. */
void __init sh2_cache_init(void)
{
	__flush_wback_region		= sh2__flush_wback_region;
	__flush_purge_region		= sh2__flush_purge_region;
	__flush_invalidate_region	= sh2__flush_invalidate_region;
}
gpl-2.0
FEDEVEL/imx6tinyrex-linux-3.0.35
arch/mips/lasat/at93c.c
11765
2613
/* * Atmel AT93C46 serial eeprom driver * * Brian Murphy <brian.murphy@eicon.com> * */ #include <linux/kernel.h> #include <linux/delay.h> #include <asm/lasat/lasat.h> #include <linux/module.h> #include <linux/init.h> #include "at93c.h" #define AT93C_ADDR_SHIFT 7 #define AT93C_ADDR_MAX ((1 << AT93C_ADDR_SHIFT) - 1) #define AT93C_RCMD (0x6 << AT93C_ADDR_SHIFT) #define AT93C_WCMD (0x5 << AT93C_ADDR_SHIFT) #define AT93C_WENCMD 0x260 #define AT93C_WDSCMD 0x200 struct at93c_defs *at93c; static void at93c_reg_write(u32 val) { *at93c->reg = val; } static u32 at93c_reg_read(void) { u32 tmp = *at93c->reg; return tmp; } static u32 at93c_datareg_read(void) { u32 tmp = *at93c->rdata_reg; return tmp; } static void at93c_cycle_clk(u32 data) { at93c_reg_write(data | at93c->clk); lasat_ndelay(250); at93c_reg_write(data & ~at93c->clk); lasat_ndelay(250); } static void at93c_write_databit(u8 bit) { u32 data = at93c_reg_read(); if (bit) data |= 1 << at93c->wdata_shift; else data &= ~(1 << at93c->wdata_shift); at93c_reg_write(data); lasat_ndelay(100); at93c_cycle_clk(data); } static unsigned int at93c_read_databit(void) { u32 data; at93c_cycle_clk(at93c_reg_read()); data = (at93c_datareg_read() >> at93c->rdata_shift) & 1; return data; } static u8 at93c_read_byte(void) { int i; u8 data = 0; for (i = 0; i <= 7; i++) { data <<= 1; data |= at93c_read_databit(); } return data; } static void at93c_write_bits(u32 data, int size) { int i; int shift = size - 1; u32 mask = (1 << shift); for (i = 0; i < size; i++) { at93c_write_databit((data & mask) >> shift); data <<= 1; } } static void at93c_init_op(void) { at93c_reg_write((at93c_reg_read() | at93c->cs) & ~at93c->clk & ~(1 << at93c->rdata_shift)); lasat_ndelay(50); } static void at93c_end_op(void) { at93c_reg_write(at93c_reg_read() & ~at93c->cs); lasat_ndelay(250); } static void at93c_wait(void) { at93c_init_op(); while (!at93c_read_databit()) ; at93c_end_op(); }; static void at93c_disable_wp(void) { at93c_init_op(); 
at93c_write_bits(AT93C_WENCMD, 10); at93c_end_op(); } static void at93c_enable_wp(void) { at93c_init_op(); at93c_write_bits(AT93C_WDSCMD, 10); at93c_end_op(); } u8 at93c_read(u8 addr) { u8 byte; at93c_init_op(); at93c_write_bits((addr & AT93C_ADDR_MAX)|AT93C_RCMD, 10); byte = at93c_read_byte(); at93c_end_op(); return byte; } void at93c_write(u8 addr, u8 data) { at93c_disable_wp(); at93c_init_op(); at93c_write_bits((addr & AT93C_ADDR_MAX)|AT93C_WCMD, 10); at93c_write_bits(data, 8); at93c_end_op(); at93c_wait(); at93c_enable_wp(); }
gpl-2.0
dalinaum/studyak
arch/arm/mach-realview/realview_pb11mp.c
502
10719
/*
 *  linux/arch/arm/mach-realview/realview_pb11mp.c
 *
 *  Copyright (C) 2008 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/icst307.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/localtimer.h>

#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>

#include <mach/board-pb11mp.h>
#include <mach/irqs.h>

#include "core.h"
#include "clock.h"

/* Static virtual-to-physical mappings for the on-board peripherals. */
static struct map_desc realview_pb11mp_io_desc[] __initdata = {
	{
		.virtual	= IO_ADDRESS(REALVIEW_SYS_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_SYS_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_PB11MP_GIC_CPU_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB11MP_GIC_CPU_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_PB11MP_GIC_DIST_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB11MP_GIC_DIST_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_TC11MP_GIC_CPU_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_TC11MP_GIC_CPU_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_TC11MP_GIC_DIST_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_TC11MP_GIC_DIST_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_SCTL_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_SCTL_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_PB11MP_TIMER0_1_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB11MP_TIMER0_1_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_PB11MP_TIMER2_3_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB11MP_TIMER2_3_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_TC11MP_L220_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_TC11MP_L220_BASE),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	},
#ifdef CONFIG_DEBUG_LL
	{
		.virtual	= IO_ADDRESS(REALVIEW_PB11MP_UART0_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB11MP_UART0_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
#endif
};

static void __init realview_pb11mp_map_io(void)
{
	iotable_init(realview_pb11mp_io_desc, ARRAY_SIZE(realview_pb11mp_io_desc));
}

/* PL061 GPIO controllers: 8 GPIOs each, no cascaded GPIO interrupts. */
static struct pl061_platform_data gpio0_plat_data = {
	.gpio_base	= 0,
	.irq_base	= -1,
};

static struct pl061_platform_data gpio1_plat_data = {
	.gpio_base	= 8,
	.irq_base	= -1,
};

static struct pl061_platform_data gpio2_plat_data = {
	.gpio_base	= 16,
	.irq_base	= -1,
};

/*
 * RealView PB11MPCore AMBA devices
 *
 * Each peripheral gets an {IRQ, IRQ} pair and a {DMA, DMA} pair consumed
 * by the AMBA_DEVICE() macro below.
 */

#define GPIO2_IRQ		{ IRQ_PB11MP_GPIO2, NO_IRQ }
#define GPIO2_DMA		{ 0, 0 }
#define GPIO3_IRQ		{ IRQ_PB11MP_GPIO3, NO_IRQ }
#define GPIO3_DMA		{ 0, 0 }
#define AACI_IRQ		{ IRQ_TC11MP_AACI, NO_IRQ }
#define AACI_DMA		{ 0x80, 0x81 }
#define MMCI0_IRQ		{ IRQ_TC11MP_MMCI0A, IRQ_TC11MP_MMCI0B }
#define MMCI0_DMA		{ 0x84, 0 }
#define KMI0_IRQ		{ IRQ_TC11MP_KMI0, NO_IRQ }
#define KMI0_DMA		{ 0, 0 }
#define KMI1_IRQ		{ IRQ_TC11MP_KMI1, NO_IRQ }
#define KMI1_DMA		{ 0, 0 }
#define PB11MP_SMC_IRQ		{ NO_IRQ, NO_IRQ }
#define PB11MP_SMC_DMA		{ 0, 0 }
#define MPMC_IRQ		{ NO_IRQ, NO_IRQ }
#define MPMC_DMA		{ 0, 0 }
#define PB11MP_CLCD_IRQ		{ IRQ_PB11MP_CLCD, NO_IRQ }
#define PB11MP_CLCD_DMA		{ 0, 0 }
#define DMAC_IRQ		{ IRQ_PB11MP_DMAC, NO_IRQ }
#define DMAC_DMA		{ 0, 0 }
#define SCTL_IRQ		{ NO_IRQ, NO_IRQ }
#define SCTL_DMA		{ 0, 0 }
#define PB11MP_WATCHDOG_IRQ	{ IRQ_PB11MP_WATCHDOG, NO_IRQ }
#define PB11MP_WATCHDOG_DMA	{ 0, 0 }
#define PB11MP_GPIO0_IRQ	{ IRQ_PB11MP_GPIO0, NO_IRQ }
#define PB11MP_GPIO0_DMA	{ 0, 0 }
#define GPIO1_IRQ		{ IRQ_PB11MP_GPIO1, NO_IRQ }
#define GPIO1_DMA		{ 0, 0 }
#define PB11MP_RTC_IRQ		{ IRQ_TC11MP_RTC, NO_IRQ }
#define PB11MP_RTC_DMA		{ 0, 0 }
#define SCI_IRQ			{ IRQ_PB11MP_SCI, NO_IRQ }
#define SCI_DMA			{ 7, 6 }
#define PB11MP_UART0_IRQ	{ IRQ_TC11MP_UART0, NO_IRQ }
#define PB11MP_UART0_DMA	{ 15, 14 }
#define PB11MP_UART1_IRQ	{ IRQ_TC11MP_UART1, NO_IRQ }
#define PB11MP_UART1_DMA	{ 13, 12 }
#define PB11MP_UART2_IRQ	{ IRQ_PB11MP_UART2, NO_IRQ }
#define PB11MP_UART2_DMA	{ 11, 10 }
#define PB11MP_UART3_IRQ	{ IRQ_PB11MP_UART3, NO_IRQ }
#define PB11MP_UART3_DMA	{ 0x86, 0x87 }
#define PB11MP_SSP_IRQ		{ IRQ_PB11MP_SSP, NO_IRQ }
#define PB11MP_SSP_DMA		{ 9, 8 }

/* FPGA Primecells */
AMBA_DEVICE(aaci,	"fpga:aaci",	AACI,		NULL);
AMBA_DEVICE(mmc0,	"fpga:mmc0",	MMCI0,		&realview_mmc0_plat_data);
AMBA_DEVICE(kmi0,	"fpga:kmi0",	KMI0,		NULL);
AMBA_DEVICE(kmi1,	"fpga:kmi1",	KMI1,		NULL);
AMBA_DEVICE(uart3,	"fpga:uart3",	PB11MP_UART3,	NULL);

/* DevChip Primecells */
AMBA_DEVICE(smc,	"dev:smc",	PB11MP_SMC,	NULL);
AMBA_DEVICE(sctl,	"dev:sctl",	SCTL,		NULL);
AMBA_DEVICE(wdog,	"dev:wdog",	PB11MP_WATCHDOG, NULL);
AMBA_DEVICE(gpio0,	"dev:gpio0",	PB11MP_GPIO0,	&gpio0_plat_data);
AMBA_DEVICE(gpio1,	"dev:gpio1",	GPIO1,		&gpio1_plat_data);
AMBA_DEVICE(gpio2,	"dev:gpio2",	GPIO2,		&gpio2_plat_data);
AMBA_DEVICE(rtc,	"dev:rtc",	PB11MP_RTC,	NULL);
AMBA_DEVICE(sci0,	"dev:sci0",	SCI,		NULL);
AMBA_DEVICE(uart0,	"dev:uart0",	PB11MP_UART0,	NULL);
AMBA_DEVICE(uart1,	"dev:uart1",	PB11MP_UART1,	NULL);
AMBA_DEVICE(uart2,	"dev:uart2",	PB11MP_UART2,	NULL);
AMBA_DEVICE(ssp0,	"dev:ssp0",	PB11MP_SSP,	NULL);

/* Primecells on the NEC ISSP chip */
AMBA_DEVICE(clcd,	"issp:clcd",	PB11MP_CLCD,	&clcd_plat_data);
AMBA_DEVICE(dmac,	"issp:dmac",	DMAC,		NULL);

static struct amba_device *amba_devs[] __initdata = {
	&dmac_device,
	&uart0_device,
	&uart1_device,
	&uart2_device,
	&uart3_device,
	&smc_device,
	&clcd_device,
	&sctl_device,
	&wdog_device,
	&gpio0_device,
	&gpio1_device,
	&gpio2_device,
	&rtc_device,
	&sci0_device,
	&ssp0_device,
	&aaci_device,
	&mmc0_device,
	&kmi0_device,
	&kmi1_device,
};

/*
 * RealView PB11MPCore platform devices
 */
static struct resource realview_pb11mp_flash_resource[] = {
	[0] = {
		.start		= REALVIEW_PB11MP_FLASH0_BASE,
		.end		= REALVIEW_PB11MP_FLASH0_BASE + REALVIEW_PB11MP_FLASH0_SIZE - 1,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.start		= REALVIEW_PB11MP_FLASH1_BASE,
		.end		= REALVIEW_PB11MP_FLASH1_BASE + REALVIEW_PB11MP_FLASH1_SIZE - 1,
		.flags		= IORESOURCE_MEM,
	},
};

static struct resource realview_pb11mp_smsc911x_resources[] = {
	[0] = {
		.start		= REALVIEW_PB11MP_ETH_BASE,
		.end		= REALVIEW_PB11MP_ETH_BASE + SZ_64K - 1,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.start		= IRQ_TC11MP_ETH,
		.end		= IRQ_TC11MP_ETH,
		.flags		= IORESOURCE_IRQ,
	},
};

static struct resource realview_pb11mp_isp1761_resources[] = {
	[0] = {
		.start		= REALVIEW_PB11MP_USB_BASE,
		.end		= REALVIEW_PB11MP_USB_BASE + SZ_128K - 1,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.start		= IRQ_TC11MP_USB,
		.end		= IRQ_TC11MP_USB,
		.flags		= IORESOURCE_IRQ,
	},
};

/*
 * Set up the two cascaded GICs: the test-chip GIC is primary, the board
 * GIC is secondary and cascaded through IRQ_TC11MP_PB_IRQ1.
 */
static void __init gic_init_irq(void)
{
	unsigned int pldctrl;

	/* new irq mode with no DCC */
	writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
	pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + REALVIEW_PB11MP_SYS_PLD_CTRL1);
	pldctrl |= 2 << 22;
	writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + REALVIEW_PB11MP_SYS_PLD_CTRL1);
	writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));

	/* ARM11MPCore test chip GIC, primary */
	gic_cpu_base_addr = __io_address(REALVIEW_TC11MP_GIC_CPU_BASE);
	gic_dist_init(0, __io_address(REALVIEW_TC11MP_GIC_DIST_BASE), 29);
	gic_cpu_init(0, gic_cpu_base_addr);

	/* board GIC, secondary */
	gic_dist_init(1, __io_address(REALVIEW_PB11MP_GIC_DIST_BASE), IRQ_PB11MP_GIC_START);
	gic_cpu_init(1, __io_address(REALVIEW_PB11MP_GIC_CPU_BASE));
	gic_cascade_irq(1, IRQ_TC11MP_PB_IRQ1);
}

static void __init realview_pb11mp_timer_init(void)
{
	timer0_va_base = __io_address(REALVIEW_PB11MP_TIMER0_1_BASE);
	timer1_va_base = __io_address(REALVIEW_PB11MP_TIMER0_1_BASE) + 0x20;
	timer2_va_base = __io_address(REALVIEW_PB11MP_TIMER2_3_BASE);
	timer3_va_base = __io_address(REALVIEW_PB11MP_TIMER2_3_BASE) + 0x20;

#ifdef CONFIG_LOCAL_TIMERS
	twd_base = __io_address(REALVIEW_TC11MP_TWD_BASE);
#endif
	realview_timer_init(IRQ_TC11MP_TIMER0_1);
}

static struct sys_timer realview_pb11mp_timer = {
	.init		= realview_pb11mp_timer_init,
};

static void realview_pb11mp_reset(char mode)
{
	void __iomem *hdr_ctrl = __io_address(REALVIEW_SYS_BASE) +
		REALVIEW_SYS_RESETCTL_OFFSET;
	unsigned int val;

	/*
	 * To reset, we hit the on-board reset register
	 * in the system FPGA
	 */
	val = __raw_readl(hdr_ctrl);
	val |= REALVIEW_PB11MP_SYS_CTRL_RESET_CONFIGCLR;
	__raw_writel(val, hdr_ctrl);
}

/* Board init: L2 cache, flash/ethernet/USB, and all AMBA peripherals. */
static void __init realview_pb11mp_init(void)
{
	int i;

#ifdef CONFIG_CACHE_L2X0
	/* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled
	 * Bits:  .... ...0 0111 1001 0000 .... .... .... */
	l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff);
#endif

	realview_flash_register(realview_pb11mp_flash_resource,
				ARRAY_SIZE(realview_pb11mp_flash_resource));
	realview_eth_register(NULL, realview_pb11mp_smsc911x_resources);
	platform_device_register(&realview_i2c_device);
	platform_device_register(&realview_cf_device);
	realview_usb_register(realview_pb11mp_isp1761_resources);

	for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
		struct amba_device *d = amba_devs[i];
		amba_device_register(d, &iomem_resource);
	}

#ifdef CONFIG_LEDS
	leds_event = realview_leds_event;
#endif
	realview_reset = realview_pb11mp_reset;
}

MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
	/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
	.phys_io	= REALVIEW_PB11MP_UART0_BASE,
	.io_pg_offst	= (IO_ADDRESS(REALVIEW_PB11MP_UART0_BASE) >> 18) & 0xfffc,
	.boot_params	= PHYS_OFFSET + 0x00000100,
	.fixup		= realview_fixup,
	.map_io		= realview_pb11mp_map_io,
	.init_irq	= gic_init_irq,
	.timer		= &realview_pb11mp_timer,
	.init_machine	= realview_pb11mp_init,
MACHINE_END
gpl-2.0
C-Aniruddh/SXD_kernel
arch/arm/mach-msm/rpm-smd.c
758
24795
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/bug.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/device.h> #include <linux/notifier.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_platform.h> #include <mach/socinfo.h> #include <mach/msm_smd.h> #include <mach/rpm-smd.h> #include "rpm-notifier.h" /* Debug Definitions */ enum { MSM_RPM_LOG_REQUEST_PRETTY = BIT(0), MSM_RPM_LOG_REQUEST_RAW = BIT(1), MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2), }; static int msm_rpm_debug_mask; module_param_named( debug_mask, msm_rpm_debug_mask, int, S_IRUGO | S_IWUSR ); struct msm_rpm_driver_data { const char *ch_name; uint32_t ch_type; smd_channel_t *ch_info; struct work_struct work; spinlock_t smd_lock_write; spinlock_t smd_lock_read; struct completion smd_open; }; #define DEFAULT_BUFFER_SIZE 256 #define GFP_FLAG(noirq) (noirq ? 
GFP_ATOMIC : GFP_KERNEL) #define INV_RSC "resource does not exist" #define ERR "err\0" #define MAX_ERR_BUFFER_SIZE 128 #define INIT_ERROR 1 static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier); static bool standalone; int msm_rpm_register_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb); } int msm_rpm_unregister_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb); } static struct workqueue_struct *msm_rpm_smd_wq; enum { MSM_RPM_MSG_REQUEST_TYPE = 0, MSM_RPM_MSG_TYPE_NR, }; static const uint32_t msm_rpm_request_service[MSM_RPM_MSG_TYPE_NR] = { 0x716572, /* 'req\0' */ }; /*the order of fields matter and reflect the order expected by the RPM*/ struct rpm_request_header { uint32_t service_type; uint32_t request_len; }; struct rpm_message_header { uint32_t msg_id; enum msm_rpm_set set; uint32_t resource_type; uint32_t resource_id; uint32_t data_len; }; struct msm_rpm_kvp_data { uint32_t key; uint32_t nbytes; /* number of bytes */ uint8_t *value; bool valid; }; static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0); static struct msm_rpm_driver_data msm_rpm_data; struct msm_rpm_request { struct rpm_request_header req_hdr; struct rpm_message_header msg_hdr; struct msm_rpm_kvp_data *kvp; uint32_t num_elements; uint32_t write_idx; uint8_t *buf; uint32_t numbytes; }; /* * Data related to message acknowledgement */ LIST_HEAD(msm_rpm_wait_list); struct msm_rpm_wait_data { struct list_head list; uint32_t msg_id; bool ack_recd; int errno; struct completion ack; }; DEFINE_SPINLOCK(msm_rpm_list_lock); struct msm_rpm_ack_msg { uint32_t req; uint32_t req_len; uint32_t rsc_id; uint32_t msg_len; uint32_t id_ack; }; static int irq_process; LIST_HEAD(msm_rpm_ack_list); static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr, struct msm_rpm_kvp_data *kvp) { struct msm_rpm_notifier_data notif; notif.rsc_type = hdr->resource_type; notif.rsc_id = hdr->resource_id; notif.key = 
kvp->key; notif.size = kvp->nbytes; notif.value = kvp->value; atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif); } static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle, uint32_t key, const uint8_t *data, int size, bool noirq) { int i; int data_size, msg_size; if (!handle) { pr_err("%s(): Invalid handle\n", __func__); return -EINVAL; } data_size = ALIGN(size, SZ_4); msg_size = data_size + sizeof(struct rpm_request_header); for (i = 0; i < handle->write_idx; i++) { if (handle->kvp[i].key != key) continue; if (handle->kvp[i].nbytes != data_size) { kfree(handle->kvp[i].value); handle->kvp[i].value = NULL; } else { if (!memcmp(handle->kvp[i].value, data, data_size)) return 0; } break; } if (i >= handle->num_elements) { pr_err("%s(): Number of resources exceeds max allocated\n", __func__); return -ENOMEM; } if (i == handle->write_idx) handle->write_idx++; if (!handle->kvp[i].value) { handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq)); if (!handle->kvp[i].value) { pr_err("%s(): Failed malloc\n", __func__); return -ENOMEM; } } else { /* We enter the else case, if a key already exists but the * data doesn't match. In which case, we should zero the data * out. 
*/ memset(handle->kvp[i].value, 0, data_size); } if (!handle->kvp[i].valid) handle->msg_hdr.data_len += msg_size; else handle->msg_hdr.data_len += (data_size - handle->kvp[i].nbytes); handle->kvp[i].nbytes = data_size; handle->kvp[i].key = key; memcpy(handle->kvp[i].value, data, size); handle->kvp[i].valid = true; if (handle->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET) msm_rpm_notify_sleep_chain(&handle->msg_hdr, &handle->kvp[i]); return 0; } static struct msm_rpm_request *msm_rpm_create_request_common( enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id, int num_elements, bool noirq) { struct msm_rpm_request *cdata; cdata = kzalloc(sizeof(struct msm_rpm_request), GFP_FLAG(noirq)); if (!cdata) { printk(KERN_INFO"%s():Cannot allocate memory for client data\n", __func__); goto cdata_alloc_fail; } cdata->msg_hdr.set = set; cdata->msg_hdr.resource_type = rsc_type; cdata->msg_hdr.resource_id = rsc_id; cdata->msg_hdr.data_len = 0; cdata->num_elements = num_elements; cdata->write_idx = 0; cdata->kvp = kzalloc(sizeof(struct msm_rpm_kvp_data) * num_elements, GFP_FLAG(noirq)); if (!cdata->kvp) { pr_warn("%s(): Cannot allocate memory for key value data\n", __func__); goto kvp_alloc_fail; } cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq)); if (!cdata->buf) goto buf_alloc_fail; cdata->numbytes = DEFAULT_BUFFER_SIZE; return cdata; buf_alloc_fail: kfree(cdata->kvp); kvp_alloc_fail: kfree(cdata); cdata_alloc_fail: return NULL; } void msm_rpm_free_request(struct msm_rpm_request *handle) { int i; if (!handle) return; for (i = 0; i < handle->write_idx; i++) kfree(handle->kvp[i].value); kfree(handle->kvp); kfree(handle); } EXPORT_SYMBOL(msm_rpm_free_request); struct msm_rpm_request *msm_rpm_create_request( enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id, int num_elements) { return msm_rpm_create_request_common(set, rsc_type, rsc_id, num_elements, false); } EXPORT_SYMBOL(msm_rpm_create_request); struct msm_rpm_request *msm_rpm_create_request_noirq( enum msm_rpm_set 
set, uint32_t rsc_type, uint32_t rsc_id, int num_elements) { return msm_rpm_create_request_common(set, rsc_type, rsc_id, num_elements, true); } EXPORT_SYMBOL(msm_rpm_create_request_noirq); int msm_rpm_add_kvp_data(struct msm_rpm_request *handle, uint32_t key, const uint8_t *data, int size) { return msm_rpm_add_kvp_data_common(handle, key, data, size, false); } EXPORT_SYMBOL(msm_rpm_add_kvp_data); int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle, uint32_t key, const uint8_t *data, int size) { return msm_rpm_add_kvp_data_common(handle, key, data, size, true); } EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq); /* Runs in interrupt context */ static void msm_rpm_notify(void *data, unsigned event) { struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data; BUG_ON(!pdata); if (!(pdata->ch_info)) return; switch (event) { case SMD_EVENT_DATA: queue_work(msm_rpm_smd_wq, &pdata->work); break; case SMD_EVENT_OPEN: complete(&pdata->smd_open); break; case SMD_EVENT_CLOSE: case SMD_EVENT_STATUS: case SMD_EVENT_REOPEN_READY: break; default: pr_info("Unknown SMD event\n"); } } static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id) { struct list_head *ptr; struct msm_rpm_wait_data *elem; unsigned long flags; spin_lock_irqsave(&msm_rpm_list_lock, flags); list_for_each(ptr, &msm_rpm_wait_list) { elem = list_entry(ptr, struct msm_rpm_wait_data, list); if (elem && (elem->msg_id == msg_id)) break; elem = NULL; } spin_unlock_irqrestore(&msm_rpm_list_lock, flags); return elem; } static uint32_t msm_rpm_get_next_msg_id(void) { uint32_t id; /* * A message id of 0 is used by the driver to indicate a error * condition. The RPM driver uses a id of 1 to indicate unsent data * when the data sent over hasn't been modified. This isn't a error * scenario and wait for ack returns a success when the message id is 1. 
*/ do { id = atomic_inc_return(&msm_rpm_msg_id); } while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id)); return id; } static int msm_rpm_add_wait_list(uint32_t msg_id) { unsigned long flags; struct msm_rpm_wait_data *data = kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC); if (!data) return -ENOMEM; init_completion(&data->ack); data->ack_recd = false; data->msg_id = msg_id; data->errno = INIT_ERROR; spin_lock_irqsave(&msm_rpm_list_lock, flags); list_add(&data->list, &msm_rpm_wait_list); spin_unlock_irqrestore(&msm_rpm_list_lock, flags); return 0; } static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem) { unsigned long flags; spin_lock_irqsave(&msm_rpm_list_lock, flags); list_del(&elem->list); spin_unlock_irqrestore(&msm_rpm_list_lock, flags); kfree(elem); } static void msm_rpm_process_ack(uint32_t msg_id, int errno) { struct list_head *ptr; struct msm_rpm_wait_data *elem; unsigned long flags; spin_lock_irqsave(&msm_rpm_list_lock, flags); list_for_each(ptr, &msm_rpm_wait_list) { elem = list_entry(ptr, struct msm_rpm_wait_data, list); if (elem && (elem->msg_id == msg_id)) { elem->errno = errno; elem->ack_recd = true; complete(&elem->ack); break; } elem = NULL; } WARN_ON(!elem); spin_unlock_irqrestore(&msm_rpm_list_lock, flags); } struct msm_rpm_kvp_packet { uint32_t id; uint32_t len; uint32_t val; }; static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf) { return ((struct msm_rpm_ack_msg *)buf)->id_ack; } static inline int msm_rpm_get_error_from_ack(uint8_t *buf) { uint8_t *tmp; uint32_t req_len = ((struct msm_rpm_ack_msg *)buf)->req_len; int rc = -ENODEV; req_len -= sizeof(struct msm_rpm_ack_msg); req_len += 2 * sizeof(uint32_t); if (!req_len) return 0; tmp = buf + sizeof(struct msm_rpm_ack_msg); BUG_ON(memcmp(tmp, ERR, sizeof(uint32_t))); tmp += 2 * sizeof(uint32_t); if (!(memcmp(tmp, INV_RSC, min(req_len, sizeof(INV_RSC))-1))) { pr_err("%s(): RPM NACK Unsupported resource\n", __func__); rc = -EINVAL; } else { 
pr_err("%s(): RPM NACK Invalid header\n", __func__); } return rc; } static int msm_rpm_read_smd_data(char *buf) { int pkt_sz; int bytes_read = 0; pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info); if (!pkt_sz) return -EAGAIN; BUG_ON(pkt_sz > MAX_ERR_BUFFER_SIZE); if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info)) return -EAGAIN; do { int len; len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz); pkt_sz -= len; bytes_read += len; } while (pkt_sz > 0); BUG_ON(pkt_sz < 0); return 0; } static void msm_rpm_smd_work(struct work_struct *work) { uint32_t msg_id; int errno; char buf[MAX_ERR_BUFFER_SIZE] = {0}; unsigned long flags; while (smd_is_pkt_avail(msm_rpm_data.ch_info) && !irq_process) { spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags); if (msm_rpm_read_smd_data(buf)) { spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags); break; } msg_id = msm_rpm_get_msg_id_from_ack(buf); errno = msm_rpm_get_error_from_ack(buf); msm_rpm_process_ack(msg_id, errno); spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags); } } #define DEBUG_PRINT_BUFFER_SIZE 512 static void msm_rpm_log_request(struct msm_rpm_request *cdata) { char buf[DEBUG_PRINT_BUFFER_SIZE]; size_t buflen = DEBUG_PRINT_BUFFER_SIZE; char name[5]; u32 value; int i, j, prev_valid; int valid_count = 0; int pos = 0; name[4] = 0; for (i = 0; i < cdata->write_idx; i++) if (cdata->kvp[i].valid) valid_count++; pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO); if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID) pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ", cdata->msg_hdr.msg_id); pos += scnprintf(buf + pos, buflen - pos, "s=%s", (cdata->msg_hdr.set == MSM_RPM_CTX_ACTIVE_SET ? 
"act" : "slp")); if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) { /* Both pretty and raw formatting */ memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t)); pos += scnprintf(buf + pos, buflen - pos, ", rsc_type=0x%08X (%s), rsc_id=%u; ", cdata->msg_hdr.resource_type, name, cdata->msg_hdr.resource_id); for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) { if (!cdata->kvp[i].valid) continue; memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t)); pos += scnprintf(buf + pos, buflen - pos, "[key=0x%08X (%s), value=%s", cdata->kvp[i].key, name, (cdata->kvp[i].nbytes ? "0x" : "null")); for (j = 0; j < cdata->kvp[i].nbytes; j++) pos += scnprintf(buf + pos, buflen - pos, "%02X ", cdata->kvp[i].value[j]); if (cdata->kvp[i].nbytes) pos += scnprintf(buf + pos, buflen - pos, "("); for (j = 0; j < cdata->kvp[i].nbytes; j += 4) { value = 0; memcpy(&value, &cdata->kvp[i].value[j], min(sizeof(uint32_t), cdata->kvp[i].nbytes - j)); pos += scnprintf(buf + pos, buflen - pos, "%u", value); if (j + 4 < cdata->kvp[i].nbytes) pos += scnprintf(buf + pos, buflen - pos, " "); } if (cdata->kvp[i].nbytes) pos += scnprintf(buf + pos, buflen - pos, ")"); pos += scnprintf(buf + pos, buflen - pos, "]"); if (prev_valid + 1 < valid_count) pos += scnprintf(buf + pos, buflen - pos, ", "); prev_valid++; } } else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) { /* Pretty formatting only */ memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t)); pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name, cdata->msg_hdr.resource_id); for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) { if (!cdata->kvp[i].valid) continue; memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t)); pos += scnprintf(buf + pos, buflen - pos, "%s=%s", name, (cdata->kvp[i].nbytes ? 
"" : "null")); for (j = 0; j < cdata->kvp[i].nbytes; j += 4) { value = 0; memcpy(&value, &cdata->kvp[i].value[j], min(sizeof(uint32_t), cdata->kvp[i].nbytes - j)); pos += scnprintf(buf + pos, buflen - pos, "%u", value); if (j + 4 < cdata->kvp[i].nbytes) pos += scnprintf(buf + pos, buflen - pos, " "); } if (prev_valid + 1 < valid_count) pos += scnprintf(buf + pos, buflen - pos, ", "); prev_valid++; } } else { /* Raw formatting only */ pos += scnprintf(buf + pos, buflen - pos, ", rsc_type=0x%08X, rsc_id=%u; ", cdata->msg_hdr.resource_type, cdata->msg_hdr.resource_id); for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) { if (!cdata->kvp[i].valid) continue; pos += scnprintf(buf + pos, buflen - pos, "[key=0x%08X, value=%s", cdata->kvp[i].key, (cdata->kvp[i].nbytes ? "0x" : "null")); for (j = 0; j < cdata->kvp[i].nbytes; j++) { pos += scnprintf(buf + pos, buflen - pos, "%02X", cdata->kvp[i].value[j]); if (j + 1 < cdata->kvp[i].nbytes) pos += scnprintf(buf + pos, buflen - pos, " "); } pos += scnprintf(buf + pos, buflen - pos, "]"); if (prev_valid + 1 < valid_count) pos += scnprintf(buf + pos, buflen - pos, ", "); prev_valid++; } } pos += scnprintf(buf + pos, buflen - pos, "\n"); printk(buf); } static int msm_rpm_send_data(struct msm_rpm_request *cdata, int msg_type, bool noirq) { uint8_t *tmpbuff; int i, ret, msg_size; unsigned long flags; int req_hdr_sz, msg_hdr_sz; if (!cdata->msg_hdr.data_len) return 1; req_hdr_sz = sizeof(cdata->req_hdr); msg_hdr_sz = sizeof(cdata->msg_hdr); cdata->req_hdr.service_type = msm_rpm_request_service[msg_type]; cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id(); cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz; msg_size = cdata->req_hdr.request_len + req_hdr_sz; /* populate data_len */ if (msg_size > cdata->numbytes) { kfree(cdata->buf); cdata->numbytes = msg_size; cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq)); } if (!cdata->buf) { pr_err("%s(): Failed malloc\n", __func__); return 0; } tmpbuff = cdata->buf; 
memcpy(tmpbuff, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz); tmpbuff += req_hdr_sz + msg_hdr_sz; for (i = 0; (i < cdata->write_idx); i++) { /* Sanity check */ BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes); if (!cdata->kvp[i].valid) continue; memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t)); tmpbuff += sizeof(uint32_t); memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t)); tmpbuff += sizeof(uint32_t); memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes); tmpbuff += cdata->kvp[i].nbytes; } if (msm_rpm_debug_mask & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW)) msm_rpm_log_request(cdata); if (standalone) { for (i = 0; (i < cdata->write_idx); i++) cdata->kvp[i].valid = false; cdata->msg_hdr.data_len = 0; ret = cdata->msg_hdr.msg_id; return ret; } msm_rpm_add_wait_list(cdata->msg_hdr.msg_id); spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags); ret = smd_write_avail(msm_rpm_data.ch_info); if (ret < 0) { pr_err("%s(): SMD not initialized\n", __func__); spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags); return 0; } while ((ret < msg_size)) { if (!noirq) { spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags); cpu_relax(); spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags); } else udelay(5); ret = smd_write_avail(msm_rpm_data.ch_info); } ret = smd_write(msm_rpm_data.ch_info, &cdata->buf[0], msg_size); spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags); if (ret == msg_size) { for (i = 0; (i < cdata->write_idx); i++) cdata->kvp[i].valid = false; cdata->msg_hdr.data_len = 0; ret = cdata->msg_hdr.msg_id; } else if (ret < msg_size) { struct msm_rpm_wait_data *rc; ret = 0; pr_err("Failed to write data msg_size:%d ret:%d\n", msg_size, ret); rc = msm_rpm_get_entry_from_msg_id(cdata->msg_hdr.msg_id); if (rc) msm_rpm_free_list_entry(rc); } return ret; } int msm_rpm_send_request(struct msm_rpm_request *handle) { return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false); } EXPORT_SYMBOL(msm_rpm_send_request); int 
msm_rpm_send_request_noirq(struct msm_rpm_request *handle) { return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true); } EXPORT_SYMBOL(msm_rpm_send_request_noirq); int msm_rpm_wait_for_ack(uint32_t msg_id) { struct msm_rpm_wait_data *elem; if (!msg_id) { pr_err("%s(): Invalid msg id\n", __func__); return -ENOMEM; } if (msg_id == 1) return 0; if (standalone) return 0; elem = msm_rpm_get_entry_from_msg_id(msg_id); if (!elem) return 0; wait_for_completion(&elem->ack); msm_rpm_free_list_entry(elem); return elem->errno; } EXPORT_SYMBOL(msm_rpm_wait_for_ack); int msm_rpm_wait_for_ack_noirq(uint32_t msg_id) { struct msm_rpm_wait_data *elem; unsigned long flags; int rc = 0; uint32_t id = 0; if (!msg_id) { pr_err("%s(): Invalid msg id\n", __func__); return -ENOMEM; } if (msg_id == 1) return 0; if (standalone) return 0; spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags); irq_process = true; elem = msm_rpm_get_entry_from_msg_id(msg_id); if (!elem) /* Should this be a bug * Is it ok for another thread to read the msg? 
*/ goto wait_ack_cleanup; if (elem->errno != INIT_ERROR) { rc = elem->errno; msm_rpm_free_list_entry(elem); goto wait_ack_cleanup; } while (id != msg_id) { if (smd_is_pkt_avail(msm_rpm_data.ch_info)) { int errno; char buf[MAX_ERR_BUFFER_SIZE] = {}; msm_rpm_read_smd_data(buf); id = msm_rpm_get_msg_id_from_ack(buf); errno = msm_rpm_get_error_from_ack(buf); msm_rpm_process_ack(id, errno); } } rc = elem->errno; msm_rpm_free_list_entry(elem); wait_ack_cleanup: irq_process = false; spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags); return rc; } EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq); int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems) { int i, rc; struct msm_rpm_request *req = msm_rpm_create_request(set, rsc_type, rsc_id, nelems); if (!req) return -ENOMEM; for (i = 0; i < nelems; i++) { rc = msm_rpm_add_kvp_data(req, kvp[i].key, kvp[i].data, kvp[i].length); if (rc) goto bail; } rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req)); bail: msm_rpm_free_request(req); return rc; } EXPORT_SYMBOL(msm_rpm_send_message); int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems) { int i, rc; struct msm_rpm_request *req = msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems); if (!req) return -ENOMEM; for (i = 0; i < nelems; i++) { rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key, kvp[i].data, kvp[i].length); if (rc) goto bail; } rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req)); bail: msm_rpm_free_request(req); return rc; } EXPORT_SYMBOL(msm_rpm_send_message_noirq); /** * During power collapse, the rpm driver disables the SMD interrupts to make * sure that the interrupt doesn't wakes us from sleep. 
*/ int msm_rpm_enter_sleep(void) { return smd_mask_receive_interrupt(msm_rpm_data.ch_info, true); } EXPORT_SYMBOL(msm_rpm_enter_sleep); /** * When the system resumes from power collapse, the SMD interrupt disabled by * enter function has to reenabled to continue processing SMD message. */ void msm_rpm_exit_sleep(void) { smd_mask_receive_interrupt(msm_rpm_data.ch_info, false); } EXPORT_SYMBOL(msm_rpm_exit_sleep); static bool msm_rpm_set_standalone(void) { if (machine_is_msm8974()) { pr_warn("%s(): Running in standalone mode, requests " "will not be sent to RPM\n", __func__); standalone = true; } return standalone; } static int __devinit msm_rpm_dev_probe(struct platform_device *pdev) { char *key = NULL; int ret; key = "rpm-channel-name"; ret = of_property_read_string(pdev->dev.of_node, key, &msm_rpm_data.ch_name); if (ret) goto fail; key = "rpm-channel-type"; ret = of_property_read_u32(pdev->dev.of_node, key, &msm_rpm_data.ch_type); if (ret) goto fail; init_completion(&msm_rpm_data.smd_open); spin_lock_init(&msm_rpm_data.smd_lock_write); spin_lock_init(&msm_rpm_data.smd_lock_read); INIT_WORK(&msm_rpm_data.work, msm_rpm_smd_work); if (smd_named_open_on_edge(msm_rpm_data.ch_name, msm_rpm_data.ch_type, &msm_rpm_data.ch_info, &msm_rpm_data, msm_rpm_notify)) { pr_info("Cannot open RPM channel %s %d\n", msm_rpm_data.ch_name, msm_rpm_data.ch_type); msm_rpm_set_standalone(); BUG_ON(!standalone); complete(&msm_rpm_data.smd_open); } wait_for_completion(&msm_rpm_data.smd_open); smd_disable_read_intr(msm_rpm_data.ch_info); if (!standalone) { msm_rpm_smd_wq = create_singlethread_workqueue("rpm-smd"); if (!msm_rpm_smd_wq) return -EINVAL; } of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); return 0; fail: pr_err("%s(): Failed to read node: %s, key=%s\n", __func__, pdev->dev.of_node->full_name, key); return -EINVAL; } static struct of_device_id msm_rpm_match_table[] = { {.compatible = "qcom,rpm-smd"}, {}, }; static struct platform_driver msm_rpm_device_driver = { 
.probe = msm_rpm_dev_probe, .driver = { .name = "rpm-smd", .owner = THIS_MODULE, .of_match_table = msm_rpm_match_table, }, }; int __init msm_rpm_driver_init(void) { static bool registered; if (registered) return 0; registered = true; return platform_driver_register(&msm_rpm_device_driver); } EXPORT_SYMBOL(msm_rpm_driver_init); late_initcall(msm_rpm_driver_init);
gpl-2.0
Bdaman80/BDA-Vivid
drivers/staging/rtl8187se/r8180_rtl8225z2.c
1014
27809
/* * This is part of the rtl8180-sa2400 driver * released under the GPL (See file COPYING for details). * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it> * * This files contains programming code for the rtl8225 * radio frontend. * * *Many* thanks to Realtek Corp. for their great support! */ #include "r8180_hw.h" #include "r8180_rtl8225.h" #include "r8180_93cx6.h" #include "ieee80211/dot11d.h" static void write_rtl8225(struct net_device *dev, u8 adr, u16 data) { int i; u16 out, select; u8 bit; u32 bangdata = (data << 4) | (adr & 0xf); out = read_nic_word(dev, RFPinsOutput) & 0xfff3; write_nic_word(dev, RFPinsEnable, (read_nic_word(dev, RFPinsEnable) | 0x7)); select = read_nic_word(dev, RFPinsSelect); write_nic_word(dev, RFPinsSelect, select | 0x7 | SW_CONTROL_GPIO); force_pci_posting(dev); udelay(10); write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN); force_pci_posting(dev); udelay(2); write_nic_word(dev, RFPinsOutput, out); force_pci_posting(dev); udelay(10); for (i = 15; i >= 0; i--) { bit = (bangdata & (1 << i)) >> i; write_nic_word(dev, RFPinsOutput, bit | out); write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK); write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK); i--; bit = (bangdata & (1 << i)) >> i; write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK); write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK); write_nic_word(dev, RFPinsOutput, bit | out); } write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN); force_pci_posting(dev); udelay(10); write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN); write_nic_word(dev, RFPinsSelect, select | SW_CONTROL_GPIO); rtl8185_rf_pins_enable(dev); } static const u16 rtl8225bcd_rxgain[] = { 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409, 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644, 0x0645, 
0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688, 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745, 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789, 0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793, 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d, 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9, 0x07aa, 0x07ab, 0x07ac, 0x07ad, 0x07b0, 0x07b1, 0x07b2, 0x07b3, 0x07b4, 0x07b5, 0x07b8, 0x07b9, 0x07ba, 0x07bb, 0x07bb }; static const u8 rtl8225_agc[] = { 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96, 0x95, 0x94, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e, 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, }; static const u8 rtl8225_gain[] = { 0x23, 0x88, 0x7c, 0xa5, /* -82dBm */ 0x23, 0x88, 0x7c, 0xb5, /* -82dBm */ 0x23, 0x88, 0x7c, 0xc5, /* -82dBm */ 0x33, 0x80, 0x79, 0xc5, /* -78dBm */ 0x43, 0x78, 0x76, 0xc5, /* -74dBm */ 0x53, 0x60, 0x73, 0xc5, /* -70dBm */ 0x63, 0x58, 0x70, 0xc5, /* -66dBm */ }; static const u8 rtl8225_tx_gain_cck_ofdm[] = { 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e }; static const u8 rtl8225_tx_power_cck[] = { 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02, 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02, 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02, 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02, 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03, 0x2b, 0x2a, 0x25, 0x1e, 0x16, 
0x0e, 0x07, 0x03 }; static const u8 rtl8225_tx_power_cck_ch14[] = { 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00, 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00 }; static const u8 rtl8225_tx_power_ofdm[] = { 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4 }; static const u32 rtl8225_chan[] = { 0, 0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380, 0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A, }; static void rtl8225_SetTXPowerLevel(struct net_device *dev, short ch) { struct r8180_priv *priv = ieee80211_priv(dev); int GainIdx; int GainSetting; int i; u8 power; const u8 *cck_power_table; u8 max_cck_power_level; u8 max_ofdm_power_level; u8 min_ofdm_power_level; u8 cck_power_level = 0xff & priv->chtxpwr[ch]; u8 ofdm_power_level = 0xff & priv->chtxpwr_ofdm[ch]; max_cck_power_level = 35; max_ofdm_power_level = 35; min_ofdm_power_level = 0; if (cck_power_level > max_cck_power_level) cck_power_level = max_cck_power_level; GainIdx = cck_power_level % 6; GainSetting = cck_power_level / 6; if (ch == 14) cck_power_table = rtl8225_tx_power_cck_ch14; else cck_power_table = rtl8225_tx_power_cck; write_nic_byte(dev, TX_GAIN_CCK, rtl8225_tx_gain_cck_ofdm[GainSetting] >> 1); for (i = 0; i < 8; i++) { power = cck_power_table[GainIdx * 8 + i]; write_phy_cck(dev, 0x44 + i, power); } /* FIXME Is this delay really needeed ? 
*/ force_pci_posting(dev); mdelay(1); if (ofdm_power_level > (max_ofdm_power_level - min_ofdm_power_level)) ofdm_power_level = max_ofdm_power_level; else ofdm_power_level += min_ofdm_power_level; if (ofdm_power_level > 35) ofdm_power_level = 35; GainIdx = ofdm_power_level % 6; GainSetting = ofdm_power_level / 6; rtl8185_set_anaparam2(dev, RTL8225_ANAPARAM2_ON); write_phy_ofdm(dev, 2, 0x42); write_phy_ofdm(dev, 6, 0x00); write_phy_ofdm(dev, 8, 0x00); write_nic_byte(dev, TX_GAIN_OFDM, rtl8225_tx_gain_cck_ofdm[GainSetting] >> 1); power = rtl8225_tx_power_ofdm[GainIdx]; write_phy_ofdm(dev, 5, power); write_phy_ofdm(dev, 7, power); force_pci_posting(dev); mdelay(1); } static const u8 rtl8225z2_threshold[] = { 0x8d, 0x8d, 0x8d, 0x8d, 0x9d, 0xad, 0xbd, }; static const u8 rtl8225z2_gain_bg[] = { 0x23, 0x15, 0xa5, /* -82-1dBm */ 0x23, 0x15, 0xb5, /* -82-2dBm */ 0x23, 0x15, 0xc5, /* -82-3dBm */ 0x33, 0x15, 0xc5, /* -78dBm */ 0x43, 0x15, 0xc5, /* -74dBm */ 0x53, 0x15, 0xc5, /* -70dBm */ 0x63, 0x15, 0xc5, /* -66dBm */ }; static const u8 rtl8225z2_gain_a[] = { 0x13, 0x27, 0x5a, /* -82dBm */ 0x23, 0x23, 0x58, /* -82dBm */ 0x33, 0x1f, 0x56, /* -82dBm */ 0x43, 0x1b, 0x54, /* -78dBm */ 0x53, 0x17, 0x51, /* -74dBm */ 0x63, 0x24, 0x4f, /* -70dBm */ 0x73, 0x0f, 0x4c, /* -66dBm */ }; static const u16 rtl8225z2_rxgain[] = { 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409, 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541, 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644, 0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688, 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745, 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789, 0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793, 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d, 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9, 0x03aa, 0x03ab, 0x03ac, 0x03ad, 
0x03b0, 0x03b1, 0x03b2, 0x03b3, 0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bb }; static const u8 ZEBRA2_CCK_OFDM_GAIN_SETTING[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, }; static const u8 rtl8225z2_tx_power_ofdm[] = { 0x42, 0x00, 0x40, 0x00, 0x40 }; static const u8 rtl8225z2_tx_power_cck_ch14[] = { 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00 }; static const u8 rtl8225z2_tx_power_cck[] = { 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04 }; void rtl8225z2_set_gain(struct net_device *dev, short gain) { const u8 *rtl8225_gain; struct r8180_priv *priv = ieee80211_priv(dev); u8 mode = priv->ieee80211->mode; if (mode == IEEE_B || mode == IEEE_G) rtl8225_gain = rtl8225z2_gain_bg; else rtl8225_gain = rtl8225z2_gain_a; write_phy_ofdm(dev, 0x0b, rtl8225_gain[gain * 3]); write_phy_ofdm(dev, 0x1b, rtl8225_gain[gain * 3 + 1]); write_phy_ofdm(dev, 0x1d, rtl8225_gain[gain * 3 + 2]); write_phy_ofdm(dev, 0x21, 0x37); } static u32 read_rtl8225(struct net_device *dev, u8 adr) { u32 data2Write = ((u32)(adr & 0x1f)) << 27; u32 dataRead; u32 mask; u16 oval, oval2, oval3, tmp; int i; short bit, rw; u8 wLength = 6; u8 rLength = 12; u8 low2high = 0; oval = read_nic_word(dev, RFPinsOutput); oval2 = read_nic_word(dev, RFPinsEnable); oval3 = read_nic_word(dev, RFPinsSelect); write_nic_word(dev, RFPinsEnable, (oval2|0xf)); write_nic_word(dev, RFPinsSelect, (oval3|0xf)); dataRead = 0; oval &= ~0xf; write_nic_word(dev, RFPinsOutput, oval | BB_HOST_BANG_EN); udelay(4); write_nic_word(dev, RFPinsOutput, oval); udelay(5); rw = 0; mask = (low2high) ? 0x01 : (((u32)0x01)<<(32-1)); for (i = 0; i < wLength/2; i++) { bit = ((data2Write&mask) != 0) ? 
1 : 0; write_nic_word(dev, RFPinsOutput, bit | oval | rw); udelay(1); write_nic_word(dev, RFPinsOutput, bit | oval | BB_HOST_BANG_CLK | rw); udelay(2); write_nic_word(dev, RFPinsOutput, bit | oval | BB_HOST_BANG_CLK | rw); udelay(2); mask = (low2high) ? (mask<<1) : (mask>>1); if (i == 2) { rw = BB_HOST_BANG_RW; write_nic_word(dev, RFPinsOutput, bit | oval | BB_HOST_BANG_CLK | rw); udelay(2); write_nic_word(dev, RFPinsOutput, bit | oval | rw); udelay(2); break; } bit = ((data2Write&mask) != 0) ? 1 : 0; write_nic_word(dev, RFPinsOutput, oval | bit | rw | BB_HOST_BANG_CLK); udelay(2); write_nic_word(dev, RFPinsOutput, oval | bit | rw | BB_HOST_BANG_CLK); udelay(2); write_nic_word(dev, RFPinsOutput, oval | bit | rw); udelay(1); mask = (low2high) ? (mask<<1) : (mask>>1); } write_nic_word(dev, RFPinsOutput, rw|oval); udelay(2); mask = (low2high) ? 0x01 : (((u32)0x01) << (12-1)); /* * We must set data pin to HW controled, otherwise RF can't driver it * and value RF register won't be able to read back properly. */ write_nic_word(dev, RFPinsEnable, (oval2 & (~0x01))); for (i = 0; i < rLength; i++) { write_nic_word(dev, RFPinsOutput, rw|oval); udelay(1); write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK); udelay(2); write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK); udelay(2); write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK); udelay(2); tmp = read_nic_word(dev, RFPinsInput); dataRead |= (tmp & BB_HOST_BANG_CLK ? mask : 0); write_nic_word(dev, RFPinsOutput, (rw|oval)); udelay(2); mask = (low2high) ? 
(mask<<1) : (mask>>1); } write_nic_word(dev, RFPinsOutput, BB_HOST_BANG_EN | BB_HOST_BANG_RW | oval); udelay(2); write_nic_word(dev, RFPinsEnable, oval2); write_nic_word(dev, RFPinsSelect, oval3); /* Set To SW Switch */ write_nic_word(dev, RFPinsOutput, 0x3a0); return dataRead; } short rtl8225_is_V_z2(struct net_device *dev) { short vz2 = 1; if (read_rtl8225(dev, 8) != 0x588) vz2 = 0; else /* reg 9 pg 1 = 24 */ if (read_rtl8225(dev, 9) != 0x700) vz2 = 0; /* sw back to pg 0 */ write_rtl8225(dev, 0, 0xb7); return vz2; } void rtl8225z2_rf_close(struct net_device *dev) { RF_WriteReg(dev, 0x4, 0x1f); force_pci_posting(dev); mdelay(1); rtl8180_set_anaparam(dev, RTL8225z2_ANAPARAM_OFF); rtl8185_set_anaparam2(dev, RTL8225z2_ANAPARAM2_OFF); } /* * Map dBm into Tx power index according to current HW model, for example, * RF and PA, and current wireless mode. */ s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode, s32 PowerInDbm) { bool bUseDefault = true; s8 TxPwrIdx = 0; /* * OFDM Power in dBm = Index * 0.5 + 0 * CCK Power in dBm = Index * 0.25 + 13 */ s32 tmp = 0; if (WirelessMode == WIRELESS_MODE_G) { bUseDefault = false; tmp = (2 * PowerInDbm); if (tmp < 0) TxPwrIdx = 0; else if (tmp > 40) /* 40 means 20 dBm. */ TxPwrIdx = 40; else TxPwrIdx = (s8)tmp; } else if (WirelessMode == WIRELESS_MODE_B) { bUseDefault = false; tmp = (4 * PowerInDbm) - 52; if (tmp < 0) TxPwrIdx = 0; else if (tmp > 28) /* 28 means 20 dBm. */ TxPwrIdx = 28; else TxPwrIdx = (s8)tmp; } /* * TRUE if we want to use a default implementation. * We shall set it to FALSE when we have exact translation formular * for target IC. 070622, by rcnjko. 
*/ if (bUseDefault) { if (PowerInDbm < 0) TxPwrIdx = 0; else if (PowerInDbm > 35) TxPwrIdx = 35; else TxPwrIdx = (u8)PowerInDbm; } return TxPwrIdx; } void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch) { struct r8180_priv *priv = ieee80211_priv(dev); u8 max_cck_power_level; u8 max_ofdm_power_level; u8 min_ofdm_power_level; char cck_power_level = (char)(0xff & priv->chtxpwr[ch]); char ofdm_power_level = (char)(0xff & priv->chtxpwr_ofdm[ch]); if (IS_DOT11D_ENABLE(priv->ieee80211) && IS_DOT11D_STATE_DONE(priv->ieee80211)) { u8 MaxTxPwrInDbm = DOT11D_GetMaxTxPwrInDbm(priv->ieee80211, ch); u8 CckMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_B, MaxTxPwrInDbm); u8 OfdmMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_G, MaxTxPwrInDbm); if (cck_power_level > CckMaxPwrIdx) cck_power_level = CckMaxPwrIdx; if (ofdm_power_level > OfdmMaxPwrIdx) ofdm_power_level = OfdmMaxPwrIdx; } max_cck_power_level = 15; max_ofdm_power_level = 25; min_ofdm_power_level = 10; if (cck_power_level > 35) cck_power_level = 35; write_nic_byte(dev, CCK_TXAGC, (ZEBRA2_CCK_OFDM_GAIN_SETTING[(u8)cck_power_level])); force_pci_posting(dev); mdelay(1); if (ofdm_power_level > 35) ofdm_power_level = 35; if (priv->up == 0) { write_phy_ofdm(dev, 2, 0x42); write_phy_ofdm(dev, 5, 0x00); write_phy_ofdm(dev, 6, 0x40); write_phy_ofdm(dev, 7, 0x00); write_phy_ofdm(dev, 8, 0x40); } write_nic_byte(dev, OFDM_TXAGC, ZEBRA2_CCK_OFDM_GAIN_SETTING[(u8)ofdm_power_level]); if (ofdm_power_level <= 11) { write_phy_ofdm(dev, 0x07, 0x5c); write_phy_ofdm(dev, 0x09, 0x5c); } if (ofdm_power_level <= 17) { write_phy_ofdm(dev, 0x07, 0x54); write_phy_ofdm(dev, 0x09, 0x54); } else { write_phy_ofdm(dev, 0x07, 0x50); write_phy_ofdm(dev, 0x09, 0x50); } force_pci_posting(dev); mdelay(1); } void rtl8225z2_rf_set_chan(struct net_device *dev, short ch) { rtl8225z2_SetTXPowerLevel(dev, ch); RF_WriteReg(dev, 0x7, rtl8225_chan[ch]); if ((RF_ReadReg(dev, 0x7) & 0x0F80) != rtl8225_chan[ch]) RF_WriteReg(dev, 0x7, rtl8225_chan[ch]); 
mdelay(1); force_pci_posting(dev); mdelay(10); } static void rtl8225_host_pci_init(struct net_device *dev) { write_nic_word(dev, RFPinsOutput, 0x480); rtl8185_rf_pins_enable(dev); write_nic_word(dev, RFPinsSelect, 0x88 | SW_CONTROL_GPIO); write_nic_byte(dev, GP_ENABLE, 0); force_pci_posting(dev); mdelay(200); /* bit 6 is for RF on/off detection */ write_nic_word(dev, GP_ENABLE, 0xff & (~(1 << 6))); } static void rtl8225_rf_set_chan(struct net_device *dev, short ch) { struct r8180_priv *priv = ieee80211_priv(dev); short gset = (priv->ieee80211->state == IEEE80211_LINKED && ieee80211_is_54g(priv->ieee80211->current_network)) || priv->ieee80211->iw_mode == IW_MODE_MONITOR; rtl8225_SetTXPowerLevel(dev, ch); write_rtl8225(dev, 0x7, rtl8225_chan[ch]); force_pci_posting(dev); mdelay(10); if (gset) { write_nic_byte(dev, SIFS, 0x22); write_nic_byte(dev, DIFS, 0x14); } else { write_nic_byte(dev, SIFS, 0x44); write_nic_byte(dev, DIFS, 0x24); } if (priv->ieee80211->state == IEEE80211_LINKED && ieee80211_is_shortslot(priv->ieee80211->current_network)) write_nic_byte(dev, SLOT, 0x9); else write_nic_byte(dev, SLOT, 0x14); if (gset) { write_nic_byte(dev, EIFS, 81); write_nic_byte(dev, CW_VAL, 0x73); } else { write_nic_byte(dev, EIFS, 81); write_nic_byte(dev, CW_VAL, 0xa5); } } void rtl8225z2_rf_init(struct net_device *dev) { struct r8180_priv *priv = ieee80211_priv(dev); int i; short channel = 1; u16 brsr; u32 data, addr; priv->chan = channel; rtl8225_host_pci_init(dev); write_nic_dword(dev, RF_TIMING, 0x000a8008); brsr = read_nic_word(dev, BRSR); write_nic_word(dev, BRSR, 0xffff); write_nic_dword(dev, RF_PARA, 0x100044); rtl8180_set_mode(dev, EPROM_CMD_CONFIG); write_nic_byte(dev, CONFIG3, 0x44); rtl8180_set_mode(dev, EPROM_CMD_NORMAL); rtl8185_rf_pins_enable(dev); write_rtl8225(dev, 0x0, 0x2bf); mdelay(1); write_rtl8225(dev, 0x1, 0xee0); mdelay(1); write_rtl8225(dev, 0x2, 0x44d); mdelay(1); write_rtl8225(dev, 0x3, 0x441); mdelay(1); write_rtl8225(dev, 0x4, 0x8c3); mdelay(1); 
write_rtl8225(dev, 0x5, 0xc72); mdelay(1); write_rtl8225(dev, 0x6, 0xe6); mdelay(1); write_rtl8225(dev, 0x7, rtl8225_chan[channel]); mdelay(1); write_rtl8225(dev, 0x8, 0x3f); mdelay(1); write_rtl8225(dev, 0x9, 0x335); mdelay(1); write_rtl8225(dev, 0xa, 0x9d4); mdelay(1); write_rtl8225(dev, 0xb, 0x7bb); mdelay(1); write_rtl8225(dev, 0xc, 0x850); mdelay(1); write_rtl8225(dev, 0xd, 0xcdf); mdelay(1); write_rtl8225(dev, 0xe, 0x2b); mdelay(1); write_rtl8225(dev, 0xf, 0x114); mdelay(100); write_rtl8225(dev, 0x0, 0x1b7); for (i = 0; i < 95; i++) { write_rtl8225(dev, 0x1, (u8)(i + 1)); write_rtl8225(dev, 0x2, rtl8225z2_rxgain[i]); } write_rtl8225(dev, 0x3, 0x80); write_rtl8225(dev, 0x5, 0x4); write_rtl8225(dev, 0x0, 0xb7); write_rtl8225(dev, 0x2, 0xc4d); /* FIXME!! rtl8187 we have to check if calibrarion * is successful and eventually cal. again (repeat * the two write on reg 2) */ data = read_rtl8225(dev, 6); if (!(data & 0x00000080)) { write_rtl8225(dev, 0x02, 0x0c4d); force_pci_posting(dev); mdelay(200); write_rtl8225(dev, 0x02, 0x044d); force_pci_posting(dev); mdelay(100); data = read_rtl8225(dev, 6); if (!(data & 0x00000080)) DMESGW("RF Calibration Failed!!!!\n"); } mdelay(200); write_rtl8225(dev, 0x0, 0x2bf); for (i = 0; i < 128; i++) { data = rtl8225_agc[i]; addr = i + 0x80; /* enable writing AGC table */ write_phy_ofdm(dev, 0xb, data); mdelay(1); write_phy_ofdm(dev, 0xa, addr); mdelay(1); } force_pci_posting(dev); mdelay(1); write_phy_ofdm(dev, 0x00, 0x01); mdelay(1); write_phy_ofdm(dev, 0x01, 0x02); mdelay(1); write_phy_ofdm(dev, 0x02, 0x62); mdelay(1); write_phy_ofdm(dev, 0x03, 0x00); mdelay(1); write_phy_ofdm(dev, 0x04, 0x00); mdelay(1); write_phy_ofdm(dev, 0x05, 0x00); mdelay(1); write_phy_ofdm(dev, 0x06, 0x40); mdelay(1); write_phy_ofdm(dev, 0x07, 0x00); mdelay(1); write_phy_ofdm(dev, 0x08, 0x40); mdelay(1); write_phy_ofdm(dev, 0x09, 0xfe); mdelay(1); write_phy_ofdm(dev, 0x0a, 0x08); mdelay(1); write_phy_ofdm(dev, 0x0b, 0x80); mdelay(1); write_phy_ofdm(dev, 
0x0c, 0x01); mdelay(1); write_phy_ofdm(dev, 0x0d, 0x43); write_phy_ofdm(dev, 0x0e, 0xd3); mdelay(1); write_phy_ofdm(dev, 0x0f, 0x38); mdelay(1); write_phy_ofdm(dev, 0x10, 0x84); mdelay(1); write_phy_ofdm(dev, 0x11, 0x07); mdelay(1); write_phy_ofdm(dev, 0x12, 0x20); mdelay(1); write_phy_ofdm(dev, 0x13, 0x20); mdelay(1); write_phy_ofdm(dev, 0x14, 0x00); mdelay(1); write_phy_ofdm(dev, 0x15, 0x40); mdelay(1); write_phy_ofdm(dev, 0x16, 0x00); mdelay(1); write_phy_ofdm(dev, 0x17, 0x40); mdelay(1); write_phy_ofdm(dev, 0x18, 0xef); mdelay(1); write_phy_ofdm(dev, 0x19, 0x19); mdelay(1); write_phy_ofdm(dev, 0x1a, 0x20); mdelay(1); write_phy_ofdm(dev, 0x1b, 0x15); mdelay(1); write_phy_ofdm(dev, 0x1c, 0x04); mdelay(1); write_phy_ofdm(dev, 0x1d, 0xc5); mdelay(1); write_phy_ofdm(dev, 0x1e, 0x95); mdelay(1); write_phy_ofdm(dev, 0x1f, 0x75); mdelay(1); write_phy_ofdm(dev, 0x20, 0x1f); mdelay(1); write_phy_ofdm(dev, 0x21, 0x17); mdelay(1); write_phy_ofdm(dev, 0x22, 0x16); mdelay(1); write_phy_ofdm(dev, 0x23, 0x80); mdelay(1); /* FIXME maybe not needed */ write_phy_ofdm(dev, 0x24, 0x46); mdelay(1); write_phy_ofdm(dev, 0x25, 0x00); mdelay(1); write_phy_ofdm(dev, 0x26, 0x90); mdelay(1); write_phy_ofdm(dev, 0x27, 0x88); mdelay(1); rtl8225z2_set_gain(dev, 4); write_phy_cck(dev, 0x0, 0x98); mdelay(1); write_phy_cck(dev, 0x3, 0x20); mdelay(1); write_phy_cck(dev, 0x4, 0x7e); mdelay(1); write_phy_cck(dev, 0x5, 0x12); mdelay(1); write_phy_cck(dev, 0x6, 0xfc); mdelay(1); write_phy_cck(dev, 0x7, 0x78); mdelay(1); write_phy_cck(dev, 0x8, 0x2e); mdelay(1); write_phy_cck(dev, 0x10, 0x93); mdelay(1); write_phy_cck(dev, 0x11, 0x88); mdelay(1); write_phy_cck(dev, 0x12, 0x47); mdelay(1); write_phy_cck(dev, 0x13, 0xd0); write_phy_cck(dev, 0x19, 0x00); write_phy_cck(dev, 0x1a, 0xa0); write_phy_cck(dev, 0x1b, 0x08); write_phy_cck(dev, 0x40, 0x86); /* CCK Carrier Sense Threshold */ write_phy_cck(dev, 0x41, 0x8d); mdelay(1); write_phy_cck(dev, 0x42, 0x15); mdelay(1); write_phy_cck(dev, 0x43, 0x18); 
mdelay(1); write_phy_cck(dev, 0x44, 0x36); mdelay(1); write_phy_cck(dev, 0x45, 0x35); mdelay(1); write_phy_cck(dev, 0x46, 0x2e); mdelay(1); write_phy_cck(dev, 0x47, 0x25); mdelay(1); write_phy_cck(dev, 0x48, 0x1c); mdelay(1); write_phy_cck(dev, 0x49, 0x12); mdelay(1); write_phy_cck(dev, 0x4a, 0x09); mdelay(1); write_phy_cck(dev, 0x4b, 0x04); mdelay(1); write_phy_cck(dev, 0x4c, 0x05); mdelay(1); write_nic_byte(dev, 0x5b, 0x0d); mdelay(1); rtl8225z2_SetTXPowerLevel(dev, channel); /* RX antenna default to A */ write_phy_cck(dev, 0x11, 0x9b); mdelay(1); /* B: 0xDB */ write_phy_ofdm(dev, 0x26, 0x90); mdelay(1); /* B: 0x10 */ rtl8185_tx_antenna(dev, 0x03); /* B: 0x00 */ /* switch to high-speed 3-wire * last digit. 2 for both cck and ofdm */ write_nic_dword(dev, 0x94, 0x15c00002); rtl8185_rf_pins_enable(dev); rtl8225_rf_set_chan(dev, priv->chan); } void rtl8225z2_rf_set_mode(struct net_device *dev) { struct r8180_priv *priv = ieee80211_priv(dev); if (priv->ieee80211->mode == IEEE_A) { write_rtl8225(dev, 0x5, 0x1865); write_nic_dword(dev, RF_PARA, 0x10084); write_nic_dword(dev, RF_TIMING, 0xa8008); write_phy_ofdm(dev, 0x0, 0x0); write_phy_ofdm(dev, 0xa, 0x6); write_phy_ofdm(dev, 0xb, 0x99); write_phy_ofdm(dev, 0xf, 0x20); write_phy_ofdm(dev, 0x11, 0x7); rtl8225z2_set_gain(dev, 4); write_phy_ofdm(dev, 0x15, 0x40); write_phy_ofdm(dev, 0x17, 0x40); write_nic_dword(dev, 0x94, 0x10000000); } else { write_rtl8225(dev, 0x5, 0x1864); write_nic_dword(dev, RF_PARA, 0x10044); write_nic_dword(dev, RF_TIMING, 0xa8008); write_phy_ofdm(dev, 0x0, 0x1); write_phy_ofdm(dev, 0xa, 0x6); write_phy_ofdm(dev, 0xb, 0x99); write_phy_ofdm(dev, 0xf, 0x20); write_phy_ofdm(dev, 0x11, 0x7); rtl8225z2_set_gain(dev, 4); write_phy_ofdm(dev, 0x15, 0x40); write_phy_ofdm(dev, 0x17, 0x40); write_nic_dword(dev, 0x94, 0x04000002); } } #define MAX_DOZE_WAITING_TIMES_85B 20 #define MAX_POLLING_24F_TIMES_87SE 10 #define LPS_MAX_SLEEP_WAITING_TIMES_87SE 5 bool SetZebraRFPowerState8185(struct net_device *dev, 
RT_RF_POWER_STATE eRFPowerState)
{
	/*
	 * Drive the Zebra RF front-end of the RTL8185/8187SE into the
	 * requested power state (on / sleep / off) by sequencing AFE, RF
	 * and baseband register writes. Returns true on success, false if
	 * another state change is in progress or pending TX traffic
	 * prevented entering sleep.
	 */
	struct r8180_priv *priv = ieee80211_priv(dev);
	u8 btCR9346, btConfig3;
	bool bActionAllowed = true, bTurnOffBB = true;
	u8 u1bTmp;
	int i;
	bool bResult = true;
	u8 QueueID;

	/* Serialize power-state changes; refuse re-entry. */
	if (priv->SetRFPowerStateInProgress == true)
		return false;

	priv->SetRFPowerStateInProgress = true;

	/* Unlock the config registers (CR9346 bits 7:6) and enable the
	 * parameter registers (CONFIG3_PARM_En) for the duration. */
	btCR9346 = read_nic_byte(dev, CR9346);
	write_nic_byte(dev, CR9346, (btCR9346 | 0xC0));

	btConfig3 = read_nic_byte(dev, CONFIG3);
	write_nic_byte(dev, CONFIG3, (btConfig3 | CONFIG3_PARM_En));

	switch (eRFPowerState) {
	case eRfOn:
		write_nic_word(dev, 0x37C, 0x00EC);

		/* turn on AFE */
		write_nic_byte(dev, 0x54, 0x00);
		write_nic_byte(dev, 0x62, 0x00);

		/* turn on RF */
		RF_WriteReg(dev, 0x0, 0x009f);
		udelay(500);
		RF_WriteReg(dev, 0x4, 0x0972);
		udelay(500);

		/* turn on RF again */
		RF_WriteReg(dev, 0x0, 0x009f);
		udelay(500);
		RF_WriteReg(dev, 0x4, 0x0972);
		udelay(500);

		/* turn on BB */
		write_phy_ofdm(dev, 0x10, 0x40);
		write_phy_ofdm(dev, 0x12, 0x40);

		/* Avoid power down at init time. */
		write_nic_byte(dev, CONFIG4, priv->RFProgType);

		u1bTmp = read_nic_byte(dev, 0x24E);
		write_nic_byte(dev, 0x24E, (u1bTmp & (~(BIT5 | BIT6))));
		break;
	case eRfSleep:
		/* Drain all six TX queues first; give up (and stay awake)
		 * if traffic is still pending after the polling budget. */
		for (QueueID = 0, i = 0; QueueID < 6;) {
			if (get_curr_tx_free_desc(dev, QueueID) == priv->txringcount) {
				QueueID++;
				continue;
			} else {
				priv->TxPollingTimes++;
				if (priv->TxPollingTimes >= LPS_MAX_SLEEP_WAITING_TIMES_87SE) {
					bActionAllowed = false;
					break;
				} else
					udelay(10);
			}
		}

		if (bActionAllowed) {
			/* turn off BB RXIQ matrix to cut off rx signal */
			write_phy_ofdm(dev, 0x10, 0x00);
			write_phy_ofdm(dev, 0x12, 0x00);

			/* turn off RF */
			RF_WriteReg(dev, 0x4, 0x0000);
			RF_WriteReg(dev, 0x0, 0x0000);

			/* turn off AFE except PLL */
			write_nic_byte(dev, 0x62, 0xff);
			write_nic_byte(dev, 0x54, 0xec);
			mdelay(1);

			{
				/* Poll reg 0x24F until the hardware reports a
				 * state that allows the BB to be switched off.
				 * NOTE(review): this inner `i` shadows the outer
				 * one and is never compared against a limit here;
				 * only TxPollingTimes bounds the loop. */
				int i = 0;

				while (true) {
					u8 tmp24F = read_nic_byte(dev, 0x24f);

					if ((tmp24F == 0x01) || (tmp24F == 0x09)) {
						bTurnOffBB = true;
						break;
					} else {
						udelay(10);
						i++;
						priv->TxPollingTimes++;

						if (priv->TxPollingTimes >= LPS_MAX_SLEEP_WAITING_TIMES_87SE)
						{
							bTurnOffBB = false;
							break;
						} else
							udelay(10);
					}
				}
			}

			if (bTurnOffBB) {
				/* turn off BB */
				u1bTmp = read_nic_byte(dev, 0x24E);
				write_nic_byte(dev, 0x24E, (u1bTmp | BIT5 | BIT6));

				/* turn off AFE PLL */
				write_nic_byte(dev, 0x54, 0xFC);
				write_nic_word(dev, 0x37C, 0x00FC);
			}
		}
		break;
	case eRfOff:
		/* Best-effort TX drain; unlike sleep, power-off proceeds
		 * even when the queues never empty. */
		for (QueueID = 0, i = 0; QueueID < 6;) {
			if (get_curr_tx_free_desc(dev, QueueID) == priv->txringcount) {
				QueueID++;
				continue;
			} else {
				udelay(10);
				i++;
			}

			if (i >= MAX_DOZE_WAITING_TIMES_85B)
				break;
		}

		/* turn off BB RXIQ matrix to cut off rx signal */
		write_phy_ofdm(dev, 0x10, 0x00);
		write_phy_ofdm(dev, 0x12, 0x00);

		/* turn off RF */
		RF_WriteReg(dev, 0x4, 0x0000);
		RF_WriteReg(dev, 0x0, 0x0000);

		/* turn off AFE except PLL */
		write_nic_byte(dev, 0x62, 0xff);
		write_nic_byte(dev, 0x54, 0xec);
		mdelay(1);

		{
			/* Bounded poll of reg 0x24F before killing the BB. */
			int i = 0;

			while (true) {
				u8 tmp24F = read_nic_byte(dev, 0x24f);

				if ((tmp24F == 0x01) || (tmp24F == 0x09)) {
					bTurnOffBB = true;
					break;
				} else {
					bTurnOffBB = false;
					udelay(10);
					i++;
				}

				if (i > MAX_POLLING_24F_TIMES_87SE)
					break;
			}
		}

		if (bTurnOffBB) {
			/* turn off BB */
			u1bTmp = read_nic_byte(dev, 0x24E);
			write_nic_byte(dev, 0x24E, (u1bTmp | BIT5 | BIT6));

			/* turn off AFE PLL (80M) */
			write_nic_byte(dev, 0x54, 0xFC);
			write_nic_word(dev, 0x37C, 0x00FC);
		}
		break;
	}

	/* Re-lock the config registers. */
	btConfig3 &= ~(CONFIG3_PARM_En);
	write_nic_byte(dev, CONFIG3, btConfig3);

	btCR9346 &= ~(0xC0);
	write_nic_byte(dev, CR9346, btCR9346);

	if (bResult && bActionAllowed)
		priv->eRFPowerState = eRFPowerState;

	priv->SetRFPowerStateInProgress = false;

	return bResult && bActionAllowed;
}

/* Power-save entry point: request RF sleep via the state machine. */
void rtl8225z4_rf_sleep(struct net_device *dev)
{
	MgntActSet_RF_State(dev, eRfSleep, RF_CHANGE_BY_PS);
}

/* Power-save exit point: request RF on via the state machine. */
void rtl8225z4_rf_wakeup(struct net_device *dev)
{
	MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_PS);
}
gpl-2.0
sztupy/samsung_kernel_galaxys_gb
drivers/staging/pohmelfs/dir.c
1014
25904
/*
 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/jhash.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/pagemap.h>

#include "netfs.h"

/*
 * Three-way comparison of a cached name's hash against @hash.
 * Returns -1 when the node's hash is greater, 1 when smaller, 0 on a
 * match; this is the ordering function for the per-directory rbtree.
 */
static int pohmelfs_cmp_hash(struct pohmelfs_name *n, u32 hash)
{
	if (n->hash > hash)
		return -1;
	if (n->hash < hash)
		return 1;

	return 0;
}

/*
 * Walk the directory's name rbtree looking for @hash.
 * "Unprecise": returns the last node visited, which is the exact match
 * when one exists, or merely the closest node otherwise -- callers must
 * check ->hash themselves. Returns NULL only for an empty tree.
 */
static struct pohmelfs_name *pohmelfs_search_hash_unprecise(struct pohmelfs_inode *pi, u32 hash)
{
	struct rb_node *n = pi->hash_root.rb_node;
	struct pohmelfs_name *tmp = NULL;
	int cmp;

	while (n) {
		tmp = rb_entry(n, struct pohmelfs_name, hash_node);

		cmp = pohmelfs_cmp_hash(tmp, hash);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			break;
	}

	return tmp;
}

/*
 * Exact-match lookup in the directory name cache: like the unprecise
 * search above, but returns NULL unless the node's hash equals @hash.
 * Caller must hold the parent's offset_lock.
 */
struct pohmelfs_name *pohmelfs_search_hash(struct pohmelfs_inode *pi, u32 hash)
{
	struct pohmelfs_name *tmp;

	tmp = pohmelfs_search_hash_unprecise(pi, hash);
	if (tmp && (tmp->hash == hash))
		return tmp;

	return NULL;
}

/* Unlink @node from @parent's hash rbtree; does not free anything. */
static void __pohmelfs_name_del(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
{
	rb_erase(&node->hash_node, &parent->hash_root);
}

/*
 * Remove name cache entry from its caches and free it.
 */
static void pohmelfs_name_free(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
{
	__pohmelfs_name_del(parent, node);
	list_del(&node->sync_create_entry);
	kfree(node);
}

/*
 * Insert @new into the directory's hash rbtree.
 * On a hash collision the existing node is NOT replaced: its ->ino is
 * updated to the new inode number and the existing node is returned
 * (caller treats that as -EEXIST and frees @new). Returns NULL when the
 * node was linked into the tree.
 */
static struct pohmelfs_name *pohmelfs_insert_hash(struct pohmelfs_inode *pi, struct pohmelfs_name *new)
{
	struct rb_node **n = &pi->hash_root.rb_node, *parent = NULL;
	struct pohmelfs_name *ret = NULL, *tmp;
	int cmp;

	while (*n) {
		parent = *n;

		tmp = rb_entry(parent, struct pohmelfs_name, hash_node);

		cmp = pohmelfs_cmp_hash(tmp, new->hash);
		if (cmp < 0)
			n = &parent->rb_left;
		else if (cmp > 0)
			n = &parent->rb_right;
		else {
			ret = tmp;
			break;
		}
	}

	if (ret) {
		printk("%s: exist: parent: %llu, ino: %llu, hash: %x, len: %u, data: '%s', "
				"new: ino: %llu, hash: %x, len: %u, data: '%s'.\n",
				__func__, pi->ino, ret->ino, ret->hash, ret->len, ret->data,
				new->ino, new->hash, new->len, new->data);
		/* Keep the existing entry but point it at the new inode. */
		ret->ino = new->ino;
		return ret;
	}

	rb_link_node(&new->hash_node, parent, n);
	rb_insert_color(&new->hash_node, &pi->hash_root);

	return NULL;
}

/*
 * Free name cache for given inode.
 */
void pohmelfs_free_names(struct pohmelfs_inode *parent)
{
	struct rb_node *rb_node;
	struct pohmelfs_name *n;

	/* Advance the iterator before freeing, as freeing erases the node. */
	for (rb_node = rb_first(&parent->hash_root); rb_node;) {
		n = rb_entry(rb_node, struct pohmelfs_name, hash_node);
		rb_node = rb_next(rb_node);

		pohmelfs_name_free(parent, n);
	}
}

/* Account for a name's removal in the directory's total length. */
static void pohmelfs_fix_offset(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
{
	parent->total_len -= node->len;
}

/*
 * Free name cache entry helper.
 */
void pohmelfs_name_del(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
{
	pohmelfs_fix_offset(parent, node);
	pohmelfs_name_free(parent, node);
}

/*
 * Insert new name cache entry into all hash cache.
 */
static int pohmelfs_insert_name(struct pohmelfs_inode *parent, struct pohmelfs_name *n)
{
	struct pohmelfs_name *name;

	/* A non-NULL return means a duplicate already existed. */
	name = pohmelfs_insert_hash(parent, n);
	if (name)
		return -EEXIST;

	parent->total_len += n->len;
	list_add_tail(&n->sync_create_entry, &parent->sync_create_list);

	return 0;
}

/*
 * Allocate new name cache entry.
 * The name bytes live directly after the struct in a single allocation;
 * the caller fills ->ino/->mode/->len/->hash and copies the name.
 */
static struct pohmelfs_name *pohmelfs_name_alloc(unsigned int len)
{
	struct pohmelfs_name *n;

	n = kzalloc(sizeof(struct pohmelfs_name) + len, GFP_KERNEL);
	if (!n)
		return NULL;

	INIT_LIST_HEAD(&n->sync_create_entry);

	/* Name storage is the tail of this same allocation. */
	n->data = (char *)(n+1);

	return n;
}

/*
 * Add new name entry into directory's cache.
 * An already-existing entry is not an error: the duplicate allocation
 * is freed and 0 is returned.
 */
static int pohmelfs_add_dir(struct pohmelfs_sb *psb, struct pohmelfs_inode *parent,
		struct pohmelfs_inode *npi, struct qstr *str, unsigned int mode, int link)
{
	int err = -ENOMEM;
	struct pohmelfs_name *n;

	n = pohmelfs_name_alloc(str->len + 1);
	if (!n)
		goto err_out_exit;

	n->ino = npi->ino;
	n->mode = mode;
	n->len = str->len;
	n->hash = str->hash;
	sprintf(n->data, "%s", str->name);

	mutex_lock(&parent->offset_lock);
	err = pohmelfs_insert_name(parent, n);
	mutex_unlock(&parent->offset_lock);

	if (err) {
		if (err != -EEXIST)
			goto err_out_free;
		kfree(n);
	}

	return 0;

err_out_free:
	kfree(n);
err_out_exit:
	return err;
}

/*
 * Create new inode for given parameters (name, inode info, parent).
 * This does not create object on the server, it will be synced there during writeback.
 */
struct pohmelfs_inode *pohmelfs_new_inode(struct pohmelfs_sb *psb,
		struct pohmelfs_inode *parent, struct qstr *str,
		struct netfs_inode_info *info, int link)
{
	struct inode *new = NULL;
	struct pohmelfs_inode *npi;
	int err = -EEXIST;

	dprintk("%s: creating inode: parent: %llu, ino: %llu, str: %p.\n",
			__func__, (parent) ? parent->ino : 0, info->ino, str);

	err = -ENOMEM;
	new = iget_locked(psb->sb, info->ino);
	if (!new)
		goto err_out_exit;

	npi = POHMELFS_I(new);
	npi->ino = info->ino;
	err = 0;

	if (new->i_state & I_NEW) {
		dprintk("%s: filling VFS inode: %lu/%llu.\n",
				__func__, new->i_ino, info->ino);
		pohmelfs_fill_inode(new, info);

		/* Fresh directories get "." and ".." cached immediately. */
		if (S_ISDIR(info->mode)) {
			struct qstr s;

			s.name = ".";
			s.len = 1;
			s.hash = jhash(s.name, s.len, 0);

			err = pohmelfs_add_dir(psb, npi, npi, &s, info->mode, 0);
			if (err)
				goto err_out_put;

			s.name = "..";
			s.len = 2;
			s.hash = jhash(s.name, s.len, 0);

			/* ".." points at the parent when one exists, else at
			 * itself (the root directory case). */
			err = pohmelfs_add_dir(psb, npi, (parent) ? parent : npi, &s,
					(parent) ? parent->vfs_inode.i_mode : npi->vfs_inode.i_mode, 0);
			if (err)
				goto err_out_put;
		}
	}

	if (str) {
		if (parent) {
			err = pohmelfs_add_dir(psb, parent, npi, str, info->mode, link);

			dprintk("%s: %s inserted name: '%s', new_offset: %llu, ino: %llu, parent: %llu.\n",
					__func__, (err) ? "unsuccessfully" : "successfully",
					str->name, parent->total_len, info->ino, parent->ino);

			if (err && err != -EEXIST)
				goto err_out_put;
		}
	}

	if (new->i_state & I_NEW) {
		if (parent)
			mark_inode_dirty(&parent->vfs_inode);
		mark_inode_dirty(new);
	}

	/* Locally created: we own it until it is synced to the server. */
	set_bit(NETFS_INODE_OWNED, &npi->state);
	npi->lock_type = POHMELFS_WRITE_LOCK;
	unlock_new_inode(new);

	return npi;

err_out_put:
	printk("%s: putting inode: %p, npi: %p, error: %d.\n",
			__func__, new, npi, err);
	iput(new);
err_out_exit:
	return ERR_PTR(err);
}

/*
 * Transaction completion callback for NETFS_READDIR: record the error,
 * wake the waiter in pohmelfs_sync_remote_dir() and drop the inode
 * reference taken before the command was sent.
 */
static int pohmelfs_remote_sync_complete(struct page **pages, unsigned int page_num,
		void *private, int err)
{
	struct pohmelfs_inode *pi = private;
	struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);

	dprintk("%s: ino: %llu, err: %d.\n", __func__, pi->ino, err);

	if (err)
		pi->error = err;
	wake_up(&psb->wait);
	pohmelfs_put_inode(pi);

	return err;
}

/*
 * Receive directory content from the server.
 * This should be only done for objects, which were not created locally,
 * and which were not synced previously.
 */
static int pohmelfs_sync_remote_dir(struct pohmelfs_inode *pi)
{
	struct inode *inode = &pi->vfs_inode;
	struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
	long ret = psb->wait_on_page_timeout;
	int err;

	dprintk("%s: dir: %llu, state: %lx: remote_synced: %d.\n",
			__func__, pi->ino, pi->state, test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state));

	/* Remote listing already cached -- nothing to do. */
	if (test_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &pi->state))
		return 0;

	/* Hold a reference for the completion callback; it is dropped in
	 * pohmelfs_remote_sync_complete() via pohmelfs_put_inode(). */
	if (!igrab(inode)) {
		err = -ENOENT;
		goto err_out_exit;
	}

	err = pohmelfs_meta_command(pi, NETFS_READDIR, NETFS_TRANS_SINGLE_DST,
			pohmelfs_remote_sync_complete, pi, 0);
	if (err)
		goto err_out_exit;

	pi->error = 0;
	ret = wait_event_interruptible_timeout(psb->wait,
			test_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &pi->state) || pi->error, ret);
	dprintk("%s: awake dir: %llu, ret: %ld, err: %d.\n", __func__, pi->ino, ret, pi->error);
	if (ret <= 0) {
		/* ret == 0 means the wait timed out; negative means it was
		 * interrupted by a signal. */
		err = ret;
		if (!err)
			err = -ETIMEDOUT;
		goto err_out_exit;
	}

	if (pi->error)
		return pi->error;

	return 0;

err_out_exit:
	clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);

	return err;
}

/*
 * Directory open: no private state yet. file->private_data doubles as
 * the readdir hash cursor and starts at NULL (read from the beginning).
 */
static int pohmelfs_dir_open(struct inode *inode, struct file *file)
{
	file->private_data = NULL;
	return 0;
}

/*
 * VFS readdir callback. Syncs directory content from server if needed,
 * and provides direntry info to the userspace.
 */
static int pohmelfs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct pohmelfs_inode *pi = POHMELFS_I(inode);
	struct pohmelfs_name *n;
	struct rb_node *rb_node;
	int err = 0, mode;
	u64 len;

	dprintk("%s: parent: %llu, fpos: %llu, hash: %08lx.\n",
			__func__, pi->ino, (u64)file->f_pos,
			(unsigned long)file->private_data);
#if 0
	err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
	if (err)
		return err;
#endif
	err = pohmelfs_sync_remote_dir(pi);
	if (err)
		return err;

	/* private_data holds the last-emitted hash while iterating and is
	 * set equal to f_pos at end-of-directory; matching values mean a
	 * previous pass already consumed everything. */
	if (file->private_data && (file->private_data == (void *)(unsigned long)file->f_pos))
		return 0;

	mutex_lock(&pi->offset_lock);
	/* Resume from the cursor hash (0/NULL starts at the tree root path). */
	n = pohmelfs_search_hash_unprecise(pi, (unsigned long)file->private_data);

	while (n) {
		/* Convert S_IF* bits into the DT_* value filldir expects. */
		mode = (n->mode >> 12) & 15;

		dprintk("%s: offset: %llu, parent ino: %llu, name: '%s', len: %u, ino: %llu, "
				"mode: %o/%o, fpos: %llu, hash: %08x.\n",
				__func__, file->f_pos, pi->ino, n->data, n->len,
				n->ino, n->mode, mode, file->f_pos, n->hash);

		file->private_data = (void *)(unsigned long)n->hash;

		len = n->len;
		err = filldir(dirent, n->data, n->len, file->f_pos, n->ino, mode);
		if (err < 0) {
			/* User buffer full: stop without error; the cursor
			 * already points at this entry for the next call. */
			dprintk("%s: err: %d.\n", __func__, err);
			err = 0;
			break;
		}

		file->f_pos += len;

		rb_node = rb_next(&n->hash_node);

		if (!rb_node || (rb_node == &n->hash_node)) {
			/* End of tree: mark EOF (private_data == f_pos). */
			file->private_data = (void *)(unsigned long)file->f_pos;
			break;
		}

		n = rb_entry(rb_node, struct pohmelfs_name, hash_node);
	}
	mutex_unlock(&pi->offset_lock);

	return err;
}

/*
 * Directory lseek: resets the readdir hash cursor.
 * NOTE(review): @origin (SEEK_SET/CUR/END) is ignored -- every seek is
 * treated as absolute; confirm whether relative seeks matter here.
 */
static loff_t pohmelfs_dir_lseek(struct file *file, loff_t offset, int origin)
{
	file->f_pos = offset;
	file->private_data = NULL;
	return offset;
}

const struct file_operations pohmelfs_dir_fops = {
	.open = pohmelfs_dir_open,
	.read = generic_read_dir,
	.llseek = pohmelfs_dir_lseek,
	.readdir = pohmelfs_readdir,
};

/*
 * Lookup single object on server.
 */
static int pohmelfs_lookup_single(struct pohmelfs_inode *parent,
		struct qstr *str, u64 ino)
{
	struct pohmelfs_sb *psb = POHMELFS_SB(parent->vfs_inode.i_sb);
	long ret = msecs_to_jiffies(5000);
	int err;

	/* The flag is cleared by the response path; we wait on it below. */
	set_bit(NETFS_COMMAND_PENDING, &parent->state);
	err = pohmelfs_meta_command_data(parent, parent->ino, NETFS_LOOKUP,
			(char *)str->name, NETFS_TRANS_SINGLE_DST, NULL, NULL, ino);
	if (err)
		goto err_out_exit;

	err = 0;
	ret = wait_event_interruptible_timeout(psb->wait,
			!test_bit(NETFS_COMMAND_PENDING, &parent->state), ret);
	if (ret <= 0) {
		/* Timed out (ret == 0) or interrupted (ret < 0). */
		err = ret;
		if (!err)
			err = -ETIMEDOUT;
	}

	if (err)
		goto err_out_exit;

	return 0;

err_out_exit:
	clear_bit(NETFS_COMMAND_PENDING, &parent->state);

	printk("%s: failed: parent: %llu, ino: %llu, name: '%s', err: %d.\n",
			__func__, parent->ino, ino, str->name, err);

	return err;
}

/*
 * VFS lookup callback.
 * We first try to get inode number from local name cache, if we have one,
 * then inode can be found in inode cache. If there is no inode or no object in
 * local cache, try to lookup it on server. This only should be done for directories,
 * which were not created locally, otherwise remote server does not know about dir at all,
 * so no need to try to know that.
*/ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct pohmelfs_inode *parent = POHMELFS_I(dir); struct pohmelfs_name *n; struct inode *inode = NULL; unsigned long ino = 0; int err, lock_type = POHMELFS_READ_LOCK, need_lock = 1; struct qstr str = dentry->d_name; if ((nd->intent.open.flags & O_ACCMODE) > 1) lock_type = POHMELFS_WRITE_LOCK; if (test_bit(NETFS_INODE_OWNED, &parent->state)) { if (lock_type == parent->lock_type) need_lock = 0; if ((lock_type == POHMELFS_READ_LOCK) && (parent->lock_type == POHMELFS_WRITE_LOCK)) need_lock = 0; } if ((lock_type == POHMELFS_READ_LOCK) && !test_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &parent->state)) need_lock = 1; str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0); mutex_lock(&parent->offset_lock); n = pohmelfs_search_hash(parent, str.hash); if (n) ino = n->ino; mutex_unlock(&parent->offset_lock); dprintk("%s: start ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx, need_lock: %d.\n", __func__, ino, inode, str.name, str.hash, parent->state, need_lock); if (ino) { inode = ilookup(dir->i_sb, ino); if (inode) goto out; } dprintk("%s: no inode dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n", __func__, dir, parent->ino, str.name, str.len, parent->state, ino); if (!ino) { if (!need_lock) goto out; } err = pohmelfs_data_lock(parent, 0, ~0, lock_type); if (err) goto out; err = pohmelfs_lookup_single(parent, &str, ino); if (err) goto out; if (!ino) { mutex_lock(&parent->offset_lock); n = pohmelfs_search_hash(parent, str.hash); if (n) ino = n->ino; mutex_unlock(&parent->offset_lock); } if (ino) { inode = ilookup(dir->i_sb, ino); dprintk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n", __func__, ino, inode, str.name, str.hash); if (!inode) { dprintk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n", __func__, ino, str.name, str.hash); /* return NULL; */ return ERR_PTR(-EACCES); } } else { printk("%s: No inode number : 
name: '%s', hash: %x.\n", __func__, str.name, str.hash); } out: return d_splice_alias(inode, dentry); } /* * Create new object in local cache. Object will be synced to server * during writeback for given inode. */ struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb, struct pohmelfs_inode *parent, struct qstr *str, u64 start, int mode) { struct pohmelfs_inode *npi; int err = -ENOMEM; struct netfs_inode_info info; dprintk("%s: name: '%s', mode: %o, start: %llu.\n", __func__, str->name, mode, start); info.mode = mode; info.ino = start; if (!start) info.ino = pohmelfs_new_ino(psb); info.nlink = S_ISDIR(mode) ? 2 : 1; info.uid = current_fsuid(); info.gid = current_fsgid(); info.size = 0; info.blocksize = 512; info.blocks = 0; info.rdev = 0; info.version = 0; npi = pohmelfs_new_inode(psb, parent, str, &info, !!start); if (IS_ERR(npi)) { err = PTR_ERR(npi); goto err_out_unlock; } return npi; err_out_unlock: dprintk("%s: err: %d.\n", __func__, err); return ERR_PTR(err); } /* * Create local object and bind it to dentry. */ static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry, u64 start, int mode) { struct pohmelfs_sb *psb = POHMELFS_SB(dir->i_sb); struct pohmelfs_inode *npi, *parent; struct qstr str = dentry->d_name; int err; parent = POHMELFS_I(dir); err = pohmelfs_data_lock(parent, 0, ~0, POHMELFS_WRITE_LOCK); if (err) return err; str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0); npi = pohmelfs_create_entry_local(psb, parent, &str, start, mode); if (IS_ERR(npi)) return PTR_ERR(npi); d_instantiate(dentry, &npi->vfs_inode); dprintk("%s: parent: %llu, inode: %llu, name: '%s', parent_nlink: %d, nlink: %d.\n", __func__, parent->ino, npi->ino, dentry->d_name.name, (signed)dir->i_nlink, (signed)npi->vfs_inode.i_nlink); return 0; } /* * VFS create and mkdir callbacks. 
 */
static int pohmelfs_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return pohmelfs_create_entry(dir, dentry, 0, mode);
}

static int pohmelfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int err;

	/* Optimistically bump the parent's link count for the new "..";
	 * undone on failure. */
	inode_inc_link_count(dir);
	err = pohmelfs_create_entry(dir, dentry, 0, mode | S_IFDIR);
	if (err)
		inode_dec_link_count(dir);

	return err;
}

/*
 * Drop @dentry from @dir: remove the cached name, notify the server
 * when the child was remotely synced, and release the local inode
 * accounting. Shared by unlink and rmdir.
 */
static int pohmelfs_remove_entry(struct inode *dir, struct dentry *dentry)
{
	struct pohmelfs_sb *psb = POHMELFS_SB(dir->i_sb);
	struct inode *inode = dentry->d_inode;
	struct pohmelfs_inode *parent = POHMELFS_I(dir), *pi = POHMELFS_I(inode);
	struct pohmelfs_name *n;
	int err = -ENOENT;
	struct qstr str = dentry->d_name;

	err = pohmelfs_data_lock(parent, 0, ~0, POHMELFS_WRITE_LOCK);
	if (err)
		return err;

	str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);

	dprintk("%s: dir_ino: %llu, inode: %llu, name: '%s', nlink: %d.\n",
			__func__, parent->ino, pi->ino, str.name, (signed)inode->i_nlink);

	BUG_ON(!inode);

	mutex_lock(&parent->offset_lock);
	n = pohmelfs_search_hash(parent, str.hash);
	if (n) {
		pohmelfs_fix_offset(parent, n);

		/* The server only knows about synced children. */
		if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
			pohmelfs_remove_child(pi, n);

		pohmelfs_name_free(parent, n);
		err = 0;
	}
	mutex_unlock(&parent->offset_lock);

	if (!err) {
		/* Return the file's size to the free-space accounting. */
		psb->avail_size += inode->i_size;

		pohmelfs_inode_del_inode(psb, pi);

		mark_inode_dirty(dir);

		inode->i_ctime = dir->i_ctime;
		if (inode->i_nlink)
			inode_dec_link_count(inode);
	}

	return err;
}

/*
 * Unlink and rmdir VFS callbacks.
 */
static int pohmelfs_unlink(struct inode *dir, struct dentry *dentry)
{
	return pohmelfs_remove_entry(dir, dentry);
}

static int pohmelfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int err;
	struct inode *inode = dentry->d_inode;

	dprintk("%s: parent: %llu, inode: %llu, name: '%s', parent_nlink: %d, nlink: %d.\n",
			__func__, POHMELFS_I(dir)->ino, POHMELFS_I(inode)->ino,
			dentry->d_name.name, (signed)dir->i_nlink, (signed)inode->i_nlink);

	err = pohmelfs_remove_entry(dir, dentry);
	if (!err) {
		/* Directory removal drops both the parent's ".." back-link
		 * and the directory's own link. */
		inode_dec_link_count(dir);
		inode_dec_link_count(inode);
	}

	return err;
}

/*
 * Link creation is synchronous.
 * I'm lazy.
 * Earth is somewhat round.
 */
static int pohmelfs_create_link(struct pohmelfs_inode *parent, struct qstr *obj,
		struct pohmelfs_inode *target, struct qstr *tstr)
{
	/*
	 * Send a NETFS_LINK command for either a hard link (@target set,
	 * @tstr NULL) or a symlink (@target NULL, @tstr is the target
	 * string). Wire payload: "<parent path>/<obj>|<target>".
	 */
	struct super_block *sb = parent->vfs_inode.i_sb;
	struct pohmelfs_sb *psb = POHMELFS_SB(sb);
	struct netfs_cmd *cmd;
	struct netfs_trans *t;
	void *data;
	int err, parent_len, target_len = 0, cur_len, path_size = 0;

	err = pohmelfs_data_lock(parent, 0, ~0, POHMELFS_WRITE_LOCK);
	if (err)
		return err;

	/* Flush the parent so the server sees a consistent path. */
	err = sb->s_op->write_inode(&parent->vfs_inode, 0);
	if (err)
		goto err_out_exit;

	if (tstr)
		target_len = tstr->len;

	parent_len = pohmelfs_path_length(parent);
	if (target)
		target_len += pohmelfs_path_length(target);

	if (parent_len < 0) {
		err = parent_len;
		goto err_out_exit;
	}

	if (target_len < 0) {
		err = target_len;
		goto err_out_exit;
	}

	t = netfs_trans_alloc(psb, parent_len + target_len + obj->len + 2, 0, 0);
	if (!t) {
		err = -ENOMEM;
		goto err_out_exit;
	}
	cur_len = netfs_trans_cur_len(t);

	cmd = netfs_trans_current(t);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err_out_free;
	}

	data = (void *)(cmd + 1);

	cur_len -= sizeof(struct netfs_cmd);

	err = pohmelfs_construct_path_string(parent, data, parent_len);
	if (err > 0) {
		/* Do not place null-byte before the slash */
		path_size = err - 1;
		cur_len -= path_size;

		err = snprintf(data + path_size, cur_len, "/%s|", obj->name);

		path_size += err;
		cur_len -= err;

		cmd->ext = path_size - 1; /* No | symbol */

		if (target) {
			err = pohmelfs_construct_path_string(target, data + path_size, target_len);
			if (err > 0) {
				path_size += err;
				cur_len -= err;
			}
		}
	}

	if (err < 0)
		goto err_out_free;

	cmd->start = 0;

	if (!target && tstr) {
		/* Symlink: append the raw target string after the '|'. */
		if (tstr->len > cur_len - 1) {
			err = -ENAMETOOLONG;
			goto err_out_free;
		}

		err = snprintf(data + path_size, cur_len, "%s", tstr->name) + 1; /* 0-byte */
		path_size += err;
		cur_len -= err;
		cmd->start = 1;
	}

	dprintk("%s: parent: %llu, obj: '%s', target_inode: %llu, target_str: '%s', full: '%s'.\n",
			__func__, parent->ino, obj->name,
			(target) ? target->ino : 0, (tstr) ? tstr->name : NULL, (char *)data);

	cmd->cmd = NETFS_LINK;
	cmd->size = path_size;
	cmd->id = parent->ino;
	netfs_convert_cmd(cmd);

	netfs_trans_update(cmd, t, path_size);

	err = netfs_trans_finish(t, psb);
	if (err)
		goto err_out_exit;

	return 0;

err_out_free:
	t->result = err;
	netfs_trans_put(t);
err_out_exit:
	return err;
}

/*
 * VFS hard and soft link callbacks.
 */
static int pohmelfs_link(struct dentry *old_dentry, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct pohmelfs_inode *pi = POHMELFS_I(inode);
	int err;
	struct qstr str = dentry->d_name;

	str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);

	err = inode->i_sb->s_op->write_inode(inode, 0);
	if (err)
		return err;

	err = pohmelfs_create_link(POHMELFS_I(dir), &str, pi, NULL);
	if (err)
		return err;

	/* Reuse the existing inode number for the new name. */
	return pohmelfs_create_entry(dir, dentry, pi->ino, inode->i_mode);
}

static int pohmelfs_symlink(struct inode *dir, struct dentry *dentry,
		const char *symname)
{
	struct qstr sym_str;
	struct qstr str = dentry->d_name;
	struct inode *inode;
	int err;

	str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);

	sym_str.name = symname;
	sym_str.len = strlen(symname);

	/* Tell the server first, then create the local symlink inode. */
	err = pohmelfs_create_link(POHMELFS_I(dir), &str, NULL, &sym_str);
	if (err)
		goto err_out_exit;

	err = pohmelfs_create_entry(dir, dentry, 0, S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO);
	if (err)
		goto err_out_exit;

	inode = dentry->d_inode;

	err = page_symlink(inode, symname, sym_str.len + 1);
	if (err)
		goto err_out_put;

	return 0;

err_out_put:
	iput(inode);
err_out_exit:
	return err;
}

/*
 * Send a NETFS_RENAME command describing the move of @pi under @parent
 * with new name @str. Wire payload: "<old path>|<new parent path>/<name>".
 */
static int pohmelfs_send_rename(struct pohmelfs_inode *pi, struct pohmelfs_inode *parent,
		struct qstr *str)
{
	int path_len, err, total_len = 0, inode_len, parent_len;
	char *path;
	struct netfs_trans *t;
	struct netfs_cmd *cmd;
	struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);

	parent_len = pohmelfs_path_length(parent);
	inode_len = pohmelfs_path_length(pi);

	if (parent_len < 0 || inode_len < 0)
		return -EINVAL;

	/* +3: '|' separator, '/' and trailing 0-byte. */
	path_len = parent_len + inode_len + str->len + 3;

	t = netfs_trans_alloc(psb, path_len, 0, 0);
	if (!t)
		return -ENOMEM;

	cmd = netfs_trans_current(t);
	path = (char *)(cmd + 1);

	err = pohmelfs_construct_path_string(pi, path, inode_len);
	if (err < 0)
		goto err_out_unlock;

	cmd->ext = err;

	path += err;
	total_len += err;
	path_len -= err;

	*path = '|';
	path++;
	total_len++;
	path_len--;

	err = pohmelfs_construct_path_string(parent, path, parent_len);
	if (err < 0)
		goto err_out_unlock;

	/*
	 * Do not place a null-byte before the final slash and the name.
*/ err--; path += err; total_len += err; path_len -= err; err = snprintf(path, path_len - 1, "/%s", str->name); total_len += err + 1; /* 0 symbol */ path_len -= err + 1; cmd->cmd = NETFS_RENAME; cmd->id = pi->ino; cmd->start = parent->ino; cmd->size = total_len; netfs_convert_cmd(cmd); netfs_trans_update(cmd, t, total_len); return netfs_trans_finish(t, psb); err_out_unlock: netfs_trans_free(t); return err; } static int pohmelfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *inode = old_dentry->d_inode; struct pohmelfs_inode *old_parent, *pi, *new_parent; struct qstr str = new_dentry->d_name; struct pohmelfs_name *n; unsigned int old_hash; int err = -ENOENT; pi = POHMELFS_I(inode); old_parent = POHMELFS_I(old_dir); if (new_dir) new_dir->i_sb->s_op->write_inode(new_dir, 0); old_hash = jhash(old_dentry->d_name.name, old_dentry->d_name.len, 0); str.hash = jhash(new_dentry->d_name.name, new_dentry->d_name.len, 0); str.len = new_dentry->d_name.len; str.name = new_dentry->d_name.name; str.hash = jhash(new_dentry->d_name.name, new_dentry->d_name.len, 0); if (new_dir) { new_parent = POHMELFS_I(new_dir); err = -ENOTEMPTY; if (S_ISDIR(inode->i_mode) && new_parent->total_len <= 3) goto err_out_exit; } else { new_parent = old_parent; } dprintk("%s: ino: %llu, parent: %llu, name: '%s' -> parent: %llu, name: '%s', i_size: %llu.\n", __func__, pi->ino, old_parent->ino, old_dentry->d_name.name, new_parent->ino, new_dentry->d_name.name, inode->i_size); if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state) && test_bit(NETFS_INODE_OWNED, &pi->state)) { err = pohmelfs_send_rename(pi, new_parent, &str); if (err) goto err_out_exit; } n = pohmelfs_name_alloc(str.len + 1); if (!n) goto err_out_exit; mutex_lock(&new_parent->offset_lock); n->ino = pi->ino; n->mode = inode->i_mode; n->len = str.len; n->hash = str.hash; sprintf(n->data, "%s", str.name); err = pohmelfs_insert_name(new_parent, n); 
mutex_unlock(&new_parent->offset_lock); if (err) goto err_out_exit; mutex_lock(&old_parent->offset_lock); n = pohmelfs_search_hash(old_parent, old_hash); if (n) pohmelfs_name_del(old_parent, n); mutex_unlock(&old_parent->offset_lock); mark_inode_dirty(inode); mark_inode_dirty(&new_parent->vfs_inode); WARN_ON_ONCE(list_empty(&inode->i_dentry)); return 0; err_out_exit: clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state); mutex_unlock(&inode->i_mutex); return err; } /* * POHMELFS directory inode operations. */ const struct inode_operations pohmelfs_dir_inode_ops = { .link = pohmelfs_link, .symlink = pohmelfs_symlink, .unlink = pohmelfs_unlink, .mkdir = pohmelfs_mkdir, .rmdir = pohmelfs_rmdir, .create = pohmelfs_create, .lookup = pohmelfs_lookup, .setattr = pohmelfs_setattr, .rename = pohmelfs_rename, };
gpl-2.0
seem-sky/linux
drivers/tty/serial/bfin_sport_uart.c
1270
23295
/*
 * Blackfin On-Chip Sport Emulated UART Driver
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */

/*
 * This driver and the hardware supported are in term of EE-191 of ADI.
 * http://www.analog.com/static/imported-files/application_notes/EE191.pdf
 * This application note describe how to implement a UART on a Sharc DSP,
 * but this driver is implemented on Blackfin Processor.
 * Transmit Frame Sync is not used by this driver to transfer data out.
 */

/* #define DEBUG */

#define DRV_NAME "bfin-sport-uart"
#define DEVICE_NAME "ttySS"
#define pr_fmt(fmt) DRV_NAME ": " fmt

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/gpio.h>

#include <asm/bfin_sport.h>
#include <asm/delay.h>
#include <asm/portmux.h>

#include "bfin_sport_uart.h"

/* Per-port state: the generic uart_port plus the precomputed bit masks
 * used to frame (TX) and unframe (RX) characters in software. */
struct sport_uart_port {
	struct uart_port port;
	int err_irq;             /* error interrupt line */
	unsigned short csize;    /* character size in bits */
	unsigned short rxmask;   /* extract pattern for RX deframing */
	unsigned short txmask1;  /* AND mask applied after shifting in start bit */
	unsigned short txmask2;  /* OR mask placing the stop bit(s) */
	unsigned char stopb;     /* number of stop bits */
	/* unsigned char parib; */
#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
	int cts_pin;
	int rts_pin;
#endif
};

static int sport_uart_tx_chars(struct sport_uart_port *up);
static void sport_stop_tx(struct uart_port *port);

/*
 * Frame one character for transmission: shift in the start bit and OR
 * in the stop bit(s) using the port's precomputed masks, then hand the
 * framed word to the SPORT TX FIFO.
 */
static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
{
	pr_debug("%s value:%x, mask1=0x%x, mask2=0x%x\n", __func__, value,
		up->txmask1, up->txmask2);

	/* Place Start and Stop bits */
	__asm__ __volatile__ (
		"%[val] <<= 1;"
		"%[val] = %[val] & %[mask1];"
		"%[val] = %[val] | %[mask2];"
		: [val]"+d"(value)
		: [mask1]"d"(up->txmask1), [mask2]"d"(up->txmask2)
		: "ASTAT"
	);
	pr_debug("%s value:%x\n", __func__, value);

	SPORT_PUT_TX(up, value);
}

/*
 * Receive and deframe one character from the SPORT RX FIFO.
 * The SPORT samples each UART bit twice, so the character bits are
 * interleaved in the raw word; the Blackfin hardware-loop below pulls
 * out every data bit via the extract instruction, one per iteration.
 */
static inline unsigned char rx_one_byte(struct sport_uart_port *up)
{
	unsigned int value;
	unsigned char extract;
	u32 tmp_mask1, tmp_mask2, tmp_shift, tmp;

	/* Wide frames (character + stop bits over 7) need the 32-bit read. */
	if ((up->csize + up->stopb) > 7)
		value = SPORT_GET_RX32(up);
	else
		value = SPORT_GET_RX(up);

	pr_debug("%s value:%x, cs=%d, mask=0x%x\n", __func__, value,
		up->csize, up->rxmask);

	/* Extract data */
	__asm__ __volatile__ (
		"%[extr] = 0;"
		"%[mask1] = %[rxmask];"
		"%[mask2] = 0x0200(Z);"
		"%[shift] = 0;"
		"LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];"
		".Lloop_s:"
		"%[tmp] = extract(%[val], %[mask1].L)(Z);"
		"%[tmp] <<= %[shift];"
		"%[extr] = %[extr] | %[tmp];"
		"%[mask1] = %[mask1] - %[mask2];"
		".Lloop_e:"
		"%[shift] += 1;"
		: [extr]"=&d"(extract), [shift]"=&d"(tmp_shift), [tmp]"=&d"(tmp),
		  [mask1]"=&d"(tmp_mask1), [mask2]"=&d"(tmp_mask2)
		: [val]"d"(value), [rxmask]"d"(up->rxmask), [lc]"a"(up->csize)
		: "ASTAT", "LB0", "LC0", "LT0"
	);

	pr_debug(" extract:%x\n", extract);
	return extract;
}

/*
 * Program the SPORT registers for UART emulation: TX/RX control
 * registers, word sizes and the clock dividers derived from @baud_rate
 * and the system clock.
 */
static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
{
	int tclkdiv, rclkdiv;
	unsigned int sclk = get_sclk();

	/* Set TCR1 and TCR2, TFSR is not enabled for uart */
	SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
	SPORT_PUT_TCR2(up, size + 1);
	pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));

	/* Set RCR1 and RCR2 */
	SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK));
	SPORT_PUT_RCR2(up, (size + 1) * 2 - 1);
	pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up));

	tclkdiv = sclk / (2 * baud_rate) - 1;
	/* The actual uart baud rate of devices vary between +/-2%. The sport
	 * RX sample rate should be faster than the double of the worst case,
	 * otherwise, wrong data are received. So, set sport RX clock to be
	 * 3% faster.
*/ rclkdiv = sclk / (2 * baud_rate * 2 * 97 / 100) - 1; SPORT_PUT_TCLKDIV(up, tclkdiv); SPORT_PUT_RCLKDIV(up, rclkdiv); SSYNC(); pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, rclkdiv:%d\n", __func__, sclk, baud_rate, tclkdiv, rclkdiv); return 0; } static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id) { struct sport_uart_port *up = dev_id; struct tty_port *port = &up->port.state->port; unsigned int ch; spin_lock(&up->port.lock); while (SPORT_GET_STAT(up) & RXNE) { ch = rx_one_byte(up); up->port.icount.rx++; if (!uart_handle_sysrq_char(&up->port, ch)) tty_insert_flip_char(port, ch, TTY_NORMAL); } spin_unlock(&up->port.lock); /* XXX this won't deadlock with lowlat? */ tty_flip_buffer_push(port); return IRQ_HANDLED; } static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id) { struct sport_uart_port *up = dev_id; spin_lock(&up->port.lock); sport_uart_tx_chars(up); spin_unlock(&up->port.lock); return IRQ_HANDLED; } static irqreturn_t sport_uart_err_irq(int irq, void *dev_id) { struct sport_uart_port *up = dev_id; unsigned int stat = SPORT_GET_STAT(up); spin_lock(&up->port.lock); /* Overflow in RX FIFO */ if (stat & ROVF) { up->port.icount.overrun++; tty_insert_flip_char(&up->port.state->port, 0, TTY_OVERRUN); SPORT_PUT_STAT(up, ROVF); /* Clear ROVF bit */ } /* These should not happen */ if (stat & (TOVF | TUVF | RUVF)) { pr_err("SPORT Error:%s %s %s\n", (stat & TOVF) ? "TX overflow" : "", (stat & TUVF) ? "TX underflow" : "", (stat & RUVF) ? "RX underflow" : ""); SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN); SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN); } SSYNC(); spin_unlock(&up->port.lock); /* XXX we don't push the overrun bit to TTY? */ return IRQ_HANDLED; } #ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS static unsigned int sport_get_mctrl(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; if (up->cts_pin < 0) return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; /* CTS PIN is negative assertive. 
*/ if (SPORT_UART_GET_CTS(up)) return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; else return TIOCM_DSR | TIOCM_CAR; } static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct sport_uart_port *up = (struct sport_uart_port *)port; if (up->rts_pin < 0) return; /* RTS PIN is negative assertive. */ if (mctrl & TIOCM_RTS) SPORT_UART_ENABLE_RTS(up); else SPORT_UART_DISABLE_RTS(up); } /* * Handle any change of modem status signal. */ static irqreturn_t sport_mctrl_cts_int(int irq, void *dev_id) { struct sport_uart_port *up = (struct sport_uart_port *)dev_id; unsigned int status; status = sport_get_mctrl(&up->port); uart_handle_cts_change(&up->port, status & TIOCM_CTS); return IRQ_HANDLED; } #else static unsigned int sport_get_mctrl(struct uart_port *port) { pr_debug("%s enter\n", __func__); return TIOCM_CTS | TIOCM_CD | TIOCM_DSR; } static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl) { pr_debug("%s enter\n", __func__); } #endif /* Reqeust IRQ, Setup clock */ static int sport_startup(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; int ret; pr_debug("%s enter\n", __func__); ret = request_irq(up->port.irq, sport_uart_rx_irq, 0, "SPORT_UART_RX", up); if (ret) { dev_err(port->dev, "unable to request SPORT RX interrupt\n"); return ret; } ret = request_irq(up->port.irq+1, sport_uart_tx_irq, 0, "SPORT_UART_TX", up); if (ret) { dev_err(port->dev, "unable to request SPORT TX interrupt\n"); goto fail1; } ret = request_irq(up->err_irq, sport_uart_err_irq, 0, "SPORT_UART_STATUS", up); if (ret) { dev_err(port->dev, "unable to request SPORT status interrupt\n"); goto fail2; } #ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS if (up->cts_pin >= 0) { if (request_irq(gpio_to_irq(up->cts_pin), sport_mctrl_cts_int, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | 0, "BFIN_SPORT_UART_CTS", up)) { up->cts_pin = -1; dev_info(port->dev, "Unable to attach BlackFin UART over SPORT CTS interrupt. 
So, disable it.\n"); } } if (up->rts_pin >= 0) { if (gpio_request(up->rts_pin, DRV_NAME)) { dev_info(port->dev, "fail to request RTS PIN at GPIO_%d\n", up->rts_pin); up->rts_pin = -1; } else gpio_direction_output(up->rts_pin, 0); } #endif return 0; fail2: free_irq(up->port.irq+1, up); fail1: free_irq(up->port.irq, up); return ret; } /* * sport_uart_tx_chars * * ret 1 means need to enable sport. * ret 0 means do nothing. */ static int sport_uart_tx_chars(struct sport_uart_port *up) { struct circ_buf *xmit = &up->port.state->xmit; if (SPORT_GET_STAT(up) & TXF) return 0; if (up->port.x_char) { tx_one_byte(up, up->port.x_char); up->port.icount.tx++; up->port.x_char = 0; return 1; } if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { /* The waiting loop to stop SPORT TX from TX interrupt is * too long. This may block SPORT RX interrupts and cause * RX FIFO overflow. So, do stop sport TX only after the last * char in TX FIFO is moved into the shift register. */ if (SPORT_GET_STAT(up) & TXHRE) sport_stop_tx(&up->port); return 0; } while(!(SPORT_GET_STAT(up) & TXF) && !uart_circ_empty(xmit)) { tx_one_byte(up, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE -1); up->port.icount.tx++; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&up->port); return 1; } static unsigned int sport_tx_empty(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; unsigned int stat; stat = SPORT_GET_STAT(up); pr_debug("%s stat:%04x\n", __func__, stat); if (stat & TXHRE) { return TIOCSER_TEMT; } else return 0; } static void sport_stop_tx(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; pr_debug("%s enter\n", __func__); if (!(SPORT_GET_TCR1(up) & TSPEN)) return; /* Although the hold register is empty, last byte is still in shift * register and not sent out yet. So, put a dummy data into TX FIFO. 
* Then, sport tx stops when last byte is shift out and the dummy * data is moved into the shift register. */ SPORT_PUT_TX(up, 0xffff); while (!(SPORT_GET_STAT(up) & TXHRE)) cpu_relax(); SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); SSYNC(); return; } static void sport_start_tx(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; pr_debug("%s enter\n", __func__); /* Write data into SPORT FIFO before enable SPROT to transmit */ if (sport_uart_tx_chars(up)) { /* Enable transmit, then an interrupt will generated */ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN)); SSYNC(); } pr_debug("%s exit\n", __func__); } static void sport_stop_rx(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; pr_debug("%s enter\n", __func__); /* Disable sport to stop rx */ SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN)); SSYNC(); } static void sport_break_ctl(struct uart_port *port, int break_state) { pr_debug("%s enter\n", __func__); } static void sport_shutdown(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; dev_dbg(port->dev, "%s enter\n", __func__); /* Disable sport */ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN)); SSYNC(); free_irq(up->port.irq, up); free_irq(up->port.irq+1, up); free_irq(up->err_irq, up); #ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS if (up->cts_pin >= 0) free_irq(gpio_to_irq(up->cts_pin), up); if (up->rts_pin >= 0) gpio_free(up->rts_pin); #endif } static const char *sport_type(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; pr_debug("%s enter\n", __func__); return up->port.type == PORT_BFIN_SPORT ? 
"BFIN-SPORT-UART" : NULL; } static void sport_release_port(struct uart_port *port) { pr_debug("%s enter\n", __func__); } static int sport_request_port(struct uart_port *port) { pr_debug("%s enter\n", __func__); return 0; } static void sport_config_port(struct uart_port *port, int flags) { struct sport_uart_port *up = (struct sport_uart_port *)port; pr_debug("%s enter\n", __func__); up->port.type = PORT_BFIN_SPORT; } static int sport_verify_port(struct uart_port *port, struct serial_struct *ser) { pr_debug("%s enter\n", __func__); return 0; } static void sport_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct sport_uart_port *up = (struct sport_uart_port *)port; unsigned long flags; int i; pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag); #ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS if (old == NULL && up->cts_pin != -1) termios->c_cflag |= CRTSCTS; else if (up->cts_pin == -1) termios->c_cflag &= ~CRTSCTS; #endif switch (termios->c_cflag & CSIZE) { case CS8: up->csize = 8; break; case CS7: up->csize = 7; break; case CS6: up->csize = 6; break; case CS5: up->csize = 5; break; default: pr_warn("requested word length not supported\n"); break; } if (termios->c_cflag & CSTOPB) { up->stopb = 1; } if (termios->c_cflag & PARENB) { pr_warn("PAREN bit is not supported yet\n"); /* up->parib = 1; */ } spin_lock_irqsave(&up->port.lock, flags); port->read_status_mask = 0; /* * Characters to ignore */ port->ignore_status_mask = 0; /* RX extract mask */ up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8); /* TX masks, 8 bit data and 1 bit stop for example: * mask1 = b#0111111110 * mask2 = b#1000000000 */ for (i = 0, up->txmask1 = 0; i < up->csize; i++) up->txmask1 |= (1<<i); up->txmask2 = (1<<i); if (up->stopb) { ++i; up->txmask2 |= (1<<i); } up->txmask1 <<= 1; up->txmask2 <<= 1; /* uart baud rate */ port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16); /* Disable UART */ SPORT_PUT_TCR1(up, 
SPORT_GET_TCR1(up) & ~TSPEN); SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN); sport_uart_setup(up, up->csize + up->stopb, port->uartclk); /* driver TX line high after config, one dummy data is * necessary to stop sport after shift one byte */ SPORT_PUT_TX(up, 0xffff); SPORT_PUT_TX(up, 0xffff); SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN)); SSYNC(); while (!(SPORT_GET_STAT(up) & TXHRE)) cpu_relax(); SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN); SSYNC(); /* Port speed changed, update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, port->uartclk); /* Enable sport rx */ SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) | RSPEN); SSYNC(); spin_unlock_irqrestore(&up->port.lock, flags); } struct uart_ops sport_uart_ops = { .tx_empty = sport_tx_empty, .set_mctrl = sport_set_mctrl, .get_mctrl = sport_get_mctrl, .stop_tx = sport_stop_tx, .start_tx = sport_start_tx, .stop_rx = sport_stop_rx, .break_ctl = sport_break_ctl, .startup = sport_startup, .shutdown = sport_shutdown, .set_termios = sport_set_termios, .type = sport_type, .release_port = sport_release_port, .request_port = sport_request_port, .config_port = sport_config_port, .verify_port = sport_verify_port, }; #define BFIN_SPORT_UART_MAX_PORTS 4 static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS]; #ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE #define CLASS_BFIN_SPORT_CONSOLE "bfin-sport-console" static int __init sport_uart_console_setup(struct console *co, char *options) { struct sport_uart_port *up; int baud = 57600; int bits = 8; int parity = 'n'; # ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS int flow = 'r'; # else int flow = 'n'; # endif /* Check whether an invalid uart number has been specified */ if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS) return -ENODEV; up = bfin_sport_uart_ports[co->index]; if (!up) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(&up->port, co, baud, parity, bits, flow); } static 
void sport_uart_console_putchar(struct uart_port *port, int ch) { struct sport_uart_port *up = (struct sport_uart_port *)port; while (SPORT_GET_STAT(up) & TXF) barrier(); tx_one_byte(up, ch); } /* * Interrupts are disabled on entering */ static void sport_uart_console_write(struct console *co, const char *s, unsigned int count) { struct sport_uart_port *up = bfin_sport_uart_ports[co->index]; unsigned long flags; spin_lock_irqsave(&up->port.lock, flags); if (SPORT_GET_TCR1(up) & TSPEN) uart_console_write(&up->port, s, count, sport_uart_console_putchar); else { /* dummy data to start sport */ while (SPORT_GET_STAT(up) & TXF) barrier(); SPORT_PUT_TX(up, 0xffff); /* Enable transmit, then an interrupt will generated */ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN)); SSYNC(); uart_console_write(&up->port, s, count, sport_uart_console_putchar); /* Although the hold register is empty, last byte is still in shift * register and not sent out yet. So, put a dummy data into TX FIFO. * Then, sport tx stops when last byte is shift out and the dummy * data is moved into the shift register. 
*/ while (SPORT_GET_STAT(up) & TXF) barrier(); SPORT_PUT_TX(up, 0xffff); while (!(SPORT_GET_STAT(up) & TXHRE)) barrier(); /* Stop sport tx transfer */ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); SSYNC(); } spin_unlock_irqrestore(&up->port.lock, flags); } static struct uart_driver sport_uart_reg; static struct console sport_uart_console = { .name = DEVICE_NAME, .write = sport_uart_console_write, .device = uart_console_device, .setup = sport_uart_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &sport_uart_reg, }; #define SPORT_UART_CONSOLE (&sport_uart_console) #else #define SPORT_UART_CONSOLE NULL #endif /* CONFIG_SERIAL_BFIN_SPORT_CONSOLE */ static struct uart_driver sport_uart_reg = { .owner = THIS_MODULE, .driver_name = DRV_NAME, .dev_name = DEVICE_NAME, .major = 204, .minor = 84, .nr = BFIN_SPORT_UART_MAX_PORTS, .cons = SPORT_UART_CONSOLE, }; #ifdef CONFIG_PM static int sport_uart_suspend(struct device *dev) { struct sport_uart_port *sport = dev_get_drvdata(dev); dev_dbg(dev, "%s enter\n", __func__); if (sport) uart_suspend_port(&sport_uart_reg, &sport->port); return 0; } static int sport_uart_resume(struct device *dev) { struct sport_uart_port *sport = dev_get_drvdata(dev); dev_dbg(dev, "%s enter\n", __func__); if (sport) uart_resume_port(&sport_uart_reg, &sport->port); return 0; } static struct dev_pm_ops bfin_sport_uart_dev_pm_ops = { .suspend = sport_uart_suspend, .resume = sport_uart_resume, }; #endif static int sport_uart_probe(struct platform_device *pdev) { struct resource *res; struct sport_uart_port *sport; int ret = 0; dev_dbg(&pdev->dev, "%s enter\n", __func__); if (pdev->id < 0 || pdev->id >= BFIN_SPORT_UART_MAX_PORTS) { dev_err(&pdev->dev, "Wrong sport uart platform device id.\n"); return -ENOENT; } if (bfin_sport_uart_ports[pdev->id] == NULL) { bfin_sport_uart_ports[pdev->id] = kzalloc(sizeof(struct sport_uart_port), GFP_KERNEL); sport = bfin_sport_uart_ports[pdev->id]; if (!sport) { dev_err(&pdev->dev, "Fail to malloc 
sport_uart_port\n"); return -ENOMEM; } ret = peripheral_request_list(dev_get_platdata(&pdev->dev), DRV_NAME); if (ret) { dev_err(&pdev->dev, "Fail to request SPORT peripherals\n"); goto out_error_free_mem; } spin_lock_init(&sport->port.lock); sport->port.fifosize = SPORT_TX_FIFO_SIZE, sport->port.ops = &sport_uart_ops; sport->port.line = pdev->id; sport->port.iotype = UPIO_MEM; sport->port.flags = UPF_BOOT_AUTOCONF; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); ret = -ENOENT; goto out_error_free_peripherals; } sport->port.membase = ioremap(res->start, resource_size(res)); if (!sport->port.membase) { dev_err(&pdev->dev, "Cannot map sport IO\n"); ret = -ENXIO; goto out_error_free_peripherals; } sport->port.mapbase = res->start; sport->port.irq = platform_get_irq(pdev, 0); if ((int)sport->port.irq < 0) { dev_err(&pdev->dev, "No sport RX/TX IRQ specified\n"); ret = -ENOENT; goto out_error_unmap; } sport->err_irq = platform_get_irq(pdev, 1); if (sport->err_irq < 0) { dev_err(&pdev->dev, "No sport status IRQ specified\n"); ret = -ENOENT; goto out_error_unmap; } #ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (res == NULL) sport->cts_pin = -1; else sport->cts_pin = res->start; res = platform_get_resource(pdev, IORESOURCE_IO, 1); if (res == NULL) sport->rts_pin = -1; else sport->rts_pin = res->start; #endif } #ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE if (!is_early_platform_device(pdev)) { #endif sport = bfin_sport_uart_ports[pdev->id]; sport->port.dev = &pdev->dev; dev_set_drvdata(&pdev->dev, sport); ret = uart_add_one_port(&sport_uart_reg, &sport->port); #ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE } #endif if (!ret) return 0; if (sport) { out_error_unmap: iounmap(sport->port.membase); out_error_free_peripherals: peripheral_free_list(dev_get_platdata(&pdev->dev)); out_error_free_mem: kfree(sport); bfin_sport_uart_ports[pdev->id] = NULL; } return ret; } 
static int sport_uart_remove(struct platform_device *pdev) { struct sport_uart_port *sport = platform_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s enter\n", __func__); dev_set_drvdata(&pdev->dev, NULL); if (sport) { uart_remove_one_port(&sport_uart_reg, &sport->port); iounmap(sport->port.membase); peripheral_free_list(dev_get_platdata(&pdev->dev)); kfree(sport); bfin_sport_uart_ports[pdev->id] = NULL; } return 0; } static struct platform_driver sport_uart_driver = { .probe = sport_uart_probe, .remove = sport_uart_remove, .driver = { .name = DRV_NAME, #ifdef CONFIG_PM .pm = &bfin_sport_uart_dev_pm_ops, #endif }, }; #ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE static struct early_platform_driver early_sport_uart_driver __initdata = { .class_str = CLASS_BFIN_SPORT_CONSOLE, .pdrv = &sport_uart_driver, .requested_id = EARLY_PLATFORM_ID_UNSET, }; static int __init sport_uart_rs_console_init(void) { early_platform_driver_register(&early_sport_uart_driver, DRV_NAME); early_platform_driver_probe(CLASS_BFIN_SPORT_CONSOLE, BFIN_SPORT_UART_MAX_PORTS, 0); register_console(&sport_uart_console); return 0; } console_initcall(sport_uart_rs_console_init); #endif static int __init sport_uart_init(void) { int ret; pr_info("Blackfin uart over sport driver\n"); ret = uart_register_driver(&sport_uart_reg); if (ret) { pr_err("failed to register %s:%d\n", sport_uart_reg.driver_name, ret); return ret; } ret = platform_driver_register(&sport_uart_driver); if (ret) { pr_err("failed to register sport uart driver:%d\n", ret); uart_unregister_driver(&sport_uart_reg); } return ret; } module_init(sport_uart_init); static void __exit sport_uart_exit(void) { platform_driver_unregister(&sport_uart_driver); uart_unregister_driver(&sport_uart_reg); } module_exit(sport_uart_exit); MODULE_AUTHOR("Sonic Zhang, Roy Huang"); MODULE_DESCRIPTION("Blackfin serial over SPORT driver"); MODULE_LICENSE("GPL");
gpl-2.0
JustAkan/F220K_Stock_Kernel
sound/soc/codecs/msm_stub.c
1270
2502
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>

/* A dummy driver useful only to advertise hardware parameters */
static struct snd_soc_dai_driver msm_stub_dais[] = {
	{
		/* Playback-only stub DAI: up to 8 channels, 8-48 kHz, S16_LE */
		.name = "msm-stub-rx",
		.playback = { /* Support maximum range */
			.stream_name = "Playback",
			.channels_min = 1,
			.channels_max = 8,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
	},
	{
		/* Capture-only stub DAI: up to 4 channels, 8-48 kHz, S16_LE */
		.name = "msm-stub-tx",
		.capture = { /* Support maximum range */
			.stream_name = "Record",
			.channels_min = 1,
			.channels_max = 4,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,
		},
	},
};

/* Empty codec ops: the stub exists only to carry the DAIs above */
static struct snd_soc_codec_driver soc_msm_stub = {};

/*
 * Register the stub codec and its two DAIs with ASoC.
 * When probed from device tree the device is renamed to the fixed
 * "msm-stub-codec.1" so machine drivers can match it by name.
 */
static int __devinit msm_stub_dev_probe(struct platform_device *pdev)
{
	if (pdev->dev.of_node)
		dev_set_name(&pdev->dev, "%s.%d", "msm-stub-codec", 1);

	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));

	return snd_soc_register_codec(&pdev->dev,
	&soc_msm_stub, msm_stub_dais, ARRAY_SIZE(msm_stub_dais));
}

/* Tear down what probe registered */
static int __devexit msm_stub_dev_remove(struct platform_device *pdev)
{
	snd_soc_unregister_codec(&pdev->dev);
	return 0;
}

static const struct of_device_id msm_stub_codec_dt_match[] = {
	{ .compatible = "qcom,msm-stub-codec", },
	{}
};

static struct platform_driver msm_stub_driver = {
	.driver = {
		.name = "msm-stub-codec",
		.owner = THIS_MODULE,
		.of_match_table = msm_stub_codec_dt_match,
	},
	.probe = msm_stub_dev_probe,
	.remove = __devexit_p(msm_stub_dev_remove),
};

static int __init msm_stub_init(void)
{
	return platform_driver_register(&msm_stub_driver);
}
module_init(msm_stub_init);

static void __exit msm_stub_exit(void)
{
	platform_driver_unregister(&msm_stub_driver);
}
module_exit(msm_stub_exit);

MODULE_DESCRIPTION("Generic MSM CODEC driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
gpl-2.0
imoseyon/leanKernel-note3
drivers/sensorhub/stm32f/factory/gesture_max88922.c
1782
4638
/*
 * Copyright (C) 2012, Samsung Electronics Co. Ltd. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include "../ssp.h"

#define VENDOR		"MAXIM"
#define CHIP_ID		"MAX88922"

/* sysfs show: sensor vendor string */
static ssize_t gestrue_vendor_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", VENDOR);
}

/* sysfs show: sensor part name */
static ssize_t gestrue_name_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", CHIP_ID);
}

/* sysfs show: last raw gesture sample (channels A/B/C and index 9) */
static ssize_t raw_data_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ssp_data *data = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d,%d,%d,%d\n",
		data->buf[GESTURE_SENSOR].data[1],
		data->buf[GESTURE_SENSOR].data[2],
		data->buf[GESTURE_SENSOR].data[3],
		data->buf[GESTURE_SENSOR].data[9]);
}

/*
 * sysfs show: run the sensor-hub gesture self test and report the four
 * channel raw values (each 10-bit value rebased by -1023).
 */
static ssize_t gesture_get_selftest_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	s16 raw_A = 0, raw_B = 0, raw_C = 0, raw_D = 0;
	int iRet = 0;
	char chTempBuf[8] = { 0, };
	struct ssp_data *data = dev_get_drvdata(dev);
	struct ssp_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);

	/* Bug fix: the original dereferenced msg without checking the
	 * allocation result, which would oops under memory pressure. */
	if (msg == NULL) {
		pr_err("[SSP]: %s - failed to allocate ssp_msg\n", __func__);
		return -ENOMEM;
	}

	msg->cmd = GESTURE_FACTORY;
	msg->length = 8;
	msg->options = AP2HUB_READ;
	msg->buffer = chTempBuf;
	msg->free_buffer = 0;

	/* NOTE(review): following the original, msg is handed off to
	 * ssp_spi_sync() and never freed here - presumably the transport
	 * releases it on both paths; confirm against ssp_spi.c. */
	iRet = ssp_spi_sync(data, msg, 2000);
	if (iRet != SUCCESS) {
		pr_err("[SSP]: %s - Gesture Selftest Timeout!!\n", __func__);
		goto exit;
	}

	printk(KERN_INFO "%x %x %x %x %x %x %x %x \n",
		chTempBuf[0], chTempBuf[1], chTempBuf[2], chTempBuf[3],
		chTempBuf[4], chTempBuf[5], chTempBuf[6], chTempBuf[7]);

	/* Each channel is a big-endian 16-bit sample rebased by -1023 */
	raw_A = ((((s16)chTempBuf[0]) << 8) + ((s16)chTempBuf[1])) - 1023;
	raw_B = ((((s16)chTempBuf[2]) << 8) + ((s16)chTempBuf[3])) - 1023;
	raw_C = ((((s16)chTempBuf[4]) << 8) + ((s16)chTempBuf[5])) - 1023;
	raw_D = ((((s16)chTempBuf[6]) << 8) + ((s16)chTempBuf[7])) - 1023;

	pr_info("[SSP] %s: self test A = %d, B = %d, C = %d, D = %d\n",
		__func__, raw_A, raw_B, raw_C, raw_D);

exit:
	/* On timeout the zero-initialized raws are reported, as before */
	return snprintf(buf, PAGE_SIZE, "%d,%d,%d,%d\n",
			raw_A, raw_B, raw_C, raw_D);
}

/* sysfs show: currently configured IR LED current (register encoding) */
static ssize_t ir_current_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ssp_data *data = dev_get_drvdata(dev);

	ssp_dbg("[SSP]: %s - Ir_Current Setting = %d\n",
		__func__, data->uIr_Current);

	return snprintf(buf, PAGE_SIZE, "%d\n", data->uIr_Current);
}

/*
 * sysfs store: map a requested IR current (percent-like step from the
 * first row of set_current[]) to its register encoding (second row) and
 * push it to the hub via set_gesture_current().
 */
static ssize_t ir_current_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	u16 uNewIrCurrent = DEFUALT_IR_CURRENT;	/* sic - defined in ssp.h */
	int iRet = 0;
	u16 current_index = 0;
	struct ssp_data *data = dev_get_drvdata(dev);
	static u16 set_current[2][16] = {
		{0, 6, 13, 20, 26, 33, 40, 46,
		 53, 60, 66, 73, 80, 86, 93, 100},
		{0<<4, 1<<4, 2<<4, 3<<4, 4<<4, 5<<4, 6<<4, 7<<4,
		 8<<4, 9<<4, 10<<4, 11<<4, 12<<4, 13<<4, 14<<4, 15<<4} };

	iRet = kstrtou16(buf, 10, &uNewIrCurrent);
	if (iRet < 0) {
		/* Fixed: the message named kstrtoint but the call is kstrtou16 */
		pr_err("[SSP]: %s - kstrtou16 failed.(%d)\n", __func__, iRet);
	} else {
		for (current_index = 0; current_index < 16; current_index++) {
			if (set_current[0][current_index] == uNewIrCurrent) {
				data->uIr_Current =
					set_current[1][current_index];
				break;
			}
		}
		if (current_index == 16) /* current setting value wrong. */
			return ERROR;

		set_gesture_current(data, data->uIr_Current);
		data->uIr_Current = uNewIrCurrent;
	}

	ssp_dbg("[SSP]: %s - new Ir_Current Setting : %d\n",
		__func__, data->uIr_Current);

	return size;
}

static DEVICE_ATTR(vendor, S_IRUGO, gestrue_vendor_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, gestrue_name_show, NULL);
static DEVICE_ATTR(raw_data, S_IRUGO, raw_data_read, NULL);
static DEVICE_ATTR(selftest, S_IRUGO, gesture_get_selftest_show, NULL);
static DEVICE_ATTR(ir_current, S_IRUGO | S_IWUSR | S_IWGRP,
	ir_current_show, ir_current_store);

static struct device_attribute *gesture_attrs[] = {
	&dev_attr_vendor,
	&dev_attr_name,
	&dev_attr_raw_data,
	&dev_attr_selftest,
	&dev_attr_ir_current,
	NULL,
};

/* Create the gesture_sensor factory-test sysfs node set */
void initialize_gesture_factorytest(struct ssp_data *data)
{
	sensors_register(data->ges_device, data, gesture_attrs,
		"gesture_sensor");
}

/* Remove the gesture_sensor factory-test sysfs node set */
void remove_gesture_factorytest(struct ssp_data *data)
{
	sensors_unregister(data->ges_device, gesture_attrs);
}
gpl-2.0
gearslam/v20j-geeb
arch/hexagon/kernel/topology.c
1782
1408
/*
 * CPU topology for Hexagon
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>

/* Swiped from MIPS. */
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * Register every present CPU with the sysfs CPU subsystem.  A failure
 * to register one CPU is logged but does not stop the walk over the
 * remaining CPUs, and the initcall always reports success.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		/*
		 * register_cpu takes a per_cpu pointer and
		 * just points it at another per_cpu struct...
		 */
		int ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);

		if (ret)
			printk(KERN_WARNING
			       "topology_init: register_cpu %d failed (%d)\n",
			       cpu, ret);
	}

	return 0;
}

subsys_initcall(topology_init);
gpl-2.0
sonicxml/Popcornkernel-Tuna
drivers/usb/serial/spcp8x5.c
3062
19074
/* * spcp8x5 USB to serial adaptor driver * * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com) * Copyright (C) 2006 Linxb (xubin.lin@worldplus.com.cn) * Copyright (C) 2006 S1 Corp. * * Original driver for 2.6.10 pl2303 driver by * Greg Kroah-Hartman (greg@kroah.com) * Changes for 2.6.20 by Harald Klein <hari@vt100.at> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/usb.h> #include <linux/usb/serial.h> /* Version Information */ #define DRIVER_VERSION "v0.10" #define DRIVER_DESC "SPCP8x5 USB to serial adaptor driver" static int debug; #define SPCP8x5_007_VID 0x04FC #define SPCP8x5_007_PID 0x0201 #define SPCP8x5_008_VID 0x04fc #define SPCP8x5_008_PID 0x0235 #define SPCP8x5_PHILIPS_VID 0x0471 #define SPCP8x5_PHILIPS_PID 0x081e #define SPCP8x5_INTERMATIC_VID 0x04FC #define SPCP8x5_INTERMATIC_PID 0x0204 #define SPCP8x5_835_VID 0x04fc #define SPCP8x5_835_PID 0x0231 static const struct usb_device_id id_table[] = { { USB_DEVICE(SPCP8x5_PHILIPS_VID , SPCP8x5_PHILIPS_PID)}, { USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID)}, { USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID)}, { USB_DEVICE(SPCP8x5_008_VID, SPCP8x5_008_PID)}, { USB_DEVICE(SPCP8x5_007_VID, SPCP8x5_007_PID)}, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); struct spcp8x5_usb_ctrl_arg { u8 type; u8 cmd; u8 cmd_type; u16 value; u16 index; u16 length; }; /* spcp8x5 spec register define */ #define MCR_CONTROL_LINE_RTS 0x02 #define MCR_CONTROL_LINE_DTR 0x01 #define MCR_DTR 0x01 #define MCR_RTS 0x02 #define MSR_STATUS_LINE_DCD 0x80 #define 
MSR_STATUS_LINE_RI 0x40 #define MSR_STATUS_LINE_DSR 0x20 #define MSR_STATUS_LINE_CTS 0x10 /* verdor command here , we should define myself */ #define SET_DEFAULT 0x40 #define SET_DEFAULT_TYPE 0x20 #define SET_UART_FORMAT 0x40 #define SET_UART_FORMAT_TYPE 0x21 #define SET_UART_FORMAT_SIZE_5 0x00 #define SET_UART_FORMAT_SIZE_6 0x01 #define SET_UART_FORMAT_SIZE_7 0x02 #define SET_UART_FORMAT_SIZE_8 0x03 #define SET_UART_FORMAT_STOP_1 0x00 #define SET_UART_FORMAT_STOP_2 0x04 #define SET_UART_FORMAT_PAR_NONE 0x00 #define SET_UART_FORMAT_PAR_ODD 0x10 #define SET_UART_FORMAT_PAR_EVEN 0x30 #define SET_UART_FORMAT_PAR_MASK 0xD0 #define SET_UART_FORMAT_PAR_SPACE 0x90 #define GET_UART_STATUS_TYPE 0xc0 #define GET_UART_STATUS 0x22 #define GET_UART_STATUS_MSR 0x06 #define SET_UART_STATUS 0x40 #define SET_UART_STATUS_TYPE 0x23 #define SET_UART_STATUS_MCR 0x0004 #define SET_UART_STATUS_MCR_DTR 0x01 #define SET_UART_STATUS_MCR_RTS 0x02 #define SET_UART_STATUS_MCR_LOOP 0x10 #define SET_WORKING_MODE 0x40 #define SET_WORKING_MODE_TYPE 0x24 #define SET_WORKING_MODE_U2C 0x00 #define SET_WORKING_MODE_RS485 0x01 #define SET_WORKING_MODE_PDMA 0x02 #define SET_WORKING_MODE_SPP 0x03 #define SET_FLOWCTL_CHAR 0x40 #define SET_FLOWCTL_CHAR_TYPE 0x25 #define GET_VERSION 0xc0 #define GET_VERSION_TYPE 0x26 #define SET_REGISTER 0x40 #define SET_REGISTER_TYPE 0x27 #define GET_REGISTER 0xc0 #define GET_REGISTER_TYPE 0x28 #define SET_RAM 0x40 #define SET_RAM_TYPE 0x31 #define GET_RAM 0xc0 #define GET_RAM_TYPE 0x32 /* how come ??? 
*/ #define UART_STATE 0x08 #define UART_STATE_TRANSIENT_MASK 0x75 #define UART_DCD 0x01 #define UART_DSR 0x02 #define UART_BREAK_ERROR 0x04 #define UART_RING 0x08 #define UART_FRAME_ERROR 0x10 #define UART_PARITY_ERROR 0x20 #define UART_OVERRUN_ERROR 0x40 #define UART_CTS 0x80 enum spcp8x5_type { SPCP825_007_TYPE, SPCP825_008_TYPE, SPCP825_PHILIP_TYPE, SPCP825_INTERMATIC_TYPE, SPCP835_TYPE, }; static struct usb_driver spcp8x5_driver = { .name = "spcp8x5", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = id_table, .no_dynamic_id = 1, }; struct spcp8x5_private { spinlock_t lock; enum spcp8x5_type type; wait_queue_head_t delta_msr_wait; u8 line_control; u8 line_status; }; /* desc : when device plug in,this function would be called. * thanks to usb_serial subsystem,then do almost every things for us. And what * we should do just alloc the buffer */ static int spcp8x5_startup(struct usb_serial *serial) { struct spcp8x5_private *priv; int i; enum spcp8x5_type type = SPCP825_007_TYPE; u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); if (product == 0x0201) type = SPCP825_007_TYPE; else if (product == 0x0231) type = SPCP835_TYPE; else if (product == 0x0235) type = SPCP825_008_TYPE; else if (product == 0x0204) type = SPCP825_INTERMATIC_TYPE; else if (product == 0x0471 && serial->dev->descriptor.idVendor == cpu_to_le16(0x081e)) type = SPCP825_PHILIP_TYPE; dev_dbg(&serial->dev->dev, "device type = %d\n", (int)type); for (i = 0; i < serial->num_ports; ++i) { priv = kzalloc(sizeof(struct spcp8x5_private), GFP_KERNEL); if (!priv) goto cleanup; spin_lock_init(&priv->lock); init_waitqueue_head(&priv->delta_msr_wait); priv->type = type; usb_set_serial_port_data(serial->port[i] , priv); } return 0; cleanup: for (--i; i >= 0; --i) { priv = usb_get_serial_port_data(serial->port[i]); kfree(priv); usb_set_serial_port_data(serial->port[i] , NULL); } return -ENOMEM; } /* call when the device plug out. 
free all the memory alloced by probe */ static void spcp8x5_release(struct usb_serial *serial) { int i; for (i = 0; i < serial->num_ports; i++) kfree(usb_get_serial_port_data(serial->port[i])); } /* set the modem control line of the device. * NOTE spcp825-007 not supported this */ static int spcp8x5_set_ctrlLine(struct usb_device *dev, u8 value, enum spcp8x5_type type) { int retval; u8 mcr = 0 ; if (type == SPCP825_007_TYPE) return -EPERM; mcr = (unsigned short)value; retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), SET_UART_STATUS_TYPE, SET_UART_STATUS, mcr, 0x04, NULL, 0, 100); if (retval != 0) dev_dbg(&dev->dev, "usb_control_msg return %#x\n", retval); return retval; } /* get the modem status register of the device * NOTE spcp825-007 not supported this */ static int spcp8x5_get_msr(struct usb_device *dev, u8 *status, enum spcp8x5_type type) { u8 *status_buffer; int ret; /* I return Permited not support here but seem inval device * is more fix */ if (type == SPCP825_007_TYPE) return -EPERM; if (status == NULL) return -EINVAL; status_buffer = kmalloc(1, GFP_KERNEL); if (!status_buffer) return -ENOMEM; status_buffer[0] = status[0]; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), GET_UART_STATUS, GET_UART_STATUS_TYPE, 0, GET_UART_STATUS_MSR, status_buffer, 1, 100); if (ret < 0) dev_dbg(&dev->dev, "Get MSR = 0x%p failed (error = %d)", status_buffer, ret); dev_dbg(&dev->dev, "0xc0:0x22:0:6 %d - 0x%p ", ret, status_buffer); status[0] = status_buffer[0]; kfree(status_buffer); return ret; } /* select the work mode. 
* NOTE this function not supported by spcp825-007 */ static void spcp8x5_set_workMode(struct usb_device *dev, u16 value, u16 index, enum spcp8x5_type type) { int ret; /* I return Permited not support here but seem inval device * is more fix */ if (type == SPCP825_007_TYPE) return; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), SET_WORKING_MODE_TYPE, SET_WORKING_MODE, value, index, NULL, 0, 100); dev_dbg(&dev->dev, "value = %#x , index = %#x\n", value, index); if (ret < 0) dev_dbg(&dev->dev, "RTSCTS usb_control_msg(enable flowctrl) = %d\n", ret); } static int spcp8x5_carrier_raised(struct usb_serial_port *port) { struct spcp8x5_private *priv = usb_get_serial_port_data(port); if (priv->line_status & MSR_STATUS_LINE_DCD) return 1; return 0; } static void spcp8x5_dtr_rts(struct usb_serial_port *port, int on) { struct spcp8x5_private *priv = usb_get_serial_port_data(port); unsigned long flags; u8 control; spin_lock_irqsave(&priv->lock, flags); if (on) priv->line_control = MCR_CONTROL_LINE_DTR | MCR_CONTROL_LINE_RTS; else priv->line_control &= ~ (MCR_CONTROL_LINE_DTR | MCR_CONTROL_LINE_RTS); control = priv->line_control; spin_unlock_irqrestore(&priv->lock, flags); spcp8x5_set_ctrlLine(port->serial->dev, control , priv->type); } static void spcp8x5_init_termios(struct tty_struct *tty) { /* for the 1st time call this function */ *(tty->termios) = tty_std_termios; tty->termios->c_cflag = B115200 | CS8 | CREAD | HUPCL | CLOCAL; tty->termios->c_ispeed = 115200; tty->termios->c_ospeed = 115200; } /* set the serial param for transfer. we should check if we really need to * transfer. if we set flow control we should do this too. 
*/ static void spcp8x5_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { struct usb_serial *serial = port->serial; struct spcp8x5_private *priv = usb_get_serial_port_data(port); unsigned long flags; unsigned int cflag = tty->termios->c_cflag; unsigned int old_cflag = old_termios->c_cflag; unsigned short uartdata; unsigned char buf[2] = {0, 0}; int baud; int i; u8 control; /* check that they really want us to change something */ if (!tty_termios_hw_change(tty->termios, old_termios)) return; /* set DTR/RTS active */ spin_lock_irqsave(&priv->lock, flags); control = priv->line_control; if ((old_cflag & CBAUD) == B0) { priv->line_control |= MCR_DTR; if (!(old_cflag & CRTSCTS)) priv->line_control |= MCR_RTS; } if (control != priv->line_control) { control = priv->line_control; spin_unlock_irqrestore(&priv->lock, flags); spcp8x5_set_ctrlLine(serial->dev, control , priv->type); } else { spin_unlock_irqrestore(&priv->lock, flags); } /* Set Baud Rate */ baud = tty_get_baud_rate(tty); switch (baud) { case 300: buf[0] = 0x00; break; case 600: buf[0] = 0x01; break; case 1200: buf[0] = 0x02; break; case 2400: buf[0] = 0x03; break; case 4800: buf[0] = 0x04; break; case 9600: buf[0] = 0x05; break; case 19200: buf[0] = 0x07; break; case 38400: buf[0] = 0x09; break; case 57600: buf[0] = 0x0a; break; case 115200: buf[0] = 0x0b; break; case 230400: buf[0] = 0x0c; break; case 460800: buf[0] = 0x0d; break; case 921600: buf[0] = 0x0e; break; /* case 1200000: buf[0] = 0x0f; break; */ /* case 2400000: buf[0] = 0x10; break; */ case 3000000: buf[0] = 0x11; break; /* case 6000000: buf[0] = 0x12; break; */ case 0: case 1000000: buf[0] = 0x0b; break; default: dev_err(&port->dev, "spcp825 driver does not support the " "baudrate requested, using default of 9600.\n"); } /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */ if (cflag & CSIZE) { switch (cflag & CSIZE) { case CS5: buf[1] |= SET_UART_FORMAT_SIZE_5; break; case CS6: buf[1] |= 
SET_UART_FORMAT_SIZE_6; break; case CS7: buf[1] |= SET_UART_FORMAT_SIZE_7; break; default: case CS8: buf[1] |= SET_UART_FORMAT_SIZE_8; break; } } /* Set Stop bit2 : 0:1bit 1:2bit */ buf[1] |= (cflag & CSTOPB) ? SET_UART_FORMAT_STOP_2 : SET_UART_FORMAT_STOP_1; /* Set Parity bit3-4 01:Odd 11:Even */ if (cflag & PARENB) { buf[1] |= (cflag & PARODD) ? SET_UART_FORMAT_PAR_ODD : SET_UART_FORMAT_PAR_EVEN ; } else buf[1] |= SET_UART_FORMAT_PAR_NONE; uartdata = buf[0] | buf[1]<<8; i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), SET_UART_FORMAT_TYPE, SET_UART_FORMAT, uartdata, 0, NULL, 0, 100); if (i < 0) dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n", uartdata, i); dbg("0x21:0x40:0:0 %d", i); if (cflag & CRTSCTS) { /* enable hardware flow control */ spcp8x5_set_workMode(serial->dev, 0x000a, SET_WORKING_MODE_U2C, priv->type); } } /* open the serial port. do some usb system call. set termios and get the line * status of the device. */ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ktermios tmp_termios; struct usb_serial *serial = port->serial; struct spcp8x5_private *priv = usb_get_serial_port_data(port); int ret; unsigned long flags; u8 status = 0x30; /* status 0x30 means DSR and CTS = 1 other CDC RI and delta = 0 */ dbg("%s - port %d", __func__, port->number); usb_clear_halt(serial->dev, port->write_urb->pipe); usb_clear_halt(serial->dev, port->read_urb->pipe); ret = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 0x09, 0x00, 0x01, 0x00, NULL, 0x00, 100); if (ret) return ret; spcp8x5_set_ctrlLine(serial->dev, priv->line_control , priv->type); /* Setup termios */ if (tty) spcp8x5_set_termios(tty, port, &tmp_termios); spcp8x5_get_msr(serial->dev, &status, priv->type); /* may be we should update uart status here but now we did not do */ spin_lock_irqsave(&priv->lock, flags); priv->line_status = status & 0xf0 ; spin_unlock_irqrestore(&priv->lock, flags); port->port.drain_delay = 256; return 
usb_serial_generic_open(tty, port); } static void spcp8x5_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; struct spcp8x5_private *priv = usb_get_serial_port_data(port); struct tty_struct *tty; unsigned char *data = urb->transfer_buffer; unsigned long flags; u8 status; char tty_flag; /* get tty_flag from status */ tty_flag = TTY_NORMAL; spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; priv->line_status &= ~UART_STATE_TRANSIENT_MASK; spin_unlock_irqrestore(&priv->lock, flags); /* wake up the wait for termios */ wake_up_interruptible(&priv->delta_msr_wait); if (!urb->actual_length) return; tty = tty_port_tty_get(&port->port); if (!tty) return; if (status & UART_STATE_TRANSIENT_MASK) { /* break takes precedence over parity, which takes precedence * over framing errors */ if (status & UART_BREAK_ERROR) tty_flag = TTY_BREAK; else if (status & UART_PARITY_ERROR) tty_flag = TTY_PARITY; else if (status & UART_FRAME_ERROR) tty_flag = TTY_FRAME; dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag); /* overrun is special, not associated with a char */ if (status & UART_OVERRUN_ERROR) tty_insert_flip_char(tty, 0, TTY_OVERRUN); if (status & UART_DCD) usb_serial_handle_dcd_change(port, tty, priv->line_status & MSR_STATUS_LINE_DCD); } tty_insert_flip_string_fixed_flag(tty, data, tty_flag, urb->actual_length); tty_flip_buffer_push(tty); tty_kref_put(tty); } static int spcp8x5_wait_modem_info(struct usb_serial_port *port, unsigned int arg) { struct spcp8x5_private *priv = usb_get_serial_port_data(port); unsigned long flags; unsigned int prevstatus; unsigned int status; unsigned int changed; spin_lock_irqsave(&priv->lock, flags); prevstatus = priv->line_status; spin_unlock_irqrestore(&priv->lock, flags); while (1) { /* wake up in bulk read */ interruptible_sleep_on(&priv->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; spin_lock_irqsave(&priv->lock, flags); status = priv->line_status; 
spin_unlock_irqrestore(&priv->lock, flags); changed = prevstatus^status; if (((arg & TIOCM_RNG) && (changed & MSR_STATUS_LINE_RI)) || ((arg & TIOCM_DSR) && (changed & MSR_STATUS_LINE_DSR)) || ((arg & TIOCM_CD) && (changed & MSR_STATUS_LINE_DCD)) || ((arg & TIOCM_CTS) && (changed & MSR_STATUS_LINE_CTS))) return 0; prevstatus = status; } /* NOTREACHED */ return 0; } static int spcp8x5_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; dbg("%s (%d) cmd = 0x%04x", __func__, port->number, cmd); switch (cmd) { case TIOCMIWAIT: dbg("%s (%d) TIOCMIWAIT", __func__, port->number); return spcp8x5_wait_modem_info(port, arg); default: dbg("%s not supported = 0x%04x", __func__, cmd); break; } return -ENOIOCTLCMD; } static int spcp8x5_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct spcp8x5_private *priv = usb_get_serial_port_data(port); unsigned long flags; u8 control; spin_lock_irqsave(&priv->lock, flags); if (set & TIOCM_RTS) priv->line_control |= MCR_RTS; if (set & TIOCM_DTR) priv->line_control |= MCR_DTR; if (clear & TIOCM_RTS) priv->line_control &= ~MCR_RTS; if (clear & TIOCM_DTR) priv->line_control &= ~MCR_DTR; control = priv->line_control; spin_unlock_irqrestore(&priv->lock, flags); return spcp8x5_set_ctrlLine(port->serial->dev, control , priv->type); } static int spcp8x5_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct spcp8x5_private *priv = usb_get_serial_port_data(port); unsigned long flags; unsigned int mcr; unsigned int status; unsigned int result; spin_lock_irqsave(&priv->lock, flags); mcr = priv->line_control; status = priv->line_status; spin_unlock_irqrestore(&priv->lock, flags); result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) | ((mcr & MCR_RTS) ? TIOCM_RTS : 0) | ((status & MSR_STATUS_LINE_CTS) ? TIOCM_CTS : 0) | ((status & MSR_STATUS_LINE_DSR) ? 
TIOCM_DSR : 0) | ((status & MSR_STATUS_LINE_RI) ? TIOCM_RI : 0) | ((status & MSR_STATUS_LINE_DCD) ? TIOCM_CD : 0); return result; } /* All of the device info needed for the spcp8x5 SIO serial converter */ static struct usb_serial_driver spcp8x5_device = { .driver = { .owner = THIS_MODULE, .name = "SPCP8x5", }, .id_table = id_table, .usb_driver = &spcp8x5_driver, .num_ports = 1, .open = spcp8x5_open, .dtr_rts = spcp8x5_dtr_rts, .carrier_raised = spcp8x5_carrier_raised, .set_termios = spcp8x5_set_termios, .init_termios = spcp8x5_init_termios, .ioctl = spcp8x5_ioctl, .tiocmget = spcp8x5_tiocmget, .tiocmset = spcp8x5_tiocmset, .attach = spcp8x5_startup, .release = spcp8x5_release, .process_read_urb = spcp8x5_process_read_urb, }; static int __init spcp8x5_init(void) { int retval; retval = usb_serial_register(&spcp8x5_device); if (retval) goto failed_usb_serial_register; retval = usb_register(&spcp8x5_driver); if (retval) goto failed_usb_register; printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return 0; failed_usb_register: usb_serial_deregister(&spcp8x5_device); failed_usb_serial_register: return retval; } static void __exit spcp8x5_exit(void) { usb_deregister(&spcp8x5_driver); usb_serial_deregister(&spcp8x5_device); } module_init(spcp8x5_init); module_exit(spcp8x5_exit); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not");
gpl-2.0
PureNexusProject/android_kernel_asus_flo
drivers/watchdog/coh901327_wdt.c
4854
13040
/* * coh901327_wdt.c * * Copyright (C) 2008-2009 ST-Ericsson AB * License terms: GNU General Public License (GPL) version 2 * Watchdog driver for the ST-Ericsson AB COH 901 327 IP core * Author: Linus Walleij <linus.walleij@stericsson.com> */ #include <linux/module.h> #include <linux/types.h> #include <linux/watchdog.h> #include <linux/interrupt.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #define DRV_NAME "WDOG COH 901 327" /* * COH 901 327 register definitions */ /* WDOG_FEED Register 32bit (-/W) */ #define U300_WDOG_FR 0x00 #define U300_WDOG_FR_FEED_RESTART_TIMER 0xFEEDU /* WDOG_TIMEOUT Register 32bit (R/W) */ #define U300_WDOG_TR 0x04 #define U300_WDOG_TR_TIMEOUT_MASK 0x7FFFU /* WDOG_DISABLE1 Register 32bit (-/W) */ #define U300_WDOG_D1R 0x08 #define U300_WDOG_D1R_DISABLE1_DISABLE_TIMER 0x2BADU /* WDOG_DISABLE2 Register 32bit (R/W) */ #define U300_WDOG_D2R 0x0C #define U300_WDOG_D2R_DISABLE2_DISABLE_TIMER 0xCAFEU #define U300_WDOG_D2R_DISABLE_STATUS_DISABLED 0xDABEU #define U300_WDOG_D2R_DISABLE_STATUS_ENABLED 0x0000U /* WDOG_STATUS Register 32bit (R/W) */ #define U300_WDOG_SR 0x10 #define U300_WDOG_SR_STATUS_TIMED_OUT 0xCFE8U #define U300_WDOG_SR_STATUS_NORMAL 0x0000U #define U300_WDOG_SR_RESET_STATUS_RESET 0xE8B4U /* WDOG_COUNT Register 32bit (R/-) */ #define U300_WDOG_CR 0x14 #define U300_WDOG_CR_VALID_IND 0x8000U #define U300_WDOG_CR_VALID_STABLE 0x0000U #define U300_WDOG_CR_COUNT_VALUE_MASK 0x7FFFU /* WDOG_JTAGOVR Register 32bit (R/W) */ #define U300_WDOG_JOR 0x18 #define U300_WDOG_JOR_JTAG_MODE_IND 0x0002U #define U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE 0x0001U /* WDOG_RESTART Register 32bit (-/W) */ #define U300_WDOG_RR 0x1C #define U300_WDOG_RR_RESTART_VALUE_RESUME 0xACEDU /* WDOG_IRQ_EVENT Register 32bit (R/W) */ #define U300_WDOG_IER 0x20 #define U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND 0x0001U #define 
U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE 0x0001U /* WDOG_IRQ_MASK Register 32bit (R/W) */ #define U300_WDOG_IMR 0x24 #define U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE 0x0001U /* WDOG_IRQ_FORCE Register 32bit (R/W) */ #define U300_WDOG_IFR 0x28 #define U300_WDOG_IFR_WILL_BARK_IRQ_FORCE_ENABLE 0x0001U /* Default timeout in seconds = 1 minute */ static unsigned int margin = 60; static resource_size_t phybase; static resource_size_t physize; static int irq; static void __iomem *virtbase; static struct device *parent; /* * The watchdog block is of course always clocked, the * clk_enable()/clk_disable() calls are mainly for performing reference * counting higher up in the clock hierarchy. */ static struct clk *clk; /* * Enabling and disabling functions. */ static void coh901327_enable(u16 timeout) { u16 val; unsigned long freq; unsigned long delay_ns; clk_enable(clk); /* Restart timer if it is disabled */ val = readw(virtbase + U300_WDOG_D2R); if (val == U300_WDOG_D2R_DISABLE_STATUS_DISABLED) writew(U300_WDOG_RR_RESTART_VALUE_RESUME, virtbase + U300_WDOG_RR); /* Acknowledge any pending interrupt so it doesn't just fire off */ writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE, virtbase + U300_WDOG_IER); /* * The interrupt is cleared in the 32 kHz clock domain. * Wait 3 32 kHz cycles for it to take effect */ freq = clk_get_rate(clk); delay_ns = DIV_ROUND_UP(1000000000, freq); /* Freq to ns and round up */ delay_ns = 3 * delay_ns; /* Wait 3 cycles */ ndelay(delay_ns); /* Enable the watchdog interrupt */ writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR); /* Activate the watchdog timer */ writew(timeout, virtbase + U300_WDOG_TR); /* Start the watchdog timer */ writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR); /* * Extra read so that this change propagate in the watchdog. 
*/ (void) readw(virtbase + U300_WDOG_CR); val = readw(virtbase + U300_WDOG_D2R); clk_disable(clk); if (val != U300_WDOG_D2R_DISABLE_STATUS_ENABLED) dev_err(parent, "%s(): watchdog not enabled! D2R value %04x\n", __func__, val); } static void coh901327_disable(void) { u16 val; clk_enable(clk); /* Disable the watchdog interrupt if it is active */ writew(0x0000U, virtbase + U300_WDOG_IMR); /* If the watchdog is currently enabled, attempt to disable it */ val = readw(virtbase + U300_WDOG_D2R); if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED) { writew(U300_WDOG_D1R_DISABLE1_DISABLE_TIMER, virtbase + U300_WDOG_D1R); writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER, virtbase + U300_WDOG_D2R); /* Write this twice (else problems occur) */ writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER, virtbase + U300_WDOG_D2R); } val = readw(virtbase + U300_WDOG_D2R); clk_disable(clk); if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED) dev_err(parent, "%s(): watchdog not disabled! D2R value %04x\n", __func__, val); } static int coh901327_start(struct watchdog_device *wdt_dev) { coh901327_enable(wdt_dev->timeout * 100); return 0; } static int coh901327_stop(struct watchdog_device *wdt_dev) { coh901327_disable(); return 0; } static int coh901327_ping(struct watchdog_device *wdd) { clk_enable(clk); /* Feed the watchdog */ writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR); clk_disable(clk); return 0; } static int coh901327_settimeout(struct watchdog_device *wdt_dev, unsigned int time) { wdt_dev->timeout = time; clk_enable(clk); /* Set new timeout value */ writew(time * 100, virtbase + U300_WDOG_TR); /* Feed the dog */ writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR); clk_disable(clk); return 0; } static unsigned int coh901327_gettimeleft(struct watchdog_device *wdt_dev) { u16 val; clk_enable(clk); /* Read repeatedly until the value is stable! 
*/ val = readw(virtbase + U300_WDOG_CR); while (val & U300_WDOG_CR_VALID_IND) val = readw(virtbase + U300_WDOG_CR); val &= U300_WDOG_CR_COUNT_VALUE_MASK; clk_disable(clk); if (val != 0) val /= 100; return val; } /* * This interrupt occurs 10 ms before the watchdog WILL bark. */ static irqreturn_t coh901327_interrupt(int irq, void *data) { u16 val; /* * Ack IRQ? If this occurs we're FUBAR anyway, so * just acknowledge, disable the interrupt and await the imminent end. * If you at some point need a host of callbacks to be called * when the system is about to watchdog-reset, add them here! * * NOTE: on future versions of this IP-block, it will be possible * to prevent a watchdog reset by feeding the watchdog at this * point. */ clk_enable(clk); val = readw(virtbase + U300_WDOG_IER); if (val == U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND) writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE, virtbase + U300_WDOG_IER); writew(0x0000U, virtbase + U300_WDOG_IMR); clk_disable(clk); dev_crit(parent, "watchdog is barking!\n"); return IRQ_HANDLED; } static const struct watchdog_info coh901327_ident = { .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = DRV_NAME, }; static struct watchdog_ops coh901327_ops = { .owner = THIS_MODULE, .start = coh901327_start, .stop = coh901327_stop, .ping = coh901327_ping, .set_timeout = coh901327_settimeout, .get_timeleft = coh901327_gettimeleft, }; static struct watchdog_device coh901327_wdt = { .info = &coh901327_ident, .ops = &coh901327_ops, /* * Max timeout is 327 since the 10ms * timeout register is max * 0x7FFF = 327670ms ~= 327s. 
*/ .min_timeout = 0, .max_timeout = 327, }; static int __exit coh901327_remove(struct platform_device *pdev) { watchdog_unregister_device(&coh901327_wdt); coh901327_disable(); free_irq(irq, pdev); clk_put(clk); iounmap(virtbase); release_mem_region(phybase, physize); return 0; } static int __init coh901327_probe(struct platform_device *pdev) { int ret; u16 val; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENOENT; parent = &pdev->dev; physize = resource_size(res); phybase = res->start; if (request_mem_region(phybase, physize, DRV_NAME) == NULL) { ret = -EBUSY; goto out; } virtbase = ioremap(phybase, physize); if (!virtbase) { ret = -ENOMEM; goto out_no_remap; } clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { ret = PTR_ERR(clk); dev_err(&pdev->dev, "could not get clock\n"); goto out_no_clk; } ret = clk_enable(clk); if (ret) { dev_err(&pdev->dev, "could not enable clock\n"); goto out_no_clk_enable; } val = readw(virtbase + U300_WDOG_SR); switch (val) { case U300_WDOG_SR_STATUS_TIMED_OUT: dev_info(&pdev->dev, "watchdog timed out since last chip reset!\n"); coh901327_wdt.bootstatus |= WDIOF_CARDRESET; /* Status will be cleared below */ break; case U300_WDOG_SR_STATUS_NORMAL: dev_info(&pdev->dev, "in normal status, no timeouts have occurred.\n"); break; default: dev_info(&pdev->dev, "contains an illegal status code (%08x)\n", val); break; } val = readw(virtbase + U300_WDOG_D2R); switch (val) { case U300_WDOG_D2R_DISABLE_STATUS_DISABLED: dev_info(&pdev->dev, "currently disabled.\n"); break; case U300_WDOG_D2R_DISABLE_STATUS_ENABLED: dev_info(&pdev->dev, "currently enabled! 
(disabling it now)\n"); coh901327_disable(); break; default: dev_err(&pdev->dev, "contains an illegal enable/disable code (%08x)\n", val); break; } /* Reset the watchdog */ writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR); irq = platform_get_irq(pdev, 0); if (request_irq(irq, coh901327_interrupt, 0, DRV_NAME " Bark", pdev)) { ret = -EIO; goto out_no_irq; } clk_disable(clk); if (margin < 1 || margin > 327) margin = 60; coh901327_wdt.timeout = margin; ret = watchdog_register_device(&coh901327_wdt); if (ret == 0) dev_info(&pdev->dev, "initialized. timer margin=%d sec\n", margin); else goto out_no_wdog; return 0; out_no_wdog: free_irq(irq, pdev); out_no_irq: clk_disable(clk); out_no_clk_enable: clk_put(clk); out_no_clk: iounmap(virtbase); out_no_remap: release_mem_region(phybase, SZ_4K); out: return ret; } #ifdef CONFIG_PM static u16 wdogenablestore; static u16 irqmaskstore; static int coh901327_suspend(struct platform_device *pdev, pm_message_t state) { irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U; wdogenablestore = readw(virtbase + U300_WDOG_D2R); /* If watchdog is on, disable it here and now */ if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED) coh901327_disable(); return 0; } static int coh901327_resume(struct platform_device *pdev) { /* Restore the watchdog interrupt */ writew(irqmaskstore, virtbase + U300_WDOG_IMR); if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED) { /* Restart the watchdog timer */ writew(U300_WDOG_RR_RESTART_VALUE_RESUME, virtbase + U300_WDOG_RR); writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR); } return 0; } #else #define coh901327_suspend NULL #define coh901327_resume NULL #endif /* * Mistreating the watchdog is the only way to perform a software reset of the * system on EMP platforms. So we implement this and export a symbol for it. 
*/ void coh901327_watchdog_reset(void) { /* Enable even if on JTAG too */ writew(U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE, virtbase + U300_WDOG_JOR); /* * Timeout = 5s, we have to wait for the watchdog reset to * actually take place: the watchdog will be reloaded with the * default value immediately, so we HAVE to reboot and get back * into the kernel in 30s, or the device will reboot again! * The boot loader will typically deactivate the watchdog, so we * need time enough for the boot loader to get to the point of * deactivating the watchdog before it is shut down by it. * * NOTE: on future versions of the watchdog, this restriction is * gone: the watchdog will be reloaded with a default value (1 min) * instead of last value, and you can conveniently set the watchdog * timeout to 10ms (value = 1) without any problems. */ coh901327_enable(500); /* Return and await doom */ } static struct platform_driver coh901327_driver = { .driver = { .owner = THIS_MODULE, .name = "coh901327_wdog", }, .remove = __exit_p(coh901327_remove), .suspend = coh901327_suspend, .resume = coh901327_resume, }; static int __init coh901327_init(void) { return platform_driver_probe(&coh901327_driver, coh901327_probe); } module_init(coh901327_init); static void __exit coh901327_exit(void) { platform_driver_unregister(&coh901327_driver); } module_exit(coh901327_exit); MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); MODULE_DESCRIPTION("COH 901 327 Watchdog"); module_param(margin, uint, 0); MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:coh901327-watchdog");
gpl-2.0
Solitarily/jolla-kernel-Stock_KK
drivers/s390/char/tape_core.c
5110
35668
/* * drivers/s390/char/tape_core.c * basic function of the tape device driver * * S390 and zSeries version * Copyright IBM Corp. 2001, 2009 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Stefan Bader <shbader@de.ibm.com> */ #define KMSG_COMPONENT "tape" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/init.h> // for kernel parameters #include <linux/kmod.h> // for requesting modules #include <linux/spinlock.h> // for locks #include <linux/vmalloc.h> #include <linux/list.h> #include <linux/slab.h> #include <asm/types.h> // for variable types #define TAPE_DBF_AREA tape_core_dbf #include "tape.h" #include "tape_std.h" #define LONG_BUSY_TIMEOUT 180 /* seconds */ static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); static void tape_delayed_next_request(struct work_struct *); static void tape_long_busy_timeout(unsigned long data); /* * One list to contain all tape devices of all disciplines, so * we can assign the devices to minor numbers of the same major * The list is protected by the rwlock */ static LIST_HEAD(tape_device_list); static DEFINE_RWLOCK(tape_device_lock); /* * Pointer to debug area. */ debug_info_t *TAPE_DBF_AREA = NULL; EXPORT_SYMBOL(TAPE_DBF_AREA); /* * Printable strings for tape enumerations. 
*/ const char *tape_state_verbose[TS_SIZE] = { [TS_UNUSED] = "UNUSED", [TS_IN_USE] = "IN_USE", [TS_BLKUSE] = "BLKUSE", [TS_INIT] = "INIT ", [TS_NOT_OPER] = "NOT_OP" }; const char *tape_op_verbose[TO_SIZE] = { [TO_BLOCK] = "BLK", [TO_BSB] = "BSB", [TO_BSF] = "BSF", [TO_DSE] = "DSE", [TO_FSB] = "FSB", [TO_FSF] = "FSF", [TO_LBL] = "LBL", [TO_NOP] = "NOP", [TO_RBA] = "RBA", [TO_RBI] = "RBI", [TO_RFO] = "RFO", [TO_REW] = "REW", [TO_RUN] = "RUN", [TO_WRI] = "WRI", [TO_WTM] = "WTM", [TO_MSEN] = "MSN", [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", [TO_READ_ATTMSG] = "RAT", [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON", [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS", [TO_KEKL_QUERY] = "KLQ",[TO_RDC] = "RDC", }; static int devid_to_int(struct ccw_dev_id *dev_id) { return dev_id->devno + (dev_id->ssid << 16); } /* * Some channel attached tape specific attributes. * * FIXME: In the future the first_minor and blocksize attribute should be * replaced by a link to the cdev tree. */ static ssize_t tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tape_device *tdev; tdev = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state); } static DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL); static ssize_t tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tape_device *tdev; tdev = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor); } static DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL); static ssize_t tape_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tape_device *tdev; tdev = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ? 
"OFFLINE" : tape_state_verbose[tdev->tape_state]); } static DEVICE_ATTR(state, 0444, tape_state_show, NULL); static ssize_t tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tape_device *tdev; ssize_t rc; tdev = dev_get_drvdata(dev); if (tdev->first_minor < 0) return scnprintf(buf, PAGE_SIZE, "N/A\n"); spin_lock_irq(get_ccwdev_lock(tdev->cdev)); if (list_empty(&tdev->req_queue)) rc = scnprintf(buf, PAGE_SIZE, "---\n"); else { struct tape_request *req; req = list_entry(tdev->req_queue.next, struct tape_request, list); rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]); } spin_unlock_irq(get_ccwdev_lock(tdev->cdev)); return rc; } static DEVICE_ATTR(operation, 0444, tape_operation_show, NULL); static ssize_t tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tape_device *tdev; tdev = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size); } static DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL); static struct attribute *tape_attrs[] = { &dev_attr_medium_state.attr, &dev_attr_first_minor.attr, &dev_attr_state.attr, &dev_attr_operation.attr, &dev_attr_blocksize.attr, NULL }; static struct attribute_group tape_attr_group = { .attrs = tape_attrs, }; /* * Tape state functions */ void tape_state_set(struct tape_device *device, enum tape_state newstate) { const char *str; if (device->tape_state == TS_NOT_OPER) { DBF_EVENT(3, "ts_set err: not oper\n"); return; } DBF_EVENT(4, "ts. 
dev: %x\n", device->first_minor); DBF_EVENT(4, "old ts:\t\n"); if (device->tape_state < TS_SIZE && device->tape_state >=0 ) str = tape_state_verbose[device->tape_state]; else str = "UNKNOWN TS"; DBF_EVENT(4, "%s\n", str); DBF_EVENT(4, "new ts:\t\n"); if (newstate < TS_SIZE && newstate >= 0) str = tape_state_verbose[newstate]; else str = "UNKNOWN TS"; DBF_EVENT(4, "%s\n", str); device->tape_state = newstate; wake_up(&device->state_change_wq); } struct tape_med_state_work_data { struct tape_device *device; enum tape_medium_state state; struct work_struct work; }; static void tape_med_state_work_handler(struct work_struct *work) { static char env_state_loaded[] = "MEDIUM_STATE=LOADED"; static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED"; struct tape_med_state_work_data *p = container_of(work, struct tape_med_state_work_data, work); struct tape_device *device = p->device; char *envp[] = { NULL, NULL }; switch (p->state) { case MS_UNLOADED: pr_info("%s: The tape cartridge has been successfully " "unloaded\n", dev_name(&device->cdev->dev)); envp[0] = env_state_unloaded; kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); break; case MS_LOADED: pr_info("%s: A tape cartridge has been mounted\n", dev_name(&device->cdev->dev)); envp[0] = env_state_loaded; kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); break; default: break; } tape_put_device(device); kfree(p); } static void tape_med_state_work(struct tape_device *device, enum tape_medium_state state) { struct tape_med_state_work_data *p; p = kzalloc(sizeof(*p), GFP_ATOMIC); if (p) { INIT_WORK(&p->work, tape_med_state_work_handler); p->device = tape_get_device(device); p->state = state; schedule_work(&p->work); } } void tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) { enum tape_medium_state oldstate; oldstate = device->medium_state; if (oldstate == newstate) return; device->medium_state = newstate; switch(newstate){ case MS_UNLOADED: 
device->tape_generic_status |= GMT_DR_OPEN(~0); if (oldstate == MS_LOADED) tape_med_state_work(device, MS_UNLOADED); break; case MS_LOADED: device->tape_generic_status &= ~GMT_DR_OPEN(~0); if (oldstate == MS_UNLOADED) tape_med_state_work(device, MS_LOADED); break; default: break; } wake_up(&device->state_change_wq); } /* * Stop running ccw. Has to be called with the device lock held. */ static int __tape_cancel_io(struct tape_device *device, struct tape_request *request) { int retries; int rc; /* Check if interrupt has already been processed */ if (request->callback == NULL) return 0; rc = 0; for (retries = 0; retries < 5; retries++) { rc = ccw_device_clear(device->cdev, (long) request); switch (rc) { case 0: request->status = TAPE_REQUEST_DONE; return 0; case -EBUSY: request->status = TAPE_REQUEST_CANCEL; schedule_delayed_work(&device->tape_dnr, 0); return 0; case -ENODEV: DBF_EXCEPTION(2, "device gone, retry\n"); break; case -EIO: DBF_EXCEPTION(2, "I/O error, retry\n"); break; default: BUG(); } } return rc; } /* * Add device into the sorted list, giving it the first * available minor number. */ static int tape_assign_minor(struct tape_device *device) { struct tape_device *tmp; int minor; minor = 0; write_lock(&tape_device_lock); list_for_each_entry(tmp, &tape_device_list, node) { if (minor < tmp->first_minor) break; minor += TAPE_MINORS_PER_DEV; } if (minor >= 256) { write_unlock(&tape_device_lock); return -ENODEV; } device->first_minor = minor; list_add_tail(&device->node, &tmp->node); write_unlock(&tape_device_lock); return 0; } /* remove device from the list */ static void tape_remove_minor(struct tape_device *device) { write_lock(&tape_device_lock); list_del_init(&device->node); device->first_minor = -1; write_unlock(&tape_device_lock); } /* * Set a device online. * * This function is called by the common I/O layer to move a device from the * detected but offline into the online state. * If we return an error (RC < 0) the device remains in the offline state. 
This * can happen if the device is assigned somewhere else, for example. */ int tape_generic_online(struct tape_device *device, struct tape_discipline *discipline) { int rc; DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline); if (device->tape_state != TS_INIT) { DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state); return -EINVAL; } init_timer(&device->lb_timeout); device->lb_timeout.function = tape_long_busy_timeout; /* Let the discipline have a go at the device. */ device->discipline = discipline; if (!try_module_get(discipline->owner)) { return -EINVAL; } rc = discipline->setup_device(device); if (rc) goto out; rc = tape_assign_minor(device); if (rc) goto out_discipline; rc = tapechar_setup_device(device); if (rc) goto out_minor; rc = tapeblock_setup_device(device); if (rc) goto out_char; tape_state_set(device, TS_UNUSED); DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id); return 0; out_char: tapechar_cleanup_device(device); out_minor: tape_remove_minor(device); out_discipline: device->discipline->cleanup_device(device); device->discipline = NULL; out: module_put(discipline->owner); return rc; } static void tape_cleanup_device(struct tape_device *device) { tapeblock_cleanup_device(device); tapechar_cleanup_device(device); device->discipline->cleanup_device(device); module_put(device->discipline->owner); tape_remove_minor(device); tape_med_state_set(device, MS_UNKNOWN); } /* * Suspend device. * * Called by the common I/O layer if the drive should be suspended on user * request. We refuse to suspend if the device is loaded or in use for the * following reason: * While the Linux guest is suspended, it might be logged off which causes * devices to be detached. Tape devices are automatically rewound and unloaded * during DETACH processing (unless the tape device was attached with the * NOASSIGN or MULTIUSER option). 
After rewind/unload, there is no way to * resume the original state of the tape device, since we would need to * manually re-load the cartridge which was active at suspend time. */ int tape_generic_pm_suspend(struct ccw_device *cdev) { struct tape_device *device; device = dev_get_drvdata(&cdev->dev); if (!device) { return -ENODEV; } DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n", device->cdev_id, device); if (device->medium_state != MS_UNLOADED) { pr_err("A cartridge is loaded in tape device %s, " "refusing to suspend\n", dev_name(&cdev->dev)); return -EBUSY; } spin_lock_irq(get_ccwdev_lock(device->cdev)); switch (device->tape_state) { case TS_INIT: case TS_NOT_OPER: case TS_UNUSED: spin_unlock_irq(get_ccwdev_lock(device->cdev)); break; default: pr_err("Tape device %s is busy, refusing to " "suspend\n", dev_name(&cdev->dev)); spin_unlock_irq(get_ccwdev_lock(device->cdev)); return -EBUSY; } DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id); return 0; } /* * Set device offline. * * Called by the common I/O layer if the drive should set offline on user * request. We may prevent this by returning an error. * Manual offline is only allowed while the drive is not in use. */ int tape_generic_offline(struct ccw_device *cdev) { struct tape_device *device; device = dev_get_drvdata(&cdev->dev); if (!device) { return -ENODEV; } DBF_LH(3, "(%08x): tape_generic_offline(%p)\n", device->cdev_id, device); spin_lock_irq(get_ccwdev_lock(device->cdev)); switch (device->tape_state) { case TS_INIT: case TS_NOT_OPER: spin_unlock_irq(get_ccwdev_lock(device->cdev)); break; case TS_UNUSED: tape_state_set(device, TS_INIT); spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_cleanup_device(device); break; default: DBF_EVENT(3, "(%08x): Set offline failed " "- drive in use.\n", device->cdev_id); spin_unlock_irq(get_ccwdev_lock(device->cdev)); return -EBUSY; } DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id); return 0; } /* * Allocate memory for a new device structure. 
*/ static struct tape_device * tape_alloc_device(void) { struct tape_device *device; device = kzalloc(sizeof(struct tape_device), GFP_KERNEL); if (device == NULL) { DBF_EXCEPTION(2, "ti:no mem\n"); return ERR_PTR(-ENOMEM); } device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA); if (device->modeset_byte == NULL) { DBF_EXCEPTION(2, "ti:no mem\n"); kfree(device); return ERR_PTR(-ENOMEM); } mutex_init(&device->mutex); INIT_LIST_HEAD(&device->req_queue); INIT_LIST_HEAD(&device->node); init_waitqueue_head(&device->state_change_wq); init_waitqueue_head(&device->wait_queue); device->tape_state = TS_INIT; device->medium_state = MS_UNKNOWN; *device->modeset_byte = 0; device->first_minor = -1; atomic_set(&device->ref_count, 1); INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request); return device; } /* * Get a reference to an existing device structure. This will automatically * increment the reference count. */ struct tape_device * tape_get_device(struct tape_device *device) { int count; count = atomic_inc_return(&device->ref_count); DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count); return device; } /* * Decrease the reference counter of a devices structure. If the * reference counter reaches zero free the device structure. * The function returns a NULL pointer to be used by the caller * for clearing reference pointers. */ void tape_put_device(struct tape_device *device) { int count; count = atomic_dec_return(&device->ref_count); DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count); BUG_ON(count < 0); if (count == 0) { kfree(device->modeset_byte); kfree(device); } } /* * Find tape device by a device index. 
*/ struct tape_device * tape_find_device(int devindex) { struct tape_device *device, *tmp; device = ERR_PTR(-ENODEV); read_lock(&tape_device_lock); list_for_each_entry(tmp, &tape_device_list, node) { if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) { device = tape_get_device(tmp); break; } } read_unlock(&tape_device_lock); return device; } /* * Driverfs tape probe function. */ int tape_generic_probe(struct ccw_device *cdev) { struct tape_device *device; int ret; struct ccw_dev_id dev_id; device = tape_alloc_device(); if (IS_ERR(device)) return -ENODEV; ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH); ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); if (ret) { tape_put_device(device); return ret; } dev_set_drvdata(&cdev->dev, device); cdev->handler = __tape_do_irq; device->cdev = cdev; ccw_device_get_id(cdev, &dev_id); device->cdev_id = devid_to_int(&dev_id); return ret; } static void __tape_discard_requests(struct tape_device *device) { struct tape_request * request; struct list_head * l, *n; list_for_each_safe(l, n, &device->req_queue) { request = list_entry(l, struct tape_request, list); if (request->status == TAPE_REQUEST_IN_IO) request->status = TAPE_REQUEST_DONE; list_del(&request->list); /* Decrease ref_count for removed request. */ request->device = NULL; tape_put_device(device); request->rc = -EIO; if (request->callback != NULL) request->callback(request, request->callback_data); } } /* * Driverfs tape remove function. * * This function is called whenever the common I/O layer detects the device * gone. This can happen at any time and we cannot refuse. 
*/ void tape_generic_remove(struct ccw_device *cdev) { struct tape_device * device; device = dev_get_drvdata(&cdev->dev); if (!device) { return; } DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev); spin_lock_irq(get_ccwdev_lock(device->cdev)); switch (device->tape_state) { case TS_INIT: tape_state_set(device, TS_NOT_OPER); case TS_NOT_OPER: /* * Nothing to do. */ spin_unlock_irq(get_ccwdev_lock(device->cdev)); break; case TS_UNUSED: /* * Need only to release the device. */ tape_state_set(device, TS_NOT_OPER); spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_cleanup_device(device); break; default: /* * There may be requests on the queue. We will not get * an interrupt for a request that was running. So we * just post them all as I/O errors. */ DBF_EVENT(3, "(%08x): Drive in use vanished!\n", device->cdev_id); pr_warning("%s: A tape unit was detached while in " "use\n", dev_name(&device->cdev->dev)); tape_state_set(device, TS_NOT_OPER); __tape_discard_requests(device); spin_unlock_irq(get_ccwdev_lock(device->cdev)); tape_cleanup_device(device); } device = dev_get_drvdata(&cdev->dev); if (device) { sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group); dev_set_drvdata(&cdev->dev, NULL); tape_put_device(device); } } /* * Allocate a new tape ccw request */ struct tape_request * tape_alloc_request(int cplength, int datasize) { struct tape_request *request; BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE); DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize); request = kzalloc(sizeof(struct tape_request), GFP_KERNEL); if (request == NULL) { DBF_EXCEPTION(1, "cqra nomem\n"); return ERR_PTR(-ENOMEM); } /* allocate channel program */ if (cplength > 0) { request->cpaddr = kcalloc(cplength, sizeof(struct ccw1), GFP_ATOMIC | GFP_DMA); if (request->cpaddr == NULL) { DBF_EXCEPTION(1, "cqra nomem\n"); kfree(request); return ERR_PTR(-ENOMEM); } } /* alloc small kernel buffer */ if (datasize > 0) { request->cpdata = 
kzalloc(datasize, GFP_KERNEL | GFP_DMA); if (request->cpdata == NULL) { DBF_EXCEPTION(1, "cqra nomem\n"); kfree(request->cpaddr); kfree(request); return ERR_PTR(-ENOMEM); } } DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr, request->cpdata); return request; } /* * Free tape ccw request */ void tape_free_request (struct tape_request * request) { DBF_LH(6, "Free request %p\n", request); if (request->device) tape_put_device(request->device); kfree(request->cpdata); kfree(request->cpaddr); kfree(request); } static int __tape_start_io(struct tape_device *device, struct tape_request *request) { int rc; #ifdef CONFIG_S390_TAPE_BLOCK if (request->op == TO_BLOCK) device->discipline->check_locate(device, request); #endif rc = ccw_device_start( device->cdev, request->cpaddr, (unsigned long) request, 0x00, request->options ); if (rc == 0) { request->status = TAPE_REQUEST_IN_IO; } else if (rc == -EBUSY) { /* The common I/O subsystem is currently busy. Retry later. */ request->status = TAPE_REQUEST_QUEUED; schedule_delayed_work(&device->tape_dnr, 0); rc = 0; } else { /* Start failed. Remove request and indicate failure. */ DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc); } return rc; } static void __tape_start_next_request(struct tape_device *device) { struct list_head *l, *n; struct tape_request *request; int rc; DBF_LH(6, "__tape_start_next_request(%p)\n", device); /* * Try to start each request on request queue until one is * started successful. */ list_for_each_safe(l, n, &device->req_queue) { request = list_entry(l, struct tape_request, list); /* * Avoid race condition if bottom-half was triggered more than * once. */ if (request->status == TAPE_REQUEST_IN_IO) return; /* * Request has already been stopped. We have to wait until * the request is removed from the queue in the interrupt * handling. */ if (request->status == TAPE_REQUEST_DONE) return; /* * We wanted to cancel the request but the common I/O layer * was busy at that time. 
This can only happen if this * function is called by delayed_next_request. * Otherwise we start the next request on the queue. */ if (request->status == TAPE_REQUEST_CANCEL) { rc = __tape_cancel_io(device, request); } else { rc = __tape_start_io(device, request); } if (rc == 0) return; /* Set ending status. */ request->rc = rc; request->status = TAPE_REQUEST_DONE; /* Remove from request queue. */ list_del(&request->list); /* Do callback. */ if (request->callback != NULL) request->callback(request, request->callback_data); } } static void tape_delayed_next_request(struct work_struct *work) { struct tape_device *device = container_of(work, struct tape_device, tape_dnr.work); DBF_LH(6, "tape_delayed_next_request(%p)\n", device); spin_lock_irq(get_ccwdev_lock(device->cdev)); __tape_start_next_request(device); spin_unlock_irq(get_ccwdev_lock(device->cdev)); } static void tape_long_busy_timeout(unsigned long data) { struct tape_request *request; struct tape_device *device; device = (struct tape_device *) data; spin_lock_irq(get_ccwdev_lock(device->cdev)); request = list_entry(device->req_queue.next, struct tape_request, list); BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY); DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); __tape_start_next_request(device); device->lb_timeout.data = 0UL; tape_put_device(device); spin_unlock_irq(get_ccwdev_lock(device->cdev)); } static void __tape_end_request( struct tape_device * device, struct tape_request * request, int rc) { DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc); if (request) { request->rc = rc; request->status = TAPE_REQUEST_DONE; /* Remove from request queue. */ list_del(&request->list); /* Do callback. */ if (request->callback != NULL) request->callback(request, request->callback_data); } /* Start next request. 
*/ if (!list_empty(&device->req_queue)) __tape_start_next_request(device); } /* * Write sense data to dbf */ void tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, struct irb *irb) { unsigned int *sptr; const char* op; if (request != NULL) op = tape_op_verbose[request->op]; else op = "---"; DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", irb->scsw.cmd.dstat, irb->scsw.cmd.cstat); DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); sptr = (unsigned int *) irb->ecw; DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]); DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]); DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]); } /* * I/O helper function. Adds the request to the request queue * and starts it if the tape is idle. Has to be called with * the device lock held. */ static int __tape_start_request(struct tape_device *device, struct tape_request *request) { int rc; switch (request->op) { case TO_MSEN: case TO_ASSIGN: case TO_UNASSIGN: case TO_READ_ATTMSG: case TO_RDC: if (device->tape_state == TS_INIT) break; if (device->tape_state == TS_UNUSED) break; default: if (device->tape_state == TS_BLKUSE) break; if (device->tape_state != TS_IN_USE) return -ENODEV; } /* Increase use count of device for the added request. */ request->device = tape_get_device(device); if (list_empty(&device->req_queue)) { /* No other requests are on the queue. Start this one. */ rc = __tape_start_io(device, request); if (rc) return rc; DBF_LH(5, "Request %p added for execution.\n", request); list_add(&request->list, &device->req_queue); } else { DBF_LH(5, "Request %p add to queue.\n", request); request->status = TAPE_REQUEST_QUEUED; list_add_tail(&request->list, &device->req_queue); } return 0; } /* * Add the request to the request queue, try to start it if the * tape is idle. Return without waiting for end of i/o. 
*/ int tape_do_io_async(struct tape_device *device, struct tape_request *request) { int rc; DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request); spin_lock_irq(get_ccwdev_lock(device->cdev)); /* Add request to request queue and try to start it. */ rc = __tape_start_request(device, request); spin_unlock_irq(get_ccwdev_lock(device->cdev)); return rc; } /* * tape_do_io/__tape_wake_up * Add the request to the request queue, try to start it if the * tape is idle and wait uninterruptible for its completion. */ static void __tape_wake_up(struct tape_request *request, void *data) { request->callback = NULL; wake_up((wait_queue_head_t *) data); } int tape_do_io(struct tape_device *device, struct tape_request *request) { int rc; spin_lock_irq(get_ccwdev_lock(device->cdev)); /* Setup callback */ request->callback = __tape_wake_up; request->callback_data = &device->wait_queue; /* Add request to request queue and try to start it. */ rc = __tape_start_request(device, request); spin_unlock_irq(get_ccwdev_lock(device->cdev)); if (rc) return rc; /* Request added to the queue. Wait for its completion. */ wait_event(device->wait_queue, (request->callback == NULL)); /* Get rc from request */ return request->rc; } /* * tape_do_io_interruptible/__tape_wake_up_interruptible * Add the request to the request queue, try to start it if the * tape is idle and wait uninterruptible for its completion. */ static void __tape_wake_up_interruptible(struct tape_request *request, void *data) { request->callback = NULL; wake_up_interruptible((wait_queue_head_t *) data); } int tape_do_io_interruptible(struct tape_device *device, struct tape_request *request) { int rc; spin_lock_irq(get_ccwdev_lock(device->cdev)); /* Setup callback */ request->callback = __tape_wake_up_interruptible; request->callback_data = &device->wait_queue; rc = __tape_start_request(device, request); spin_unlock_irq(get_ccwdev_lock(device->cdev)); if (rc) return rc; /* Request added to the queue. Wait for its completion. 
*/ rc = wait_event_interruptible(device->wait_queue, (request->callback == NULL)); if (rc != -ERESTARTSYS) /* Request finished normally. */ return request->rc; /* Interrupted by a signal. We have to stop the current request. */ spin_lock_irq(get_ccwdev_lock(device->cdev)); rc = __tape_cancel_io(device, request); spin_unlock_irq(get_ccwdev_lock(device->cdev)); if (rc == 0) { /* Wait for the interrupt that acknowledges the halt. */ do { rc = wait_event_interruptible( device->wait_queue, (request->callback == NULL) ); } while (rc == -ERESTARTSYS); DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id); rc = -ERESTARTSYS; } return rc; } /* * Stop running ccw. */ int tape_cancel_io(struct tape_device *device, struct tape_request *request) { int rc; spin_lock_irq(get_ccwdev_lock(device->cdev)); rc = __tape_cancel_io(device, request); spin_unlock_irq(get_ccwdev_lock(device->cdev)); return rc; } /* * Tape interrupt routine, called from the ccw_device layer */ static void __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { struct tape_device *device; struct tape_request *request; int rc; device = dev_get_drvdata(&cdev->dev); if (device == NULL) { return; } request = (struct tape_request *) intparm; DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request); /* On special conditions irb is an error pointer */ if (IS_ERR(irb)) { /* FIXME: What to do with the request? */ switch (PTR_ERR(irb)) { case -ETIMEDOUT: DBF_LH(1, "(%08x): Request timed out\n", device->cdev_id); case -EIO: __tape_end_request(device, request, -EIO); break; default: DBF_LH(1, "(%08x): Unexpected i/o error %li\n", device->cdev_id, PTR_ERR(irb)); } return; } /* * If the condition code is not zero and the start function bit is * still set, this is an deferred error and the last start I/O did * not succeed. At this point the condition that caused the deferred * error might still apply. So we just schedule the request to be * started later. 
*/ if (irb->scsw.cmd.cc != 0 && (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && (request->status == TAPE_REQUEST_IN_IO)) { DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl); request->status = TAPE_REQUEST_QUEUED; schedule_delayed_work(&device->tape_dnr, HZ); return; } /* May be an unsolicited irq */ if(request != NULL) request->rescnt = irb->scsw.cmd.count; else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) && !list_empty(&device->req_queue)) { /* Not Ready to Ready after long busy ? */ struct tape_request *req; req = list_entry(device->req_queue.next, struct tape_request, list); if (req->status == TAPE_REQUEST_LONG_BUSY) { DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); if (del_timer(&device->lb_timeout)) { device->lb_timeout.data = 0UL; tape_put_device(device); __tape_start_next_request(device); } return; } } if (irb->scsw.cmd.dstat != 0x0c) { /* Set the 'ONLINE' flag depending on sense byte 1 */ if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) device->tape_generic_status |= GMT_ONLINE(~0); else device->tape_generic_status &= ~GMT_ONLINE(~0); /* * Any request that does not come back with channel end * and device end is unusual. Log the sense data. */ DBF_EVENT(3,"-- Tape Interrupthandler --\n"); tape_dump_sense_dbf(device, request, irb); } else { /* Upon normal completion the device _is_ online */ device->tape_generic_status |= GMT_ONLINE(~0); } if (device->tape_state == TS_NOT_OPER) { DBF_EVENT(6, "tape:device is not operational\n"); return; } /* * Request that were canceled still come back with an interrupt. * To detect these request the state will be set to TAPE_REQUEST_DONE. */ if(request != NULL && request->status == TAPE_REQUEST_DONE) { __tape_end_request(device, request, -EIO); return; } rc = device->discipline->irq(device, request, irb); /* * rc < 0 : request finished unsuccessfully. * rc == TAPE_IO_SUCCESS: request finished successfully. 
* rc == TAPE_IO_PENDING: request is still running. Ignore rc. * rc == TAPE_IO_RETRY: request finished but needs another go. * rc == TAPE_IO_STOP: request needs to get terminated. */ switch (rc) { case TAPE_IO_SUCCESS: /* Upon normal completion the device _is_ online */ device->tape_generic_status |= GMT_ONLINE(~0); __tape_end_request(device, request, rc); break; case TAPE_IO_PENDING: break; case TAPE_IO_LONG_BUSY: device->lb_timeout.data = (unsigned long) tape_get_device(device); device->lb_timeout.expires = jiffies + LONG_BUSY_TIMEOUT * HZ; DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id); add_timer(&device->lb_timeout); request->status = TAPE_REQUEST_LONG_BUSY; break; case TAPE_IO_RETRY: rc = __tape_start_io(device, request); if (rc) __tape_end_request(device, request, rc); break; case TAPE_IO_STOP: rc = __tape_cancel_io(device, request); if (rc) __tape_end_request(device, request, rc); break; default: if (rc > 0) { DBF_EVENT(6, "xunknownrc\n"); __tape_end_request(device, request, -EIO); } else { __tape_end_request(device, request, rc); } break; } } /* * Tape device open function used by tape_char & tape_block frontends. */ int tape_open(struct tape_device *device) { int rc; spin_lock_irq(get_ccwdev_lock(device->cdev)); if (device->tape_state == TS_NOT_OPER) { DBF_EVENT(6, "TAPE:nodev\n"); rc = -ENODEV; } else if (device->tape_state == TS_IN_USE) { DBF_EVENT(6, "TAPE:dbusy\n"); rc = -EBUSY; } else if (device->tape_state == TS_BLKUSE) { DBF_EVENT(6, "TAPE:dbusy\n"); rc = -EBUSY; } else if (device->discipline != NULL && !try_module_get(device->discipline->owner)) { DBF_EVENT(6, "TAPE:nodisc\n"); rc = -ENODEV; } else { tape_state_set(device, TS_IN_USE); rc = 0; } spin_unlock_irq(get_ccwdev_lock(device->cdev)); return rc; } /* * Tape device release function used by tape_char & tape_block frontends. 
*/ int tape_release(struct tape_device *device) { spin_lock_irq(get_ccwdev_lock(device->cdev)); if (device->tape_state == TS_IN_USE) tape_state_set(device, TS_UNUSED); module_put(device->discipline->owner); spin_unlock_irq(get_ccwdev_lock(device->cdev)); return 0; } /* * Execute a magnetic tape command a number of times. */ int tape_mtop(struct tape_device *device, int mt_op, int mt_count) { tape_mtop_fn fn; int rc; DBF_EVENT(6, "TAPE:mtio\n"); DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op); DBF_EVENT(6, "TAPE:arg: %x\n", mt_count); if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS) return -EINVAL; fn = device->discipline->mtop_array[mt_op]; if (fn == NULL) return -EINVAL; /* We assume that the backends can handle count up to 500. */ if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF || mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) { rc = 0; for (; mt_count > 500; mt_count -= 500) if ((rc = fn(device, 500)) != 0) break; if (rc == 0) rc = fn(device, mt_count); } else rc = fn(device, mt_count); return rc; } /* * Tape init function. */ static int tape_init (void) { TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long)); debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view); #ifdef DBF_LIKE_HELL debug_set_level(TAPE_DBF_AREA, 6); #endif DBF_EVENT(3, "tape init\n"); tape_proc_init(); tapechar_init (); tapeblock_init (); return 0; } /* * Tape exit function. 
*/ static void tape_exit(void) { DBF_EVENT(6, "tape exit\n"); /* Get rid of the frontends */ tapechar_exit(); tapeblock_exit(); tape_proc_cleanup(); debug_unregister (TAPE_DBF_AREA); } MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and " "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)"); MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver"); MODULE_LICENSE("GPL"); module_init(tape_init); module_exit(tape_exit); EXPORT_SYMBOL(tape_generic_remove); EXPORT_SYMBOL(tape_generic_probe); EXPORT_SYMBOL(tape_generic_online); EXPORT_SYMBOL(tape_generic_offline); EXPORT_SYMBOL(tape_generic_pm_suspend); EXPORT_SYMBOL(tape_put_device); EXPORT_SYMBOL(tape_get_device); EXPORT_SYMBOL(tape_state_verbose); EXPORT_SYMBOL(tape_op_verbose); EXPORT_SYMBOL(tape_state_set); EXPORT_SYMBOL(tape_med_state_set); EXPORT_SYMBOL(tape_alloc_request); EXPORT_SYMBOL(tape_free_request); EXPORT_SYMBOL(tape_dump_sense_dbf); EXPORT_SYMBOL(tape_do_io); EXPORT_SYMBOL(tape_do_io_async); EXPORT_SYMBOL(tape_do_io_interruptible); EXPORT_SYMBOL(tape_cancel_io); EXPORT_SYMBOL(tape_mtop);
gpl-2.0
Freack-v/android_kernel_eagle
drivers/pps/kc.c
8438
3840
/* * PPS kernel consumer API * * Copyright (C) 2009-2010 Alexander Gordeev <lasaine@lvk.cs.msu.su> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/pps_kernel.h> #include "kc.h" /* * Global variables */ /* state variables to bind kernel consumer */ DEFINE_SPINLOCK(pps_kc_hardpps_lock); /* PPS API (RFC 2783): current source and mode for kernel consumer */ struct pps_device *pps_kc_hardpps_dev; /* unique pointer to device */ int pps_kc_hardpps_mode; /* mode bits for kernel consumer */ /* pps_kc_bind - control PPS kernel consumer binding * @pps: the PPS source * @bind_args: kernel consumer bind parameters * * This function is used to bind or unbind PPS kernel consumer according to * supplied parameters. Should not be called in interrupt context. 
*/ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args) { /* Check if another consumer is already bound */ spin_lock_irq(&pps_kc_hardpps_lock); if (bind_args->edge == 0) if (pps_kc_hardpps_dev == pps) { pps_kc_hardpps_mode = 0; pps_kc_hardpps_dev = NULL; spin_unlock_irq(&pps_kc_hardpps_lock); dev_info(pps->dev, "unbound kernel" " consumer\n"); } else { spin_unlock_irq(&pps_kc_hardpps_lock); dev_err(pps->dev, "selected kernel consumer" " is not bound\n"); return -EINVAL; } else if (pps_kc_hardpps_dev == NULL || pps_kc_hardpps_dev == pps) { pps_kc_hardpps_mode = bind_args->edge; pps_kc_hardpps_dev = pps; spin_unlock_irq(&pps_kc_hardpps_lock); dev_info(pps->dev, "bound kernel consumer: " "edge=0x%x\n", bind_args->edge); } else { spin_unlock_irq(&pps_kc_hardpps_lock); dev_err(pps->dev, "another kernel consumer" " is already bound\n"); return -EINVAL; } return 0; } /* pps_kc_remove - unbind kernel consumer on PPS source removal * @pps: the PPS source * * This function is used to disable kernel consumer on PPS source removal * if this source was bound to PPS kernel consumer. Can be called on any * source safely. Should not be called in interrupt context. */ void pps_kc_remove(struct pps_device *pps) { spin_lock_irq(&pps_kc_hardpps_lock); if (pps == pps_kc_hardpps_dev) { pps_kc_hardpps_mode = 0; pps_kc_hardpps_dev = NULL; spin_unlock_irq(&pps_kc_hardpps_lock); dev_info(pps->dev, "unbound kernel consumer" " on device removal\n"); } else spin_unlock_irq(&pps_kc_hardpps_lock); } /* pps_kc_event - call hardpps() on PPS event * @pps: the PPS source * @ts: PPS event timestamp * @event: PPS event edge * * This function calls hardpps() when an event from bound PPS source occurs. 
*/ void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts, int event) { unsigned long flags; /* Pass some events to kernel consumer if activated */ spin_lock_irqsave(&pps_kc_hardpps_lock, flags); if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode) hardpps(&ts->ts_real, &ts->ts_raw); spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags); }
gpl-2.0
SlimRoms/kernel_nvidia_shieldtablet
drivers/media/pci/zoran/videocodec.c
12790
9286
/* * VIDEO MOTION CODECs internal API for video devices * * Interface for MJPEG (and maybe later MPEG/WAVELETS) codec's * bound to a master device. * * (c) 2002 Wolfgang Scherr <scherr@net4you.at> * * $Id: videocodec.c,v 1.1.2.8 2003/03/29 07:16:04 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define VIDEOCODEC_VERSION "v0.2" #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> // kernel config is here (procfs flag) #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/uaccess.h> #endif #include "videocodec.h" static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) 
\ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) struct attached_list { struct videocodec *codec; struct attached_list *next; }; struct codec_list { const struct videocodec *codec; int attached; struct attached_list *list; struct codec_list *next; }; static struct codec_list *codeclist_top = NULL; /* ================================================= */ /* function prototypes of the master/slave interface */ /* ================================================= */ struct videocodec * videocodec_attach (struct videocodec_master *master) { struct codec_list *h = codeclist_top; struct attached_list *a, *ptr; struct videocodec *codec; int res; if (!master) { dprintk(1, KERN_ERR "videocodec_attach: no data\n"); return NULL; } dprintk(2, "videocodec_attach: '%s', flags %lx, magic %lx\n", master->name, master->flags, master->magic); if (!h) { dprintk(1, KERN_ERR "videocodec_attach: no device available\n"); return NULL; } while (h) { // attach only if the slave has at least the flags // expected by the master if ((master->flags & h->codec->flags) == master->flags) { dprintk(4, "videocodec_attach: try '%s'\n", h->codec->name); if (!try_module_get(h->codec->owner)) return NULL; codec = kmemdup(h->codec, sizeof(struct videocodec), GFP_KERNEL); if (!codec) { dprintk(1, KERN_ERR "videocodec_attach: no mem\n"); goto out_module_put; } snprintf(codec->name, sizeof(codec->name), "%s[%d]", codec->name, h->attached); codec->master_data = master; res = codec->setup(codec); if (res == 0) { dprintk(3, "videocodec_attach '%s'\n", codec->name); ptr = kzalloc(sizeof(struct attached_list), GFP_KERNEL); if (!ptr) { dprintk(1, KERN_ERR "videocodec_attach: no memory\n"); goto out_kfree; } ptr->codec = codec; a = h->list; if (!a) { h->list = ptr; dprintk(4, "videocodec: first element\n"); } else { while (a->next) a = a->next; // find end a->next = ptr; dprintk(4, "videocodec: in after '%s'\n", h->codec->name); } h->attached += 1; return codec; } else { kfree(codec); } } h = 
h->next; } dprintk(1, KERN_ERR "videocodec_attach: no codec found!\n"); return NULL; out_module_put: module_put(h->codec->owner); out_kfree: kfree(codec); return NULL; } int videocodec_detach (struct videocodec *codec) { struct codec_list *h = codeclist_top; struct attached_list *a, *prev; int res; if (!codec) { dprintk(1, KERN_ERR "videocodec_detach: no data\n"); return -EINVAL; } dprintk(2, "videocodec_detach: '%s', type: %x, flags %lx, magic %lx\n", codec->name, codec->type, codec->flags, codec->magic); if (!h) { dprintk(1, KERN_ERR "videocodec_detach: no device left...\n"); return -ENXIO; } while (h) { a = h->list; prev = NULL; while (a) { if (codec == a->codec) { res = a->codec->unset(a->codec); if (res >= 0) { dprintk(3, "videocodec_detach: '%s'\n", a->codec->name); a->codec->master_data = NULL; } else { dprintk(1, KERN_ERR "videocodec_detach: '%s'\n", a->codec->name); a->codec->master_data = NULL; } if (prev == NULL) { h->list = a->next; dprintk(4, "videocodec: delete first\n"); } else { prev->next = a->next; dprintk(4, "videocodec: delete middle\n"); } module_put(a->codec->owner); kfree(a->codec); kfree(a); h->attached -= 1; return 0; } prev = a; a = a->next; } h = h->next; } dprintk(1, KERN_ERR "videocodec_detach: given codec not found!\n"); return -EINVAL; } int videocodec_register (const struct videocodec *codec) { struct codec_list *ptr, *h = codeclist_top; if (!codec) { dprintk(1, KERN_ERR "videocodec_register: no data!\n"); return -EINVAL; } dprintk(2, "videocodec: register '%s', type: %x, flags %lx, magic %lx\n", codec->name, codec->type, codec->flags, codec->magic); ptr = kzalloc(sizeof(struct codec_list), GFP_KERNEL); if (!ptr) { dprintk(1, KERN_ERR "videocodec_register: no memory\n"); return -ENOMEM; } ptr->codec = codec; if (!h) { codeclist_top = ptr; dprintk(4, "videocodec: hooked in as first element\n"); } else { while (h->next) h = h->next; // find the end h->next = ptr; dprintk(4, "videocodec: hooked in after '%s'\n", h->codec->name); } 
return 0; } int videocodec_unregister (const struct videocodec *codec) { struct codec_list *prev = NULL, *h = codeclist_top; if (!codec) { dprintk(1, KERN_ERR "videocodec_unregister: no data!\n"); return -EINVAL; } dprintk(2, "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n", codec->name, codec->type, codec->flags, codec->magic); if (!h) { dprintk(1, KERN_ERR "videocodec_unregister: no device left...\n"); return -ENXIO; } while (h) { if (codec == h->codec) { if (h->attached) { dprintk(1, KERN_ERR "videocodec: '%s' is used\n", h->codec->name); return -EBUSY; } dprintk(3, "videocodec: unregister '%s' is ok.\n", h->codec->name); if (prev == NULL) { codeclist_top = h->next; dprintk(4, "videocodec: delete first element\n"); } else { prev->next = h->next; dprintk(4, "videocodec: delete middle element\n"); } kfree(h); return 0; } prev = h; h = h->next; } dprintk(1, KERN_ERR "videocodec_unregister: given codec not found!\n"); return -EINVAL; } #ifdef CONFIG_PROC_FS static int proc_videocodecs_show(struct seq_file *m, void *v) { struct codec_list *h = codeclist_top; struct attached_list *a; seq_printf(m, "<S>lave or attached <M>aster name type flags magic "); seq_printf(m, "(connected as)\n"); h = codeclist_top; while (h) { seq_printf(m, "S %32s %04x %08lx %08lx (TEMPLATE)\n", h->codec->name, h->codec->type, h->codec->flags, h->codec->magic); a = h->list; while (a) { seq_printf(m, "M %32s %04x %08lx %08lx (%s)\n", a->codec->master_data->name, a->codec->master_data->type, a->codec->master_data->flags, a->codec->master_data->magic, a->codec->name); a = a->next; } h = h->next; } return 0; } static int proc_videocodecs_open(struct inode *inode, struct file *file) { return single_open(file, proc_videocodecs_show, NULL); } static const struct file_operations videocodecs_proc_fops = { .owner = THIS_MODULE, .open = proc_videocodecs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif /* ===================== */ /* hook in driver module */ 
/* ===================== */ static int __init videocodec_init (void) { #ifdef CONFIG_PROC_FS static struct proc_dir_entry *videocodec_proc_entry; #endif printk(KERN_INFO "Linux video codec intermediate layer: %s\n", VIDEOCODEC_VERSION); #ifdef CONFIG_PROC_FS videocodec_proc_entry = proc_create("videocodecs", 0, NULL, &videocodecs_proc_fops); if (!videocodec_proc_entry) { dprintk(1, KERN_ERR "videocodec: can't init procfs.\n"); } #endif return 0; } static void __exit videocodec_exit (void) { #ifdef CONFIG_PROC_FS remove_proc_entry("videocodecs", NULL); #endif } EXPORT_SYMBOL(videocodec_attach); EXPORT_SYMBOL(videocodec_detach); EXPORT_SYMBOL(videocodec_register); EXPORT_SYMBOL(videocodec_unregister); module_init(videocodec_init); module_exit(videocodec_exit); MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>"); MODULE_DESCRIPTION("Intermediate API module for video codecs " VIDEOCODEC_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
KryptonOmni/android_kernel_lge_mako
drivers/media/video/zoran/videocodec.c
12790
9286
/* * VIDEO MOTION CODECs internal API for video devices * * Interface for MJPEG (and maybe later MPEG/WAVELETS) codec's * bound to a master device. * * (c) 2002 Wolfgang Scherr <scherr@net4you.at> * * $Id: videocodec.c,v 1.1.2.8 2003/03/29 07:16:04 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define VIDEOCODEC_VERSION "v0.2" #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> // kernel config is here (procfs flag) #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/uaccess.h> #endif #include "videocodec.h" static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) 
\ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) struct attached_list { struct videocodec *codec; struct attached_list *next; }; struct codec_list { const struct videocodec *codec; int attached; struct attached_list *list; struct codec_list *next; }; static struct codec_list *codeclist_top = NULL; /* ================================================= */ /* function prototypes of the master/slave interface */ /* ================================================= */ struct videocodec * videocodec_attach (struct videocodec_master *master) { struct codec_list *h = codeclist_top; struct attached_list *a, *ptr; struct videocodec *codec; int res; if (!master) { dprintk(1, KERN_ERR "videocodec_attach: no data\n"); return NULL; } dprintk(2, "videocodec_attach: '%s', flags %lx, magic %lx\n", master->name, master->flags, master->magic); if (!h) { dprintk(1, KERN_ERR "videocodec_attach: no device available\n"); return NULL; } while (h) { // attach only if the slave has at least the flags // expected by the master if ((master->flags & h->codec->flags) == master->flags) { dprintk(4, "videocodec_attach: try '%s'\n", h->codec->name); if (!try_module_get(h->codec->owner)) return NULL; codec = kmemdup(h->codec, sizeof(struct videocodec), GFP_KERNEL); if (!codec) { dprintk(1, KERN_ERR "videocodec_attach: no mem\n"); goto out_module_put; } snprintf(codec->name, sizeof(codec->name), "%s[%d]", codec->name, h->attached); codec->master_data = master; res = codec->setup(codec); if (res == 0) { dprintk(3, "videocodec_attach '%s'\n", codec->name); ptr = kzalloc(sizeof(struct attached_list), GFP_KERNEL); if (!ptr) { dprintk(1, KERN_ERR "videocodec_attach: no memory\n"); goto out_kfree; } ptr->codec = codec; a = h->list; if (!a) { h->list = ptr; dprintk(4, "videocodec: first element\n"); } else { while (a->next) a = a->next; // find end a->next = ptr; dprintk(4, "videocodec: in after '%s'\n", h->codec->name); } h->attached += 1; return codec; } else { kfree(codec); } } h = 
h->next; } dprintk(1, KERN_ERR "videocodec_attach: no codec found!\n"); return NULL; out_module_put: module_put(h->codec->owner); out_kfree: kfree(codec); return NULL; } int videocodec_detach (struct videocodec *codec) { struct codec_list *h = codeclist_top; struct attached_list *a, *prev; int res; if (!codec) { dprintk(1, KERN_ERR "videocodec_detach: no data\n"); return -EINVAL; } dprintk(2, "videocodec_detach: '%s', type: %x, flags %lx, magic %lx\n", codec->name, codec->type, codec->flags, codec->magic); if (!h) { dprintk(1, KERN_ERR "videocodec_detach: no device left...\n"); return -ENXIO; } while (h) { a = h->list; prev = NULL; while (a) { if (codec == a->codec) { res = a->codec->unset(a->codec); if (res >= 0) { dprintk(3, "videocodec_detach: '%s'\n", a->codec->name); a->codec->master_data = NULL; } else { dprintk(1, KERN_ERR "videocodec_detach: '%s'\n", a->codec->name); a->codec->master_data = NULL; } if (prev == NULL) { h->list = a->next; dprintk(4, "videocodec: delete first\n"); } else { prev->next = a->next; dprintk(4, "videocodec: delete middle\n"); } module_put(a->codec->owner); kfree(a->codec); kfree(a); h->attached -= 1; return 0; } prev = a; a = a->next; } h = h->next; } dprintk(1, KERN_ERR "videocodec_detach: given codec not found!\n"); return -EINVAL; } int videocodec_register (const struct videocodec *codec) { struct codec_list *ptr, *h = codeclist_top; if (!codec) { dprintk(1, KERN_ERR "videocodec_register: no data!\n"); return -EINVAL; } dprintk(2, "videocodec: register '%s', type: %x, flags %lx, magic %lx\n", codec->name, codec->type, codec->flags, codec->magic); ptr = kzalloc(sizeof(struct codec_list), GFP_KERNEL); if (!ptr) { dprintk(1, KERN_ERR "videocodec_register: no memory\n"); return -ENOMEM; } ptr->codec = codec; if (!h) { codeclist_top = ptr; dprintk(4, "videocodec: hooked in as first element\n"); } else { while (h->next) h = h->next; // find the end h->next = ptr; dprintk(4, "videocodec: hooked in after '%s'\n", h->codec->name); } 
return 0; } int videocodec_unregister (const struct videocodec *codec) { struct codec_list *prev = NULL, *h = codeclist_top; if (!codec) { dprintk(1, KERN_ERR "videocodec_unregister: no data!\n"); return -EINVAL; } dprintk(2, "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n", codec->name, codec->type, codec->flags, codec->magic); if (!h) { dprintk(1, KERN_ERR "videocodec_unregister: no device left...\n"); return -ENXIO; } while (h) { if (codec == h->codec) { if (h->attached) { dprintk(1, KERN_ERR "videocodec: '%s' is used\n", h->codec->name); return -EBUSY; } dprintk(3, "videocodec: unregister '%s' is ok.\n", h->codec->name); if (prev == NULL) { codeclist_top = h->next; dprintk(4, "videocodec: delete first element\n"); } else { prev->next = h->next; dprintk(4, "videocodec: delete middle element\n"); } kfree(h); return 0; } prev = h; h = h->next; } dprintk(1, KERN_ERR "videocodec_unregister: given codec not found!\n"); return -EINVAL; } #ifdef CONFIG_PROC_FS static int proc_videocodecs_show(struct seq_file *m, void *v) { struct codec_list *h = codeclist_top; struct attached_list *a; seq_printf(m, "<S>lave or attached <M>aster name type flags magic "); seq_printf(m, "(connected as)\n"); h = codeclist_top; while (h) { seq_printf(m, "S %32s %04x %08lx %08lx (TEMPLATE)\n", h->codec->name, h->codec->type, h->codec->flags, h->codec->magic); a = h->list; while (a) { seq_printf(m, "M %32s %04x %08lx %08lx (%s)\n", a->codec->master_data->name, a->codec->master_data->type, a->codec->master_data->flags, a->codec->master_data->magic, a->codec->name); a = a->next; } h = h->next; } return 0; } static int proc_videocodecs_open(struct inode *inode, struct file *file) { return single_open(file, proc_videocodecs_show, NULL); } static const struct file_operations videocodecs_proc_fops = { .owner = THIS_MODULE, .open = proc_videocodecs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif /* ===================== */ /* hook in driver module */ 
/* ===================== */ static int __init videocodec_init (void) { #ifdef CONFIG_PROC_FS static struct proc_dir_entry *videocodec_proc_entry; #endif printk(KERN_INFO "Linux video codec intermediate layer: %s\n", VIDEOCODEC_VERSION); #ifdef CONFIG_PROC_FS videocodec_proc_entry = proc_create("videocodecs", 0, NULL, &videocodecs_proc_fops); if (!videocodec_proc_entry) { dprintk(1, KERN_ERR "videocodec: can't init procfs.\n"); } #endif return 0; } static void __exit videocodec_exit (void) { #ifdef CONFIG_PROC_FS remove_proc_entry("videocodecs", NULL); #endif } EXPORT_SYMBOL(videocodec_attach); EXPORT_SYMBOL(videocodec_detach); EXPORT_SYMBOL(videocodec_register); EXPORT_SYMBOL(videocodec_unregister); module_init(videocodec_init); module_exit(videocodec_exit); MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>"); MODULE_DESCRIPTION("Intermediate API module for video codecs " VIDEOCODEC_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
bio4554/ker.nl
drivers/net/irda/tekram-sir.c
13302
5823
/********************************************************************* * * Filename: tekram.c * Version: 1.3 * Description: Implementation of the Tekram IrMate IR-210B dongle * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Wed Oct 21 20:02:35 1998 * Modified at: Sun Oct 27 22:02:38 2002 * Modified by: Martin Diehl <mad@mdiehl.de> * * Copyright (c) 1998-1999 Dag Brattli, * Copyright (c) 2002 Martin Diehl, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <net/irda/irda.h> #include "sir-dev.h" static int tekram_delay = 150; /* default is 150 ms */ module_param(tekram_delay, int, 0); MODULE_PARM_DESC(tekram_delay, "tekram dongle write complete delay"); static int tekram_open(struct sir_dev *); static int tekram_close(struct sir_dev *); static int tekram_change_speed(struct sir_dev *, unsigned); static int tekram_reset(struct sir_dev *); #define TEKRAM_115200 0x00 #define TEKRAM_57600 0x01 #define TEKRAM_38400 0x02 #define TEKRAM_19200 0x03 #define TEKRAM_9600 0x04 #define TEKRAM_PW 0x10 /* Pulse select bit */ static struct dongle_driver tekram = { .owner = THIS_MODULE, .driver_name = "Tekram IR-210B", .type = IRDA_TEKRAM_DONGLE, .open = tekram_open, .close = tekram_close, .reset = tekram_reset, .set_speed = tekram_change_speed, }; static int __init tekram_sir_init(void) { if (tekram_delay < 1 || tekram_delay > 500) tekram_delay = 200; IRDA_DEBUG(1, "%s - using %d ms delay\n", tekram.driver_name, 
tekram_delay); return irda_register_dongle(&tekram); } static void __exit tekram_sir_cleanup(void) { irda_unregister_dongle(&tekram); } static int tekram_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; IRDA_DEBUG(2, "%s()\n", __func__); sirdev_set_dtr_rts(dev, TRUE, TRUE); qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */ irda_qos_bits_to_value(qos); /* irda thread waits 50 msec for power settling */ return 0; } static int tekram_close(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); return 0; } /* * Function tekram_change_speed (dev, state, speed) * * Set the speed for the Tekram IRMate 210 type dongle. Warning, this * function must be called with a process context! * * Algorithm * 1. clear DTR * 2. set RTS, and wait at least 7 us * 3. send Control Byte to the IR-210 through TXD to set new baud rate * wait until the stop bit of Control Byte is sent (for 9600 baud rate, * it takes about 100 msec) * * [oops, why 100 msec? sending 1 byte (10 bits) takes 1.05 msec * - is this probably to compensate for delays in tty layer?] * * 5. clear RTS (return to NORMAL Operation) * 6. 
wait at least 50 us, new setting (baud rate, etc) takes effect here * after */ #define TEKRAM_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1) static int tekram_change_speed(struct sir_dev *dev, unsigned speed) { unsigned state = dev->fsm.substate; unsigned delay = 0; u8 byte; static int ret = 0; IRDA_DEBUG(2, "%s()\n", __func__); switch(state) { case SIRDEV_STATE_DONGLE_SPEED: switch (speed) { default: speed = 9600; ret = -EINVAL; /* fall thru */ case 9600: byte = TEKRAM_PW|TEKRAM_9600; break; case 19200: byte = TEKRAM_PW|TEKRAM_19200; break; case 38400: byte = TEKRAM_PW|TEKRAM_38400; break; case 57600: byte = TEKRAM_PW|TEKRAM_57600; break; case 115200: byte = TEKRAM_115200; break; } /* Set DTR, Clear RTS */ sirdev_set_dtr_rts(dev, TRUE, FALSE); /* Wait at least 7us */ udelay(14); /* Write control byte */ sirdev_raw_write(dev, &byte, 1); dev->speed = speed; state = TEKRAM_STATE_WAIT_SPEED; delay = tekram_delay; break; case TEKRAM_STATE_WAIT_SPEED: /* Set DTR, Set RTS */ sirdev_set_dtr_rts(dev, TRUE, TRUE); udelay(50); break; default: IRDA_ERROR("%s - undefined state %d\n", __func__, state); ret = -EINVAL; break; } dev->fsm.substate = state; return (delay > 0) ? delay : ret; } /* * Function tekram_reset (driver) * * This function resets the tekram dongle. Warning, this function * must be called with a process context!! * * Algorithm: * 0. Clear RTS and DTR, and wait 50 ms (power off the IR-210 ) * 1. clear RTS * 2. set DTR, and wait at least 1 ms * 3. 
clear DTR to SPACE state, wait at least 50 us for further * operation */ static int tekram_reset(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Clear DTR, Set RTS */ sirdev_set_dtr_rts(dev, FALSE, TRUE); /* Should sleep 1 ms */ msleep(1); /* Set DTR, Set RTS */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Wait at least 50 us */ udelay(75); dev->speed = 9600; return 0; } MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); MODULE_DESCRIPTION("Tekram IrMate IR-210B dongle driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("irda-dongle-0"); /* IRDA_TEKRAM_DONGLE */ module_init(tekram_sir_init); module_exit(tekram_sir_cleanup);
gpl-2.0
gauravds/linux
arch/mips/ath25/ar5312.c
247
10511
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved. * Copyright (C) 2006 FON Technology, SL. * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org> * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org> * Copyright (C) 2012 Alexandros C. Couloumbis <alex@ozo.com> */ /* * Platform devices for Atheros AR5312 SoCs */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/reboot.h> #include <asm/bootinfo.h> #include <asm/reboot.h> #include <asm/time.h> #include <ath25_platform.h> #include "devices.h" #include "ar5312.h" #include "ar5312_regs.h" static void __iomem *ar5312_rst_base; static struct irq_domain *ar5312_misc_irq_domain; static inline u32 ar5312_rst_reg_read(u32 reg) { return __raw_readl(ar5312_rst_base + reg); } static inline void ar5312_rst_reg_write(u32 reg, u32 val) { __raw_writel(val, ar5312_rst_base + reg); } static inline void ar5312_rst_reg_mask(u32 reg, u32 mask, u32 val) { u32 ret = ar5312_rst_reg_read(reg); ret &= ~mask; ret |= val; ar5312_rst_reg_write(reg, ret); } static irqreturn_t ar5312_ahb_err_handler(int cpl, void *dev_id) { u32 proc1 = ar5312_rst_reg_read(AR5312_PROC1); u32 proc_addr = ar5312_rst_reg_read(AR5312_PROCADDR); /* clears error */ u32 dma1 = ar5312_rst_reg_read(AR5312_DMA1); u32 dma_addr = ar5312_rst_reg_read(AR5312_DMAADDR); /* clears error */ pr_emerg("AHB interrupt: PROCADDR=0x%8.8x PROC1=0x%8.8x DMAADDR=0x%8.8x DMA1=0x%8.8x\n", proc_addr, proc1, dma_addr, dma1); machine_restart("AHB error"); /* Catastrophic failure */ return IRQ_HANDLED; } static struct irqaction ar5312_ahb_err_interrupt = { .handler = ar5312_ahb_err_handler, .name = "ar5312-ahb-error", }; static void 
ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc) { u32 pending = ar5312_rst_reg_read(AR5312_ISR) & ar5312_rst_reg_read(AR5312_IMR); unsigned nr, misc_irq = 0; if (pending) { struct irq_domain *domain = irq_desc_get_handler_data(desc); nr = __ffs(pending); misc_irq = irq_find_mapping(domain, nr); } if (misc_irq) { generic_handle_irq(misc_irq); if (nr == AR5312_MISC_IRQ_TIMER) ar5312_rst_reg_read(AR5312_TIMER); } else { spurious_interrupt(); } } /* Enable the specified AR5312_MISC_IRQ interrupt */ static void ar5312_misc_irq_unmask(struct irq_data *d) { ar5312_rst_reg_mask(AR5312_IMR, 0, BIT(d->hwirq)); } /* Disable the specified AR5312_MISC_IRQ interrupt */ static void ar5312_misc_irq_mask(struct irq_data *d) { ar5312_rst_reg_mask(AR5312_IMR, BIT(d->hwirq), 0); ar5312_rst_reg_read(AR5312_IMR); /* flush write buffer */ } static struct irq_chip ar5312_misc_irq_chip = { .name = "ar5312-misc", .irq_unmask = ar5312_misc_irq_unmask, .irq_mask = ar5312_misc_irq_mask, }; static int ar5312_misc_irq_map(struct irq_domain *d, unsigned irq, irq_hw_number_t hw) { irq_set_chip_and_handler(irq, &ar5312_misc_irq_chip, handle_level_irq); return 0; } static struct irq_domain_ops ar5312_misc_irq_domain_ops = { .map = ar5312_misc_irq_map, }; static void ar5312_irq_dispatch(void) { u32 pending = read_c0_status() & read_c0_cause(); if (pending & CAUSEF_IP2) do_IRQ(AR5312_IRQ_WLAN0); else if (pending & CAUSEF_IP5) do_IRQ(AR5312_IRQ_WLAN1); else if (pending & CAUSEF_IP6) do_IRQ(AR5312_IRQ_MISC); else if (pending & CAUSEF_IP7) do_IRQ(ATH25_IRQ_CPU_CLOCK); else spurious_interrupt(); } void __init ar5312_arch_init_irq(void) { struct irq_domain *domain; unsigned irq; ath25_irq_dispatch = ar5312_irq_dispatch; domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT, &ar5312_misc_irq_domain_ops, NULL); if (!domain) panic("Failed to add IRQ domain"); irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC); setup_irq(irq, &ar5312_ahb_err_interrupt); 
irq_set_chained_handler_and_data(AR5312_IRQ_MISC, ar5312_misc_irq_handler, domain); ar5312_misc_irq_domain = domain; } static struct physmap_flash_data ar5312_flash_data = { .width = 2, }; static struct resource ar5312_flash_resource = { .start = AR5312_FLASH_BASE, .end = AR5312_FLASH_BASE + AR5312_FLASH_SIZE - 1, .flags = IORESOURCE_MEM, }; static struct platform_device ar5312_physmap_flash = { .name = "physmap-flash", .id = 0, .dev.platform_data = &ar5312_flash_data, .resource = &ar5312_flash_resource, .num_resources = 1, }; static void __init ar5312_flash_init(void) { void __iomem *flashctl_base; u32 ctl; flashctl_base = ioremap_nocache(AR5312_FLASHCTL_BASE, AR5312_FLASHCTL_SIZE); ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL0); ctl &= AR5312_FLASHCTL_MW; /* fixup flash width */ switch (ctl) { case AR5312_FLASHCTL_MW16: ar5312_flash_data.width = 2; break; case AR5312_FLASHCTL_MW8: default: ar5312_flash_data.width = 1; break; } /* * Configure flash bank 0. * Assume 8M window size. Flash will be aliased if it's smaller */ ctl |= AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC_8M | AR5312_FLASHCTL_RBLE; ctl |= 0x01 << AR5312_FLASHCTL_IDCY_S; ctl |= 0x07 << AR5312_FLASHCTL_WST1_S; ctl |= 0x07 << AR5312_FLASHCTL_WST2_S; __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL0); /* Disable other flash banks */ ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL1); ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC); __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL1); ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL2); ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC); __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL2); iounmap(flashctl_base); } void __init ar5312_init_devices(void) { struct ath25_boarddata *config; ar5312_flash_init(); /* Locate board/radio config data */ ath25_find_config(AR5312_FLASH_BASE, AR5312_FLASH_SIZE); config = ath25_board.config; /* AR2313 has CPU minor rev. 
10 */ if ((current_cpu_data.processor_id & 0xff) == 0x0a) ath25_soc = ATH25_SOC_AR2313; /* AR2312 shares the same Silicon ID as AR5312 */ else if (config->flags & BD_ISCASPER) ath25_soc = ATH25_SOC_AR2312; /* Everything else is probably AR5312 or compatible */ else ath25_soc = ATH25_SOC_AR5312; platform_device_register(&ar5312_physmap_flash); switch (ath25_soc) { case ATH25_SOC_AR5312: if (!ath25_board.radio) return; if (!(config->flags & BD_WLAN0)) break; ath25_add_wmac(0, AR5312_WLAN0_BASE, AR5312_IRQ_WLAN0); break; case ATH25_SOC_AR2312: case ATH25_SOC_AR2313: if (!ath25_board.radio) return; break; default: break; } if (config->flags & BD_WLAN1) ath25_add_wmac(1, AR5312_WLAN1_BASE, AR5312_IRQ_WLAN1); } static void ar5312_restart(char *command) { /* reset the system */ local_irq_disable(); while (1) ar5312_rst_reg_write(AR5312_RESET, AR5312_RESET_SYSTEM); } /* * This table is indexed by bits 5..4 of the CLOCKCTL1 register * to determine the predevisor value. */ static unsigned clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 }; static unsigned __init ar5312_cpu_frequency(void) { u32 scratch, devid, clock_ctl1; u32 predivide_mask, multiplier_mask, doubler_mask; unsigned predivide_shift, multiplier_shift; unsigned predivide_select, predivisor, multiplier; /* Trust the bootrom's idea of cpu frequency. 
*/ scratch = ar5312_rst_reg_read(AR5312_SCRATCH); if (scratch) return scratch; devid = ar5312_rst_reg_read(AR5312_REV); devid = (devid & AR5312_REV_MAJ) >> AR5312_REV_MAJ_S; if (devid == AR5312_REV_MAJ_AR2313) { predivide_mask = AR2313_CLOCKCTL1_PREDIVIDE_MASK; predivide_shift = AR2313_CLOCKCTL1_PREDIVIDE_SHIFT; multiplier_mask = AR2313_CLOCKCTL1_MULTIPLIER_MASK; multiplier_shift = AR2313_CLOCKCTL1_MULTIPLIER_SHIFT; doubler_mask = AR2313_CLOCKCTL1_DOUBLER_MASK; } else { /* AR5312 and AR2312 */ predivide_mask = AR5312_CLOCKCTL1_PREDIVIDE_MASK; predivide_shift = AR5312_CLOCKCTL1_PREDIVIDE_SHIFT; multiplier_mask = AR5312_CLOCKCTL1_MULTIPLIER_MASK; multiplier_shift = AR5312_CLOCKCTL1_MULTIPLIER_SHIFT; doubler_mask = AR5312_CLOCKCTL1_DOUBLER_MASK; } /* * Clocking is derived from a fixed 40MHz input clock. * * cpu_freq = input_clock * MULT (where MULT is PLL multiplier) * sys_freq = cpu_freq / 4 (used for APB clock, serial, * flash, Timer, Watchdog Timer) * * cnt_freq = cpu_freq / 2 (use for CPU count/compare) * * So, for example, with a PLL multiplier of 5, we have * * cpu_freq = 200MHz * sys_freq = 50MHz * cnt_freq = 100MHz * * We compute the CPU frequency, based on PLL settings. 
*/ clock_ctl1 = ar5312_rst_reg_read(AR5312_CLOCKCTL1); predivide_select = (clock_ctl1 & predivide_mask) >> predivide_shift; predivisor = clockctl1_predivide_table[predivide_select]; multiplier = (clock_ctl1 & multiplier_mask) >> multiplier_shift; if (clock_ctl1 & doubler_mask) multiplier <<= 1; return (40000000 / predivisor) * multiplier; } static inline unsigned ar5312_sys_frequency(void) { return ar5312_cpu_frequency() / 4; } void __init ar5312_plat_time_init(void) { mips_hpt_frequency = ar5312_cpu_frequency() / 2; } void __init ar5312_plat_mem_setup(void) { void __iomem *sdram_base; u32 memsize, memcfg, bank0_ac, bank1_ac; u32 devid; /* Detect memory size */ sdram_base = ioremap_nocache(AR5312_SDRAMCTL_BASE, AR5312_SDRAMCTL_SIZE); memcfg = __raw_readl(sdram_base + AR5312_MEM_CFG1); bank0_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC0); bank1_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC1); memsize = (bank0_ac ? (1 << (bank0_ac + 1)) : 0) + (bank1_ac ? (1 << (bank1_ac + 1)) : 0); memsize <<= 20; add_memory_region(0, memsize, BOOT_MEM_RAM); iounmap(sdram_base); ar5312_rst_base = ioremap_nocache(AR5312_RST_BASE, AR5312_RST_SIZE); devid = ar5312_rst_reg_read(AR5312_REV); devid >>= AR5312_REV_WMAC_MIN_S; devid &= AR5312_REV_CHIP; ath25_board.devid = (u16)devid; /* Clear any lingering AHB errors */ ar5312_rst_reg_read(AR5312_PROCADDR); ar5312_rst_reg_read(AR5312_DMAADDR); ar5312_rst_reg_write(AR5312_WDT_CTRL, AR5312_WDT_CTRL_IGNORE); _machine_restart = ar5312_restart; } void __init ar5312_arch_init(void) { unsigned irq = irq_create_mapping(ar5312_misc_irq_domain, AR5312_MISC_IRQ_UART0); ath25_serial_setup(AR5312_UART0_BASE, irq, ar5312_sys_frequency()); }
gpl-2.0
627656505/linux
drivers/crypto/nx/nx-aes-gcm.c
247
14234
/** * AES GCM routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2012 International Business Machines Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 only. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Author: Kent Yoder <yoder1@us.ibm.com> */ #include <crypto/internal/aead.h> #include <crypto/aes.h> #include <crypto/scatterwalk.h> #include <linux/module.h> #include <linux/types.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" static int gcm_aes_nx_set_key(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; nx_ctx_init(nx_ctx, HCOP_FC_AES); switch (key_len) { case AES_KEYSIZE_128: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; break; case AES_KEYSIZE_192: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; break; case AES_KEYSIZE_256: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; break; default: return -EINVAL; } csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len); csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA; 
memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len); return 0; } static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); char *nonce = nx_ctx->priv.gcm.nonce; int rc; if (key_len < 4) return -EINVAL; key_len -= 4; rc = gcm_aes_nx_set_key(tfm, in_key, key_len); if (rc) goto out; memcpy(nonce, in_key + key_len, 4); out: return rc; } static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } return 0; } static int nx_gca(struct nx_crypto_ctx *nx_ctx, struct aead_request *req, u8 *out, unsigned int assoclen) { int rc; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; struct scatter_walk walk; struct nx_sg *nx_sg = nx_ctx->in_sg; unsigned int nbytes = assoclen; unsigned int processed = 0, to_process; unsigned int max_sg_len; if (nbytes <= AES_BLOCK_SIZE) { scatterwalk_start(&walk, req->src); scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); return 0; } NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION; /* page_limit: number of sg entries that fit on one page */ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), nx_ctx->ap->sglen); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); do { /* * to_process: the data chunk to process in this update. * This value is bound by sg list limits. 
*/ to_process = min_t(u64, nbytes - processed, nx_ctx->ap->databytelen); to_process = min_t(u64, to_process, NX_PAGE_SIZE * (max_sg_len - 1)); nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, req->src, processed, &to_process); if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE; else NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE; nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg); rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) return rc; memcpy(csbcpb_aead->cpb.aes_gca.in_pat, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION; atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < nbytes); memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); return rc; } static int gmac(struct aead_request *req, struct blkcipher_desc *desc, unsigned int assoclen) { int rc; struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *nx_sg; unsigned int nbytes = assoclen; unsigned int processed = 0, to_process; unsigned int max_sg_len; /* Set GMAC mode */ csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC; NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; /* page_limit: number of sg entries that fit on one page */ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), nx_ctx->ap->sglen); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); /* Copy IV */ memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE); do { /* * to_process: the data chunk to process in this update. * This value is bound by sg list limits. 
*/ to_process = min_t(u64, nbytes - processed, nx_ctx->ap->databytelen); to_process = min_t(u64, to_process, NX_PAGE_SIZE * (max_sg_len - 1)); nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, req->src, processed, &to_process); if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg); csbcpb->cpb.aes_gcm.bit_length_data = 0; csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes; rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_gcm.in_s0, csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < nbytes); out: /* Restore GCM mode */ csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; return rc; } static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc, int enc) { int rc; struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; char out[AES_BLOCK_SIZE]; struct nx_sg *in_sg, *out_sg; int len; /* For scenarios where the input message is zero length, AES CTR mode * may be used. Set the source data to be a single block (16B) of all * zeros, and set the input IV value to be the same as the GMAC IV * value. 
- nx_wb 4.8.1.3 */ /* Change to ECB mode */ csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key, sizeof(csbcpb->cpb.aes_ecb.key)); if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; len = AES_BLOCK_SIZE; /* Encrypt the counter/IV */ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info, &len, nx_ctx->ap->sglen); if (len != AES_BLOCK_SIZE) return -EINVAL; len = sizeof(out); out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len, nx_ctx->ap->sglen); if (len != sizeof(out)) return -EINVAL; nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; atomic_inc(&(nx_ctx->stats->aes_ops)); /* Copy out the auth tag */ memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out, crypto_aead_authsize(crypto_aead_reqtfm(req))); out: /* Restore XCBC mode */ csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; /* * ECB key uses the same region that GCM AAD and counter, so it's safe * to just fill it with zeroes. 
*/ memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key)); return rc; } static int gcm_aes_nx_crypt(struct aead_request *req, int enc, unsigned int assoclen) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_gcm_rctx *rctx = aead_request_ctx(req); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct blkcipher_desc desc; unsigned int nbytes = req->cryptlen; unsigned int processed = 0, to_process; unsigned long irq_flags; int rc = -EINVAL; spin_lock_irqsave(&nx_ctx->lock, irq_flags); desc.info = rctx->iv; /* initialize the counter */ *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; if (nbytes == 0) { if (assoclen == 0) rc = gcm_empty(req, &desc, enc); else rc = gmac(req, &desc, assoclen); if (rc) goto out; else goto mac; } /* Process associated data */ csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8; if (assoclen) { rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad, assoclen); if (rc) goto out; } /* Set flags for encryption */ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; if (enc) { NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; } else { NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); } do { to_process = nbytes - processed; csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, &to_process, processed + req->assoclen, csbcpb->cpb.aes_gcm.iv_or_cnt); if (rc) goto out; if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_gcm.in_s0, csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; atomic_inc(&(nx_ctx->stats->aes_ops)); 
atomic64_add(csbcpb->csb.processed_byte_count, &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < nbytes); mac: if (enc) { /* copy out the auth tag */ scatterwalk_map_and_copy( csbcpb->cpb.aes_gcm.out_pat_or_mac, req->dst, req->assoclen + nbytes, crypto_aead_authsize(crypto_aead_reqtfm(req)), SCATTERWALK_TO_SG); } else { u8 *itag = nx_ctx->priv.gcm.iauth_tag; u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; scatterwalk_map_and_copy( itag, req->src, req->assoclen + nbytes, crypto_aead_authsize(crypto_aead_reqtfm(req)), SCATTERWALK_FROM_SG); rc = memcmp(itag, otag, crypto_aead_authsize(crypto_aead_reqtfm(req))) ? -EBADMSG : 0; } out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int gcm_aes_nx_encrypt(struct aead_request *req) { struct nx_gcm_rctx *rctx = aead_request_ctx(req); char *iv = rctx->iv; memcpy(iv, req->iv, 12); return gcm_aes_nx_crypt(req, 1, req->assoclen); } static int gcm_aes_nx_decrypt(struct aead_request *req) { struct nx_gcm_rctx *rctx = aead_request_ctx(req); char *iv = rctx->iv; memcpy(iv, req->iv, 12); return gcm_aes_nx_crypt(req, 0, req->assoclen); } static int gcm4106_aes_nx_encrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_gcm_rctx *rctx = aead_request_ctx(req); char *iv = rctx->iv; char *nonce = nx_ctx->priv.gcm.nonce; memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); if (req->assoclen < 8) return -EINVAL; return gcm_aes_nx_crypt(req, 1, req->assoclen - 8); } static int gcm4106_aes_nx_decrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_gcm_rctx *rctx = aead_request_ctx(req); char *iv = rctx->iv; char *nonce = nx_ctx->priv.gcm.nonce; memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); if (req->assoclen < 8) return -EINVAL; return gcm_aes_nx_crypt(req, 0, req->assoclen - 8); } /* tell the 
block cipher walk routines that this is a stream cipher by * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block * during encrypt/decrypt doesn't solve this problem, because it calls * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, * but instead uses this tfm->blocksize. */ struct aead_alg nx_gcm_aes_alg = { .base = { .cra_name = "gcm(aes)", .cra_driver_name = "gcm-aes-nx", .cra_priority = 300, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_module = THIS_MODULE, }, .init = nx_crypto_ctx_aes_gcm_init, .exit = nx_crypto_ctx_aead_exit, .ivsize = 12, .maxauthsize = AES_BLOCK_SIZE, .setkey = gcm_aes_nx_set_key, .encrypt = gcm_aes_nx_encrypt, .decrypt = gcm_aes_nx_decrypt, }; struct aead_alg nx_gcm4106_aes_alg = { .base = { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "rfc4106-gcm-aes-nx", .cra_priority = 300, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_module = THIS_MODULE, }, .init = nx_crypto_ctx_aes_gcm_init, .exit = nx_crypto_ctx_aead_exit, .ivsize = 8, .maxauthsize = AES_BLOCK_SIZE, .setkey = gcm4106_aes_nx_set_key, .setauthsize = gcm4106_aes_nx_setauthsize, .encrypt = gcm4106_aes_nx_encrypt, .decrypt = gcm4106_aes_nx_decrypt, };
gpl-2.0
TheTypoMaster/ubuntu-utopic
drivers/hwmon/coretemp.c
503
22585
/* * coretemp.c - Linux kernel module for hardware monitoring * * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz> * * Inspired from many hwmon drivers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/hwmon.h> #include <linux/sysfs.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <asm/msr.h> #include <asm/processor.h> #include <asm/cpu_device_id.h> #define DRVNAME "coretemp" /* * force_tjmax only matters when TjMax can't be read from the CPU itself. * When set, it replaces the driver's suboptimal heuristic. 
*/ static int force_tjmax; module_param_named(tjmax, force_tjmax, int, 0444); MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ #define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ #define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) #define TO_PHYS_ID(cpu) (cpu_data(cpu).phys_proc_id) #define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id) #define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) #ifdef CONFIG_SMP #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu)) #else #define for_each_sibling(i, cpu) for (i = 0; false; ) #endif /* * Per-Core Temperature Data * @last_updated: The time when the current temperature value was updated * earlier (in jiffies). * @cpu_core_id: The CPU Core from which temperature values should be read * This value is passed as "id" field to rdmsr/wrmsr functions. * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, * from where the temperature values should be read. * @attr_size: Total number of pre-core attrs displayed in the sysfs. * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. * Otherwise, temp_data holds coretemp data. * @valid: If this is 1, the current temperature is valid. 
*/ struct temp_data { int temp; int ttarget; int tjmax; unsigned long last_updated; unsigned int cpu; u32 cpu_core_id; u32 status_reg; int attr_size; bool is_pkg_data; bool valid; struct sensor_device_attribute sd_attrs[TOTAL_ATTRS]; char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH]; struct attribute *attrs[TOTAL_ATTRS + 1]; struct attribute_group attr_group; struct mutex update_lock; }; /* Platform Data per Physical CPU */ struct platform_data { struct device *hwmon_dev; u16 phys_proc_id; struct temp_data *core_data[MAX_CORE_DATA]; struct device_attribute name_attr; }; struct pdev_entry { struct list_head list; struct platform_device *pdev; u16 phys_proc_id; }; static LIST_HEAD(pdev_list); static DEFINE_MUTEX(pdev_list_mutex); static ssize_t show_label(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct platform_data *pdata = dev_get_drvdata(dev); struct temp_data *tdata = pdata->core_data[attr->index]; if (tdata->is_pkg_data) return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id); return sprintf(buf, "Core %u\n", tdata->cpu_core_id); } static ssize_t show_crit_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { u32 eax, edx; struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct platform_data *pdata = dev_get_drvdata(dev); struct temp_data *tdata = pdata->core_data[attr->index]; rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); return sprintf(buf, "%d\n", (eax >> 5) & 1); } static ssize_t show_tjmax(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct platform_data *pdata = dev_get_drvdata(dev); return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tjmax); } static ssize_t show_ttarget(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct 
platform_data *pdata = dev_get_drvdata(dev); return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { u32 eax, edx; struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct platform_data *pdata = dev_get_drvdata(dev); struct temp_data *tdata = pdata->core_data[attr->index]; mutex_lock(&tdata->update_lock); /* Check whether the time interval has elapsed */ if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) { rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); /* * Ignore the valid bit. In all observed cases the register * value is either low or zero if the valid bit is 0. * Return it instead of reporting an error which doesn't * really help at all. */ tdata->temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; tdata->valid = 1; tdata->last_updated = jiffies; } mutex_unlock(&tdata->update_lock); return sprintf(buf, "%d\n", tdata->temp); } struct tjmax_pci { unsigned int device; int tjmax; }; static const struct tjmax_pci tjmax_pci_table[] = { { 0x0708, 110000 }, /* CE41x0 (Sodaville ) */ { 0x0c72, 102000 }, /* Atom S1240 (Centerton) */ { 0x0c73, 95000 }, /* Atom S1220 (Centerton) */ { 0x0c75, 95000 }, /* Atom S1260 (Centerton) */ }; struct tjmax { char const *id; int tjmax; }; static const struct tjmax tjmax_table[] = { { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */ { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */ }; struct tjmax_model { u8 model; u8 mask; int tjmax; }; #define ANY 0xff static const struct tjmax_model tjmax_model_table[] = { { 0x1c, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */ { 0x1c, ANY, 90000 }, /* Z5xx, N2xx, possibly others * Note: Also matches 230 and 330, * which are covered by tjmax_table */ { 0x26, ANY, 90000 }, /* Atom Tunnel Creek (Exx), Lincroft (Z6xx) * Note: TjMax for E6xxT is 110C, but CPU type * is undetectable by software */ { 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */ 
{ 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z27x0) */ { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) * Also matches S12x0 (stepping 9), covered by * PCI table */ }; static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) { /* The 100C is default for both mobile and non mobile CPUs */ int tjmax = 100000; int tjmax_ee = 85000; int usemsr_ee = 1; int err; u32 eax, edx; int i; struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); /* * Explicit tjmax table entries override heuristics. * First try PCI host bridge IDs, followed by model ID strings * and model/stepping information. */ if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) { for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) { if (host_bridge->device == tjmax_pci_table[i].device) return tjmax_pci_table[i].tjmax; } } for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) { if (strstr(c->x86_model_id, tjmax_table[i].id)) return tjmax_table[i].tjmax; } for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) { const struct tjmax_model *tm = &tjmax_model_table[i]; if (c->x86_model == tm->model && (tm->mask == ANY || c->x86_mask == tm->mask)) return tm->tjmax; } /* Early chips have no MSR for TjMax */ if (c->x86_model == 0xf && c->x86_mask < 4) usemsr_ee = 0; if (c->x86_model > 0xe && usemsr_ee) { u8 platform_id; /* * Now we can detect the mobile CPU using Intel provided table * http://softwarecommunity.intel.com/Wiki/Mobility/720.htm * For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU */ err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx); if (err) { dev_warn(dev, "Unable to access MSR 0x17, assuming desktop" " CPU\n"); usemsr_ee = 0; } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) { /* * Trust bit 28 up to Penryn, I could not find any * documentation on that; if you happen to know * someone at Intel please ask */ usemsr_ee = 0; } else { /* Platform ID bits 52:50 (EDX starts at bit 32) */ platform_id = (edx >> 18) & 0x7; /* * Mobile Penryn CPU 
seems to be platform ID 7 or 5 * (guesswork) */ if (c->x86_model == 0x17 && (platform_id == 5 || platform_id == 7)) { /* * If MSR EE bit is set, set it to 90 degrees C, * otherwise 105 degrees C */ tjmax_ee = 90000; tjmax = 105000; } } } if (usemsr_ee) { err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx); if (err) { dev_warn(dev, "Unable to access MSR 0xEE, for Tjmax, left" " at default\n"); } else if (eax & 0x40000000) { tjmax = tjmax_ee; } } else if (tjmax == 100000) { /* * If we don't use msr EE it means we are desktop CPU * (with exeception of Atom) */ dev_warn(dev, "Using relative temperature scale!\n"); } return tjmax; } static bool cpu_has_tjmax(struct cpuinfo_x86 *c) { u8 model = c->x86_model; return model > 0xe && model != 0x1c && model != 0x26 && model != 0x27 && model != 0x35 && model != 0x36; } static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) { int err; u32 eax, edx; u32 val; /* * A new feature of current Intel(R) processors, the * IA32_TEMPERATURE_TARGET contains the TjMax value */ err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); if (err) { if (cpu_has_tjmax(c)) dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); } else { val = (eax >> 16) & 0xff; /* * If the TjMax is not plausible, an assumption * will be used */ if (val) { dev_dbg(dev, "TjMax is %d degrees C\n", val); return val * 1000; } } if (force_tjmax) { dev_notice(dev, "TjMax forced to %d degrees C by user\n", force_tjmax); return force_tjmax * 1000; } /* * An assumption is made for early CPUs and unreadable MSR. * NOTE: the calculated value may not be correct. 
*/ return adjust_tjmax(c, id, dev); } static int create_core_attrs(struct temp_data *tdata, struct device *dev, int attr_no) { int i; static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, struct device_attribute *devattr, char *buf) = { show_label, show_crit_alarm, show_temp, show_tjmax, show_ttarget }; static const char *const names[TOTAL_ATTRS] = { "temp%d_label", "temp%d_crit_alarm", "temp%d_input", "temp%d_crit", "temp%d_max" }; for (i = 0; i < tdata->attr_size; i++) { snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], attr_no); sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; tdata->sd_attrs[i].index = attr_no; tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr; } tdata->attr_group.attrs = tdata->attrs; return sysfs_create_group(&dev->kobj, &tdata->attr_group); } static int chk_ucode_version(unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); /* * Check if we have problem with errata AE18 of Core processors: * Readings might stop update when processor visited too deep sleep, * fixed for stepping D0 (6EC). */ if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) { pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n"); return -ENODEV; } return 0; } static struct platform_device *coretemp_get_pdev(unsigned int cpu) { u16 phys_proc_id = TO_PHYS_ID(cpu); struct pdev_entry *p; mutex_lock(&pdev_list_mutex); list_for_each_entry(p, &pdev_list, list) if (p->phys_proc_id == phys_proc_id) { mutex_unlock(&pdev_list_mutex); return p->pdev; } mutex_unlock(&pdev_list_mutex); return NULL; } static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) { struct temp_data *tdata; tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL); if (!tdata) return NULL; tdata->status_reg = pkg_flag ? 
MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS; tdata->is_pkg_data = pkg_flag; tdata->cpu = cpu; tdata->cpu_core_id = TO_CORE_ID(cpu); tdata->attr_size = MAX_CORE_ATTRS; mutex_init(&tdata->update_lock); return tdata; } static int create_core_data(struct platform_device *pdev, unsigned int cpu, int pkg_flag) { struct temp_data *tdata; struct platform_data *pdata = platform_get_drvdata(pdev); struct cpuinfo_x86 *c = &cpu_data(cpu); u32 eax, edx; int err, attr_no; /* * Find attr number for sysfs: * We map the attr number to core id of the CPU * The attr number is always core id + 2 * The Pkgtemp will always show up as temp1_*, if available */ attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu); if (attr_no > MAX_CORE_DATA - 1) return -ERANGE; /* * Provide a single set of attributes for all HT siblings of a core * to avoid duplicate sensors (the processor ID and core ID of all * HT siblings of a core are the same). * Skip if a HT sibling of this core is already registered. * This is not an error. */ if (pdata->core_data[attr_no] != NULL) return 0; tdata = init_temp_data(cpu, pkg_flag); if (!tdata) return -ENOMEM; /* Test if we can access the status register */ err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx); if (err) goto exit_free; /* We can access status register. Get Critical Temperature */ tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); /* * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET. * The target temperature is available on older CPUs but not in this * register. Atoms don't have the register at all. 
*/ if (c->x86_model > 0xe && c->x86_model != 0x1c) { err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); if (!err) { tdata->ttarget = tdata->tjmax - ((eax >> 8) & 0xff) * 1000; tdata->attr_size++; } } pdata->core_data[attr_no] = tdata; /* Create sysfs interfaces */ err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no); if (err) goto exit_free; return 0; exit_free: pdata->core_data[attr_no] = NULL; kfree(tdata); return err; } static void coretemp_add_core(unsigned int cpu, int pkg_flag) { struct platform_device *pdev = coretemp_get_pdev(cpu); int err; if (!pdev) return; err = create_core_data(pdev, cpu, pkg_flag); if (err) dev_err(&pdev->dev, "Adding Core %u failed\n", cpu); } static void coretemp_remove_core(struct platform_data *pdata, int indx) { struct temp_data *tdata = pdata->core_data[indx]; /* Remove the sysfs attributes */ sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group); kfree(pdata->core_data[indx]); pdata->core_data[indx] = NULL; } static int coretemp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct platform_data *pdata; /* Initialize the per-package data structures */ pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL); if (!pdata) return -ENOMEM; pdata->phys_proc_id = pdev->id; platform_set_drvdata(pdev, pdata); pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME, pdata, NULL); return PTR_ERR_OR_ZERO(pdata->hwmon_dev); } static int coretemp_remove(struct platform_device *pdev) { struct platform_data *pdata = platform_get_drvdata(pdev); int i; for (i = MAX_CORE_DATA - 1; i >= 0; --i) if (pdata->core_data[i]) coretemp_remove_core(pdata, i); return 0; } static struct platform_driver coretemp_driver = { .driver = { .owner = THIS_MODULE, .name = DRVNAME, }, .probe = coretemp_probe, .remove = coretemp_remove, }; static int coretemp_device_add(unsigned int cpu) { int err; struct platform_device *pdev; struct pdev_entry *pdev_entry; 
mutex_lock(&pdev_list_mutex); pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu)); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit; } pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); if (!pdev_entry) { err = -ENOMEM; goto exit_device_put; } err = platform_device_add(pdev); if (err) { pr_err("Device addition failed (%d)\n", err); goto exit_device_free; } pdev_entry->pdev = pdev; pdev_entry->phys_proc_id = pdev->id; list_add_tail(&pdev_entry->list, &pdev_list); mutex_unlock(&pdev_list_mutex); return 0; exit_device_free: kfree(pdev_entry); exit_device_put: platform_device_put(pdev); exit: mutex_unlock(&pdev_list_mutex); return err; } static void coretemp_device_remove(unsigned int cpu) { struct pdev_entry *p, *n; u16 phys_proc_id = TO_PHYS_ID(cpu); mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { if (p->phys_proc_id != phys_proc_id) continue; platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } mutex_unlock(&pdev_list_mutex); } static bool is_any_core_online(struct platform_data *pdata) { int i; /* Find online cores, except pkgtemp data */ for (i = MAX_CORE_DATA - 1; i >= 0; --i) { if (pdata->core_data[i] && !pdata->core_data[i]->is_pkg_data) { return true; } } return false; } static void get_core_online(unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); struct platform_device *pdev = coretemp_get_pdev(cpu); int err; /* * CPUID.06H.EAX[0] indicates whether the CPU has thermal * sensors. We check this bit only, all the early CPUs * without thermal sensors will be filtered out. */ if (!cpu_has(c, X86_FEATURE_DTHERM)) return; if (!pdev) { /* Check the microcode version of the CPU */ if (chk_ucode_version(cpu)) return; /* * Alright, we have DTS support. * We are bringing the _first_ core in this pkg * online. So, initialize per-pkg data structures and * then bring this core online. 
*/ err = coretemp_device_add(cpu); if (err) return; /* * Check whether pkgtemp support is available. * If so, add interfaces for pkgtemp. */ if (cpu_has(c, X86_FEATURE_PTS)) coretemp_add_core(cpu, 1); } /* * Physical CPU device already exists. * So, just add interfaces for this core. */ coretemp_add_core(cpu, 0); } static void put_core_offline(unsigned int cpu) { int i, indx; struct platform_data *pdata; struct platform_device *pdev = coretemp_get_pdev(cpu); /* If the physical CPU device does not exist, just return */ if (!pdev) return; pdata = platform_get_drvdata(pdev); indx = TO_ATTR_NO(cpu); /* The core id is too big, just return */ if (indx > MAX_CORE_DATA - 1) return; if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu) coretemp_remove_core(pdata, indx); /* * If a HT sibling of a core is taken offline, but another HT sibling * of the same core is still online, register the alternate sibling. * This ensures that exactly one set of attributes is provided as long * as at least one HT sibling of a core is online. */ for_each_sibling(i, cpu) { if (i != cpu) { get_core_online(i); /* * Display temperature sensor data for one HT sibling * per core only, so abort the loop after one such * sibling has been found. */ break; } } /* * If all cores in this pkg are offline, remove the device. * coretemp_device_remove calls unregister_platform_device, * which in turn calls coretemp_remove. This removes the * pkgtemp entry and does other clean ups. 
*/ if (!is_any_core_online(pdata)) coretemp_device_remove(cpu); } static int coretemp_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long) hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: get_core_online(cpu); break; case CPU_DOWN_PREPARE: put_core_offline(cpu); break; } return NOTIFY_OK; } static struct notifier_block coretemp_cpu_notifier __refdata = { .notifier_call = coretemp_cpu_callback, }; static const struct x86_cpu_id __initconst coretemp_ids[] = { { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM }, {} }; MODULE_DEVICE_TABLE(x86cpu, coretemp_ids); static int __init coretemp_init(void) { int i, err; /* * CPUID.06H.EAX[0] indicates whether the CPU has thermal * sensors. We check this bit only, all the early CPUs * without thermal sensors will be filtered out. */ if (!x86_match_cpu(coretemp_ids)) return -ENODEV; err = platform_driver_register(&coretemp_driver); if (err) goto exit; cpu_notifier_register_begin(); for_each_online_cpu(i) get_core_online(i); #ifndef CONFIG_HOTPLUG_CPU if (list_empty(&pdev_list)) { cpu_notifier_register_done(); err = -ENODEV; goto exit_driver_unreg; } #endif __register_hotcpu_notifier(&coretemp_cpu_notifier); cpu_notifier_register_done(); return 0; #ifndef CONFIG_HOTPLUG_CPU exit_driver_unreg: platform_driver_unregister(&coretemp_driver); #endif exit: return err; } static void __exit coretemp_exit(void) { struct pdev_entry *p, *n; cpu_notifier_register_begin(); __unregister_hotcpu_notifier(&coretemp_cpu_notifier); mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } mutex_unlock(&pdev_list_mutex); cpu_notifier_register_done(); platform_driver_unregister(&coretemp_driver); } MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>"); MODULE_DESCRIPTION("Intel Core temperature monitor"); MODULE_LICENSE("GPL"); module_init(coretemp_init) 
module_exit(coretemp_exit)
gpl-2.0
TheGreatSega/Rush-Kernel
drivers/input/mouse/lifebook.c
503
8184
/* * Fujitsu B-series Lifebook PS/2 TouchScreen driver * * Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Kenan Esau <kenan.esau@conan.de> * * TouchScreen detection, absolute mode setting and packet layout is taken from * Harald Hoyer's description of the device. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/input.h> #include <linux/serio.h> #include <linux/libps2.h> #include <linux/dmi.h> #include "psmouse.h" #include "lifebook.h" struct lifebook_data { struct input_dev *dev2; /* Relative device */ char phys[32]; }; static const char *desired_serio_phys; static int lifebook_set_serio_phys(const struct dmi_system_id *d) { desired_serio_phys = d->driver_data; return 0; } static bool lifebook_use_6byte_proto; static int lifebook_set_6byte_proto(const struct dmi_system_id *d) { lifebook_use_6byte_proto = true; return 0; } static const struct dmi_system_id lifebook_dmi_table[] = { { .ident = "FLORA-ie 55mi", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "FLORA-ie 55mi"), }, }, { .ident = "LifeBook B", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B Series"), }, }, { .ident = "Lifebook B", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK B Series"), }, }, { .ident = "Lifebook B-2130", .matches = { DMI_MATCH(DMI_BOARD_NAME, "ZEPHYR"), }, }, { .ident = "Lifebook B213x/B2150", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B2131/B2133/B2150"), }, }, { .ident = "Zephyr", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "ZEPHYR"), }, }, { .ident = "CF-18", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"), }, .callback = lifebook_set_serio_phys, .driver_data = "isa0060/serio3", }, { .ident = "Panasonic CF-28", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), DMI_MATCH(DMI_PRODUCT_NAME, "CF-28"), }, .callback = lifebook_set_6byte_proto, }, { .ident = "Panasonic CF-29", .matches = { 
DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"), }, .callback = lifebook_set_6byte_proto, }, { .ident = "CF-72", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "CF-72"), }, .callback = lifebook_set_6byte_proto, }, { .ident = "Lifebook B142", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B142"), }, }, { } }; static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse) { struct lifebook_data *priv = psmouse->private; struct input_dev *dev1 = psmouse->dev; struct input_dev *dev2 = priv ? priv->dev2 : NULL; unsigned char *packet = psmouse->packet; bool relative_packet = packet[0] & 0x08; if (relative_packet || !lifebook_use_6byte_proto) { if (psmouse->pktcnt != 3) return PSMOUSE_GOOD_DATA; } else { switch (psmouse->pktcnt) { case 1: return (packet[0] & 0xf8) == 0x00 ? PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA; case 2: return PSMOUSE_GOOD_DATA; case 3: return ((packet[2] & 0x30) << 2) == (packet[2] & 0xc0) ? PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA; case 4: return (packet[3] & 0xf8) == 0xc0 ? PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA; case 5: return (packet[4] & 0xc0) == (packet[2] & 0xc0) ? 
PSMOUSE_GOOD_DATA : PSMOUSE_BAD_DATA; case 6: if (((packet[5] & 0x30) << 2) != (packet[5] & 0xc0)) return PSMOUSE_BAD_DATA; if ((packet[5] & 0xc0) != (packet[1] & 0xc0)) return PSMOUSE_BAD_DATA; break; /* report data */ } } if (relative_packet) { if (!dev2) printk(KERN_WARNING "lifebook.c: got relative packet " "but no relative device set up\n"); } else { if (lifebook_use_6byte_proto) { input_report_abs(dev1, ABS_X, ((packet[1] & 0x3f) << 6) | (packet[2] & 0x3f)); input_report_abs(dev1, ABS_Y, 4096 - (((packet[4] & 0x3f) << 6) | (packet[5] & 0x3f))); } else { input_report_abs(dev1, ABS_X, (packet[1] | ((packet[0] & 0x30) << 4))); input_report_abs(dev1, ABS_Y, 1024 - (packet[2] | ((packet[0] & 0xC0) << 2))); } input_report_key(dev1, BTN_TOUCH, packet[0] & 0x04); input_sync(dev1); } if (dev2) { if (relative_packet) { input_report_rel(dev2, REL_X, ((packet[0] & 0x10) ? packet[1] - 256 : packet[1])); input_report_rel(dev2, REL_Y, -(int)((packet[0] & 0x20) ? packet[2] - 256 : packet[2])); } input_report_key(dev2, BTN_LEFT, packet[0] & 0x01); input_report_key(dev2, BTN_RIGHT, packet[0] & 0x02); input_sync(dev2); } return PSMOUSE_FULL_PACKET; } static int lifebook_absolute_mode(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param; if (psmouse_reset(psmouse)) return -1; /* Enable absolute output -- ps2_command fails always but if you leave this call out the touchsreen will never send absolute coordinates */ param = lifebook_use_6byte_proto ? 
0x08 : 0x07; ps2_command(ps2dev, &param, PSMOUSE_CMD_SETRES); return 0; } static void lifebook_relative_mode(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param = 0x06; ps2_command(ps2dev, &param, PSMOUSE_CMD_SETRES); } static void lifebook_set_resolution(struct psmouse *psmouse, unsigned int resolution) { static const unsigned char params[] = { 0, 1, 2, 2, 3 }; unsigned char p; if (resolution == 0 || resolution > 400) resolution = 400; p = params[resolution / 100]; ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES); psmouse->resolution = 50 << p; } static void lifebook_disconnect(struct psmouse *psmouse) { struct lifebook_data *priv = psmouse->private; psmouse_reset(psmouse); if (priv) { input_unregister_device(priv->dev2); kfree(priv); } psmouse->private = NULL; } int lifebook_detect(struct psmouse *psmouse, bool set_properties) { if (!dmi_check_system(lifebook_dmi_table)) return -1; if (desired_serio_phys && strcmp(psmouse->ps2dev.serio->phys, desired_serio_phys)) return -1; if (set_properties) { psmouse->vendor = "Fujitsu"; psmouse->name = "Lifebook TouchScreen"; } return 0; } static int lifebook_create_relative_device(struct psmouse *psmouse) { struct input_dev *dev2; struct lifebook_data *priv; int error = -ENOMEM; priv = kzalloc(sizeof(struct lifebook_data), GFP_KERNEL); dev2 = input_allocate_device(); if (!priv || !dev2) goto err_out; priv->dev2 = dev2; snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys); dev2->phys = priv->phys; dev2->name = "PS/2 Touchpad"; dev2->id.bustype = BUS_I8042; dev2->id.vendor = 0x0002; dev2->id.product = PSMOUSE_LIFEBOOK; dev2->id.version = 0x0000; dev2->dev.parent = &psmouse->ps2dev.serio->dev; dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); dev2->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); error = input_register_device(priv->dev2); if (error) goto err_out; 
psmouse->private = priv; return 0; err_out: input_free_device(dev2); kfree(priv); return error; } int lifebook_init(struct psmouse *psmouse) { struct input_dev *dev1 = psmouse->dev; int max_coord = lifebook_use_6byte_proto ? 4096 : 1024; if (lifebook_absolute_mode(psmouse)) return -1; dev1->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); dev1->relbit[0] = 0; dev1->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(dev1, ABS_X, 0, max_coord, 0, 0); input_set_abs_params(dev1, ABS_Y, 0, max_coord, 0, 0); if (!desired_serio_phys) { if (lifebook_create_relative_device(psmouse)) { lifebook_relative_mode(psmouse); return -1; } } psmouse->protocol_handler = lifebook_process_byte; psmouse->set_resolution = lifebook_set_resolution; psmouse->disconnect = lifebook_disconnect; psmouse->reconnect = lifebook_absolute_mode; psmouse->model = lifebook_use_6byte_proto ? 6 : 3; /* * Use packet size = 3 even when using 6-byte protocol because * that's what POLL will return on Lifebooks (according to spec). */ psmouse->pktsize = 3; return 0; }
gpl-2.0
kbc-developers/android_kernel_samsung_d2dcm
drivers/net/can/usb/peak_usb/pcan_usb_core.c
503
22709
/* * CAN driver for PEAK System USB adapters * Derived from the PCAN project file driver/src/pcan_usb_core.c * * Copyright (C) 2003-2010 PEAK System-Technik GmbH * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com> * * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published * by the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/init.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "pcan_usb_core.h" MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters"); MODULE_LICENSE("GPL v2"); /* Table of devices that work with this driver */ static struct usb_device_id peak_usb_table[] = { {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)}, {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, peak_usb_table); /* List of supported PCAN-USB adapters (NULL terminated list) */ static struct peak_usb_adapter *peak_usb_adapters_list[] = { &pcan_usb, &pcan_usb_pro, NULL, }; /* * dump memory */ #define DUMP_WIDTH 16 void dump_mem(char *prompt, void *p, int l) { pr_info("%s dumping %s (%d bytes):\n", PCAN_USB_DRIVER_NAME, prompt ? 
prompt : "memory", l); print_hex_dump(KERN_INFO, PCAN_USB_DRIVER_NAME " ", DUMP_PREFIX_NONE, DUMP_WIDTH, 1, p, l, false); } /* * initialize a time_ref object with usb adapter own settings */ void peak_usb_init_time_ref(struct peak_time_ref *time_ref, struct peak_usb_adapter *adapter) { if (time_ref) { memset(time_ref, 0, sizeof(struct peak_time_ref)); time_ref->adapter = adapter; } } static void peak_usb_add_us(struct timeval *tv, u32 delta_us) { /* number of s. to add to final time */ u32 delta_s = delta_us / 1000000; delta_us -= delta_s * 1000000; tv->tv_usec += delta_us; if (tv->tv_usec >= 1000000) { tv->tv_usec -= 1000000; delta_s++; } tv->tv_sec += delta_s; } /* * sometimes, another now may be more recent than current one... */ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now) { time_ref->ts_dev_2 = ts_now; /* should wait at least two passes before computing */ if (time_ref->tv_host.tv_sec > 0) { u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1; if (time_ref->ts_dev_2 < time_ref->ts_dev_1) delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1; time_ref->ts_total += delta_ts; } } /* * register device timestamp as now */ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now) { if (time_ref->tv_host_0.tv_sec == 0) { /* use monotonic clock to correctly compute further deltas */ time_ref->tv_host_0 = ktime_to_timeval(ktime_get()); time_ref->tv_host.tv_sec = 0; } else { /* * delta_us should not be >= 2^32 => delta_s should be < 4294 * handle 32-bits wrapping here: if count of s. 
reaches 4200, * reset counters and change time base */ if (time_ref->tv_host.tv_sec != 0) { u32 delta_s = time_ref->tv_host.tv_sec - time_ref->tv_host_0.tv_sec; if (delta_s > 4200) { time_ref->tv_host_0 = time_ref->tv_host; time_ref->ts_total = 0; } } time_ref->tv_host = ktime_to_timeval(ktime_get()); time_ref->tick_count++; } time_ref->ts_dev_1 = time_ref->ts_dev_2; peak_usb_update_ts_now(time_ref, ts_now); } /* * compute timeval according to current ts and time_ref data */ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts, struct timeval *tv) { /* protect from getting timeval before setting now */ if (time_ref->tv_host.tv_sec > 0) { u64 delta_us; delta_us = ts - time_ref->ts_dev_2; if (ts < time_ref->ts_dev_2) delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1; delta_us += time_ref->ts_total; delta_us *= time_ref->adapter->us_per_ts_scale; delta_us >>= time_ref->adapter->us_per_ts_shift; *tv = time_ref->tv_host_0; peak_usb_add_us(tv, (u32)delta_us); } else { *tv = ktime_to_timeval(ktime_get()); } } /* * callback for bulk Rx urb */ static void peak_usb_read_bulk_callback(struct urb *urb) { struct peak_usb_device *dev = urb->context; struct net_device *netdev; int err; netdev = dev->netdev; if (!netif_device_present(netdev)) return; /* check reception status */ switch (urb->status) { case 0: /* success */ break; case -EILSEQ: case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: return; default: if (net_ratelimit()) netdev_err(netdev, "Rx urb aborted (%d)\n", urb->status); goto resubmit_urb; } /* protect from any incoming empty msgs */ if ((urb->actual_length > 0) && (dev->adapter->dev_decode_buf)) { /* handle these kinds of msgs only if _start callback called */ if (dev->state & PCAN_USB_STATE_STARTED) { err = dev->adapter->dev_decode_buf(dev, urb); if (err) dump_mem("received usb message", urb->transfer_buffer, urb->transfer_buffer_length); } } resubmit_urb: usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep_msg_in), 
urb->transfer_buffer, dev->adapter->rx_buffer_size, peak_usb_read_bulk_callback, dev); usb_anchor_urb(urb, &dev->rx_submitted); err = usb_submit_urb(urb, GFP_ATOMIC); if (!err) return; usb_unanchor_urb(urb); if (err == -ENODEV) netif_device_detach(netdev); else netdev_err(netdev, "failed resubmitting read bulk urb: %d\n", err); } /* * callback for bulk Tx urb */ static void peak_usb_write_bulk_callback(struct urb *urb) { struct peak_tx_urb_context *context = urb->context; struct peak_usb_device *dev; struct net_device *netdev; BUG_ON(!context); dev = context->dev; netdev = dev->netdev; atomic_dec(&dev->active_tx_urbs); if (!netif_device_present(netdev)) return; /* check tx status */ switch (urb->status) { case 0: /* transmission complete */ netdev->stats.tx_packets++; netdev->stats.tx_bytes += context->dlc; /* prevent tx timeout */ netdev->trans_start = jiffies; break; default: if (net_ratelimit()) netdev_err(netdev, "Tx urb aborted (%d)\n", urb->status); case -EPROTO: case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: break; } /* should always release echo skb and corresponding context */ can_get_echo_skb(netdev, context->echo_index); context->echo_index = PCAN_USB_MAX_TX_URBS; /* do wakeup tx queue in case of success only */ if (!urb->status) netif_wake_queue(netdev); } /* * called by netdev to send one skb on the CAN interface. 
 */
static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct peak_usb_device *dev = netdev_priv(netdev);
	struct peak_tx_urb_context *context = NULL;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct urb *urb;
	u8 *obuf;
	int i, err;
	size_t size = dev->adapter->tx_buffer_size;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/*
	 * Pick the first free tx context: a context is free when its
	 * echo_index equals PCAN_USB_MAX_TX_URBS (the sentinel value set
	 * at init time and restored by the tx completion callback).
	 */
	for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++)
		if (dev->tx_contexts[i].echo_index == PCAN_USB_MAX_TX_URBS) {
			context = dev->tx_contexts + i;
			break;
		}

	if (!context) {
		/* should not occur except during restart */
		return NETDEV_TX_BUSY;
	}

	urb = context->urb;
	obuf = urb->transfer_buffer;

	/* let the adapter-specific code build the usb message in obuf */
	err = dev->adapter->dev_encode_msg(dev, skb, obuf, &size);
	if (err) {
		if (net_ratelimit())
			netdev_err(netdev, "packet dropped\n");
		dev_kfree_skb(skb);
		stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* mark the context busy and remember the dlc for tx accounting */
	context->echo_index = i;
	context->dlc = cf->can_dlc;

	usb_anchor_urb(urb, &dev->tx_submitted);

	can_put_echo_skb(skb, netdev, context->echo_index);

	atomic_inc(&dev->active_tx_urbs);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		/* submission failed: undo the echo skb / anchor / counter */
		can_free_echo_skb(netdev, context->echo_index);

		usb_unanchor_urb(urb);

		/* this context is not used in fact */
		context->echo_index = PCAN_USB_MAX_TX_URBS;

		atomic_dec(&dev->active_tx_urbs);

		switch (err) {
		case -ENODEV:
			netif_device_detach(netdev);
			break;
		default:
			netdev_warn(netdev, "tx urb submitting failed err=%d\n",
				    err);
			/* fall through: count the frame as dropped too */
		case -ENOENT:
			/* cable unplugged */
			stats->tx_dropped++;
		}
	} else {
		/* prevent spurious tx timeouts while the urb is in flight */
		netdev->trans_start = jiffies;

		/* slow down tx path */
		if (atomic_read(&dev->active_tx_urbs) >= PCAN_USB_MAX_TX_URBS)
			netif_stop_queue(netdev);
	}

	return NETDEV_TX_OK;
}

/*
 * start the CAN interface.
 * Rx and Tx urbs are allocated here. Rx urbs are submitted here.
*/ static int peak_usb_start(struct peak_usb_device *dev) { struct net_device *netdev = dev->netdev; int err, i; for (i = 0; i < PCAN_USB_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; /* create a URB, and a buffer for it, to receive usb messages */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } buf = kmalloc(dev->adapter->rx_buffer_size, GFP_KERNEL); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; } usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep_msg_in), buf, dev->adapter->rx_buffer_size, peak_usb_read_bulk_callback, dev); /* ask last usb_free_urb() to also kfree() transfer_buffer */ urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &dev->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { if (err == -ENODEV) netif_device_detach(dev->netdev); usb_unanchor_urb(urb); kfree(buf); usb_free_urb(urb); break; } /* drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } /* did we submit any URBs? 
Warn if we was not able to submit all urbs */ if (i < PCAN_USB_MAX_RX_URBS) { if (i == 0) { netdev_err(netdev, "couldn't setup any rx URB\n"); return err; } netdev_warn(netdev, "rx performance may be slow\n"); } /* pre-alloc tx buffers and corresponding urbs */ for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) { struct peak_tx_urb_context *context; struct urb *urb; u8 *buf; /* create a URB and a buffer for it, to transmit usb messages */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } buf = kmalloc(dev->adapter->tx_buffer_size, GFP_KERNEL); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; } context = dev->tx_contexts + i; context->dev = dev; context->urb = urb; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->ep_msg_out), buf, dev->adapter->tx_buffer_size, peak_usb_write_bulk_callback, context); /* ask last usb_free_urb() to also kfree() transfer_buffer */ urb->transfer_flags |= URB_FREE_BUFFER; } /* warn if we were not able to allocate enough tx contexts */ if (i < PCAN_USB_MAX_TX_URBS) { if (i == 0) { netdev_err(netdev, "couldn't setup any tx URB\n"); return err; } netdev_warn(netdev, "tx performance may be slow\n"); } if (dev->adapter->dev_start) { err = dev->adapter->dev_start(dev); if (err) goto failed; } dev->state |= PCAN_USB_STATE_STARTED; /* can set bus on now */ if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 1); if (err) goto failed; } dev->can.state = CAN_STATE_ERROR_ACTIVE; return 0; failed: if (err == -ENODEV) netif_device_detach(dev->netdev); netdev_warn(netdev, "couldn't submit control: %d\n", err); return err; } /* * called by netdev to open the corresponding CAN interface. 
 */
static int peak_usb_ndo_open(struct net_device *netdev)
{
	struct peak_usb_device *dev = netdev_priv(netdev);
	int err;

	/* common open */
	err = open_candev(netdev);
	if (err)
		return err;

	/* finally start device */
	err = peak_usb_start(dev);
	if (err) {
		netdev_err(netdev, "couldn't start device: %d\n", err);
		close_candev(netdev);
		return err;
	}

	/* record open time (used by set_mode to refuse a closed device) */
	dev->open_time = jiffies;
	netif_start_queue(netdev);

	return 0;
}

/*
 * unlink in-flight Rx and Tx urbs and free their memory.
 */
static void peak_usb_unlink_all_urbs(struct peak_usb_device *dev)
{
	int i;

	/* free all Rx (submitted) urbs */
	usb_kill_anchored_urbs(&dev->rx_submitted);

	/* free unsubmitted Tx urbs first */
	for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) {
		struct urb *urb = dev->tx_contexts[i].urb;

		if (!urb ||
		    dev->tx_contexts[i].echo_index != PCAN_USB_MAX_TX_URBS) {
			/*
			 * this urb is already released or always submitted,
			 * let usb core free by itself
			 */
			continue;
		}

		/*
		 * URB_FREE_BUFFER is set on tx urbs, so usb_free_urb() also
		 * releases the transfer buffer
		 */
		usb_free_urb(urb);
		dev->tx_contexts[i].urb = NULL;
	}

	/* then free all submitted Tx urbs */
	usb_kill_anchored_urbs(&dev->tx_submitted);
	atomic_set(&dev->active_tx_urbs, 0);
}

/*
 * called by netdev to close the corresponding CAN interface.
 */
static int peak_usb_ndo_stop(struct net_device *netdev)
{
	struct peak_usb_device *dev = netdev_priv(netdev);

	/* clear STARTED first so the rx callback stops decoding buffers */
	dev->state &= ~PCAN_USB_STATE_STARTED;
	netif_stop_queue(netdev);

	/* unlink all pending urbs and free used memory */
	peak_usb_unlink_all_urbs(dev);

	if (dev->adapter->dev_stop)
		dev->adapter->dev_stop(dev);

	close_candev(netdev);

	dev->open_time = 0;
	dev->can.state = CAN_STATE_STOPPED;

	/* can set bus off now */
	if (dev->adapter->dev_set_bus) {
		int err = dev->adapter->dev_set_bus(dev, 0);
		if (err)
			return err;
	}

	return 0;
}

/*
 * handle end of waiting for the device to reset
 */
void peak_usb_restart_complete(struct peak_usb_device *dev)
{
	/* finally MUST update can state */
	dev->can.state = CAN_STATE_ERROR_ACTIVE;

	/* netdev queue can be awaken now */
	netif_wake_queue(dev->netdev);
}

/*
 * completion callback for asynchronous restart urbs: release the command
 * buffer allocated by peak_usb_restart() together with the urb itself
 */
void peak_usb_async_complete(struct urb *urb)
{
	kfree(urb->transfer_buffer);
	usb_free_urb(urb);
}

/*
 * device (auto-)restart mechanism runs in a timer context =>
 * MUST handle restart with asynchronous usb transfers
 */
static int peak_usb_restart(struct peak_usb_device *dev)
{
	struct urb *urb;
	int err;
	u8 *buf;

	/*
	 * if device doesn't define any asynchronous restart handler, simply
	 * wake the netdev queue up
	 */
	if (!dev->adapter->dev_restart_async) {
		peak_usb_restart_complete(dev);
		return 0;
	}

	/* first allocate a urb to handle the asynchronous steps */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(dev->netdev, "no memory left for urb\n");
		return -ENOMEM;
	}

	/* also allocate enough space for the commands to send */
	buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC);
	if (!buf) {
		netdev_err(dev->netdev, "no memory left for async cmd\n");
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/*
	 * call the device specific handler for the restart; on success,
	 * ownership of urb+buf passes to the completion path
	 * (peak_usb_async_complete frees them)
	 */
	err = dev->adapter->dev_restart_async(dev, urb, buf);
	if (!err)
		return 0;

	kfree(buf);
	usb_free_urb(urb);

	return err;
}

/*
 * candev callback used to change CAN mode.
 * Warning: this is called from a timer context!
 */
static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode)
{
	struct peak_usb_device *dev = netdev_priv(netdev);
	int err = 0;

	/* refuse mode changes while the interface is closed */
	if (!dev->open_time)
		return -EINVAL;

	switch (mode) {
	case CAN_MODE_START:
		/* asynchronous restart (see peak_usb_restart) */
		err = peak_usb_restart(dev);
		if (err)
			netdev_err(netdev, "couldn't start device (err %d)\n",
				   err);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return err;
}

/*
 * candev callback used to set device bitrate.
 */
static int peak_usb_set_bittiming(struct net_device *netdev)
{
	struct peak_usb_device *dev = netdev_priv(netdev);
	struct can_bittiming *bt = &dev->can.bittiming;

	/* delegate to the adapter handler when one is provided */
	if (dev->adapter->dev_set_bittiming) {
		int err = dev->adapter->dev_set_bittiming(dev, bt);

		if (err)
			netdev_info(netdev, "couldn't set bitrate (err %d)\n",
				    err);
		return err;
	}

	return 0;
}

/* netdev callbacks shared by all PEAK USB CAN channels */
static const struct net_device_ops peak_usb_netdev_ops = {
	.ndo_open = peak_usb_ndo_open,
	.ndo_stop = peak_usb_ndo_stop,
	.ndo_start_xmit = peak_usb_ndo_start_xmit,
};

/*
 * create one device which is attached to CAN controller #ctrl_idx of the
 * usb adapter.
*/ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, struct usb_interface *intf, int ctrl_idx) { struct usb_device *usb_dev = interface_to_usbdev(intf); int sizeof_candev = peak_usb_adapter->sizeof_dev_private; struct peak_usb_device *dev; struct net_device *netdev; int i, err; u16 tmp16; if (sizeof_candev < sizeof(struct peak_usb_device)) sizeof_candev = sizeof(struct peak_usb_device); netdev = alloc_candev(sizeof_candev, PCAN_USB_MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "%s: couldn't alloc candev\n", PCAN_USB_DRIVER_NAME); return -ENOMEM; } dev = netdev_priv(netdev); /* allocate a buffer large enough to send commands */ dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { dev_err(&intf->dev, "%s: couldn't alloc cmd buffer\n", PCAN_USB_DRIVER_NAME); err = -ENOMEM; goto lbl_free_candev; } dev->udev = usb_dev; dev->netdev = netdev; dev->adapter = peak_usb_adapter; dev->ctrl_idx = ctrl_idx; dev->state = PCAN_USB_STATE_CONNECTED; dev->ep_msg_in = peak_usb_adapter->ep_msg_in; dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx]; dev->can.clock = peak_usb_adapter->clock; dev->can.bittiming_const = &peak_usb_adapter->bittiming_const; dev->can.do_set_bittiming = peak_usb_set_bittiming; dev->can.do_set_mode = peak_usb_set_mode; dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY; netdev->netdev_ops = &peak_usb_netdev_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ init_usb_anchor(&dev->rx_submitted); init_usb_anchor(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) dev->tx_contexts[i].echo_index = PCAN_USB_MAX_TX_URBS; dev->prev_siblings = usb_get_intfdata(intf); usb_set_intfdata(intf, dev); SET_NETDEV_DEV(netdev, &intf->dev); err = register_candev(netdev); if (err) { dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); goto lbl_restore_intf_data; } if (dev->prev_siblings) (dev->prev_siblings)->next_siblings = 
dev; /* keep hw revision into the netdevice */ tmp16 = le16_to_cpu(usb_dev->descriptor.bcdDevice); dev->device_rev = tmp16 >> 8; if (dev->adapter->dev_init) { err = dev->adapter->dev_init(dev); if (err) goto lbl_unregister_candev; } /* set bus off */ if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 0); if (err) goto lbl_unregister_candev; } /* get device number early */ if (dev->adapter->dev_get_device_id) dev->adapter->dev_get_device_id(dev, &dev->device_number); netdev_info(netdev, "attached to %s channel %u (device %u)\n", peak_usb_adapter->name, ctrl_idx, dev->device_number); return 0; lbl_unregister_candev: unregister_candev(netdev); lbl_restore_intf_data: usb_set_intfdata(intf, dev->prev_siblings); kfree(dev->cmd_buf); lbl_free_candev: free_candev(netdev); return err; } /* * called by the usb core when the device is unplugged from the system */ static void peak_usb_disconnect(struct usb_interface *intf) { struct peak_usb_device *dev; /* unregister as many netdev devices as siblings */ for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) { struct net_device *netdev = dev->netdev; char name[IFNAMSIZ]; dev->state &= ~PCAN_USB_STATE_CONNECTED; strncpy(name, netdev->name, IFNAMSIZ); unregister_netdev(netdev); free_candev(netdev); kfree(dev->cmd_buf); dev->next_siblings = NULL; if (dev->adapter->dev_free) dev->adapter->dev_free(dev); dev_info(&intf->dev, "%s removed\n", name); } usb_set_intfdata(intf, NULL); } /* * probe function for new PEAK-System devices */ static int peak_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(intf); struct peak_usb_adapter *peak_usb_adapter, **pp; int i, err = -ENOMEM; usb_dev = interface_to_usbdev(intf); /* get corresponding PCAN-USB adapter */ for (pp = peak_usb_adapters_list; *pp; pp++) if ((*pp)->device_id == usb_dev->descriptor.idProduct) break; peak_usb_adapter = *pp; if (!peak_usb_adapter) { /* should never come except 
device_id bad usage in this file */ pr_err("%s: didn't find device id. 0x%x in devices list\n", PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct); return -ENODEV; } /* got corresponding adapter: check if it handles current interface */ if (peak_usb_adapter->intf_probe) { err = peak_usb_adapter->intf_probe(intf); if (err) return err; } for (i = 0; i < peak_usb_adapter->ctrl_count; i++) { err = peak_usb_create_dev(peak_usb_adapter, intf, i); if (err) { /* deregister already created devices */ peak_usb_disconnect(intf); break; } } return err; } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver peak_usb_driver = { .name = PCAN_USB_DRIVER_NAME, .disconnect = peak_usb_disconnect, .probe = peak_usb_probe, .id_table = peak_usb_table, }; static int __init peak_usb_init(void) { int err; /* register this driver with the USB subsystem */ err = usb_register(&peak_usb_driver); if (err) pr_err("%s: usb_register failed (err %d)\n", PCAN_USB_DRIVER_NAME, err); return err; } static int peak_usb_do_device_exit(struct device *d, void *arg) { struct usb_interface *intf = to_usb_interface(d); struct peak_usb_device *dev; /* stop as many netdev devices as siblings */ for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) { struct net_device *netdev = dev->netdev; if (netif_device_present(netdev)) if (dev->adapter->dev_exit) dev->adapter->dev_exit(dev); } return 0; } static void __exit peak_usb_exit(void) { int err; /* last chance do send any synchronous commands here */ err = driver_for_each_device(&peak_usb_driver.drvwrap.driver, NULL, NULL, peak_usb_do_device_exit); if (err) pr_err("%s: failed to stop all can devices (err %d)\n", PCAN_USB_DRIVER_NAME, err); /* deregister this driver with the USB subsystem */ usb_deregister(&peak_usb_driver); pr_info("%s: PCAN-USB interfaces driver unloaded\n", PCAN_USB_DRIVER_NAME); } module_init(peak_usb_init); module_exit(peak_usb_exit);
gpl-2.0
kodos96/backport
drivers/media/dvb/dvb-core/dvb_frontend.c
759
61612
/* * dvb_frontend.c: DVB frontend tuning interface/thread * * * Copyright (C) 1999-2001 Ralph Metzler * Marcus Metzler * Holger Waechtler * for convergence integrated media GmbH * * Copyright (C) 2004 Andrew de Quincey (tuning thread cleanup) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Or, point your browser to http://www.gnu.org/copyleft/gpl.html */ #include <linux/string.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/semaphore.h> #include <linux/module.h> #include <linux/list.h> #include <linux/freezer.h> #include <linux/jiffies.h> #include <linux/smp_lock.h> #include <linux/kthread.h> #include <asm/processor.h> #include "dvb_frontend.h" #include "dvbdev.h" #include <linux/dvb/version.h> static int dvb_frontend_debug; static int dvb_shutdown_timeout; static int dvb_force_auto_inversion; static int dvb_override_tune_delay; static int dvb_powerdown_on_sleep = 1; static int dvb_mfe_wait_time = 5; module_param_named(frontend_debug, dvb_frontend_debug, int, 0644); MODULE_PARM_DESC(frontend_debug, "Turn on/off frontend core debugging (default:off)."); module_param(dvb_shutdown_timeout, int, 0644); MODULE_PARM_DESC(dvb_shutdown_timeout, "wait <shutdown_timeout> seconds after close() before suspending hardware"); 
module_param(dvb_force_auto_inversion, int, 0644); MODULE_PARM_DESC(dvb_force_auto_inversion, "0: normal (default), 1: INVERSION_AUTO forced always"); module_param(dvb_override_tune_delay, int, 0644); MODULE_PARM_DESC(dvb_override_tune_delay, "0: normal (default), >0 => delay in milliseconds to wait for lock after a tune attempt"); module_param(dvb_powerdown_on_sleep, int, 0644); MODULE_PARM_DESC(dvb_powerdown_on_sleep, "0: do not power down, 1: turn LNB voltage off on sleep (default)"); module_param(dvb_mfe_wait_time, int, 0644); MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open() for multi-frontend to become available (default:5 seconds)"); #define dprintk if (dvb_frontend_debug) printk #define FESTATE_IDLE 1 #define FESTATE_RETUNE 2 #define FESTATE_TUNING_FAST 4 #define FESTATE_TUNING_SLOW 8 #define FESTATE_TUNED 16 #define FESTATE_ZIGZAG_FAST 32 #define FESTATE_ZIGZAG_SLOW 64 #define FESTATE_DISEQC 128 #define FESTATE_ERROR 256 #define FESTATE_WAITFORLOCK (FESTATE_TUNING_FAST | FESTATE_TUNING_SLOW | FESTATE_ZIGZAG_FAST | FESTATE_ZIGZAG_SLOW | FESTATE_DISEQC) #define FESTATE_SEARCHING_FAST (FESTATE_TUNING_FAST | FESTATE_ZIGZAG_FAST) #define FESTATE_SEARCHING_SLOW (FESTATE_TUNING_SLOW | FESTATE_ZIGZAG_SLOW) #define FESTATE_LOSTLOCK (FESTATE_ZIGZAG_FAST | FESTATE_ZIGZAG_SLOW) #define FE_ALGO_HW 1 /* * FESTATE_IDLE. No tuning parameters have been supplied and the loop is idling. * FESTATE_RETUNE. Parameters have been supplied, but we have not yet performed the first tune. * FESTATE_TUNING_FAST. Tuning parameters have been supplied and fast zigzag scan is in progress. * FESTATE_TUNING_SLOW. Tuning parameters have been supplied. Fast zigzag failed, so we're trying again, but slower. * FESTATE_TUNED. The frontend has successfully locked on. * FESTATE_ZIGZAG_FAST. The lock has been lost, and a fast zigzag has been initiated to try and regain it. * FESTATE_ZIGZAG_SLOW. The lock has been lost. 
 Fast zigzag has been failed, so we're trying again, but slower.
 * FESTATE_DISEQC. A DISEQC command has just been issued.
 * FESTATE_WAITFORLOCK. When we're waiting for a lock.
 * FESTATE_SEARCHING_FAST. When we're searching for a signal using a fast zigzag scan.
 * FESTATE_SEARCHING_SLOW. When we're searching for a signal using a slow zigzag scan.
 * FESTATE_LOSTLOCK. When the lock has been lost, and we're searching it again.
 */

/* Reasons recorded in dvb_frontend_private.exit for the thread to quit. */
#define DVB_FE_NO_EXIT 0
#define DVB_FE_NORMAL_EXIT 1
#define DVB_FE_DEVICE_REMOVED 2

static DEFINE_MUTEX(frontend_mutex);

/* Per-frontend state owned by the core (not by the demod driver). */
struct dvb_frontend_private {

	/* thread/frontend values */
	struct dvb_device *dvbdev;
	struct dvb_frontend_parameters parameters;
	struct dvb_fe_events events;
	struct semaphore sem;
	struct list_head list_head;
	wait_queue_head_t wait_queue;
	struct task_struct *thread;
	unsigned long release_jiffies;
	unsigned int exit;
	unsigned int wakeup;
	fe_status_t status;
	unsigned long tune_mode_flags;
	unsigned int delay;
	unsigned int reinitialise;
	int tone;
	int voltage;

	/* swzigzag values */
	unsigned int state;
	unsigned int bending;
	int lnb_drift;
	unsigned int inversion;
	unsigned int auto_step;
	unsigned int auto_sub_step;
	unsigned int started_auto_step;
	unsigned int min_delay;
	unsigned int max_drift;
	unsigned int step_size;
	int quality;
	unsigned int check_wrapped;
	enum dvbfe_search algo_status;
};

static void dvb_frontend_wakeup(struct dvb_frontend *fe);

/* Append a status-change event to the frontend's event ring buffer and
 * wake any readers.  On ring overflow the oldest event is dropped. */
static void dvb_frontend_add_event(struct dvb_frontend *fe, fe_status_t status)
{
	struct dvb_frontend_private *fepriv = fe->frontend_priv;
	struct dvb_fe_events *events = &fepriv->events;
	struct dvb_frontend_event *e;
	int wp;

	dprintk ("%s\n", __func__);

	if (mutex_lock_interruptible (&events->mtx))
		return;

	wp = (events->eventw + 1) % MAX_EVENT;

	if (wp == events->eventr) {
		events->overflow = 1;
		events->eventr = (events->eventr + 1) % MAX_EVENT;
	}

	e = &events->events[events->eventw];

	memcpy (&e->parameters, &fepriv->parameters,
		sizeof (struct dvb_frontend_parameters));

	/* snapshot the frontend's actual parameters once we have a lock */
	if (status & FE_HAS_LOCK)
		if (fe->ops.get_frontend)
			fe->ops.get_frontend(fe, &e->parameters);

	events->eventw = wp;

	mutex_unlock(&events->mtx);

	/* NOTE(review): e->status is written after the mutex is released;
	 * looks racy against a concurrent reader - confirm intended. */
	e->status = status;

	wake_up_interruptible (&events->wait_queue);
}

/* Pop the oldest event from the ring into *event, blocking unless
 * O_NONBLOCK is set.  Returns -EOVERFLOW once after events were dropped. */
static int dvb_frontend_get_event(struct dvb_frontend *fe,
			    struct dvb_frontend_event *event, int flags)
{
	struct dvb_frontend_private *fepriv = fe->frontend_priv;
	struct dvb_fe_events *events = &fepriv->events;

	dprintk ("%s\n", __func__);

	if (events->overflow) {
		events->overflow = 0;
		return -EOVERFLOW;
	}

	if (events->eventw == events->eventr) {
		int ret;

		if (flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		/* release the frontend semaphore while we sleep so the
		 * tuning thread can still make progress */
		up(&fepriv->sem);

		ret = wait_event_interruptible (events->wait_queue,
						events->eventw != events->eventr);

		if (down_interruptible (&fepriv->sem))
			return -ERESTARTSYS;

		if (ret < 0)
			return ret;
	}

	if (mutex_lock_interruptible (&events->mtx))
		return -ERESTARTSYS;

	memcpy (event, &events->events[events->eventr],
		sizeof(struct dvb_frontend_event));

	events->eventr = (events->eventr + 1) % MAX_EVENT;

	mutex_unlock(&events->mtx);

	return 0;
}

/* Run the demod's and tuner's init hooks, gating the tuner I2C bus
 * around the tuner call when the demod provides a gate control. */
static void dvb_frontend_init(struct dvb_frontend *fe)
{
	dprintk ("DVB: initialising adapter %i frontend %i (%s)...\n",
		 fe->dvb->num,
		 fe->id,
		 fe->ops.info.name);

	if (fe->ops.init)
		fe->ops.init(fe);
	if (fe->ops.tuner_ops.init) {
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 1);
		fe->ops.tuner_ops.init(fe);
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}
}

/* Ask the tuning thread to re-run dvb_frontend_init() on its next pass. */
void dvb_frontend_reinitialise(struct dvb_frontend *fe)
{
	struct dvb_frontend_private *fepriv = fe->frontend_priv;

	fepriv->reinitialise = 1;
	dvb_frontend_wakeup(fe);
}
EXPORT_SYMBOL(dvb_frontend_reinitialise);

/* Update the exponentially-smoothed signal "quality" estimate and derive
 * the thread's polling delay from it (better quality -> longer delay,
 * up to min_delay + HZ). */
static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepriv, int locked)
{
	int q2;

	dprintk ("%s\n", __func__);

	if (locked)
		(fepriv->quality) = (fepriv->quality * 220 + 36*256) / 256;
	else
		(fepriv->quality) = (fepriv->quality * 220 + 0) / 256;

	q2 = fepriv->quality - 128;
	q2 *= q2;

	fepriv->delay = fepriv->min_delay + q2 * HZ / (128*128);
}

/**
 * Performs automatic twiddling
 of frontend parameters.
 *
 * @param fe The frontend concerned.
 * @param check_wrapped Checks if an iteration has completed. DO NOT SET ON THE FIRST ATTEMPT
 * @returns Number of complete iterations that have been performed.
 */
static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wrapped)
{
	int autoinversion;
	int ready = 0;
	int fe_set_err = 0;
	struct dvb_frontend_private *fepriv = fe->frontend_priv;
	/* remember the caller-supplied values; the drift/inversion applied
	 * below is undone before returning */
	int original_inversion = fepriv->parameters.inversion;
	u32 original_frequency = fepriv->parameters.frequency;

	/* are we using autoinversion? */
	autoinversion = ((!(fe->ops.info.caps & FE_CAN_INVERSION_AUTO)) &&
			 (fepriv->parameters.inversion == INVERSION_AUTO));

	/* setup parameters correctly */
	while(!ready) {
		/* calculate the lnb_drift */
		fepriv->lnb_drift = fepriv->auto_step * fepriv->step_size;

		/* wrap the auto_step if we've exceeded the maximum drift */
		if (fepriv->lnb_drift > fepriv->max_drift) {
			fepriv->auto_step = 0;
			fepriv->auto_sub_step = 0;
			fepriv->lnb_drift = 0;
		}

		/* perform inversion and +/- zigzag */
		switch(fepriv->auto_sub_step) {
		case 0:
			/* try with the current inversion and current drift setting */
			ready = 1;
			break;

		case 1:
			if (!autoinversion) break;

			fepriv->inversion = (fepriv->inversion == INVERSION_OFF) ? INVERSION_ON : INVERSION_OFF;
			ready = 1;
			break;

		case 2:
			if (fepriv->lnb_drift == 0) break;

			fepriv->lnb_drift = -fepriv->lnb_drift;
			ready = 1;
			break;

		case 3:
			if (fepriv->lnb_drift == 0) break;
			if (!autoinversion) break;

			fepriv->inversion = (fepriv->inversion == INVERSION_OFF) ? INVERSION_ON : INVERSION_OFF;
			fepriv->lnb_drift = -fepriv->lnb_drift;
			ready = 1;
			break;

		default:
			fepriv->auto_step++;
			fepriv->auto_sub_step = -1; /* it'll be incremented to 0 in a moment */
			break;
		}

		if (!ready) fepriv->auto_sub_step++;
	}

	/* if this attempt would hit where we started, indicate a complete
	 * iteration has occurred */
	if ((fepriv->auto_step == fepriv->started_auto_step) &&
	    (fepriv->auto_sub_step == 0) && check_wrapped) {
		return 1;
	}

	dprintk("%s: drift:%i inversion:%i auto_step:%i "
		"auto_sub_step:%i started_auto_step:%i\n",
		__func__, fepriv->lnb_drift, fepriv->inversion,
		fepriv->auto_step, fepriv->auto_sub_step, fepriv->started_auto_step);

	/* set the frontend itself */
	fepriv->parameters.frequency += fepriv->lnb_drift;
	if (autoinversion)
		fepriv->parameters.inversion = fepriv->inversion;
	if (fe->ops.set_frontend)
		fe_set_err = fe->ops.set_frontend(fe, &fepriv->parameters);
	if (fe_set_err < 0) {
		fepriv->state = FESTATE_ERROR;
		return fe_set_err;
	}
	/* restore the caller's parameters after the probe tune */
	fepriv->parameters.frequency = original_frequency;
	fepriv->parameters.inversion = original_inversion;

	fepriv->auto_sub_step++;
	return 0;
}

/* One iteration of the software-zigzag tuning loop; drives the
 * FESTATE_* state machine and schedules the thread's next wakeup. */
static void dvb_frontend_swzigzag(struct dvb_frontend *fe)
{
	fe_status_t s = 0;
	int retval = 0;
	struct dvb_frontend_private *fepriv = fe->frontend_priv;

	/* if we've got no parameters, just keep idling */
	if (fepriv->state & FESTATE_IDLE) {
		fepriv->delay = 3*HZ;
		fepriv->quality = 0;
		return;
	}

	/* in SCAN mode, we just set the frontend when asked and leave it alone */
	if (fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT) {
		if (fepriv->state & FESTATE_RETUNE) {
			if (fe->ops.set_frontend)
				retval = fe->ops.set_frontend(fe, &fepriv->parameters);
			if (retval < 0)
				fepriv->state = FESTATE_ERROR;
			else
				fepriv->state = FESTATE_TUNED;
		}
		fepriv->delay = 3*HZ;
		fepriv->quality = 0;
		return;
	}

	/* get the frontend status */
	if (fepriv->state & FESTATE_RETUNE) {
		s = 0;
	} else {
		if (fe->ops.read_status)
			fe->ops.read_status(fe, &s);
		if (s != fepriv->status) {
			dvb_frontend_add_event(fe, s);
			fepriv->status = s;
		}
	}

	/* if we're not tuned, and we have a lock, move to the TUNED state */
	if ((fepriv->state & FESTATE_WAITFORLOCK) && (s & FE_HAS_LOCK)) {
		dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK);
		fepriv->state = FESTATE_TUNED;

		/* if we're tuned, then we have determined the correct inversion */
		if ((!(fe->ops.info.caps & FE_CAN_INVERSION_AUTO)) &&
		    (fepriv->parameters.inversion == INVERSION_AUTO)) {
			fepriv->parameters.inversion = fepriv->inversion;
		}
		return;
	}

	/* if we are tuned already, check we're still locked */
	if (fepriv->state & FESTATE_TUNED) {
		dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK);

		/* we're tuned, and the lock is still good... */
		if (s & FE_HAS_LOCK) {
			return;
		} else { /* if we _WERE_ tuned, but now don't have a lock */
			fepriv->state = FESTATE_ZIGZAG_FAST;
			fepriv->started_auto_step = fepriv->auto_step;
			fepriv->check_wrapped = 0;
		}
	}

	/* don't actually do anything if we're in the LOSTLOCK state,
	 * the frontend is set to FE_CAN_RECOVER, and the max_drift is 0 */
	if ((fepriv->state & FESTATE_LOSTLOCK) &&
	    (fe->ops.info.caps & FE_CAN_RECOVER) && (fepriv->max_drift == 0)) {
		dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK);
		return;
	}

	/* don't do anything if we're in the DISEQC state, since this
	 * might be someone with a motorized dish controlled by DISEQC.
	 * If its actually a re-tune, there will be a SET_FRONTEND soon enough. */
	if (fepriv->state & FESTATE_DISEQC) {
		dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK);
		return;
	}

	/* if we're in the RETUNE state, set everything up for a brand
	 * new scan, keeping the current inversion setting, as the next
	 * tune is _very_ likely to require the same */
	if (fepriv->state & FESTATE_RETUNE) {
		fepriv->lnb_drift = 0;
		fepriv->auto_step = 0;
		fepriv->auto_sub_step = 0;
		fepriv->started_auto_step = 0;
		fepriv->check_wrapped = 0;
	}

	/* fast zigzag.
 */
	if ((fepriv->state & FESTATE_SEARCHING_FAST) || (fepriv->state & FESTATE_RETUNE)) {
		fepriv->delay = fepriv->min_delay;

		/* perform a tune */
		retval = dvb_frontend_swzigzag_autotune(fe, fepriv->check_wrapped);
		if (retval < 0) {
			return;
		} else if (retval) {
			/* OK, if we've run out of trials at the fast speed.
			 * Drop back to slow for the _next_ attempt */
			fepriv->state = FESTATE_SEARCHING_SLOW;
			fepriv->started_auto_step = fepriv->auto_step;
			return;
		}
		fepriv->check_wrapped = 1;

		/* if we've just retuned, enter the ZIGZAG_FAST state.
		 * This ensures we cannot return from an
		 * FE_SET_FRONTEND ioctl before the first frontend tune
		 * occurs */
		if (fepriv->state & FESTATE_RETUNE) {
			fepriv->state = FESTATE_TUNING_FAST;
		}
	}

	/* slow zigzag */
	if (fepriv->state & FESTATE_SEARCHING_SLOW) {
		dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK);

		/* Note: don't bother checking for wrapping; we stay in this
		 * state until we get a lock */
		dvb_frontend_swzigzag_autotune(fe, 0);
	}
}

/* True when the tuning thread should terminate: an explicit exit request,
 * or the last writer released it longer than dvb_shutdown_timeout ago. */
static int dvb_frontend_is_exiting(struct dvb_frontend *fe)
{
	struct dvb_frontend_private *fepriv = fe->frontend_priv;

	if (fepriv->exit != DVB_FE_NO_EXIT)
		return 1;

	if (fepriv->dvbdev->writers == 1)
		if (time_after(jiffies, fepriv->release_jiffies +
				 dvb_shutdown_timeout * HZ))
			return 1;

	return 0;
}

/* Consume a pending wakeup request, or report an exit condition. */
static int dvb_frontend_should_wakeup(struct dvb_frontend *fe)
{
	struct dvb_frontend_private *fepriv = fe->frontend_priv;

	if (fepriv->wakeup) {
		fepriv->wakeup = 0;
		return 1;
	}
	return dvb_frontend_is_exiting(fe);
}

/* Request an immediate iteration of the tuning thread's loop. */
static void dvb_frontend_wakeup(struct dvb_frontend *fe)
{
	struct dvb_frontend_private *fepriv = fe->frontend_priv;

	fepriv->wakeup = 1;
	wake_up_interruptible(&fepriv->wait_queue);
}

/* Main tuning kthread: waits for work (or the per-state delay), then runs
 * one iteration of the hardware, software-zigzag or custom search
 * algorithm.  On exit, optionally powers the frontend down. */
static int dvb_frontend_thread(void *data)
{
	struct dvb_frontend *fe = data;
	struct dvb_frontend_private *fepriv = fe->frontend_priv;
	unsigned long timeout;
	fe_status_t s;
	enum dvbfe_algo algo;

	struct dvb_frontend_parameters *params;

	dprintk("%s\n", __func__);

	fepriv->check_wrapped = 0;
	fepriv->quality = 0;
	fepriv->delay = 3*HZ;
	fepriv->status = 0;
	fepriv->wakeup = 0;
	fepriv->reinitialise = 0;

	dvb_frontend_init(fe);

	set_freezable();
	while (1) {
		up(&fepriv->sem);	    /* is locked when we enter the thread... */
restart:
		timeout = wait_event_interruptible_timeout(fepriv->wait_queue,
			dvb_frontend_should_wakeup(fe) || kthread_should_stop()
				|| freezing(current),
			fepriv->delay);

		if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) {
			/* got signal or quitting */
			fepriv->exit = DVB_FE_NORMAL_EXIT;
			break;
		}

		if (try_to_freeze())
			goto restart;

		if (down_interruptible(&fepriv->sem))
			break;

		/* re-run the init hooks if dvb_frontend_reinitialise() asked
		 * for it, restoring the last tone/voltage settings */
		if (fepriv->reinitialise) {
			dvb_frontend_init(fe);
			if (fepriv->tone != -1) {
				fe->ops.set_tone(fe, fepriv->tone);
			}
			if (fepriv->voltage != -1) {
				fe->ops.set_voltage(fe, fepriv->voltage);
			}
			fepriv->reinitialise = 0;
		}

		/* do an iteration of the tuning loop */
		if (fe->ops.get_frontend_algo) {
			algo = fe->ops.get_frontend_algo(fe);
			switch (algo) {
			case DVBFE_ALGO_HW:
				dprintk("%s: Frontend ALGO = DVBFE_ALGO_HW\n", __func__);
				params = NULL; /* have we been asked to RETUNE ? */

				if (fepriv->state & FESTATE_RETUNE) {
					dprintk("%s: Retune requested, FESTATE_RETUNE\n", __func__);
					params = &fepriv->parameters;
					fepriv->state = FESTATE_TUNED;
				}

				if (fe->ops.tune)
					fe->ops.tune(fe, params, fepriv->tune_mode_flags, &fepriv->delay, &s);

				if (s != fepriv->status && !(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) {
					dprintk("%s: state changed, adding current state\n", __func__);
					dvb_frontend_add_event(fe, s);
					fepriv->status = s;
				}
				break;
			case DVBFE_ALGO_SW:
				dprintk("%s: Frontend ALGO = DVBFE_ALGO_SW\n", __func__);
				dvb_frontend_swzigzag(fe);
				break;
			case DVBFE_ALGO_CUSTOM:
				params = NULL; /* have we been asked to RETUNE ?
 */
				dprintk("%s: Frontend ALGO = DVBFE_ALGO_CUSTOM, state=%d\n", __func__, fepriv->state);
				if (fepriv->state & FESTATE_RETUNE) {
					/* NOTE(review): "FESTAT_RETUNE" typo is in the
					 * existing debug string; left untouched here. */
					dprintk("%s: Retune requested, FESTAT_RETUNE\n", __func__);
					params = &fepriv->parameters;
					fepriv->state = FESTATE_TUNED;
				}
				/* Case where we are going to search for a carrier
				 * User asked us to retune again for some reason, possibly
				 * requesting a search with a new set of parameters
				 */
				if (fepriv->algo_status & DVBFE_ALGO_SEARCH_AGAIN) {
					if (fe->ops.search) {
						fepriv->algo_status = fe->ops.search(fe, &fepriv->parameters);
						/* We did do a search as was requested, the flags are
						 * now unset as well and has the flags wrt to search.
						 */
					} else {
						fepriv->algo_status &= ~DVBFE_ALGO_SEARCH_AGAIN;
					}
				}
				/* Track the carrier if the search was successful */
				if (fepriv->algo_status == DVBFE_ALGO_SEARCH_SUCCESS) {
					if (fe->ops.track)
						fe->ops.track(fe, &fepriv->parameters);
				} else {
					/* search failed: retry soon */
					fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
					fepriv->delay = HZ / 2;
				}
				fe->ops.read_status(fe, &s);
				if (s != fepriv->status) {
					dvb_frontend_add_event(fe, s); /* update event list */
					fepriv->status = s;
					if (!(s & FE_HAS_LOCK)) {
						/* lost lock: poll fast and re-search */
						fepriv->delay = HZ / 10;
						fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
					} else {
						fepriv->delay = 60 * HZ;
					}
				}
				break;
			default:
				dprintk("%s: UNDEFINED ALGO !\n", __func__);
				break;
			}
		} else {
			dvb_frontend_swzigzag(fe);
		}
	}

	/* thread is exiting: optionally power the hardware down */
	if (dvb_powerdown_on_sleep) {
		if (fe->ops.set_voltage)
			fe->ops.set_voltage(fe, SEC_VOLTAGE_OFF);
		if (fe->ops.tuner_ops.sleep) {
			if (fe->ops.i2c_gate_ctrl)
				fe->ops.i2c_gate_ctrl(fe, 1);
			fe->ops.tuner_ops.sleep(fe);
			if (fe->ops.i2c_gate_ctrl)
				fe->ops.i2c_gate_ctrl(fe, 0);
		}
		if (fe->ops.sleep)
			fe->ops.sleep(fe);
	}

	fepriv->thread = NULL;
	if (kthread_should_stop())
		fepriv->exit = DVB_FE_DEVICE_REMOVED;
	else
		fepriv->exit = DVB_FE_NO_EXIT;
	mb();

	dvb_frontend_wakeup(fe);
	return 0;
}

/* Ask the tuning thread to stop and wait for it to exit. */
static void dvb_frontend_stop(struct dvb_frontend *fe)
{
	struct dvb_frontend_private *fepriv = fe->frontend_priv;

	dprintk ("%s\n", __func__);

	fepriv->exit =
 DVB_FE_NORMAL_EXIT;
	mb();

	if (!fepriv->thread)
		return;

	kthread_stop(fepriv->thread);

	init_MUTEX (&fepriv->sem);
	fepriv->state = FESTATE_IDLE;

	/* paranoia check in case a signal arrived */
	if (fepriv->thread)
		printk("dvb_frontend_stop: warning: thread %p won't exit\n",
				fepriv->thread);
}

/* Microsecond difference between two timevals, ignoring tv_sec;
 * assumes curtime is no more than one second after lasttime. */
s32 timeval_usec_diff(struct timeval lasttime, struct timeval curtime)
{
	return ((curtime.tv_usec < lasttime.tv_usec) ?
		1000000 - lasttime.tv_usec + curtime.tv_usec :
		curtime.tv_usec - lasttime.tv_usec);
}
EXPORT_SYMBOL(timeval_usec_diff);

/* Advance *curtime by add_usec, normalising tv_usec into [0, 1000000). */
static inline void timeval_usec_add(struct timeval *curtime, u32 add_usec)
{
	curtime->tv_usec += add_usec;
	if (curtime->tv_usec >= 1000000) {
		curtime->tv_usec -= 1000000;
		curtime->tv_sec++;
	}
}

/*
 * Sleep until gettimeofday() > waketime + add_usec
 * This needs to be as precise as possible, but as the delay is
 * usually between 2ms and 32ms, it is done using a scheduled msleep
 * followed by usleep (normally a busy-wait loop) for the remainder
 */
void dvb_frontend_sleep_until(struct timeval *waketime, u32 add_usec)
{
	struct timeval lasttime;
	s32 delta, newdelta;

	timeval_usec_add(waketime, add_usec);

	do_gettimeofday(&lasttime);
	delta = timeval_usec_diff(lasttime, *waketime);
	if (delta > 2500) {
		/* coarse sleep, leaving ~1.5ms for the busy-wait below */
		msleep((delta - 1500) / 1000);
		do_gettimeofday(&lasttime);
		newdelta = timeval_usec_diff(lasttime, *waketime);
		/* a larger "remaining" delta means we overslept past waketime */
		delta = (newdelta > delta) ? 0 : newdelta;
	}
	if (delta > 0)
		udelay(delta);
}
EXPORT_SYMBOL(dvb_frontend_sleep_until);

/* Start (or restart) the per-frontend tuning kthread.
 * Returns 0 on success or a negative errno.  Leaves fepriv->sem held
 * on success; the thread releases it at the top of its loop. */
static int dvb_frontend_start(struct dvb_frontend *fe)
{
	int ret;
	struct dvb_frontend_private *fepriv = fe->frontend_priv;
	struct task_struct *fe_thread;

	dprintk ("%s\n", __func__);

	if (fepriv->thread) {
		if (fepriv->exit == DVB_FE_NO_EXIT)
			return 0;
		else
			dvb_frontend_stop (fe);
	}

	if (signal_pending(current))
		return -EINTR;
	if (down_interruptible (&fepriv->sem))
		return -EINTR;

	fepriv->state = FESTATE_IDLE;
	fepriv->exit = DVB_FE_NO_EXIT;
	fepriv->thread = NULL;
	mb();

	fe_thread = kthread_run(dvb_frontend_thread, fe,
		"kdvb-ad-%i-fe-%i", fe->dvb->num,fe->id);
	if (IS_ERR(fe_thread)) {
		ret = PTR_ERR(fe_thread);
		printk("dvb_frontend_start: failed to start kthread (%d)\n", ret);
		up(&fepriv->sem);
		return ret;
	}
	fepriv->thread = fe_thread;
	return 0;
}

/* Combine the demod's and tuner's frequency limits into one usable range.
 * NOTE(review): "frequeny" typo is part of the existing static symbol name;
 * kept to match the other call site. */
static void dvb_frontend_get_frequeny_limits(struct dvb_frontend *fe,
					u32 *freq_min, u32 *freq_max)
{
	*freq_min = max(fe->ops.info.frequency_min, fe->ops.tuner_ops.info.frequency_min);

	/* a zero limit means "unspecified"; fall back to the other side */
	if (fe->ops.info.frequency_max == 0)
		*freq_max = fe->ops.tuner_ops.info.frequency_max;
	else if (fe->ops.tuner_ops.info.frequency_max == 0)
		*freq_max = fe->ops.info.frequency_max;
	else
		*freq_max = min(fe->ops.info.frequency_max, fe->ops.tuner_ops.info.frequency_max);

	if (*freq_min == 0 || *freq_max == 0)
		printk(KERN_WARNING "DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
		       fe->dvb->num,fe->id);
}

/* Sanity-check tuning parameters against the frontend's advertised
 * capabilities (frequency range, symbol rate, modulation).
 * Returns 0 if acceptable, -EINVAL otherwise. */
static int dvb_frontend_check_parameters(struct dvb_frontend *fe,
				struct dvb_frontend_parameters *parms)
{
	u32 freq_min;
	u32 freq_max;

	/* range check: frequency */
	dvb_frontend_get_frequeny_limits(fe, &freq_min, &freq_max);
	if ((freq_min && parms->frequency < freq_min) ||
	    (freq_max && parms->frequency > freq_max)) {
		printk(KERN_WARNING "DVB: adapter %i frontend %i frequency %u out of range (%u..%u)\n",
		       fe->dvb->num, fe->id, parms->frequency, freq_min, freq_max);
		return -EINVAL;
	}

	/* range check: symbol rate */
	if (fe->ops.info.type == FE_QPSK)
 {
		if ((fe->ops.info.symbol_rate_min &&
		     parms->u.qpsk.symbol_rate < fe->ops.info.symbol_rate_min) ||
		    (fe->ops.info.symbol_rate_max &&
		     parms->u.qpsk.symbol_rate > fe->ops.info.symbol_rate_max)) {
			printk(KERN_WARNING "DVB: adapter %i frontend %i symbol rate %u out of range (%u..%u)\n",
			       fe->dvb->num, fe->id, parms->u.qpsk.symbol_rate,
			       fe->ops.info.symbol_rate_min, fe->ops.info.symbol_rate_max);
			return -EINVAL;
		}

	} else if (fe->ops.info.type == FE_QAM) {
		if ((fe->ops.info.symbol_rate_min &&
		     parms->u.qam.symbol_rate < fe->ops.info.symbol_rate_min) ||
		    (fe->ops.info.symbol_rate_max &&
		     parms->u.qam.symbol_rate > fe->ops.info.symbol_rate_max)) {
			printk(KERN_WARNING "DVB: adapter %i frontend %i symbol rate %u out of range (%u..%u)\n",
			       fe->dvb->num, fe->id, parms->u.qam.symbol_rate,
			       fe->ops.info.symbol_rate_min, fe->ops.info.symbol_rate_max);
			return -EINVAL;
		}
	}

	/* check for supported modulation */
	if (fe->ops.info.type == FE_QAM &&
	    (parms->u.qam.modulation > QAM_AUTO ||
	     !((1 << (parms->u.qam.modulation + 10)) & fe->ops.info.caps))) {
		printk(KERN_WARNING "DVB: adapter %i frontend %i modulation %u not supported\n",
		       fe->dvb->num, fe->id, parms->u.qam.modulation);
		return -EINVAL;
	}

	return 0;
}

/* Reset the tuning property cache to "everything AUTO / undefined".
 * Purely a software reset; does not touch the hardware. */
static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
{
	int i;

	memset(&(fe->dtv_property_cache), 0,
			sizeof(struct dtv_frontend_properties));

	fe->dtv_property_cache.state = DTV_CLEAR;
	fe->dtv_property_cache.delivery_system = SYS_UNDEFINED;
	fe->dtv_property_cache.inversion = INVERSION_AUTO;
	fe->dtv_property_cache.fec_inner = FEC_AUTO;
	fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_AUTO;
	fe->dtv_property_cache.bandwidth_hz = BANDWIDTH_AUTO;
	fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_AUTO;
	fe->dtv_property_cache.hierarchy = HIERARCHY_AUTO;
	/* NOTE(review): symbol_rate is assigned QAM_AUTO, a modulation
	 * constant - looks like a copy/paste slip; confirm against upstream
	 * before changing, since drivers may rely on the numeric value. */
	fe->dtv_property_cache.symbol_rate = QAM_AUTO;
	fe->dtv_property_cache.code_rate_HP = FEC_AUTO;
	fe->dtv_property_cache.code_rate_LP = FEC_AUTO;

	/* ISDB-T fields: -1 means "not specified" */
	fe->dtv_property_cache.isdbt_partial_reception = -1;
	fe->dtv_property_cache.isdbt_sb_mode = -1;
	fe->dtv_property_cache.isdbt_sb_subchannel = -1;
	fe->dtv_property_cache.isdbt_sb_segment_idx = -1;
	fe->dtv_property_cache.isdbt_sb_segment_count = -1;
	fe->dtv_property_cache.isdbt_layer_enabled = 0x7;
	for (i = 0; i < 3; i++) {
		fe->dtv_property_cache.layer[i].fec = FEC_AUTO;
		fe->dtv_property_cache.layer[i].modulation = QAM_AUTO;
		fe->dtv_property_cache.layer[i].interleaving = -1;
		fe->dtv_property_cache.layer[i].segment_count = -1;
	}

	return 0;
}

/* Build one dtv_cmds_h table entry: stringified name, command id,
 * whether it is settable (s) and whether it carries a buffer payload (b). */
#define _DTV_CMD(n, s, b) \
[n] = { \
	.name = #n, \
	.cmd = n, \
	.set = s,\
	.buffer = b \
}

/* Attribute table for every DTV_* property, indexed by command id. */
static struct dtv_cmds_h dtv_cmds[] = {
	_DTV_CMD(DTV_TUNE, 1, 0),
	_DTV_CMD(DTV_CLEAR, 1, 0),

	/* Set */
	_DTV_CMD(DTV_FREQUENCY, 1, 0),
	_DTV_CMD(DTV_BANDWIDTH_HZ, 1, 0),
	_DTV_CMD(DTV_MODULATION, 1, 0),
	_DTV_CMD(DTV_INVERSION, 1, 0),
	_DTV_CMD(DTV_DISEQC_MASTER, 1, 1),
	_DTV_CMD(DTV_SYMBOL_RATE, 1, 0),
	_DTV_CMD(DTV_INNER_FEC, 1, 0),
	_DTV_CMD(DTV_VOLTAGE, 1, 0),
	_DTV_CMD(DTV_TONE, 1, 0),
	_DTV_CMD(DTV_PILOT, 1, 0),
	_DTV_CMD(DTV_ROLLOFF, 1, 0),
	_DTV_CMD(DTV_DELIVERY_SYSTEM, 1, 0),
	_DTV_CMD(DTV_HIERARCHY, 1, 0),
	_DTV_CMD(DTV_CODE_RATE_HP, 1, 0),
	_DTV_CMD(DTV_CODE_RATE_LP, 1, 0),
	_DTV_CMD(DTV_GUARD_INTERVAL, 1, 0),
	_DTV_CMD(DTV_TRANSMISSION_MODE, 1, 0),
	_DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 1, 0),
	_DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 1, 0),
	_DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 1, 0),
	_DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 1, 0),
	_DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERA_FEC, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERB_FEC, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERC_FEC, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
	_DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),

	_DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0),
	_DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0),
	_DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0),
	_DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0),
	_DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0),
	_DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),

	_DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),

	/* Get */
	_DTV_CMD(DTV_DISEQC_SLAVE_REPLY, 0, 1),
	_DTV_CMD(DTV_API_VERSION, 0, 0),
	_DTV_CMD(DTV_CODE_RATE_HP, 0, 0),
	_DTV_CMD(DTV_CODE_RATE_LP, 0, 0),
	_DTV_CMD(DTV_GUARD_INTERVAL, 0, 0),
	_DTV_CMD(DTV_TRANSMISSION_MODE, 0, 0),
	_DTV_CMD(DTV_HIERARCHY, 0, 0),
};

/* Debug-dump a single dtv_property, including any buffer contents. */
static void dtv_property_dump(struct dtv_property *tvp)
{
	int i;

	if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) {
		printk(KERN_WARNING "%s: tvp.cmd = 0x%08x undefined\n",
			__func__, tvp->cmd);
		return;
	}

	dprintk("%s() tvp.cmd = 0x%08x (%s)\n"
		,__func__
		,tvp->cmd
		,dtv_cmds[ tvp->cmd ].name);

	if(dtv_cmds[ tvp->cmd ].buffer) {

		dprintk("%s() tvp.u.buffer.len = 0x%02x\n"
			,__func__
			,tvp->u.buffer.len);

		for(i = 0; i < tvp->u.buffer.len; i++)

			dprintk("%s() tvp.u.buffer.data[0x%02x] = 0x%02x\n"
				,__func__
				,i
				,tvp->u.buffer.data[i]);

	} else
		dprintk("%s() tvp.u.data = 0x%08x\n", __func__, tvp->u.data);
}

/* True for delivery systems that already existed in the legacy (v3) API. */
static int is_legacy_delivery_system(fe_delivery_system_t s)
{
	if((s == SYS_UNDEFINED) || (s == SYS_DVBC_ANNEX_AC) ||
	   (s == SYS_DVBC_ANNEX_B) || (s == SYS_DVBT) ||
	   (s == SYS_DVBS) || (s == SYS_ATSC))
		return 1;

	return 0;
}

/* Synchronise the legacy tuning parameters into the cache, so that demodulator
 * drivers can use a single set_frontend tuning function, regardless of whether
 * it's being used for the legacy or new API, reducing code and complexity.
 */
static void dtv_property_cache_sync(struct dvb_frontend *fe,
				    struct dvb_frontend_parameters *p)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	c->frequency = p->frequency;
	c->inversion = p->inversion;

	switch (fe->ops.info.type) {
	case FE_QPSK:
		c->modulation = QPSK;   /* implied for DVB-S in legacy API */
		c->rolloff = ROLLOFF_35;/* implied for DVB-S */
		c->symbol_rate = p->u.qpsk.symbol_rate;
		c->fec_inner = p->u.qpsk.fec_inner;
		c->delivery_system = SYS_DVBS;
		break;
	case FE_QAM:
		c->symbol_rate = p->u.qam.symbol_rate;
		c->fec_inner = p->u.qam.fec_inner;
		c->modulation = p->u.qam.modulation;
		c->delivery_system = SYS_DVBC_ANNEX_AC;
		break;
	case FE_OFDM:
		/* the legacy API carries bandwidth as an enum; widen to Hz */
		if (p->u.ofdm.bandwidth == BANDWIDTH_6_MHZ)
			c->bandwidth_hz = 6000000;
		else if (p->u.ofdm.bandwidth == BANDWIDTH_7_MHZ)
			c->bandwidth_hz = 7000000;
		else if (p->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
			c->bandwidth_hz = 8000000;
		else
			/* Including BANDWIDTH_AUTO */
			c->bandwidth_hz = 0;
		c->code_rate_HP = p->u.ofdm.code_rate_HP;
		c->code_rate_LP = p->u.ofdm.code_rate_LP;
		c->modulation = p->u.ofdm.constellation;
		c->transmission_mode = p->u.ofdm.transmission_mode;
		c->guard_interval = p->u.ofdm.guard_interval;
		c->hierarchy = p->u.ofdm.hierarchy_information;
		c->delivery_system = SYS_DVBT;
		break;
	case FE_ATSC:
		c->modulation = p->u.vsb.modulation;
		if ((c->modulation == VSB_8) || (c->modulation == VSB_16))
			c->delivery_system = SYS_ATSC;
		else
			c->delivery_system = SYS_DVBC_ANNEX_B;
		break;
	}
}

/* Ensure the cached values are set correctly in the frontend
 * legacy tuning structures, for the advanced tuning API.
 */
static void dtv_property_legacy_params_sync(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	struct dvb_frontend_private *fepriv = fe->frontend_priv;
	struct dvb_frontend_parameters *p = &fepriv->parameters;

	p->frequency = c->frequency;
	p->inversion = c->inversion;

	switch (fe->ops.info.type) {
	case FE_QPSK:
		dprintk("%s() Preparing QPSK req\n", __func__);
		p->u.qpsk.symbol_rate = c->symbol_rate;
		p->u.qpsk.fec_inner = c->fec_inner;
		c->delivery_system = SYS_DVBS;
		break;
	case FE_QAM:
		dprintk("%s() Preparing QAM req\n", __func__);
		p->u.qam.symbol_rate = c->symbol_rate;
		p->u.qam.fec_inner = c->fec_inner;
		p->u.qam.modulation = c->modulation;
		c->delivery_system = SYS_DVBC_ANNEX_AC;
		break;
	case FE_OFDM:
		dprintk("%s() Preparing OFDM req\n", __func__);
		/* narrow the Hz bandwidth back to the legacy enum */
		if (c->bandwidth_hz == 6000000)
			p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
		else if (c->bandwidth_hz == 7000000)
			p->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
		else if (c->bandwidth_hz == 8000000)
			p->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
		else
			p->u.ofdm.bandwidth = BANDWIDTH_AUTO;
		p->u.ofdm.code_rate_HP = c->code_rate_HP;
		p->u.ofdm.code_rate_LP = c->code_rate_LP;
		p->u.ofdm.constellation = c->modulation;
		p->u.ofdm.transmission_mode = c->transmission_mode;
		p->u.ofdm.guard_interval = c->guard_interval;
		p->u.ofdm.hierarchy_information = c->hierarchy;
		c->delivery_system = SYS_DVBT;
		break;
	case FE_ATSC:
		dprintk("%s() Preparing VSB req\n", __func__);
		p->u.vsb.modulation = c->modulation;
		if ((c->modulation == VSB_8) || (c->modulation == VSB_16))
			c->delivery_system = SYS_ATSC;
		else
			c->delivery_system = SYS_DVBC_ANNEX_B;
		break;
	}
}

/* Ensure the cached values are set correctly in the frontend
 * legacy tuning structures, for the legacy tuning API.
 */
static void dtv_property_adv_params_sync(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	struct dvb_frontend_private *fepriv = fe->frontend_priv;
	struct dvb_frontend_parameters *p = &fepriv->parameters;

	p->frequency = c->frequency;
	p->inversion = c->inversion;

	/* the satellite PSK/APSK modulations all use the qpsk union member */
	switch(c->modulation) {
	case PSK_8:
	case APSK_16:
	case APSK_32:
	case QPSK:
		p->u.qpsk.symbol_rate = c->symbol_rate;
		p->u.qpsk.fec_inner = c->fec_inner;
		break;
	default:
		break;
	}

	if(c->delivery_system == SYS_ISDBT) {
		/* Fake out a generic DVB-T request so we pass validation in the ioctl */
		p->frequency = c->frequency;
		p->inversion = c->inversion;
		p->u.ofdm.constellation = QAM_AUTO;
		p->u.ofdm.code_rate_HP = FEC_AUTO;
		p->u.ofdm.code_rate_LP = FEC_AUTO;
		p->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO;
		p->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO;
		p->u.ofdm.hierarchy_information = HIERARCHY_AUTO;
		if (c->bandwidth_hz == 8000000)
			p->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
		else if (c->bandwidth_hz == 7000000)
			p->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
		else if (c->bandwidth_hz == 6000000)
			p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
		else
			p->u.ofdm.bandwidth = BANDWIDTH_AUTO;
	}
}

/* Push the property cache into the legacy dvb_frontend_parameters so
 * both the old and new tuning paths share one validation/tune flow. */
static void dtv_property_cache_submit(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	/* For legacy delivery systems we don't need the delivery_system to
	 * be specified, but we populate the older structures from the cache
	 * so we can call set_frontend on older drivers.
	 */
	if(is_legacy_delivery_system(c->delivery_system)) {
		dprintk("%s() legacy, modulation = %d\n", __func__, c->modulation);
		dtv_property_legacy_params_sync(fe);
	} else {
		dprintk("%s() adv, modulation = %d\n", __func__, c->modulation);

		/* For advanced delivery systems / modulation types ...
		 * we seed the legacy dvb_frontend_parameters structure
		 * so that the sanity checking code later in the IOCTL processing
		 * can validate our basic frequency ranges, symbolrates, modulation
		 * etc.
		 */
		dtv_property_adv_params_sync(fe);
	}
}

static int dvb_frontend_ioctl_legacy(struct file *file,
			unsigned int cmd, void *parg);
static int dvb_frontend_ioctl_properties(struct file *file,
			unsigned int cmd, void *parg);

/* Read one property out of the frontend's cache into *tvp, after giving
 * the driver's get_property hook a chance to refresh or veto it.
 * Returns 0 on success, a negative value from the driver, or -1 for an
 * unknown command. */
static int dtv_property_process_get(struct dvb_frontend *fe,
				    struct dtv_property *tvp,
				    struct file *file)
{
	int r = 0;

	/* Allow the frontend to validate incoming properties */
	if (fe->ops.get_property)
		r = fe->ops.get_property(fe, tvp);

	if (r < 0)
		return r;

	switch(tvp->cmd) {
	case DTV_FREQUENCY:
		tvp->u.data = fe->dtv_property_cache.frequency;
		break;
	case DTV_MODULATION:
		tvp->u.data = fe->dtv_property_cache.modulation;
		break;
	case DTV_BANDWIDTH_HZ:
		tvp->u.data = fe->dtv_property_cache.bandwidth_hz;
		break;
	case DTV_INVERSION:
		tvp->u.data = fe->dtv_property_cache.inversion;
		break;
	case DTV_SYMBOL_RATE:
		tvp->u.data = fe->dtv_property_cache.symbol_rate;
		break;
	case DTV_INNER_FEC:
		tvp->u.data = fe->dtv_property_cache.fec_inner;
		break;
	case DTV_PILOT:
		tvp->u.data = fe->dtv_property_cache.pilot;
		break;
	case DTV_ROLLOFF:
		tvp->u.data = fe->dtv_property_cache.rolloff;
		break;
	case DTV_DELIVERY_SYSTEM:
		tvp->u.data = fe->dtv_property_cache.delivery_system;
		break;
	case DTV_VOLTAGE:
		tvp->u.data = fe->dtv_property_cache.voltage;
		break;
	case DTV_TONE:
		tvp->u.data = fe->dtv_property_cache.sectone;
		break;
	case DTV_API_VERSION:
		tvp->u.data = (DVB_API_VERSION << 8) | DVB_API_VERSION_MINOR;
		break;
	case DTV_CODE_RATE_HP:
		tvp->u.data = fe->dtv_property_cache.code_rate_HP;
		break;
	case DTV_CODE_RATE_LP:
		tvp->u.data = fe->dtv_property_cache.code_rate_LP;
		break;
	case DTV_GUARD_INTERVAL:
		tvp->u.data = fe->dtv_property_cache.guard_interval;
		break;
	case DTV_TRANSMISSION_MODE:
		tvp->u.data = fe->dtv_property_cache.transmission_mode;
		break;
	case DTV_HIERARCHY:
		tvp->u.data = fe->dtv_property_cache.hierarchy;
		break;

	/* ISDB-T Support here */
	case DTV_ISDBT_PARTIAL_RECEPTION:
		tvp->u.data = fe->dtv_property_cache.isdbt_partial_reception;
		break;
	case DTV_ISDBT_SOUND_BROADCASTING:
		tvp->u.data = fe->dtv_property_cache.isdbt_sb_mode;
		break;
	case DTV_ISDBT_SB_SUBCHANNEL_ID:
		tvp->u.data = fe->dtv_property_cache.isdbt_sb_subchannel;
		break;
	case DTV_ISDBT_SB_SEGMENT_IDX:
		tvp->u.data = fe->dtv_property_cache.isdbt_sb_segment_idx;
		break;
	case DTV_ISDBT_SB_SEGMENT_COUNT:
		tvp->u.data = fe->dtv_property_cache.isdbt_sb_segment_count;
		break;
	case DTV_ISDBT_LAYER_ENABLED:
		tvp->u.data = fe->dtv_property_cache.isdbt_layer_enabled;
		break;
	case DTV_ISDBT_LAYERA_FEC:
		tvp->u.data = fe->dtv_property_cache.layer[0].fec;
		break;
	case DTV_ISDBT_LAYERA_MODULATION:
		tvp->u.data = fe->dtv_property_cache.layer[0].modulation;
		break;
	case DTV_ISDBT_LAYERA_SEGMENT_COUNT:
		tvp->u.data = fe->dtv_property_cache.layer[0].segment_count;
		break;
	case DTV_ISDBT_LAYERA_TIME_INTERLEAVING:
		tvp->u.data = fe->dtv_property_cache.layer[0].interleaving;
		break;
	case DTV_ISDBT_LAYERB_FEC:
		tvp->u.data = fe->dtv_property_cache.layer[1].fec;
		break;
	case DTV_ISDBT_LAYERB_MODULATION:
		tvp->u.data = fe->dtv_property_cache.layer[1].modulation;
		break;
	case DTV_ISDBT_LAYERB_SEGMENT_COUNT:
		tvp->u.data = fe->dtv_property_cache.layer[1].segment_count;
		break;
	case DTV_ISDBT_LAYERB_TIME_INTERLEAVING:
		tvp->u.data = fe->dtv_property_cache.layer[1].interleaving;
		break;
	case DTV_ISDBT_LAYERC_FEC:
		tvp->u.data = fe->dtv_property_cache.layer[2].fec;
		break;
	case DTV_ISDBT_LAYERC_MODULATION:
		tvp->u.data = fe->dtv_property_cache.layer[2].modulation;
		break;
	case DTV_ISDBT_LAYERC_SEGMENT_COUNT:
		tvp->u.data = fe->dtv_property_cache.layer[2].segment_count;
		break;
	case DTV_ISDBT_LAYERC_TIME_INTERLEAVING:
		tvp->u.data = fe->dtv_property_cache.layer[2].interleaving;
		break;
	case DTV_ISDBS_TS_ID:
		tvp->u.data = fe->dtv_property_cache.isdbs_ts_id;
		break;
	default:
		r = -1;
	}

	dtv_property_dump(tvp);

	return r;
}

/* Apply one property write: update the cache and, for DTV_TUNE /
 * DTV_VOLTAGE / DTV_TONE, forward through the legacy ioctl path so the
 * existing validation and hardware code is reused. */
static int dtv_property_process_set(struct dvb_frontend *fe,
				    struct dtv_property *tvp,
				    struct file *file)
{
	int r = 0;
	struct dvb_frontend_private *fepriv = fe->frontend_priv;
	dtv_property_dump(tvp);

	/* Allow the frontend to validate incoming properties */
	if (fe->ops.set_property)
		r = fe->ops.set_property(fe, tvp);

	if (r < 0)
		return r;

	switch(tvp->cmd) {
	case DTV_CLEAR:
		/* Reset a cache of data specific to the frontend here. This does
		 * not effect hardware.
		 */
		dvb_frontend_clear_cache(fe);
		dprintk("%s() Flushing property cache\n", __func__);
		break;
	case DTV_TUNE:
		/* interpret the cache of data, build either a traditional frontend
		 * tunerequest so we can pass validation in the FE_SET_FRONTEND
		 * ioctl.
		 */
		fe->dtv_property_cache.state = tvp->cmd;
		dprintk("%s() Finalised property cache\n", __func__);
		dtv_property_cache_submit(fe);

		r |= dvb_frontend_ioctl_legacy(file, FE_SET_FRONTEND,
			&fepriv->parameters);
		break;
	case DTV_FREQUENCY:
		fe->dtv_property_cache.frequency = tvp->u.data;
		break;
	case DTV_MODULATION:
		fe->dtv_property_cache.modulation = tvp->u.data;
		break;
	case DTV_BANDWIDTH_HZ:
		fe->dtv_property_cache.bandwidth_hz = tvp->u.data;
		break;
	case DTV_INVERSION:
		fe->dtv_property_cache.inversion = tvp->u.data;
		break;
	case DTV_SYMBOL_RATE:
		fe->dtv_property_cache.symbol_rate = tvp->u.data;
		break;
	case DTV_INNER_FEC:
		fe->dtv_property_cache.fec_inner = tvp->u.data;
		break;
	case DTV_PILOT:
		fe->dtv_property_cache.pilot = tvp->u.data;
		break;
	case DTV_ROLLOFF:
		fe->dtv_property_cache.rolloff = tvp->u.data;
		break;
	case DTV_DELIVERY_SYSTEM:
		fe->dtv_property_cache.delivery_system = tvp->u.data;
		break;
	case DTV_VOLTAGE:
		/* legacy path takes the value in-band as the arg pointer */
		fe->dtv_property_cache.voltage = tvp->u.data;
		r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE,
			(void *)fe->dtv_property_cache.voltage);
		break;
	case DTV_TONE:
		/* legacy path takes the value in-band as the arg pointer */
		fe->dtv_property_cache.sectone = tvp->u.data;
		r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE,
			(void *)fe->dtv_property_cache.sectone);
		break;
	case DTV_CODE_RATE_HP:
		fe->dtv_property_cache.code_rate_HP = tvp->u.data;
		break;
	case DTV_CODE_RATE_LP:
		fe->dtv_property_cache.code_rate_LP = tvp->u.data;
		break;
	case DTV_GUARD_INTERVAL:
		fe->dtv_property_cache.guard_interval = tvp->u.data;
		break;
	case DTV_TRANSMISSION_MODE:
fe->dtv_property_cache.transmission_mode = tvp->u.data; break; case DTV_HIERARCHY: fe->dtv_property_cache.hierarchy = tvp->u.data; break; /* ISDB-T Support here */ case DTV_ISDBT_PARTIAL_RECEPTION: fe->dtv_property_cache.isdbt_partial_reception = tvp->u.data; break; case DTV_ISDBT_SOUND_BROADCASTING: fe->dtv_property_cache.isdbt_sb_mode = tvp->u.data; break; case DTV_ISDBT_SB_SUBCHANNEL_ID: fe->dtv_property_cache.isdbt_sb_subchannel = tvp->u.data; break; case DTV_ISDBT_SB_SEGMENT_IDX: fe->dtv_property_cache.isdbt_sb_segment_idx = tvp->u.data; break; case DTV_ISDBT_SB_SEGMENT_COUNT: fe->dtv_property_cache.isdbt_sb_segment_count = tvp->u.data; break; case DTV_ISDBT_LAYER_ENABLED: fe->dtv_property_cache.isdbt_layer_enabled = tvp->u.data; break; case DTV_ISDBT_LAYERA_FEC: fe->dtv_property_cache.layer[0].fec = tvp->u.data; break; case DTV_ISDBT_LAYERA_MODULATION: fe->dtv_property_cache.layer[0].modulation = tvp->u.data; break; case DTV_ISDBT_LAYERA_SEGMENT_COUNT: fe->dtv_property_cache.layer[0].segment_count = tvp->u.data; break; case DTV_ISDBT_LAYERA_TIME_INTERLEAVING: fe->dtv_property_cache.layer[0].interleaving = tvp->u.data; break; case DTV_ISDBT_LAYERB_FEC: fe->dtv_property_cache.layer[1].fec = tvp->u.data; break; case DTV_ISDBT_LAYERB_MODULATION: fe->dtv_property_cache.layer[1].modulation = tvp->u.data; break; case DTV_ISDBT_LAYERB_SEGMENT_COUNT: fe->dtv_property_cache.layer[1].segment_count = tvp->u.data; break; case DTV_ISDBT_LAYERB_TIME_INTERLEAVING: fe->dtv_property_cache.layer[1].interleaving = tvp->u.data; break; case DTV_ISDBT_LAYERC_FEC: fe->dtv_property_cache.layer[2].fec = tvp->u.data; break; case DTV_ISDBT_LAYERC_MODULATION: fe->dtv_property_cache.layer[2].modulation = tvp->u.data; break; case DTV_ISDBT_LAYERC_SEGMENT_COUNT: fe->dtv_property_cache.layer[2].segment_count = tvp->u.data; break; case DTV_ISDBT_LAYERC_TIME_INTERLEAVING: fe->dtv_property_cache.layer[2].interleaving = tvp->u.data; break; case DTV_ISDBS_TS_ID: fe->dtv_property_cache.isdbs_ts_id 
= tvp->u.data; break; default: r = -1; } return r; } static int dvb_frontend_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; int err = -EOPNOTSUPP; dprintk("%s (%d)\n", __func__, _IOC_NR(cmd)); if (fepriv->exit != DVB_FE_NO_EXIT) return -ENODEV; if ((file->f_flags & O_ACCMODE) == O_RDONLY && (_IOC_DIR(cmd) != _IOC_READ || cmd == FE_GET_EVENT || cmd == FE_DISEQC_RECV_SLAVE_REPLY)) return -EPERM; if (down_interruptible (&fepriv->sem)) return -ERESTARTSYS; if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY)) err = dvb_frontend_ioctl_properties(file, cmd, parg); else { fe->dtv_property_cache.state = DTV_UNDEFINED; err = dvb_frontend_ioctl_legacy(file, cmd, parg); } up(&fepriv->sem); return err; } static int dvb_frontend_ioctl_properties(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; int err = 0; struct dtv_properties *tvps = NULL; struct dtv_property *tvp = NULL; int i; dprintk("%s\n", __func__); if(cmd == FE_SET_PROPERTY) { tvps = (struct dtv_properties __user *)parg; dprintk("%s() properties.num = %d\n", __func__, tvps->num); dprintk("%s() properties.props = %p\n", __func__, tvps->props); /* Put an arbitrary limit on the number of messages that can * be sent at once */ if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS)) return -EINVAL; tvp = kmalloc(tvps->num * sizeof(struct dtv_property), GFP_KERNEL); if (!tvp) { err = -ENOMEM; goto out; } if (copy_from_user(tvp, tvps->props, tvps->num * sizeof(struct dtv_property))) { err = -EFAULT; goto out; } for (i = 0; i < tvps->num; i++) { (tvp + i)->result = dtv_property_process_set(fe, tvp + i, file); err |= (tvp + i)->result; } if(fe->dtv_property_cache.state == DTV_TUNE) dprintk("%s() Property cache is full, tuning\n", __func__); } else if(cmd == FE_GET_PROPERTY) { tvps 
= (struct dtv_properties __user *)parg; dprintk("%s() properties.num = %d\n", __func__, tvps->num); dprintk("%s() properties.props = %p\n", __func__, tvps->props); /* Put an arbitrary limit on the number of messages that can * be sent at once */ if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS)) return -EINVAL; tvp = kmalloc(tvps->num * sizeof(struct dtv_property), GFP_KERNEL); if (!tvp) { err = -ENOMEM; goto out; } if (copy_from_user(tvp, tvps->props, tvps->num * sizeof(struct dtv_property))) { err = -EFAULT; goto out; } for (i = 0; i < tvps->num; i++) { (tvp + i)->result = dtv_property_process_get(fe, tvp + i, file); err |= (tvp + i)->result; } if (copy_to_user(tvps->props, tvp, tvps->num * sizeof(struct dtv_property))) { err = -EFAULT; goto out; } } else err = -EOPNOTSUPP; out: kfree(tvp); return err; } static int dvb_frontend_ioctl_legacy(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; int cb_err, err = -EOPNOTSUPP; if (fe->dvb->fe_ioctl_override) { cb_err = fe->dvb->fe_ioctl_override(fe, cmd, parg, DVB_FE_IOCTL_PRE); if (cb_err < 0) return cb_err; if (cb_err > 0) return 0; /* fe_ioctl_override returning 0 allows * dvb-core to continue handling the ioctl */ } switch (cmd) { case FE_GET_INFO: { struct dvb_frontend_info* info = parg; memcpy(info, &fe->ops.info, sizeof(struct dvb_frontend_info)); dvb_frontend_get_frequeny_limits(fe, &info->frequency_min, &info->frequency_max); /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't * do it, it is done for it. 
*/ info->caps |= FE_CAN_INVERSION_AUTO; err = 0; break; } case FE_READ_STATUS: { fe_status_t* status = parg; /* if retune was requested but hasn't occured yet, prevent * that user get signal state from previous tuning */ if (fepriv->state == FESTATE_RETUNE || fepriv->state == FESTATE_ERROR) { err=0; *status = 0; break; } if (fe->ops.read_status) err = fe->ops.read_status(fe, status); break; } case FE_READ_BER: if (fe->ops.read_ber) err = fe->ops.read_ber(fe, (__u32*) parg); break; case FE_READ_SIGNAL_STRENGTH: if (fe->ops.read_signal_strength) err = fe->ops.read_signal_strength(fe, (__u16*) parg); break; case FE_READ_SNR: if (fe->ops.read_snr) err = fe->ops.read_snr(fe, (__u16*) parg); break; case FE_READ_UNCORRECTED_BLOCKS: if (fe->ops.read_ucblocks) err = fe->ops.read_ucblocks(fe, (__u32*) parg); break; case FE_DISEQC_RESET_OVERLOAD: if (fe->ops.diseqc_reset_overload) { err = fe->ops.diseqc_reset_overload(fe); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_DISEQC_SEND_MASTER_CMD: if (fe->ops.diseqc_send_master_cmd) { err = fe->ops.diseqc_send_master_cmd(fe, (struct dvb_diseqc_master_cmd*) parg); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_DISEQC_SEND_BURST: if (fe->ops.diseqc_send_burst) { err = fe->ops.diseqc_send_burst(fe, (fe_sec_mini_cmd_t) parg); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_SET_TONE: if (fe->ops.set_tone) { err = fe->ops.set_tone(fe, (fe_sec_tone_mode_t) parg); fepriv->tone = (fe_sec_tone_mode_t) parg; fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_SET_VOLTAGE: if (fe->ops.set_voltage) { err = fe->ops.set_voltage(fe, (fe_sec_voltage_t) parg); fepriv->voltage = (fe_sec_voltage_t) parg; fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_DISHNETWORK_SEND_LEGACY_CMD: if (fe->ops.dishnetwork_send_legacy_command) { err = fe->ops.dishnetwork_send_legacy_command(fe, (unsigned long) parg); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; 
} else if (fe->ops.set_voltage) { /* * NOTE: This is a fallback condition. Some frontends * (stv0299 for instance) take longer than 8msec to * respond to a set_voltage command. Those switches * need custom routines to switch properly. For all * other frontends, the following shoule work ok. * Dish network legacy switches (as used by Dish500) * are controlled by sending 9-bit command words * spaced 8msec apart. * the actual command word is switch/port dependant * so it is up to the userspace application to send * the right command. * The command must always start with a '0' after * initialization, so parg is 8 bits and does not * include the initialization or start bit */ unsigned long swcmd = ((unsigned long) parg) << 1; struct timeval nexttime; struct timeval tv[10]; int i; u8 last = 1; if (dvb_frontend_debug) printk("%s switch command: 0x%04lx\n", __func__, swcmd); do_gettimeofday(&nexttime); if (dvb_frontend_debug) memcpy(&tv[0], &nexttime, sizeof(struct timeval)); /* before sending a command, initialize by sending * a 32ms 18V to the switch */ fe->ops.set_voltage(fe, SEC_VOLTAGE_18); dvb_frontend_sleep_until(&nexttime, 32000); for (i = 0; i < 9; i++) { if (dvb_frontend_debug) do_gettimeofday(&tv[i + 1]); if ((swcmd & 0x01) != last) { /* set voltage to (last ? 13V : 18V) */ fe->ops.set_voltage(fe, (last) ? SEC_VOLTAGE_13 : SEC_VOLTAGE_18); last = (last) ? 
0 : 1; } swcmd = swcmd >> 1; if (i != 8) dvb_frontend_sleep_until(&nexttime, 8000); } if (dvb_frontend_debug) { printk("%s(%d): switch delay (should be 32k followed by all 8k\n", __func__, fe->dvb->num); for (i = 1; i < 10; i++) printk("%d: %d\n", i, timeval_usec_diff(tv[i-1] , tv[i])); } err = 0; fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_DISEQC_RECV_SLAVE_REPLY: if (fe->ops.diseqc_recv_slave_reply) err = fe->ops.diseqc_recv_slave_reply(fe, (struct dvb_diseqc_slave_reply*) parg); break; case FE_ENABLE_HIGH_LNB_VOLTAGE: if (fe->ops.enable_high_lnb_voltage) err = fe->ops.enable_high_lnb_voltage(fe, (long) parg); break; case FE_SET_FRONTEND: { struct dvb_frontend_tune_settings fetunesettings; if(fe->dtv_property_cache.state == DTV_TUNE) { if (dvb_frontend_check_parameters(fe, &fepriv->parameters) < 0) { err = -EINVAL; break; } } else { if (dvb_frontend_check_parameters(fe, parg) < 0) { err = -EINVAL; break; } memcpy (&fepriv->parameters, parg, sizeof (struct dvb_frontend_parameters)); dtv_property_cache_sync(fe, &fepriv->parameters); } memset(&fetunesettings, 0, sizeof(struct dvb_frontend_tune_settings)); memcpy(&fetunesettings.parameters, parg, sizeof (struct dvb_frontend_parameters)); /* force auto frequency inversion if requested */ if (dvb_force_auto_inversion) { fepriv->parameters.inversion = INVERSION_AUTO; fetunesettings.parameters.inversion = INVERSION_AUTO; } if (fe->ops.info.type == FE_OFDM) { /* without hierarchical coding code_rate_LP is irrelevant, * so we tolerate the otherwise invalid FEC_NONE setting */ if (fepriv->parameters.u.ofdm.hierarchy_information == HIERARCHY_NONE && fepriv->parameters.u.ofdm.code_rate_LP == FEC_NONE) fepriv->parameters.u.ofdm.code_rate_LP = FEC_AUTO; } /* get frontend-specific tuning settings */ if (fe->ops.get_tune_settings && (fe->ops.get_tune_settings(fe, &fetunesettings) == 0)) { fepriv->min_delay = (fetunesettings.min_delay_ms * HZ) / 1000; fepriv->max_drift = fetunesettings.max_drift; 
fepriv->step_size = fetunesettings.step_size; } else { /* default values */ switch(fe->ops.info.type) { case FE_QPSK: fepriv->min_delay = HZ/20; fepriv->step_size = fepriv->parameters.u.qpsk.symbol_rate / 16000; fepriv->max_drift = fepriv->parameters.u.qpsk.symbol_rate / 2000; break; case FE_QAM: fepriv->min_delay = HZ/20; fepriv->step_size = 0; /* no zigzag */ fepriv->max_drift = 0; break; case FE_OFDM: fepriv->min_delay = HZ/20; fepriv->step_size = fe->ops.info.frequency_stepsize * 2; fepriv->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1; break; case FE_ATSC: fepriv->min_delay = HZ/20; fepriv->step_size = 0; fepriv->max_drift = 0; break; } } if (dvb_override_tune_delay > 0) fepriv->min_delay = (dvb_override_tune_delay * HZ) / 1000; fepriv->state = FESTATE_RETUNE; /* Request the search algorithm to search */ fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN; dvb_frontend_wakeup(fe); dvb_frontend_add_event(fe, 0); fepriv->status = 0; err = 0; break; } case FE_GET_EVENT: err = dvb_frontend_get_event (fe, parg, file->f_flags); break; case FE_GET_FRONTEND: if (fe->ops.get_frontend) { memcpy (parg, &fepriv->parameters, sizeof (struct dvb_frontend_parameters)); err = fe->ops.get_frontend(fe, (struct dvb_frontend_parameters*) parg); } break; case FE_SET_FRONTEND_TUNE_MODE: fepriv->tune_mode_flags = (unsigned long) parg; err = 0; break; }; if (fe->dvb->fe_ioctl_override) { cb_err = fe->dvb->fe_ioctl_override(fe, cmd, parg, DVB_FE_IOCTL_POST); if (cb_err < 0) return cb_err; } return err; } static unsigned int dvb_frontend_poll(struct file *file, struct poll_table_struct *wait) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; dprintk ("%s\n", __func__); poll_wait (file, &fepriv->events.wait_queue, wait); if (fepriv->events.eventw != fepriv->events.eventr) return (POLLIN | POLLRDNORM | POLLPRI); return 0; } static int dvb_frontend_open(struct inode *inode, struct file 
*file) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_adapter *adapter = fe->dvb; int ret; dprintk ("%s\n", __func__); if (fepriv->exit == DVB_FE_DEVICE_REMOVED) return -ENODEV; if (adapter->mfe_shared) { mutex_lock (&adapter->mfe_lock); if (adapter->mfe_dvbdev == NULL) adapter->mfe_dvbdev = dvbdev; else if (adapter->mfe_dvbdev != dvbdev) { struct dvb_device *mfedev = adapter->mfe_dvbdev; struct dvb_frontend *mfe = mfedev->priv; struct dvb_frontend_private *mfepriv = mfe->frontend_priv; int mferetry = (dvb_mfe_wait_time << 1); mutex_unlock (&adapter->mfe_lock); while (mferetry-- && (mfedev->users != -1 || mfepriv->thread != NULL)) { if(msleep_interruptible(500)) { if(signal_pending(current)) return -EINTR; } } mutex_lock (&adapter->mfe_lock); if(adapter->mfe_dvbdev != dvbdev) { mfedev = adapter->mfe_dvbdev; mfe = mfedev->priv; mfepriv = mfe->frontend_priv; if (mfedev->users != -1 || mfepriv->thread != NULL) { mutex_unlock (&adapter->mfe_lock); return -EBUSY; } adapter->mfe_dvbdev = dvbdev; } } } if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) { if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0) goto err0; } if ((ret = dvb_generic_open (inode, file)) < 0) goto err1; if ((file->f_flags & O_ACCMODE) != O_RDONLY) { /* normal tune mode when opened R/W */ fepriv->tune_mode_flags &= ~FE_TUNE_MODE_ONESHOT; fepriv->tone = -1; fepriv->voltage = -1; ret = dvb_frontend_start (fe); if (ret) goto err2; /* empty event queue */ fepriv->events.eventr = fepriv->events.eventw = 0; } if (adapter->mfe_shared) mutex_unlock (&adapter->mfe_lock); return ret; err2: dvb_generic_release(inode, file); err1: if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) fe->ops.ts_bus_ctrl(fe, 0); err0: if (adapter->mfe_shared) mutex_unlock (&adapter->mfe_lock); return ret; } static int dvb_frontend_release(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct 
dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; int ret; dprintk ("%s\n", __func__); if ((file->f_flags & O_ACCMODE) != O_RDONLY) fepriv->release_jiffies = jiffies; ret = dvb_generic_release (inode, file); if (dvbdev->users == -1) { if (fepriv->exit != DVB_FE_NO_EXIT) { fops_put(file->f_op); file->f_op = NULL; wake_up(&dvbdev->wait_queue); } if (fe->ops.ts_bus_ctrl) fe->ops.ts_bus_ctrl(fe, 0); } return ret; } static const struct file_operations dvb_frontend_fops = { .owner = THIS_MODULE, .unlocked_ioctl = dvb_generic_ioctl, .poll = dvb_frontend_poll, .open = dvb_frontend_open, .release = dvb_frontend_release }; int dvb_register_frontend(struct dvb_adapter* dvb, struct dvb_frontend* fe) { struct dvb_frontend_private *fepriv; static const struct dvb_device dvbdev_template = { .users = ~0, .writers = 1, .readers = (~0)-1, .fops = &dvb_frontend_fops, .kernel_ioctl = dvb_frontend_ioctl }; dprintk ("%s\n", __func__); if (mutex_lock_interruptible(&frontend_mutex)) return -ERESTARTSYS; fe->frontend_priv = kzalloc(sizeof(struct dvb_frontend_private), GFP_KERNEL); if (fe->frontend_priv == NULL) { mutex_unlock(&frontend_mutex); return -ENOMEM; } fepriv = fe->frontend_priv; init_MUTEX (&fepriv->sem); init_waitqueue_head (&fepriv->wait_queue); init_waitqueue_head (&fepriv->events.wait_queue); mutex_init(&fepriv->events.mtx); fe->dvb = dvb; fepriv->inversion = INVERSION_OFF; printk ("DVB: registering adapter %i frontend %i (%s)...\n", fe->dvb->num, fe->id, fe->ops.info.name); dvb_register_device (fe->dvb, &fepriv->dvbdev, &dvbdev_template, fe, DVB_DEVICE_FRONTEND); mutex_unlock(&frontend_mutex); return 0; } EXPORT_SYMBOL(dvb_register_frontend); int dvb_unregister_frontend(struct dvb_frontend* fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; dprintk ("%s\n", __func__); mutex_lock(&frontend_mutex); dvb_frontend_stop (fe); mutex_unlock(&frontend_mutex); if (fepriv->dvbdev->users < -1) wait_event(fepriv->dvbdev->wait_queue, 
fepriv->dvbdev->users==-1); mutex_lock(&frontend_mutex); dvb_unregister_device (fepriv->dvbdev); /* fe is invalid now */ kfree(fepriv); mutex_unlock(&frontend_mutex); return 0; } EXPORT_SYMBOL(dvb_unregister_frontend); #ifdef CONFIG_MEDIA_ATTACH void dvb_frontend_detach(struct dvb_frontend* fe) { void *ptr; if (fe->ops.release_sec) { fe->ops.release_sec(fe); symbol_put_addr(fe->ops.release_sec); } if (fe->ops.tuner_ops.release) { fe->ops.tuner_ops.release(fe); symbol_put_addr(fe->ops.tuner_ops.release); } if (fe->ops.analog_ops.release) { fe->ops.analog_ops.release(fe); symbol_put_addr(fe->ops.analog_ops.release); } ptr = (void*)fe->ops.release; if (ptr) { fe->ops.release(fe); symbol_put_addr(ptr); } } #else void dvb_frontend_detach(struct dvb_frontend* fe) { if (fe->ops.release_sec) fe->ops.release_sec(fe); if (fe->ops.tuner_ops.release) fe->ops.tuner_ops.release(fe); if (fe->ops.analog_ops.release) fe->ops.analog_ops.release(fe); if (fe->ops.release) fe->ops.release(fe); } #endif EXPORT_SYMBOL(dvb_frontend_detach);
gpl-2.0
byeonggonlee/lynx-ns-gb
drivers/staging/hv/channel_mgmt.c
759
25519
/* * Copyright (c) 2009, Microsoft Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Authors: * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/module.h> #include <linux/completion.h> #include "osd.h" #include "logging.h" #include "vmbus_private.h" #include "utils.h" struct vmbus_channel_message_table_entry { enum vmbus_channel_message_type messageType; void (*messageHandler)(struct vmbus_channel_message_header *msg); }; #define MAX_MSG_TYPES 3 #define MAX_NUM_DEVICE_CLASSES_SUPPORTED 7 static const struct hv_guid gSupportedDeviceClasses[MAX_NUM_DEVICE_CLASSES_SUPPORTED] = { /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ /* Storage - SCSI */ { .data = { 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f } }, /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */ /* Network */ { .data = { 0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46, 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E } }, /* {CFA8B69E-5B4A-4cc0-B98B-8BA1A1F3F95A} */ /* Input */ { .data = { 0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c, 0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A } }, /* {32412632-86cb-44a2-9b5c-50d1417354f5} */ /* IDE */ { .data = { 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 0x9b, 0x5c, 0x50, 
0xd1, 0x41, 0x73, 0x54, 0xf5 } }, /* 0E0B6031-5213-4934-818B-38D90CED39DB */ /* Shutdown */ { .data = { 0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49, 0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB } }, /* {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */ /* TimeSync */ { .data = { 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf } }, /* {57164f39-9115-4e78-ab55-382f3bd5422d} */ /* Heartbeat */ { .data = { 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d } }, }; /** * prep_negotiate_resp() - Create default response for Hyper-V Negotiate message * @icmsghdrp: Pointer to msg header structure * @icmsg_negotiate: Pointer to negotiate message structure * @buf: Raw buffer channel data * * @icmsghdrp is of type &struct icmsg_hdr. * @negop is of type &struct icmsg_negotiate. * Set up and fill in default negotiate response message. This response can * come from both the vmbus driver and the hv_utils driver. The current api * will respond properly to both Windows 2008 and Windows 2008-R2 operating * systems. * * Mainly used by Hyper-V drivers. 
*/ void prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, struct icmsg_negotiate *negop, u8 *buf) { if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { icmsghdrp->icmsgsize = 0x10; negop = (struct icmsg_negotiate *)&buf[ sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)]; if (negop->icframe_vercnt == 2 && negop->icversion_data[1].major == 3) { negop->icversion_data[0].major = 3; negop->icversion_data[0].minor = 0; negop->icversion_data[1].major = 3; negop->icversion_data[1].minor = 0; } else { negop->icversion_data[0].major = 1; negop->icversion_data[0].minor = 0; negop->icversion_data[1].major = 1; negop->icversion_data[1].minor = 0; } negop->icframe_vercnt = 1; negop->icmsg_vercnt = 1; } } EXPORT_SYMBOL(prep_negotiate_resp); /** * chn_cb_negotiate() - Default handler for non IDE/SCSI/NETWORK * Hyper-V requests * @context: Pointer to argument structure. * * Set up the default handler for non device driver specific requests * from Hyper-V. This stub responds to the default negotiate messages * that come in for every non IDE/SCSI/Network request. * This behavior is normally overwritten in the hv_utils driver. That * driver handles requests like gracefull shutdown, heartbeats etc. * * Mainly used by Hyper-V drivers. */ void chn_cb_negotiate(void *context) { struct vmbus_channel *channel = context; u8 *buf; u32 buflen, recvlen; u64 requestid; struct icmsg_hdr *icmsghdrp; struct icmsg_negotiate *negop = NULL; buflen = PAGE_SIZE; buf = kmalloc(buflen, GFP_ATOMIC); VmbusChannelRecvPacket(channel, buf, buflen, &recvlen, &requestid); if (recvlen > 0) { icmsghdrp = (struct icmsg_hdr *)&buf[ sizeof(struct vmbuspipe_hdr)]; prep_negotiate_resp(icmsghdrp, negop, buf); icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; VmbusChannelSendPacket(channel, buf, recvlen, requestid, VmbusPacketTypeDataInBand, 0); } kfree(buf); } EXPORT_SYMBOL(chn_cb_negotiate); /* * Function table used for message responses for non IDE/SCSI/Network type * messages. 
(Such as KVP/Shutdown etc) */ struct hyperv_service_callback hv_cb_utils[MAX_MSG_TYPES] = { /* 0E0B6031-5213-4934-818B-38D90CED39DB */ /* Shutdown */ { .msg_type = HV_SHUTDOWN_MSG, .data = { 0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49, 0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB }, .callback = chn_cb_negotiate, .log_msg = "Shutdown channel functionality initialized" }, /* {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */ /* TimeSync */ { .msg_type = HV_TIMESYNC_MSG, .data = { 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf }, .callback = chn_cb_negotiate, .log_msg = "Timesync channel functionality initialized" }, /* {57164f39-9115-4e78-ab55-382f3bd5422d} */ /* Heartbeat */ { .msg_type = HV_HEARTBEAT_MSG, .data = { 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d }, .callback = chn_cb_negotiate, .log_msg = "Heartbeat channel functionality initialized" }, }; EXPORT_SYMBOL(hv_cb_utils); /* * AllocVmbusChannel - Allocate and initialize a vmbus channel object */ struct vmbus_channel *AllocVmbusChannel(void) { struct vmbus_channel *channel; channel = kzalloc(sizeof(*channel), GFP_ATOMIC); if (!channel) return NULL; spin_lock_init(&channel->inbound_lock); init_timer(&channel->poll_timer); channel->poll_timer.data = (unsigned long)channel; channel->poll_timer.function = VmbusChannelOnTimer; channel->ControlWQ = create_workqueue("hv_vmbus_ctl"); if (!channel->ControlWQ) { kfree(channel); return NULL; } return channel; } /* * ReleaseVmbusChannel - Release the vmbus channel object itself */ static inline void ReleaseVmbusChannel(void *context) { struct vmbus_channel *channel = context; DPRINT_ENTER(VMBUS); DPRINT_DBG(VMBUS, "releasing channel (%p)", channel); destroy_workqueue(channel->ControlWQ); DPRINT_DBG(VMBUS, "channel released (%p)", channel); kfree(channel); DPRINT_EXIT(VMBUS); } /* * FreeVmbusChannel - Release the resources used by the vmbus channel object */ void 
FreeVmbusChannel(struct vmbus_channel *Channel) { del_timer_sync(&Channel->poll_timer); /* * We have to release the channel's workqueue/thread in the vmbus's * workqueue/thread context * ie we can't destroy ourselves. */ osd_schedule_callback(gVmbusConnection.WorkQueue, ReleaseVmbusChannel, Channel); } DECLARE_COMPLETION(hv_channel_ready); /* * Count initialized channels, and ensure all channels are ready when hv_vmbus * module loading completes. */ static void count_hv_channel(void) { static int counter; unsigned long flags; spin_lock_irqsave(&gVmbusConnection.channel_lock, flags); if (++counter == MAX_MSG_TYPES) complete(&hv_channel_ready); spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags); } /* * VmbusChannelProcessOffer - Process the offer by creating a channel/device * associated with this offer */ static void VmbusChannelProcessOffer(void *context) { struct vmbus_channel *newChannel = context; struct vmbus_channel *channel; bool fNew = true; int ret; int cnt; unsigned long flags; DPRINT_ENTER(VMBUS); /* Make sure this is a new offer */ spin_lock_irqsave(&gVmbusConnection.channel_lock, flags); list_for_each_entry(channel, &gVmbusConnection.ChannelList, ListEntry) { if (!memcmp(&channel->OfferMsg.Offer.InterfaceType, &newChannel->OfferMsg.Offer.InterfaceType, sizeof(struct hv_guid)) && !memcmp(&channel->OfferMsg.Offer.InterfaceInstance, &newChannel->OfferMsg.Offer.InterfaceInstance, sizeof(struct hv_guid))) { fNew = false; break; } } if (fNew) list_add_tail(&newChannel->ListEntry, &gVmbusConnection.ChannelList); spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags); if (!fNew) { DPRINT_DBG(VMBUS, "Ignoring duplicate offer for relid (%d)", newChannel->OfferMsg.ChildRelId); FreeVmbusChannel(newChannel); DPRINT_EXIT(VMBUS); return; } /* * Start the process of binding this offer to the driver * We need to set the DeviceObject field before calling * VmbusChildDeviceAdd() */ newChannel->DeviceObject = VmbusChildDeviceCreate( 
&newChannel->OfferMsg.Offer.InterfaceType, &newChannel->OfferMsg.Offer.InterfaceInstance, newChannel); DPRINT_DBG(VMBUS, "child device object allocated - %p", newChannel->DeviceObject); /* * Add the new device to the bus. This will kick off device-driver * binding which eventually invokes the device driver's AddDevice() * method. */ ret = VmbusChildDeviceAdd(newChannel->DeviceObject); if (ret != 0) { DPRINT_ERR(VMBUS, "unable to add child device object (relid %d)", newChannel->OfferMsg.ChildRelId); spin_lock_irqsave(&gVmbusConnection.channel_lock, flags); list_del(&newChannel->ListEntry); spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags); FreeVmbusChannel(newChannel); } else { /* * This state is used to indicate a successful open * so that when we do close the channel normally, we * can cleanup properly */ newChannel->State = CHANNEL_OPEN_STATE; /* Open IC channels */ for (cnt = 0; cnt < MAX_MSG_TYPES; cnt++) { if (memcmp(&newChannel->OfferMsg.Offer.InterfaceType, &hv_cb_utils[cnt].data, sizeof(struct hv_guid)) == 0 && VmbusChannelOpen(newChannel, 2 * PAGE_SIZE, 2 * PAGE_SIZE, NULL, 0, hv_cb_utils[cnt].callback, newChannel) == 0) { hv_cb_utils[cnt].channel = newChannel; DPRINT_INFO(VMBUS, "%s", hv_cb_utils[cnt].log_msg); count_hv_channel(); } } } DPRINT_EXIT(VMBUS); } /* * VmbusChannelProcessRescindOffer - Rescind the offer by initiating a device removal */ static void VmbusChannelProcessRescindOffer(void *context) { struct vmbus_channel *channel = context; DPRINT_ENTER(VMBUS); VmbusChildDeviceRemove(channel->DeviceObject); DPRINT_EXIT(VMBUS); } /* * VmbusChannelOnOffer - Handler for channel offers from vmbus in parent partition. * * We ignore all offers except network and storage offers. 
For each network and * storage offers, we create a channel object and queue a work item to the * channel object to process the offer synchronously */ static void VmbusChannelOnOffer(struct vmbus_channel_message_header *hdr) { struct vmbus_channel_offer_channel *offer; struct vmbus_channel *newChannel; struct hv_guid *guidType; struct hv_guid *guidInstance; int i; int fSupported = 0; DPRINT_ENTER(VMBUS); offer = (struct vmbus_channel_offer_channel *)hdr; for (i = 0; i < MAX_NUM_DEVICE_CLASSES_SUPPORTED; i++) { if (memcmp(&offer->Offer.InterfaceType, &gSupportedDeviceClasses[i], sizeof(struct hv_guid)) == 0) { fSupported = 1; break; } } if (!fSupported) { DPRINT_DBG(VMBUS, "Ignoring channel offer notification for " "child relid %d", offer->ChildRelId); DPRINT_EXIT(VMBUS); return; } guidType = &offer->Offer.InterfaceType; guidInstance = &offer->Offer.InterfaceInstance; DPRINT_INFO(VMBUS, "Channel offer notification - " "child relid %d monitor id %d allocated %d, " "type {%02x%02x%02x%02x-%02x%02x-%02x%02x-" "%02x%02x%02x%02x%02x%02x%02x%02x} " "instance {%02x%02x%02x%02x-%02x%02x-%02x%02x-" "%02x%02x%02x%02x%02x%02x%02x%02x}", offer->ChildRelId, offer->MonitorId, offer->MonitorAllocated, guidType->data[3], guidType->data[2], guidType->data[1], guidType->data[0], guidType->data[5], guidType->data[4], guidType->data[7], guidType->data[6], guidType->data[8], guidType->data[9], guidType->data[10], guidType->data[11], guidType->data[12], guidType->data[13], guidType->data[14], guidType->data[15], guidInstance->data[3], guidInstance->data[2], guidInstance->data[1], guidInstance->data[0], guidInstance->data[5], guidInstance->data[4], guidInstance->data[7], guidInstance->data[6], guidInstance->data[8], guidInstance->data[9], guidInstance->data[10], guidInstance->data[11], guidInstance->data[12], guidInstance->data[13], guidInstance->data[14], guidInstance->data[15]); /* Allocate the channel object and save this offer. 
*/ newChannel = AllocVmbusChannel(); if (!newChannel) { DPRINT_ERR(VMBUS, "unable to allocate channel object"); return; } DPRINT_DBG(VMBUS, "channel object allocated - %p", newChannel); memcpy(&newChannel->OfferMsg, offer, sizeof(struct vmbus_channel_offer_channel)); newChannel->MonitorGroup = (u8)offer->MonitorId / 32; newChannel->MonitorBit = (u8)offer->MonitorId % 32; /* TODO: Make sure the offer comes from our parent partition */ osd_schedule_callback(newChannel->ControlWQ, VmbusChannelProcessOffer, newChannel); DPRINT_EXIT(VMBUS); } /* * VmbusChannelOnOfferRescind - Rescind offer handler. * * We queue a work item to process this offer synchronously */ static void VmbusChannelOnOfferRescind(struct vmbus_channel_message_header *hdr) { struct vmbus_channel_rescind_offer *rescind; struct vmbus_channel *channel; DPRINT_ENTER(VMBUS); rescind = (struct vmbus_channel_rescind_offer *)hdr; channel = GetChannelFromRelId(rescind->ChildRelId); if (channel == NULL) { DPRINT_DBG(VMBUS, "channel not found for relId %d", rescind->ChildRelId); return; } osd_schedule_callback(channel->ControlWQ, VmbusChannelProcessRescindOffer, channel); DPRINT_EXIT(VMBUS); } /* * VmbusChannelOnOffersDelivered - This is invoked when all offers have been delivered. * * Nothing to do here. */ static void VmbusChannelOnOffersDelivered( struct vmbus_channel_message_header *hdr) { DPRINT_ENTER(VMBUS); DPRINT_EXIT(VMBUS); } /* * VmbusChannelOnOpenResult - Open result handler. * * This is invoked when we received a response to our channel open request. * Find the matching request, copy the response and signal the requesting * thread. 
*/
static void VmbusChannelOnOpenResult(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct list_head *curr;
	struct vmbus_channel_msginfo *msgInfo;
	struct vmbus_channel_message_header *requestHeader;
	struct vmbus_channel_open_channel *openMsg;
	unsigned long flags;

	DPRINT_ENTER(VMBUS);

	result = (struct vmbus_channel_open_result *)hdr;
	DPRINT_DBG(VMBUS, "vmbus open result - %d", result->Status);

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);

	list_for_each(curr, &gVmbusConnection.ChannelMsgList) {
		/* FIXME: this should probably use list_entry() instead */
		/* Cast relies on msgInfo's list link being its first member */
		msgInfo = (struct vmbus_channel_msginfo *)curr;
		requestHeader = (struct vmbus_channel_message_header *)msgInfo->Msg;

		if (requestHeader->MessageType == ChannelMessageOpenChannel) {
			openMsg = (struct vmbus_channel_open_channel *)msgInfo->Msg;
			/* Match on both the relid and the open transaction id */
			if (openMsg->ChildRelId == result->ChildRelId &&
			    openMsg->OpenId == result->OpenId) {
				memcpy(&msgInfo->Response.OpenResult,
				       result,
				       sizeof(struct vmbus_channel_open_result));
				/* Wake the thread blocked in the open request */
				osd_WaitEventSet(msgInfo->WaitEvent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);

	DPRINT_EXIT(VMBUS);
}

/*
 * VmbusChannelOnGpadlCreated - GPADL created handler.
 *
 * This is invoked when we received a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
*/
static void VmbusChannelOnGpadlCreated(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlCreated;
	struct list_head *curr;
	struct vmbus_channel_msginfo *msgInfo;
	struct vmbus_channel_message_header *requestHeader;
	struct vmbus_channel_gpadl_header *gpadlHeader;
	unsigned long flags;

	DPRINT_ENTER(VMBUS);

	gpadlCreated = (struct vmbus_channel_gpadl_created *)hdr;
	DPRINT_DBG(VMBUS, "vmbus gpadl created result - %d",
		   gpadlCreated->CreationStatus);

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);

	list_for_each(curr, &gVmbusConnection.ChannelMsgList) {
		/* FIXME: this should probably use list_entry() instead */
		/* Cast relies on msgInfo's list link being its first member */
		msgInfo = (struct vmbus_channel_msginfo *)curr;
		requestHeader = (struct vmbus_channel_message_header *)msgInfo->Msg;

		if (requestHeader->MessageType == ChannelMessageGpadlHeader) {
			gpadlHeader =
				(struct vmbus_channel_gpadl_header *)requestHeader;

			/* Match on both the relid and the gpadl handle */
			if ((gpadlCreated->ChildRelId ==
			     gpadlHeader->ChildRelId) &&
			    (gpadlCreated->Gpadl == gpadlHeader->Gpadl)) {
				memcpy(&msgInfo->Response.GpadlCreated,
				       gpadlCreated,
				       sizeof(struct vmbus_channel_gpadl_created));
				/* Wake the thread blocked in gpadl establish */
				osd_WaitEventSet(msgInfo->WaitEvent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);

	DPRINT_EXIT(VMBUS);
}

/*
 * VmbusChannelOnGpadlTorndown - GPADL torndown handler.
 *
 * This is invoked when we received a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
*/
static void VmbusChannelOnGpadlTorndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadlTorndown;
	struct list_head *curr;
	struct vmbus_channel_msginfo *msgInfo;
	struct vmbus_channel_message_header *requestHeader;
	struct vmbus_channel_gpadl_teardown *gpadlTeardown;
	unsigned long flags;

	DPRINT_ENTER(VMBUS);

	gpadlTorndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);

	list_for_each(curr, &gVmbusConnection.ChannelMsgList) {
		/* FIXME: this should probably use list_entry() instead */
		/* Cast relies on msgInfo's list link being its first member */
		msgInfo = (struct vmbus_channel_msginfo *)curr;
		requestHeader = (struct vmbus_channel_message_header *)msgInfo->Msg;

		if (requestHeader->MessageType == ChannelMessageGpadlTeardown) {
			gpadlTeardown =
				(struct vmbus_channel_gpadl_teardown *)requestHeader;

			/* A gpadl handle is unique, so relid is not checked */
			if (gpadlTorndown->Gpadl == gpadlTeardown->Gpadl) {
				memcpy(&msgInfo->Response.GpadlTorndown,
				       gpadlTorndown,
				       sizeof(struct vmbus_channel_gpadl_torndown));
				/* Wake the thread blocked in gpadl teardown */
				osd_WaitEventSet(msgInfo->WaitEvent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);

	DPRINT_EXIT(VMBUS);
}

/*
 * VmbusChannelOnVersionResponse - Version response handler
 *
 * This is invoked when we received a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
*/
static void VmbusChannelOnVersionResponse(
		struct vmbus_channel_message_header *hdr)
{
	struct list_head *curr;
	struct vmbus_channel_msginfo *msgInfo;
	struct vmbus_channel_message_header *requestHeader;
	struct vmbus_channel_initiate_contact *initiate;
	struct vmbus_channel_version_response *versionResponse;
	unsigned long flags;

	DPRINT_ENTER(VMBUS);

	versionResponse = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);

	/*
	 * Note: no break here - every pending InitiateContact request is
	 * given the response and signalled.  The 'initiate' cast is currently
	 * unused beyond identifying the request type.
	 */
	list_for_each(curr, &gVmbusConnection.ChannelMsgList) {
		/* FIXME: this should probably use list_entry() instead */
		msgInfo = (struct vmbus_channel_msginfo *)curr;
		requestHeader = (struct vmbus_channel_message_header *)msgInfo->Msg;

		if (requestHeader->MessageType ==
		    ChannelMessageInitiateContact) {
			initiate =
				(struct vmbus_channel_initiate_contact *)requestHeader;
			memcpy(&msgInfo->Response.VersionResponse,
			       versionResponse,
			       sizeof(struct vmbus_channel_version_response));
			osd_WaitEventSet(msgInfo->WaitEvent);
		}
	}
	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);

	DPRINT_EXIT(VMBUS);
}

/* Channel message dispatch table */
/* Indexed by message type; NULL entries are host-to-guest-only or unhandled */
static struct vmbus_channel_message_table_entry
	gChannelMessageTable[ChannelMessageCount] = {
	{ChannelMessageInvalid,			NULL},
	{ChannelMessageOfferChannel,		VmbusChannelOnOffer},
	{ChannelMessageRescindChannelOffer,	VmbusChannelOnOfferRescind},
	{ChannelMessageRequestOffers,		NULL},
	{ChannelMessageAllOffersDelivered,	VmbusChannelOnOffersDelivered},
	{ChannelMessageOpenChannel,		NULL},
	{ChannelMessageOpenChannelResult,	VmbusChannelOnOpenResult},
	{ChannelMessageCloseChannel,		NULL},
	{ChannelMessageGpadlHeader,		NULL},
	{ChannelMessageGpadlBody,		NULL},
	{ChannelMessageGpadlCreated,		VmbusChannelOnGpadlCreated},
	{ChannelMessageGpadlTeardown,		NULL},
	{ChannelMessageGpadlTorndown,		VmbusChannelOnGpadlTorndown},
	{ChannelMessageRelIdReleased,		NULL},
	{ChannelMessageInitiateContact,		NULL},
	{ChannelMessageVersionResponse,		VmbusChannelOnVersionResponse},
	{ChannelMessageUnload,			NULL},
};

/*
 *
VmbusOnChannelMessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void VmbusOnChannelMessage(void *Context)
{
	struct hv_message *msg = Context;
	struct vmbus_channel_message_header *hdr;
	int size;

	DPRINT_ENTER(VMBUS);

	hdr = (struct vmbus_channel_message_header *)msg->u.Payload;
	size = msg->Header.PayloadSize;

	DPRINT_DBG(VMBUS, "message type %d size %d", hdr->MessageType, size);

	/* Bounds-check before indexing the dispatch table */
	if (hdr->MessageType >= ChannelMessageCount) {
		DPRINT_ERR(VMBUS,
			   "Received invalid channel message type %d size %d",
			   hdr->MessageType, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.Payload, size);
		kfree(msg);
		return;
	}

	if (gChannelMessageTable[hdr->MessageType].messageHandler)
		gChannelMessageTable[hdr->MessageType].messageHandler(hdr);
	else
		DPRINT_ERR(VMBUS, "Unhandled channel message type %d",
			   hdr->MessageType);

	/* Free the msg that was allocated in VmbusOnMsgDPC() */
	kfree(msg);
	DPRINT_EXIT(VMBUS);
}

/*
 * VmbusChannelRequestOffers - Send a request to get all our pending offers.
*/ int VmbusChannelRequestOffers(void) { struct vmbus_channel_message_header *msg; struct vmbus_channel_msginfo *msgInfo; int ret; DPRINT_ENTER(VMBUS); msgInfo = kmalloc(sizeof(*msgInfo) + sizeof(struct vmbus_channel_message_header), GFP_KERNEL); if (!msgInfo) return -ENOMEM; msgInfo->WaitEvent = osd_WaitEventCreate(); if (!msgInfo->WaitEvent) { kfree(msgInfo); return -ENOMEM; } msg = (struct vmbus_channel_message_header *)msgInfo->Msg; msg->MessageType = ChannelMessageRequestOffers; /*SpinlockAcquire(gVmbusConnection.channelMsgLock); INSERT_TAIL_LIST(&gVmbusConnection.channelMsgList, &msgInfo->msgListEntry); SpinlockRelease(gVmbusConnection.channelMsgLock);*/ ret = VmbusPostMessage(msg, sizeof(struct vmbus_channel_message_header)); if (ret != 0) { DPRINT_ERR(VMBUS, "Unable to request offers - %d", ret); /*SpinlockAcquire(gVmbusConnection.channelMsgLock); REMOVE_ENTRY_LIST(&msgInfo->msgListEntry); SpinlockRelease(gVmbusConnection.channelMsgLock);*/ goto Cleanup; } /* osd_WaitEventWait(msgInfo->waitEvent); */ /*SpinlockAcquire(gVmbusConnection.channelMsgLock); REMOVE_ENTRY_LIST(&msgInfo->msgListEntry); SpinlockRelease(gVmbusConnection.channelMsgLock);*/ Cleanup: if (msgInfo) { kfree(msgInfo->WaitEvent); kfree(msgInfo); } DPRINT_EXIT(VMBUS); return ret; } /* * VmbusChannelReleaseUnattachedChannels - Release channels that are * unattached/unconnected ie (no drivers associated) */ void VmbusChannelReleaseUnattachedChannels(void) { struct vmbus_channel *channel, *pos; struct vmbus_channel *start = NULL; unsigned long flags; spin_lock_irqsave(&gVmbusConnection.channel_lock, flags); list_for_each_entry_safe(channel, pos, &gVmbusConnection.ChannelList, ListEntry) { if (channel == start) break; if (!channel->DeviceObject->Driver) { list_del(&channel->ListEntry); DPRINT_INFO(VMBUS, "Releasing unattached device object %p", channel->DeviceObject); VmbusChildDeviceRemove(channel->DeviceObject); FreeVmbusChannel(channel); } else { if (!start) start = channel; } } 
spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags); } /* eof */
gpl-2.0
stelios97/sony-kernel-msm7x27a
drivers/video/msm/mddi_toshiba_wvga_pt.c
759
1860
/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include "msm_fb.h"
#include "mddihost.h"
#include "mddihosti.h"
#include "mddi_toshiba.h"

/* Panel description handed to the common Toshiba MDDI driver */
static struct msm_panel_info pinfo;

/*
 * Probe and register the Toshiba WVGA (480x800) primary MDDI panel.
 * Returns 0 when the panel is absent (not an error) or the result of
 * the device registration.
 */
static int __init mddi_toshiba_wvga_pt_init(void)
{
	int rc;

#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
	rc = msm_fb_detect_client("mddi_toshiba_wvga_pt");
	if (rc == -ENODEV)
		return 0;
	/* When detection is inconclusive, fall back to the client id */
	if (rc && mddi_get_client_id() != 0xd2638722)
		return 0;
#endif

	/* Geometry and panel type */
	pinfo.xres = 480;
	pinfo.yres = 800;
	MSM_FB_SINGLE_MODE_PANEL(&pinfo);
	pinfo.type = MDDI_PANEL;
	pinfo.pdest = DISPLAY_1;
	pinfo.mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
	pinfo.wait_cycle = 0;
	pinfo.bpp = 18;

	/* Vsync / porch timing */
	pinfo.lcd.vsync_enable = TRUE;
	pinfo.lcd.refx100 = 6102; /* adjust refx100 to prevent tearing */
	pinfo.mddi.is_type1 = TRUE;
	pinfo.lcd.v_back_porch = 8;	/* vsw=10 + vbp = 8 */
	pinfo.lcd.v_front_porch = 2;
	pinfo.lcd.v_pulse_width = 10;
	pinfo.lcd.hw_vsync_mode = FALSE;
	pinfo.lcd.vsync_notifier_period = (1 * HZ);

	/* Backlight range and clocking */
	pinfo.bl_max = 15;
	pinfo.bl_min = 1;
	pinfo.clk_rate = 222750000;
	pinfo.clk_min = 200000000;
	pinfo.clk_max = 240000000;
	pinfo.fb_num = 2;

	rc = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_PRIM,
					  LCD_TOSHIBA_2P4_WVGA_PT);
	if (rc)
		printk(KERN_ERR "%s: failed to register device!\n", __func__);

	return rc;
}

module_init(mddi_toshiba_wvga_pt_init);
gpl-2.0
gromaudio/linux-imx6-31053
drivers/net/wireless/ath/regd.c
1015
17468
/* * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/export.h> #include <net/cfg80211.h> #include <net/mac80211.h> #include "regd.h" #include "regd_common.h" static int __ath_regd_init(struct ath_regulatory *reg); /* * This is a set of common rules used by our world regulatory domains. * We have 12 world regulatory domains. To save space we consolidate * the regulatory domains in 5 structures by frequency and change * the flags on our reg_notifier() on a case by case basis. 
*/

/* Only these channels all allow active scan on all world regulatory domains */
#define ATH9K_2GHZ_CH01_11	REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)

/* We enable active scan on these a case by case basis by regulatory domain */
#define ATH9K_2GHZ_CH12_13	REG_RULE(2467-10, 2472+10, 40, 0, 20,\
					 NL80211_RRF_PASSIVE_SCAN)
#define ATH9K_2GHZ_CH14		REG_RULE(2484-10, 2484+10, 40, 0, 20,\
					 NL80211_RRF_PASSIVE_SCAN | \
					 NL80211_RRF_NO_OFDM)

/* We allow IBSS on these on a case by case basis by regulatory domain */
#define ATH9K_5GHZ_5150_5350	REG_RULE(5150-10, 5350+10, 40, 0, 30,\
					 NL80211_RRF_PASSIVE_SCAN | \
					 NL80211_RRF_NO_IBSS)
#define ATH9K_5GHZ_5470_5850	REG_RULE(5470-10, 5850+10, 40, 0, 30,\
					 NL80211_RRF_PASSIVE_SCAN | \
					 NL80211_RRF_NO_IBSS)
#define ATH9K_5GHZ_5725_5850	REG_RULE(5725-10, 5850+10, 40, 0, 30,\
					 NL80211_RRF_PASSIVE_SCAN | \
					 NL80211_RRF_NO_IBSS)

#define ATH9K_2GHZ_ALL		ATH9K_2GHZ_CH01_11, \
				ATH9K_2GHZ_CH12_13, \
				ATH9K_2GHZ_CH14

#define ATH9K_5GHZ_ALL		ATH9K_5GHZ_5150_5350, \
				ATH9K_5GHZ_5470_5850

/* This one skips what we call "mid band" */
#define ATH9K_5GHZ_NO_MIDBAND	ATH9K_5GHZ_5150_5350, \
				ATH9K_5GHZ_5725_5850

/* Can be used for:
 * 0x60, 0x61, 0x62 */
static const struct ieee80211_regdomain ath_world_regdom_60_61_62 = {
	.n_reg_rules = 5,
	.alpha2 =  "99",
	.reg_rules = {
		ATH9K_2GHZ_ALL,
		ATH9K_5GHZ_ALL,
	}
};

/* Can be used by 0x63 and 0x65 */
static const struct ieee80211_regdomain ath_world_regdom_63_65 = {
	.n_reg_rules = 4,
	.alpha2 =  "99",
	.reg_rules = {
		ATH9K_2GHZ_CH01_11,
		ATH9K_2GHZ_CH12_13,
		ATH9K_5GHZ_NO_MIDBAND,
	}
};

/* Can be used by 0x64 only */
static const struct ieee80211_regdomain ath_world_regdom_64 = {
	.n_reg_rules = 3,
	.alpha2 =  "99",
	.reg_rules = {
		ATH9K_2GHZ_CH01_11,
		ATH9K_5GHZ_NO_MIDBAND,
	}
};

/* Can be used by 0x66 and 0x69 */
static const struct ieee80211_regdomain ath_world_regdom_66_69 = {
	.n_reg_rules = 3,
	.alpha2 =  "99",
	.reg_rules = {
		ATH9K_2GHZ_CH01_11,
		ATH9K_5GHZ_ALL,
	}
};

/* Can be used by 0x67, 0x68, 0x6A and 0x6C */
static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
	.n_reg_rules = 4,
	.alpha2 =  "99",
	.reg_rules = {
		ATH9K_2GHZ_CH01_11,
		ATH9K_2GHZ_CH12_13,
		ATH9K_5GHZ_ALL,
	}
};

/* True for world-roaming SKUs (not country-locked EEPROMs) */
static inline bool is_wwr_sku(u16 regd)
{
	return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
		(((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
		(regd == WORLD));
}

/* Strip the worldwide-roaming bit from the raw EEPROM regdomain value */
static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg)
{
	return reg->current_rd & ~WORLDWIDE_ROAMING_FLAG;
}

bool ath_is_world_regd(struct ath_regulatory *reg)
{
	return is_wwr_sku(ath_regd_get_eepromRD(reg));
}
EXPORT_SYMBOL(ath_is_world_regd);

static const struct ieee80211_regdomain *ath_default_world_regdomain(void)
{
	/* this is the most restrictive */
	return &ath_world_regdom_64;
}

/* Map a world-SKU regdomain enum to its ieee80211_regdomain table */
static const struct
ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
{
	switch (reg->regpair->regDmnEnum) {
	case 0x60:
	case 0x61:
	case 0x62:
		return &ath_world_regdom_60_61_62;
	case 0x63:
	case 0x65:
		return &ath_world_regdom_63_65;
	case 0x64:
		return &ath_world_regdom_64;
	case 0x66:
	case 0x69:
		return &ath_world_regdom_66_69;
	case 0x67:
	case 0x68:
	case 0x6A:
	case 0x6C:
		return &ath_world_regdom_67_68_6A_6C;
	default:
		WARN_ON(1);
		return ath_default_world_regdomain();
	}
}

bool ath_is_49ghz_allowed(u16 regdomain)
{
	/* possibly more */
	return regdomain == MKK9_MKKC;
}
EXPORT_SYMBOL(ath_is_49ghz_allowed);

/* Frequency is one where radar detection is required */
static bool ath_is_radar_freq(u16 center_freq)
{
	return (center_freq >= 5260 && center_freq <= 5700);
}

/*
 * N.B: These exception rules do not apply radar freqs.
 *
 * - We enable adhoc (or beaconing) if allowed by 11d
 * - We enable active scan if the channel is allowed by 11d
 * - If no country IE has been processed and we determine we have
 *   received a beacon on a channel we can enable active scan and
 *   adhoc (or beaconing).
*/
static void
ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
			      enum nl80211_reg_initiator initiator)
{
	enum ieee80211_band band;
	struct ieee80211_supported_band *sband;
	const struct ieee80211_reg_rule *reg_rule;
	struct ieee80211_channel *ch;
	unsigned int i;

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {

		if (!wiphy->bands[band])
			continue;

		sband = wiphy->bands[band];

		for (i = 0; i < sband->n_channels; i++) {

			ch = &sband->channels[i];

			/* DFS channels are handled by the radar-flag pass */
			if (ath_is_radar_freq(ch->center_freq) ||
			    (ch->flags & IEEE80211_CHAN_RADAR))
				continue;

			if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
				reg_rule = freq_reg_info(wiphy,
							 ch->center_freq);
				if (IS_ERR(reg_rule))
					continue;
				/*
				 * If 11d had a rule for this channel ensure
				 * we enable adhoc/beaconing if it allows us to
				 * use it. Note that we would have disabled it
				 * by applying our static world regdomain by
				 * default during init, prior to calling our
				 * regulatory_hint().
				 */
				if (!(reg_rule->flags &
				    NL80211_RRF_NO_IBSS))
					ch->flags &=
					  ~IEEE80211_CHAN_NO_IBSS;
				if (!(reg_rule->flags &
				    NL80211_RRF_PASSIVE_SCAN))
					ch->flags &=
					  ~IEEE80211_CHAN_PASSIVE_SCAN;
			} else {
				if (ch->beacon_found)
					ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
					  IEEE80211_CHAN_PASSIVE_SCAN);
			}
		}
	}

}

/* Allows active scan on Ch 12 and 13 */
static void
ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
				enum nl80211_reg_initiator initiator)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *ch;
	const struct ieee80211_reg_rule *reg_rule;

	sband = wiphy->bands[IEEE80211_BAND_2GHZ];
	if (!sband)
		return;

	/*
	 * If no country IE has been received always enable active scan
	 * on these channels. This is only done for specific regulatory SKUs
	 */
	if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
		ch = &sband->channels[11]; /* CH 12 */
		if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
			ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
		ch = &sband->channels[12]; /* CH 13 */
		if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
			ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
		return;
	}

	/*
	 * If a country IE has been received check its rule for this
	 * channel first before enabling active scan. The passive scan
	 * would have been enforced by the initial processing of our
	 * custom regulatory domain.
	 */

	ch = &sband->channels[11]; /* CH 12 */
	reg_rule = freq_reg_info(wiphy, ch->center_freq);
	if (!IS_ERR(reg_rule)) {
		if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
			if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
				ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
	}

	ch = &sband->channels[12]; /* CH 13 */
	reg_rule = freq_reg_info(wiphy, ch->center_freq);
	if (!IS_ERR(reg_rule)) {
		if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
			if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
				ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
	}
}

/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */
static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *ch;
	unsigned int i;

	if (!wiphy->bands[IEEE80211_BAND_5GHZ])
		return;

	sband = wiphy->bands[IEEE80211_BAND_5GHZ];

	for (i = 0; i < sband->n_channels; i++) {
		ch = &sband->channels[i];
		if (!ath_is_radar_freq(ch->center_freq))
			continue;
		/* We always enable radar detection/DFS on this
		 * frequency range. Additionally we also apply on
		 * this frequency range:
		 * - If STA mode does not yet have DFS supports disable
		 *   active scanning
		 * - If adhoc mode does not support DFS yet then
		 *   disable adhoc in the frequency.
		 * - If AP mode does not yet support radar detection/DFS
		 *   do not allow AP mode
		 */
		if (!(ch->flags & IEEE80211_CHAN_DISABLED))
			ch->flags |= IEEE80211_CHAN_RADAR |
				     IEEE80211_CHAN_NO_IBSS |
				     IEEE80211_CHAN_PASSIVE_SCAN;
	}
}

/* Dispatch flag fixups based on the world-SKU regdomain enum */
static void ath_reg_apply_world_flags(struct wiphy *wiphy,
				      enum nl80211_reg_initiator initiator,
				      struct ath_regulatory *reg)
{
	switch (reg->regpair->regDmnEnum) {
	case 0x60:
	case 0x63:
	case 0x66:
	case 0x67:
	case 0x6C:
		ath_reg_apply_beaconing_flags(wiphy, initiator);
		break;
	case 0x68:
		ath_reg_apply_beaconing_flags(wiphy, initiator);
		ath_reg_apply_active_scan_flags(wiphy, initiator);
		break;
	}
}

/* Returns the country code, or (u16)-1 when the alpha2 is unknown */
static u16 ath_regd_find_country_by_name(char *alpha2)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
		if (!memcmp(allCountries[i].isoName, alpha2, 2))
			return allCountries[i].countryCode;
	}

	return -1;
}

void ath_reg_notifier_apply(struct wiphy *wiphy,
			    struct regulatory_request *request,
			    struct ath_regulatory *reg)
{
	struct ath_common *common = container_of(reg, struct ath_common,
						 regulatory);
	u16 country_code;

	/* We always apply this */
	ath_reg_apply_radar_flags(wiphy);

	/*
	 * This would happen when we have sent a custom regulatory request
	 * a world regulatory domain and the scheduler hasn't yet processed
	 * any pending requests in the queue.
	 */
	if (!request)
		return;

	switch (request->initiator) {
	case NL80211_REGDOM_SET_BY_CORE:
		/*
		 * If common->reg_world_copy is world roaming it means we *were*
		 * world roaming... so we now have to restore that data.
		 */
		if (!ath_is_world_regd(&common->reg_world_copy))
			break;

		memcpy(reg, &common->reg_world_copy,
		       sizeof(struct ath_regulatory));
		break;
	case NL80211_REGDOM_SET_BY_DRIVER:
	case NL80211_REGDOM_SET_BY_USER:
		break;
	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
		if (!ath_is_world_regd(reg))
			break;

		country_code = ath_regd_find_country_by_name(request->alpha2);
		if (country_code == (u16) -1)
			break;

		/* Re-derive the regpair from the country the AP advertised */
		reg->current_rd = COUNTRY_ERD_FLAG;
		reg->current_rd |= country_code;

		printk(KERN_DEBUG
		       "ath: regdomain 0x%0x updated by CountryIE\n",
		       reg->current_rd);
		__ath_regd_init(reg);

		ath_reg_apply_world_flags(wiphy, request->initiator, reg);

		break;
	}
}
EXPORT_SYMBOL(ath_reg_notifier_apply);

/* Sanity-check that the EEPROM regdomain maps to a known country/regpair */
static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
{
	u16 rd = ath_regd_get_eepromRD(reg);
	int i;

	if (rd & COUNTRY_ERD_FLAG) {
		/* EEPROM value is a country code */
		u16 cc = rd & ~COUNTRY_ERD_FLAG;
		printk(KERN_DEBUG
		       "ath: EEPROM indicates we should expect "
		       "a country code\n");
		for (i = 0; i < ARRAY_SIZE(allCountries); i++)
			if (allCountries[i].countryCode == cc)
				return true;
	} else {
		/* EEPROM value is a regpair value */
		if (rd != CTRY_DEFAULT)
			printk(KERN_DEBUG "ath: EEPROM indicates we "
			       "should expect a direct regpair map\n");
		for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
			if (regDomainPairs[i].regDmnEnum == rd)
				return true;
	}
	printk(KERN_DEBUG
		 "ath: invalid regulatory domain/country code 0x%x\n", rd);
	return false;
}

/* EEPROM country code to regpair mapping */
static struct country_code_to_enum_rd*
ath_regd_find_country(u16 countryCode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
		if (allCountries[i].countryCode == countryCode)
			return &allCountries[i];
	}
	return NULL;
}

/* EEPROM rd code to regpair mapping */
static struct country_code_to_enum_rd*
ath_regd_find_country_by_rd(int regdmn)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
		if (allCountries[i].regDmnEnum == regdmn)
			return &allCountries[i];
	}
	return NULL;
}

/* Returns the map of the EEPROM set RD to a country
code */
static u16 ath_regd_get_default_country(u16 rd)
{
	if (rd & COUNTRY_ERD_FLAG) {
		struct country_code_to_enum_rd *country = NULL;
		u16 cc = rd & ~COUNTRY_ERD_FLAG;

		country = ath_regd_find_country(cc);
		if (country != NULL)
			return cc;
	}

	return CTRY_DEFAULT;
}

/* Look up the regpair for a direct regdomain enum; NULL when unknown */
static struct reg_dmn_pair_mapping*
ath_get_regpair(int regdmn)
{
	int i;

	if (regdmn == NO_ENUMRD)
		return NULL;
	for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
		if (regDomainPairs[i].regDmnEnum == regdmn)
			return &regDomainPairs[i];
	}
	return NULL;
}

/* Install our custom regdomain on the wiphy and apply the flag fixups */
static int ath_regd_init_wiphy(struct ath_regulatory *reg,
			       struct wiphy *wiphy,
			       void (*reg_notifier)(struct wiphy *wiphy,
						    struct regulatory_request *request))
{
	const struct ieee80211_regdomain *regd;

	wiphy->reg_notifier = reg_notifier;
	wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;

	if (ath_is_world_regd(reg)) {
		/*
		 * Anything applied here (prior to wiphy registration) gets
		 * saved on the wiphy orig_* parameters
		 */
		regd = ath_world_regdomain(reg);
		wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
	} else {
		/*
		 * This gets applied in the case of the absence of CRDA,
		 * it's our own custom world regulatory domain, similar to
		 * cfg80211's but we enable passive scanning.
		 */
		regd = ath_default_world_regdomain();
	}
	wiphy_apply_custom_regulatory(wiphy, regd);
	ath_reg_apply_radar_flags(wiphy);
	ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
	return 0;
}

/*
 * Some users have reported their EEPROM programmed with
 * 0x8000 set, this is not a supported regulatory domain
 * but since we have more than one user with it we need
 * a solution for them. We default to 0x64, which is the
 * default Atheros world regulatory domain.
 */
static void ath_regd_sanitize(struct ath_regulatory *reg)
{
	if (reg->current_rd != COUNTRY_ERD_FLAG)
		return;
	printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n");
	reg->current_rd = 0x64;
}

/* Resolve the EEPROM regdomain into country code, alpha2 and regpair */
static int __ath_regd_init(struct ath_regulatory *reg)
{
	struct country_code_to_enum_rd *country = NULL;
	u16 regdmn;

	if (!reg)
		return -EINVAL;

	ath_regd_sanitize(reg);

	printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd);

	if (!ath_regd_is_eeprom_valid(reg)) {
		pr_err("Invalid EEPROM contents\n");
		return -EINVAL;
	}

	regdmn = ath_regd_get_eepromRD(reg);
	reg->country_code = ath_regd_get_default_country(regdmn);

	if (reg->country_code == CTRY_DEFAULT &&
	    regdmn == CTRY_DEFAULT) {
		printk(KERN_DEBUG "ath: EEPROM indicates default "
		       "country code should be used\n");
		reg->country_code = CTRY_UNITED_STATES;
	}

	if (reg->country_code == CTRY_DEFAULT) {
		country = NULL;
	} else {
		printk(KERN_DEBUG "ath: doing EEPROM country->regdmn "
		       "map search\n");
		country = ath_regd_find_country(reg->country_code);
		if (country == NULL) {
			printk(KERN_DEBUG
				"ath: no valid country maps found for "
				"country code: 0x%0x\n",
				reg->country_code);
			return -EINVAL;
		} else {
			regdmn = country->regDmnEnum;
			printk(KERN_DEBUG "ath: country maps to "
			       "regdmn code: 0x%0x\n",
			       regdmn);
		}
	}

	reg->regpair = ath_get_regpair(regdmn);

	if (!reg->regpair) {
		printk(KERN_DEBUG "ath: "
			"No regulatory domain pair found, cannot continue\n");
		return -EINVAL;
	}

	if (!country)
		country = ath_regd_find_country_by_rd(regdmn);

	if (country) {
		reg->alpha2[0] = country->isoName[0];
		reg->alpha2[1] = country->isoName[1];
	} else {
		/* "00" is cfg80211's "world" alpha2 */
		reg->alpha2[0] = '0';
		reg->alpha2[1] = '0';
	}

	printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n",
		reg->alpha2[0], reg->alpha2[1]);
	printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
		reg->regpair->regDmnEnum);

	return 0;
}

int
ath_regd_init(struct ath_regulatory *reg,
	      struct wiphy *wiphy,
	      void (*reg_notifier)(struct wiphy *wiphy,
				   struct regulatory_request *request))
{
	struct ath_common *common = container_of(reg, struct ath_common,
						 regulatory);
	int r;

	r = __ath_regd_init(reg);
	if (r)
		return r;

	/* Keep a pristine world-roaming copy so a CORE hint can restore it */
	if (ath_is_world_regd(reg))
		memcpy(&common->reg_world_copy, reg,
		       sizeof(struct ath_regulatory));

	ath_regd_init_wiphy(reg, wiphy, reg_notifier);

	return 0;
}
EXPORT_SYMBOL(ath_regd_init);

u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
			  enum ieee80211_band band)
{
	if (!reg->regpair ||
	    (reg->country_code == CTRY_DEFAULT &&
	     is_wwr_sku(ath_regd_get_eepromRD(reg)))) {
		return SD_NO_CTL;
	}

	switch (band) {
	case IEEE80211_BAND_2GHZ:
		return reg->regpair->reg_2ghz_ctl;
	case IEEE80211_BAND_5GHZ:
		return reg->regpair->reg_5ghz_ctl;
	default:
		return NO_CTL;
	}
}
EXPORT_SYMBOL(ath_regd_get_band_ctl);
gpl-2.0
friedrich420/S6_AEL_Kernel_Multivariant_LL-5.1.1
arch/powerpc/platforms/85xx/p3041_ds.c
2295
2238
/* * P3041 DS Setup * * Maintained by Kumar Gala (see MAINTAINERS for contact information) * * Copyright 2009-2010 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/phy.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <linux/of_platform.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include <asm/ehv_pic.h> #include "corenet_ds.h" /* * Called very early, device-tree isn't unflattened */ static int __init p3041_ds_probe(void) { unsigned long root = of_get_flat_dt_root(); #ifdef CONFIG_SMP extern struct smp_ops_t smp_85xx_ops; #endif if (of_flat_dt_is_compatible(root, "fsl,P3041DS")) return 1; /* Check if we're running under the Freescale hypervisor */ if (of_flat_dt_is_compatible(root, "fsl,P3041DS-hv")) { ppc_md.init_IRQ = ehv_pic_init; ppc_md.get_irq = ehv_pic_get_irq; ppc_md.restart = fsl_hv_restart; ppc_md.power_off = fsl_hv_halt; ppc_md.halt = fsl_hv_halt; #ifdef CONFIG_SMP /* * Disable the timebase sync operations because we can't write * to the timebase registers under the hypervisor. 
*/ smp_85xx_ops.give_timebase = NULL; smp_85xx_ops.take_timebase = NULL; #endif return 1; } return 0; } define_machine(p3041_ds) { .name = "P3041 DS", .probe = p3041_ds_probe, .setup_arch = corenet_ds_setup_arch, .init_IRQ = corenet_ds_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_coreint_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, .power_save = e500_idle, }; machine_arch_initcall(p3041_ds, corenet_ds_publish_devices); #ifdef CONFIG_SWIOTLB machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier); #endif
gpl-2.0
yu-validus/kernel_cyanogen_msm8916
drivers/gpu/drm/savage/savage_bci.c
2295
31338
/* savage_bci.c -- BCI support for Savage
 *
 * Copyright 2004  Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/savage_drm.h>
#include "savage_drv.h"

/* Need a long timeout for shadow status updates can take a while
 * and so can waiting for events when the queue is full.
 */
#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000	/* 1s */
#define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
#define SAVAGE_FREELIST_DEBUG		0

static int savage_do_cleanup_bci(struct drm_device *dev);

/*
 * Busy-wait (1us steps) until the shadow status word reports at least
 * n free words in the command FIFO/COB.  Returns 0 on success, -EBUSY
 * on timeout.
 */
static int
savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t mask = dev_priv->status_used_mask;
	uint32_t threshold = dev_priv->bci_threshold_hi;
	uint32_t status;
	int i;

#if SAVAGE_BCI_DEBUG
	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
		DRM_ERROR("Trying to emit %d words "
			  "(more than guaranteed space in COB)\n", n);
#endif

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		/* Ensure we see the chip's latest shadow-status write */
		DRM_MEMORYBARRIER();
		status = dev_priv->status_ptr[0];
		if ((status & mask) < threshold)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
	return -EBUSY;
}

/* Same wait, but polling the S3D-family hardware status register */
static int
savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return -EBUSY;
}

/* Same wait, but polling the S4-family alternate status register */
static int
savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x\n", status);
#endif
	return -EBUSY;
}

/*
 * Waiting for events.
 *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
 * never emit 0 to the event tag.
If we find a 0 event tag we know the * BIOS stomped on it and return success assuming that the BIOS waited * for engine idle. * * Note: if the Xserver uses the event tag it has to follow the same * rule. Otherwise there may be glitches every 2^16 events. */ static int savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e) { uint32_t status; int i; for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { DRM_MEMORYBARRIER(); status = dev_priv->status_ptr[1]; if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || (status & 0xffff) == 0) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); #endif return -EBUSY; } static int savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e) { uint32_t status; int i; for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { status = SAVAGE_READ(SAVAGE_STATUS_WORD1); if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || (status & 0xffff) == 0) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); #endif return -EBUSY; } uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, unsigned int flags) { uint16_t count; BCI_LOCALS; if (dev_priv->status_ptr) { /* coordinate with Xserver */ count = dev_priv->status_ptr[1023]; if (count < dev_priv->event_counter) dev_priv->event_wrap++; } else { count = dev_priv->event_counter; } count = (count + 1) & 0xffff; if (count == 0) { count++; /* See the comment above savage_wait_event_*. 
*/ dev_priv->event_wrap++; } dev_priv->event_counter = count; if (dev_priv->status_ptr) dev_priv->status_ptr[1023] = (uint32_t) count; if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) { unsigned int wait_cmd = BCI_CMD_WAIT; if ((flags & SAVAGE_WAIT_2D)) wait_cmd |= BCI_CMD_WAIT_2D; if ((flags & SAVAGE_WAIT_3D)) wait_cmd |= BCI_CMD_WAIT_3D; BEGIN_BCI(2); BCI_WRITE(wait_cmd); } else { BEGIN_BCI(1); } BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count); return count; } /* * Freelist management */ static int savage_freelist_init(struct drm_device * dev) { drm_savage_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_savage_buf_priv_t *entry; int i; DRM_DEBUG("count=%d\n", dma->buf_count); dev_priv->head.next = &dev_priv->tail; dev_priv->head.prev = NULL; dev_priv->head.buf = NULL; dev_priv->tail.next = NULL; dev_priv->tail.prev = &dev_priv->head; dev_priv->tail.buf = NULL; for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[i]; entry = buf->dev_private; SET_AGE(&entry->age, 0, 0); entry->buf = buf; entry->next = dev_priv->head.next; entry->prev = &dev_priv->head; dev_priv->head.next->prev = entry; dev_priv->head.next = entry; } return 0; } static struct drm_buf *savage_freelist_get(struct drm_device * dev) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *tail = dev_priv->tail.prev; uint16_t event; unsigned int wrap; DRM_DEBUG("\n"); UPDATE_EVENT_COUNTER(); if (dev_priv->status_ptr) event = dev_priv->status_ptr[1] & 0xffff; else event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; wrap = dev_priv->event_wrap; if (event > dev_priv->event_counter) wrap--; /* hardware hasn't passed the last wrap yet */ DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); DRM_DEBUG(" head=0x%04x %d\n", event, wrap); if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) { drm_savage_buf_priv_t *next = tail->next; drm_savage_buf_priv_t *prev = tail->prev; prev->next = next; 
next->prev = prev; tail->next = tail->prev = NULL; return tail->buf; } DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf); return NULL; } void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next; DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap); if (entry->next != NULL || entry->prev != NULL) { DRM_ERROR("entry already on freelist.\n"); return; } prev = &dev_priv->head; next = prev->next; prev->next = entry; next->prev = entry; entry->prev = prev; entry->next = next; } /* * Command DMA */ static int savage_dma_init(drm_savage_private_t * dev_priv) { unsigned int i; dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / (SAVAGE_DMA_PAGE_SIZE * 4); dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages, GFP_KERNEL); if (dev_priv->dma_pages == NULL) return -ENOMEM; for (i = 0; i < dev_priv->nr_dma_pages; ++i) { SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } SET_AGE(&dev_priv->last_dma_age, 0, 0); dev_priv->first_dma_page = 0; dev_priv->current_dma_page = 0; return 0; } void savage_dma_reset(drm_savage_private_t * dev_priv) { uint16_t event; unsigned int wrap, i; event = savage_bci_emit_event(dev_priv, 0); wrap = dev_priv->event_wrap; for (i = 0; i < dev_priv->nr_dma_pages; ++i) { SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } SET_AGE(&dev_priv->last_dma_age, event, wrap); dev_priv->first_dma_page = dev_priv->current_dma_page = 0; } void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page) { uint16_t event; unsigned int wrap; /* Faked DMA buffer pages don't age. 
*/ if (dev_priv->cmd_dma == &dev_priv->fake_dma) return; UPDATE_EVENT_COUNTER(); if (dev_priv->status_ptr) event = dev_priv->status_ptr[1] & 0xffff; else event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; wrap = dev_priv->event_wrap; if (event > dev_priv->event_counter) wrap--; /* hardware hasn't passed the last wrap yet */ if (dev_priv->dma_pages[page].age.wrap > wrap || (dev_priv->dma_pages[page].age.wrap == wrap && dev_priv->dma_pages[page].age.event > event)) { if (dev_priv->wait_evnt(dev_priv, dev_priv->dma_pages[page].age.event) < 0) DRM_ERROR("wait_evnt failed!\n"); } } uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n) { unsigned int cur = dev_priv->current_dma_page; unsigned int rest = SAVAGE_DMA_PAGE_SIZE - dev_priv->dma_pages[cur].used; unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; uint32_t *dma_ptr; unsigned int i; DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n", cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); if (cur + nr_pages < dev_priv->nr_dma_pages) { dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; if (n < rest) rest = n; dev_priv->dma_pages[cur].used += rest; n -= rest; cur++; } else { dev_priv->dma_flush(dev_priv); nr_pages = (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; for (i = cur; i < dev_priv->nr_dma_pages; ++i) { dev_priv->dma_pages[i].age = dev_priv->last_dma_age; dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle; dev_priv->first_dma_page = cur = 0; } for (i = cur; nr_pages > 0; ++i, --nr_pages) { #if SAVAGE_DMA_DEBUG if (dev_priv->dma_pages[i].used) { DRM_ERROR("unflushed page %u: used=%u\n", i, dev_priv->dma_pages[i].used); } #endif if (n > SAVAGE_DMA_PAGE_SIZE) dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE; else dev_priv->dma_pages[i].used = n; n -= SAVAGE_DMA_PAGE_SIZE; } dev_priv->current_dma_page = --i; 
DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n", i, dev_priv->dma_pages[i].used, n); savage_dma_wait(dev_priv, dev_priv->current_dma_page); return dma_ptr; } static void savage_dma_flush(drm_savage_private_t * dev_priv) { unsigned int first = dev_priv->first_dma_page; unsigned int cur = dev_priv->current_dma_page; uint16_t event; unsigned int wrap, pad, align, len, i; unsigned long phys_addr; BCI_LOCALS; if (first == cur && dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed) return; /* pad length to multiples of 2 entries * align start of next DMA block to multiles of 8 entries */ pad = -dev_priv->dma_pages[cur].used & 1; align = -(dev_priv->dma_pages[cur].used + pad) & 7; DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, " "pad=%u, align=%u\n", first, cur, dev_priv->dma_pages[first].flushed, dev_priv->dma_pages[cur].used, pad, align); /* pad with noops */ if (pad) { uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; dev_priv->dma_pages[cur].used += pad; while (pad != 0) { *dma_ptr++ = BCI_CMD_WAIT; pad--; } } DRM_MEMORYBARRIER(); /* do flush ... 
*/ phys_addr = dev_priv->cmd_dma->offset + (first * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[first].flushed) * 4; len = (cur - first) * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed; DRM_DEBUG("phys_addr=%lx, len=%u\n", phys_addr | dev_priv->dma_type, len); BEGIN_BCI(3); BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1); BCI_WRITE(phys_addr | dev_priv->dma_type); BCI_DMA(len); /* fix alignment of the start of the next block */ dev_priv->dma_pages[cur].used += align; /* age DMA pages */ event = savage_bci_emit_event(dev_priv, 0); wrap = dev_priv->event_wrap; for (i = first; i < cur; ++i) { SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } /* age the current page only when it's full */ if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) { SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap); dev_priv->dma_pages[cur].used = 0; dev_priv->dma_pages[cur].flushed = 0; /* advance to next page */ cur++; if (cur == dev_priv->nr_dma_pages) cur = 0; dev_priv->first_dma_page = dev_priv->current_dma_page = cur; } else { dev_priv->first_dma_page = cur; dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used; } SET_AGE(&dev_priv->last_dma_age, event, wrap); DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur, dev_priv->dma_pages[cur].used, dev_priv->dma_pages[cur].flushed); } static void savage_fake_dma_flush(drm_savage_private_t * dev_priv) { unsigned int i, j; BCI_LOCALS; if (dev_priv->first_dma_page == dev_priv->current_dma_page && dev_priv->dma_pages[dev_priv->current_dma_page].used == 0) return; DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n", dev_priv->first_dma_page, dev_priv->current_dma_page, dev_priv->dma_pages[dev_priv->current_dma_page].used); for (i = dev_priv->first_dma_page; i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used; ++i) { uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + i * SAVAGE_DMA_PAGE_SIZE; #if 
SAVAGE_DMA_DEBUG /* Sanity check: all pages except the last one must be full. */ if (i < dev_priv->current_dma_page && dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) { DRM_ERROR("partial DMA page %u: used=%u", i, dev_priv->dma_pages[i].used); } #endif BEGIN_BCI(dev_priv->dma_pages[i].used); for (j = 0; j < dev_priv->dma_pages[i].used; ++j) { BCI_WRITE(dma_ptr[j]); } dev_priv->dma_pages[i].used = 0; } /* reset to first page */ dev_priv->first_dma_page = dev_priv->current_dma_page = 0; } int savage_driver_load(struct drm_device *dev, unsigned long chipset) { drm_savage_private_t *dev_priv; dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; dev->dev_private = (void *)dev_priv; dev_priv->chipset = (enum savage_family)chipset; pci_set_master(dev->pdev); return 0; } /* * Initialize mappings. On Savage4 and SavageIX the alignment * and size of the aperture is not suitable for automatic MTRR setup * in drm_addmap. Therefore we add them manually before the maps are * initialized, and tear them down on last close. */ int savage_driver_firstopen(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; unsigned long mmio_base, fb_base, fb_size, aperture_base; /* fb_rsrc and aper_rsrc aren't really used currently, but still exist * in case we decide we need information on the BAR for BSD in the * future. */ unsigned int fb_rsrc, aper_rsrc; int ret = 0; dev_priv->mtrr[0].handle = -1; dev_priv->mtrr[1].handle = -1; dev_priv->mtrr[2].handle = -1; if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { fb_rsrc = 0; fb_base = pci_resource_start(dev->pdev, 0); fb_size = SAVAGE_FB_SIZE_S3; mmio_base = fb_base + SAVAGE_FB_SIZE_S3; aper_rsrc = 0; aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; /* this should always be true */ if (pci_resource_len(dev->pdev, 0) == 0x08000000) { /* Don't make MMIO write-cobining! We need 3 * MTRRs. 
*/ dev_priv->mtrr[0].base = fb_base; dev_priv->mtrr[0].size = 0x01000000; dev_priv->mtrr[0].handle = drm_mtrr_add(dev_priv->mtrr[0].base, dev_priv->mtrr[0].size, DRM_MTRR_WC); dev_priv->mtrr[1].base = fb_base + 0x02000000; dev_priv->mtrr[1].size = 0x02000000; dev_priv->mtrr[1].handle = drm_mtrr_add(dev_priv->mtrr[1].base, dev_priv->mtrr[1].size, DRM_MTRR_WC); dev_priv->mtrr[2].base = fb_base + 0x04000000; dev_priv->mtrr[2].size = 0x04000000; dev_priv->mtrr[2].handle = drm_mtrr_add(dev_priv->mtrr[2].base, dev_priv->mtrr[2].size, DRM_MTRR_WC); } else { DRM_ERROR("strange pci_resource_len %08llx\n", (unsigned long long) pci_resource_len(dev->pdev, 0)); } } else if (dev_priv->chipset != S3_SUPERSAVAGE && dev_priv->chipset != S3_SAVAGE2000) { mmio_base = pci_resource_start(dev->pdev, 0); fb_rsrc = 1; fb_base = pci_resource_start(dev->pdev, 1); fb_size = SAVAGE_FB_SIZE_S4; aper_rsrc = 1; aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; /* this should always be true */ if (pci_resource_len(dev->pdev, 1) == 0x08000000) { /* Can use one MTRR to cover both fb and * aperture. */ dev_priv->mtrr[0].base = fb_base; dev_priv->mtrr[0].size = 0x08000000; dev_priv->mtrr[0].handle = drm_mtrr_add(dev_priv->mtrr[0].base, dev_priv->mtrr[0].size, DRM_MTRR_WC); } else { DRM_ERROR("strange pci_resource_len %08llx\n", (unsigned long long) pci_resource_len(dev->pdev, 1)); } } else { mmio_base = pci_resource_start(dev->pdev, 0); fb_rsrc = 1; fb_base = pci_resource_start(dev->pdev, 1); fb_size = pci_resource_len(dev->pdev, 1); aper_rsrc = 2; aperture_base = pci_resource_start(dev->pdev, 2); /* Automatic MTRR setup will do the right thing. 
*/ } ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); if (ret) return ret; ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &dev_priv->fb); if (ret) return ret; ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE, _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &dev_priv->aperture); return ret; } /* * Delete MTRRs and free device-private data. */ void savage_driver_lastclose(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; int i; for (i = 0; i < 3; ++i) if (dev_priv->mtrr[i].handle >= 0) drm_mtrr_del(dev_priv->mtrr[i].handle, dev_priv->mtrr[i].base, dev_priv->mtrr[i].size, DRM_MTRR_WC); } int savage_driver_unload(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; kfree(dev_priv); return 0; } static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) { drm_savage_private_t *dev_priv = dev->dev_private; if (init->fb_bpp != 16 && init->fb_bpp != 32) { DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); return -EINVAL; } if (init->depth_bpp != 16 && init->depth_bpp != 32) { DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); return -EINVAL; } if (init->dma_type != SAVAGE_DMA_AGP && init->dma_type != SAVAGE_DMA_PCI) { DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); return -EINVAL; } dev_priv->cob_size = init->cob_size; dev_priv->bci_threshold_lo = init->bci_threshold_lo; dev_priv->bci_threshold_hi = init->bci_threshold_hi; dev_priv->dma_type = init->dma_type; dev_priv->fb_bpp = init->fb_bpp; dev_priv->front_offset = init->front_offset; dev_priv->front_pitch = init->front_pitch; dev_priv->back_offset = init->back_offset; dev_priv->back_pitch = init->back_pitch; dev_priv->depth_bpp = init->depth_bpp; dev_priv->depth_offset = init->depth_offset; dev_priv->depth_pitch = init->depth_pitch; dev_priv->texture_offset = init->texture_offset; dev_priv->texture_size = init->texture_size; 
dev_priv->sarea = drm_getsarea(dev); if (!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } if (init->status_offset != 0) { dev_priv->status = drm_core_findmap(dev, init->status_offset); if (!dev_priv->status) { DRM_ERROR("could not find shadow status region!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } } else { dev_priv->status = NULL; } if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("could not find DMA buffer region!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } drm_core_ioremap(dev->agp_buffer_map, dev); if (!dev->agp_buffer_map->handle) { DRM_ERROR("failed to ioremap DMA buffer region!\n"); savage_do_cleanup_bci(dev); return -ENOMEM; } } if (init->agp_textures_offset) { dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset); if (!dev_priv->agp_textures) { DRM_ERROR("could not find agp texture region!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } } else { dev_priv->agp_textures = NULL; } if (init->cmd_dma_offset) { if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { DRM_ERROR("command DMA not supported on " "Savage3D/MX/IX.\n"); savage_do_cleanup_bci(dev); return -EINVAL; } if (dev->dma && dev->dma->buflist) { DRM_ERROR("command and vertex DMA not supported " "at the same time.\n"); savage_do_cleanup_bci(dev); return -EINVAL; } dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); if (!dev_priv->cmd_dma) { DRM_ERROR("could not find command DMA region!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } if (dev_priv->dma_type == SAVAGE_DMA_AGP) { if (dev_priv->cmd_dma->type != _DRM_AGP) { DRM_ERROR("AGP command DMA region is not a " "_DRM_AGP map!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } drm_core_ioremap(dev_priv->cmd_dma, dev); if (!dev_priv->cmd_dma->handle) { DRM_ERROR("failed to ioremap 
command " "DMA region!\n"); savage_do_cleanup_bci(dev); return -ENOMEM; } } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { DRM_ERROR("PCI command DMA region is not a " "_DRM_CONSISTENT map!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } } else { dev_priv->cmd_dma = NULL; } dev_priv->dma_flush = savage_dma_flush; if (!dev_priv->cmd_dma) { DRM_DEBUG("falling back to faked command DMA.\n"); dev_priv->fake_dma.offset = 0; dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE; dev_priv->fake_dma.type = _DRM_SHM; dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE, GFP_KERNEL); if (!dev_priv->fake_dma.handle) { DRM_ERROR("could not allocate faked DMA buffer!\n"); savage_do_cleanup_bci(dev); return -ENOMEM; } dev_priv->cmd_dma = &dev_priv->fake_dma; dev_priv->dma_flush = savage_fake_dma_flush; } dev_priv->sarea_priv = (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle + init->sarea_priv_offset); /* setup bitmap descriptors */ { unsigned int color_tile_format; unsigned int depth_tile_format; unsigned int front_stride, back_stride, depth_stride; if (dev_priv->chipset <= S3_SAVAGE4) { color_tile_format = dev_priv->fb_bpp == 16 ? SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; depth_tile_format = dev_priv->depth_bpp == 16 ? 
SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; } else { color_tile_format = SAVAGE_BD_TILE_DEST; depth_tile_format = SAVAGE_BD_TILE_DEST; } front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8); back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp / 8); dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE | (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | (color_tile_format << SAVAGE_BD_TILE_SHIFT); dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE | (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | (color_tile_format << SAVAGE_BD_TILE_SHIFT); dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE | (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) | (depth_tile_format << SAVAGE_BD_TILE_SHIFT); } /* setup status and bci ptr */ dev_priv->event_counter = 0; dev_priv->event_wrap = 0; dev_priv->bci_ptr = (volatile uint32_t *) ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET); if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D; } else { dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4; } if (dev_priv->status != NULL) { dev_priv->status_ptr = (volatile uint32_t *)dev_priv->status->handle; dev_priv->wait_fifo = savage_bci_wait_fifo_shadow; dev_priv->wait_evnt = savage_bci_wait_event_shadow; dev_priv->status_ptr[1023] = dev_priv->event_counter; } else { dev_priv->status_ptr = NULL; if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { dev_priv->wait_fifo = savage_bci_wait_fifo_s3d; } else { dev_priv->wait_fifo = savage_bci_wait_fifo_s4; } dev_priv->wait_evnt = savage_bci_wait_event_reg; } /* cliprect functions */ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d; else dev_priv->emit_clip_rect = savage_emit_clip_rect_s4; if (savage_freelist_init(dev) < 0) { DRM_ERROR("could not initialize freelist\n"); savage_do_cleanup_bci(dev); return -ENOMEM; } if (savage_dma_init(dev_priv) < 0) { DRM_ERROR("could not 
initialize command DMA\n"); savage_do_cleanup_bci(dev); return -ENOMEM; } return 0; } static int savage_do_cleanup_bci(struct drm_device * dev) { drm_savage_private_t *dev_priv = dev->dev_private; if (dev_priv->cmd_dma == &dev_priv->fake_dma) { kfree(dev_priv->fake_dma.handle); } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && dev_priv->cmd_dma->type == _DRM_AGP && dev_priv->dma_type == SAVAGE_DMA_AGP) drm_core_ioremapfree(dev_priv->cmd_dma, dev); if (dev_priv->dma_type == SAVAGE_DMA_AGP && dev->agp_buffer_map && dev->agp_buffer_map->handle) { drm_core_ioremapfree(dev->agp_buffer_map, dev); /* make sure the next instance (which may be running * in PCI mode) doesn't try to use an old * agp_buffer_map. */ dev->agp_buffer_map = NULL; } kfree(dev_priv->dma_pages); return 0; } static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_savage_init_t *init = data; LOCK_TEST_WITH_RETURN(dev, file_priv); switch (init->func) { case SAVAGE_INIT_BCI: return savage_do_init_bci(dev, init); case SAVAGE_CLEANUP_BCI: return savage_do_cleanup_bci(dev); } return -EINVAL; } static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_event_emit_t *event = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); event->count = savage_bci_emit_event(dev_priv, event->flags); event->count |= dev_priv->event_wrap << 16; return 0; } static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_event_wait_t *event = data; unsigned int event_e, hw_e; unsigned int event_w, hw_w; DRM_DEBUG("\n"); UPDATE_EVENT_COUNTER(); if (dev_priv->status_ptr) hw_e = dev_priv->status_ptr[1] & 0xffff; else hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; hw_w = dev_priv->event_wrap; if (hw_e > dev_priv->event_counter) hw_w--; /* hardware hasn't passed the last 
wrap yet */ event_e = event->count & 0xffff; event_w = event->count >> 16; /* Don't need to wait if * - event counter wrapped since the event was emitted or * - the hardware has advanced up to or over the event to wait for. */ if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e)) return 0; else return dev_priv->wait_evnt(dev_priv, event_e); } /* * DMA buffer management */ static int savage_bci_get_buffers(struct drm_device *dev, struct drm_file *file_priv, struct drm_dma *d) { struct drm_buf *buf; int i; for (i = d->granted_count; i < d->request_count; i++) { buf = savage_freelist_get(dev); if (!buf) return -EAGAIN; buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) return -EFAULT; d->granted_count++; } return 0; } int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; struct drm_dma *d = data; int ret = 0; LOCK_TEST_WITH_RETURN(dev, file_priv); /* Please don't send us buffers. */ if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. 
*/ if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } d->granted_count = 0; if (d->request_count) { ret = savage_bci_get_buffers(dev, file_priv, d); } return ret; } void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; drm_savage_private_t *dev_priv = dev->dev_private; int release_idlelock = 0; int i; if (!dma) return; if (!dev_priv) return; if (!dma->buflist) return; if (file_priv->master && file_priv->master->lock.hw_lock) { drm_idlelock_take(&file_priv->master->lock); release_idlelock = 1; } for (i = 0; i < dma->buf_count; i++) { struct drm_buf *buf = dma->buflist[i]; drm_savage_buf_priv_t *buf_priv = buf->dev_private; if (buf->file_priv == file_priv && buf_priv && buf_priv->next == NULL && buf_priv->prev == NULL) { uint16_t event; DRM_DEBUG("reclaimed from client\n"); event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); savage_freelist_put(dev, buf); } } if (release_idlelock) drm_idlelock_release(&file_priv->master->lock); } struct drm_ioctl_desc savage_ioctls[] = { DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), }; int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
gpl-2.0
prasidh09/cse506
unionfs-3.10.y/drivers/clk/clk-gate.c
2295
3740
/* * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Gated clock implementation */ #include <linux/clk-provider.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/err.h> #include <linux/string.h> /** * DOC: basic gatable clock which can gate and ungate it's ouput * * Traits of this clock: * prepare - clk_(un)prepare only ensures parent is (un)prepared * enable - clk_enable and clk_disable are functional & control gating * rate - inherits rate from parent. No clk_set_rate support * parent - fixed parent. No clk_set_parent support */ #define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw) /* * It works on following logic: * * For enabling clock, enable = 1 * set2dis = 1 -> clear bit -> set = 0 * set2dis = 0 -> set bit -> set = 1 * * For disabling clock, enable = 0 * set2dis = 1 -> set bit -> set = 1 * set2dis = 0 -> clear bit -> set = 0 * * So, result is always: enable xor set2dis. */ static void clk_gate_endisable(struct clk_hw *hw, int enable) { struct clk_gate *gate = to_clk_gate(hw); int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 
1 : 0; unsigned long flags = 0; u32 reg; set ^= enable; if (gate->lock) spin_lock_irqsave(gate->lock, flags); reg = readl(gate->reg); if (set) reg |= BIT(gate->bit_idx); else reg &= ~BIT(gate->bit_idx); writel(reg, gate->reg); if (gate->lock) spin_unlock_irqrestore(gate->lock, flags); } static int clk_gate_enable(struct clk_hw *hw) { clk_gate_endisable(hw, 1); return 0; } static void clk_gate_disable(struct clk_hw *hw) { clk_gate_endisable(hw, 0); } static int clk_gate_is_enabled(struct clk_hw *hw) { u32 reg; struct clk_gate *gate = to_clk_gate(hw); reg = readl(gate->reg); /* if a set bit disables this clk, flip it before masking */ if (gate->flags & CLK_GATE_SET_TO_DISABLE) reg ^= BIT(gate->bit_idx); reg &= BIT(gate->bit_idx); return reg ? 1 : 0; } const struct clk_ops clk_gate_ops = { .enable = clk_gate_enable, .disable = clk_gate_disable, .is_enabled = clk_gate_is_enabled, }; EXPORT_SYMBOL_GPL(clk_gate_ops); /** * clk_register_gate - register a gate clock with the clock framework * @dev: device that is registering this clock * @name: name of this clock * @parent_name: name of this clock's parent * @flags: framework-specific flags for this clock * @reg: register address to control gating of this clock * @bit_idx: which bit in the register controls gating of this clock * @clk_gate_flags: gate-specific flags for this clock * @lock: shared register lock for this clock */ struct clk *clk_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, u8 clk_gate_flags, spinlock_t *lock) { struct clk_gate *gate; struct clk *clk; struct clk_init_data init; /* allocate the gate */ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL); if (!gate) { pr_err("%s: could not allocate gated clk\n", __func__); return ERR_PTR(-ENOMEM); } init.name = name; init.ops = &clk_gate_ops; init.flags = flags | CLK_IS_BASIC; init.parent_names = (parent_name ? &parent_name: NULL); init.num_parents = (parent_name ? 
1 : 0); /* struct clk_gate assignments */ gate->reg = reg; gate->bit_idx = bit_idx; gate->flags = clk_gate_flags; gate->lock = lock; gate->hw.init = &init; clk = clk_register(dev, &gate->hw); if (IS_ERR(clk)) kfree(gate); return clk; }
gpl-2.0
hvaibhav/beagle-dev
security/keys/trusted.c
2551
28264
/* * Copyright (C) 2010 IBM Corporation * * Author: * David Safford <safford@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2 of the License. * * See Documentation/security/keys-trusted-encrypted.txt */ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/parser.h> #include <linux/string.h> #include <linux/err.h> #include <keys/user-type.h> #include <keys/trusted-type.h> #include <linux/key-type.h> #include <linux/rcupdate.h> #include <linux/crypto.h> #include <crypto/hash.h> #include <crypto/sha.h> #include <linux/capability.h> #include <linux/tpm.h> #include <linux/tpm_command.h> #include "trusted.h" static const char hmac_alg[] = "hmac(sha1)"; static const char hash_alg[] = "sha1"; struct sdesc { struct shash_desc shash; char ctx[]; }; static struct crypto_shash *hashalg; static struct crypto_shash *hmacalg; static struct sdesc *init_sdesc(struct crypto_shash *alg) { struct sdesc *sdesc; int size; size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); sdesc = kmalloc(size, GFP_KERNEL); if (!sdesc) return ERR_PTR(-ENOMEM); sdesc->shash.tfm = alg; sdesc->shash.flags = 0x0; return sdesc; } static int TSS_sha1(const unsigned char *data, unsigned int datalen, unsigned char *digest) { struct sdesc *sdesc; int ret; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); kfree(sdesc); return ret; } static int TSS_rawhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, ...) 
{ struct sdesc *sdesc; va_list argp; unsigned int dlen; unsigned char *data; int ret; sdesc = init_sdesc(hmacalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hmac_alg); return PTR_ERR(sdesc); } ret = crypto_shash_setkey(hmacalg, key, keylen); if (ret < 0) goto out; ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; va_start(argp, keylen); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; data = va_arg(argp, unsigned char *); if (data == NULL) { ret = -EINVAL; break; } ret = crypto_shash_update(&sdesc->shash, data, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, digest); out: kfree(sdesc); return ret; } /* * calculate authorization info fields to send to TPM */ static int TSS_authhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, unsigned char *h1, unsigned char *h2, unsigned char h3, ...) { unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned char *data; unsigned char c; int ret; va_list argp; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } c = h3; ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; va_start(argp, h3); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; data = va_arg(argp, unsigned char *); if (!data) { ret = -EINVAL; break; } ret = crypto_shash_update(&sdesc->shash, data, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (!ret) ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, h1, TPM_NONCE_SIZE, h2, 1, &c, 0, 0); out: kfree(sdesc); return ret; } /* * verify the AUTH1_COMMAND (Seal) result from TPM */ static int TSS_checkhmac1(unsigned char *buffer, const uint32_t command, const unsigned char *ononce, const unsigned char *key, unsigned int keylen, ...) 
{ uint32_t bufsize; uint16_t tag; uint32_t ordinal; uint32_t result; unsigned char *enonce; unsigned char *continueflag; unsigned char *authdata; unsigned char testhmac[SHA1_DIGEST_SIZE]; unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned int dpos; va_list argp; int ret; bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); tag = LOAD16(buffer, 0); ordinal = command; result = LOAD32N(buffer, TPM_RETURN_OFFSET); if (tag == TPM_TAG_RSP_COMMAND) return 0; if (tag != TPM_TAG_RSP_AUTH1_COMMAND) return -EINVAL; authdata = buffer + bufsize - SHA1_DIGEST_SIZE; continueflag = authdata - 1; enonce = continueflag - TPM_NONCE_SIZE; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, sizeof result); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, sizeof ordinal); if (ret < 0) goto out; va_start(argp, keylen); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; dpos = va_arg(argp, unsigned int); ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (ret < 0) goto out; ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce, 1, continueflag, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: kfree(sdesc); return ret; } /* * verify the AUTH2_COMMAND (unseal) result from TPM */ static int TSS_checkhmac2(unsigned char *buffer, const uint32_t command, const unsigned char *ononce, const unsigned char *key1, unsigned int keylen1, const unsigned char *key2, unsigned int keylen2, ...) 
{ uint32_t bufsize; uint16_t tag; uint32_t ordinal; uint32_t result; unsigned char *enonce1; unsigned char *continueflag1; unsigned char *authdata1; unsigned char *enonce2; unsigned char *continueflag2; unsigned char *authdata2; unsigned char testhmac1[SHA1_DIGEST_SIZE]; unsigned char testhmac2[SHA1_DIGEST_SIZE]; unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned int dpos; va_list argp; int ret; bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); tag = LOAD16(buffer, 0); ordinal = command; result = LOAD32N(buffer, TPM_RETURN_OFFSET); if (tag == TPM_TAG_RSP_COMMAND) return 0; if (tag != TPM_TAG_RSP_AUTH2_COMMAND) return -EINVAL; authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1 + SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE); authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE); continueflag1 = authdata1 - 1; continueflag2 = authdata2 - 1; enonce1 = continueflag1 - TPM_NONCE_SIZE; enonce2 = continueflag2 - TPM_NONCE_SIZE; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, sizeof result); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, sizeof ordinal); if (ret < 0) goto out; va_start(argp, keylen2); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; dpos = va_arg(argp, unsigned int); ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (ret < 0) goto out; ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce1, TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) { ret = -EINVAL; goto out; } ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE, paramdigest, 
TPM_NONCE_SIZE, enonce2, TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: kfree(sdesc); return ret; } /* * For key specific tpm requests, we will generate and send our * own TPM command packets using the drivers send function. */ static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd, size_t buflen) { int rc; dump_tpm_buf(cmd); rc = tpm_send(chip_num, cmd, buflen); dump_tpm_buf(cmd); if (rc > 0) /* Can't return positive return codes values to keyctl */ rc = -EPERM; return rc; } /* * Lock a trusted key, by extending a selected PCR. * * Prevents a trusted key that is sealed to PCRs from being accessed. * This uses the tpm driver's extend function. */ static int pcrlock(const int pcrnum) { unsigned char hash[SHA1_DIGEST_SIZE]; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE); if (ret != SHA1_DIGEST_SIZE) return ret; return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? 
-EINVAL : 0; } /* * Create an object specific authorisation protocol (OSAP) session */ static int osap(struct tpm_buf *tb, struct osapsess *s, const unsigned char *key, uint16_t type, uint32_t handle) { unsigned char enonce[TPM_NONCE_SIZE]; unsigned char ononce[TPM_NONCE_SIZE]; int ret; ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE); if (ret != TPM_NONCE_SIZE) return ret; INIT_BUF(tb); store16(tb, TPM_TAG_RQU_COMMAND); store32(tb, TPM_OSAP_SIZE); store32(tb, TPM_ORD_OSAP); store16(tb, type); store32(tb, handle); storebytes(tb, ononce, TPM_NONCE_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) return ret; s->handle = LOAD32(tb->data, TPM_DATA_OFFSET); memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]), TPM_NONCE_SIZE); memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) + TPM_NONCE_SIZE]), TPM_NONCE_SIZE); return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce, 0, 0); } /* * Create an object independent authorisation protocol (oiap) session */ static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) { int ret; INIT_BUF(tb); store16(tb, TPM_TAG_RQU_COMMAND); store32(tb, TPM_OIAP_SIZE); store32(tb, TPM_ORD_OIAP); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) return ret; *handle = LOAD32(tb->data, TPM_DATA_OFFSET); memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)], TPM_NONCE_SIZE); return 0; } struct tpm_digests { unsigned char encauth[SHA1_DIGEST_SIZE]; unsigned char pubauth[SHA1_DIGEST_SIZE]; unsigned char xorwork[SHA1_DIGEST_SIZE * 2]; unsigned char xorhash[SHA1_DIGEST_SIZE]; unsigned char nonceodd[TPM_NONCE_SIZE]; }; /* * Have the TPM seal(encrypt) the trusted key, possibly based on * Platform Configuration Registers (PCRs). AUTH1 for sealing key. 
*/ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype, uint32_t keyhandle, const unsigned char *keyauth, const unsigned char *data, uint32_t datalen, unsigned char *blob, uint32_t *bloblen, const unsigned char *blobauth, const unsigned char *pcrinfo, uint32_t pcrinfosize) { struct osapsess sess; struct tpm_digests *td; unsigned char cont; uint32_t ordinal; uint32_t pcrsize; uint32_t datsize; int sealinfosize; int encdatasize; int storedsize; int ret; int i; /* alloc some work space for all the hashes */ td = kmalloc(sizeof *td, GFP_KERNEL); if (!td) return -ENOMEM; /* get session for sealing key */ ret = osap(tb, &sess, keyauth, keytype, keyhandle); if (ret < 0) goto out; dump_sess(&sess); /* calculate encrypted authorization value */ memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE); memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE); ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash); if (ret < 0) goto out; ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE); if (ret != TPM_NONCE_SIZE) goto out; ordinal = htonl(TPM_ORD_SEAL); datsize = htonl(datalen); pcrsize = htonl(pcrinfosize); cont = 0; /* encrypt data authorization key */ for (i = 0; i < SHA1_DIGEST_SIZE; ++i) td->encauth[i] = td->xorhash[i] ^ blobauth[i]; /* calculate authorization HMAC value */ if (pcrinfosize == 0) { /* no pcr info specified */ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE, sess.enonce, td->nonceodd, cont, sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE, td->encauth, sizeof(uint32_t), &pcrsize, sizeof(uint32_t), &datsize, datalen, data, 0, 0); } else { /* pcr info specified */ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE, sess.enonce, td->nonceodd, cont, sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE, td->encauth, sizeof(uint32_t), &pcrsize, pcrinfosize, pcrinfo, sizeof(uint32_t), &datsize, datalen, data, 0, 0); } if (ret < 0) goto out; /* build and send the TPM request packet */ INIT_BUF(tb); store16(tb, 
TPM_TAG_RQU_AUTH1_COMMAND); store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen); store32(tb, TPM_ORD_SEAL); store32(tb, keyhandle); storebytes(tb, td->encauth, SHA1_DIGEST_SIZE); store32(tb, pcrinfosize); storebytes(tb, pcrinfo, pcrinfosize); store32(tb, datalen); storebytes(tb, data, datalen); store32(tb, sess.handle); storebytes(tb, td->nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) goto out; /* calculate the size of the returned Blob */ sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t)); encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize); storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize + sizeof(uint32_t) + encdatasize; /* check the HMAC in the response */ ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret, SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0, 0); /* copy the returned blob to caller */ if (!ret) { memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize); *bloblen = storedsize; } out: kfree(td); return ret; } /* * use the AUTH2_COMMAND form of unseal, to authorize both key and blob */ static int tpm_unseal(struct tpm_buf *tb, uint32_t keyhandle, const unsigned char *keyauth, const unsigned char *blob, int bloblen, const unsigned char *blobauth, unsigned char *data, unsigned int *datalen) { unsigned char nonceodd[TPM_NONCE_SIZE]; unsigned char enonce1[TPM_NONCE_SIZE]; unsigned char enonce2[TPM_NONCE_SIZE]; unsigned char authdata1[SHA1_DIGEST_SIZE]; unsigned char authdata2[SHA1_DIGEST_SIZE]; uint32_t authhandle1 = 0; uint32_t authhandle2 = 0; unsigned char cont = 0; uint32_t ordinal; uint32_t keyhndl; int ret; /* sessions for unsealing key and data */ ret = oiap(tb, &authhandle1, enonce1); if (ret < 0) { pr_info("trusted_key: oiap failed (%d)\n", ret); return ret; } ret = oiap(tb, &authhandle2, enonce2); if (ret < 0) { pr_info("trusted_key: oiap 
failed (%d)\n", ret); return ret; } ordinal = htonl(TPM_ORD_UNSEAL); keyhndl = htonl(SRKHANDLE); ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE); if (ret != TPM_NONCE_SIZE) { pr_info("trusted_key: tpm_get_random failed (%d)\n", ret); return ret; } ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE, enonce1, nonceodd, cont, sizeof(uint32_t), &ordinal, bloblen, blob, 0, 0); if (ret < 0) return ret; ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE, enonce2, nonceodd, cont, sizeof(uint32_t), &ordinal, bloblen, blob, 0, 0); if (ret < 0) return ret; /* build and send TPM request packet */ INIT_BUF(tb); store16(tb, TPM_TAG_RQU_AUTH2_COMMAND); store32(tb, TPM_UNSEAL_SIZE + bloblen); store32(tb, TPM_ORD_UNSEAL); store32(tb, keyhandle); storebytes(tb, blob, bloblen); store32(tb, authhandle1); storebytes(tb, nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, authdata1, SHA1_DIGEST_SIZE); store32(tb, authhandle2); storebytes(tb, nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, authdata2, SHA1_DIGEST_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) { pr_info("trusted_key: authhmac failed (%d)\n", ret); return ret; } *datalen = LOAD32(tb->data, TPM_DATA_OFFSET); ret = TSS_checkhmac2(tb->data, ordinal, nonceodd, keyauth, SHA1_DIGEST_SIZE, blobauth, SHA1_DIGEST_SIZE, sizeof(uint32_t), TPM_DATA_OFFSET, *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0, 0); if (ret < 0) { pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret); return ret; } memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen); return 0; } /* * Have the TPM seal(encrypt) the symmetric key */ static int key_seal(struct trusted_key_payload *p, struct trusted_key_options *o) { struct tpm_buf *tb; int ret; tb = kzalloc(sizeof *tb, GFP_KERNEL); if (!tb) return -ENOMEM; /* include migratable flag at end of sealed key */ p->key[p->key_len] = p->migratable; ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth, p->key, p->key_len + 
1, p->blob, &p->blob_len, o->blobauth, o->pcrinfo, o->pcrinfo_len); if (ret < 0) pr_info("trusted_key: srkseal failed (%d)\n", ret); kfree(tb); return ret; } /* * Have the TPM unseal(decrypt) the symmetric key */ static int key_unseal(struct trusted_key_payload *p, struct trusted_key_options *o) { struct tpm_buf *tb; int ret; tb = kzalloc(sizeof *tb, GFP_KERNEL); if (!tb) return -ENOMEM; ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len, o->blobauth, p->key, &p->key_len); if (ret < 0) pr_info("trusted_key: srkunseal failed (%d)\n", ret); else /* pull migratable flag out of sealed key */ p->migratable = p->key[--p->key_len]; kfree(tb); return ret; } enum { Opt_err = -1, Opt_new, Opt_load, Opt_update, Opt_keyhandle, Opt_keyauth, Opt_blobauth, Opt_pcrinfo, Opt_pcrlock, Opt_migratable }; static const match_table_t key_tokens = { {Opt_new, "new"}, {Opt_load, "load"}, {Opt_update, "update"}, {Opt_keyhandle, "keyhandle=%s"}, {Opt_keyauth, "keyauth=%s"}, {Opt_blobauth, "blobauth=%s"}, {Opt_pcrinfo, "pcrinfo=%s"}, {Opt_pcrlock, "pcrlock=%s"}, {Opt_migratable, "migratable=%s"}, {Opt_err, NULL} }; /* can have zero or more token= options */ static int getoptions(char *c, struct trusted_key_payload *pay, struct trusted_key_options *opt) { substring_t args[MAX_OPT_ARGS]; char *p = c; int token; int res; unsigned long handle; unsigned long lock; while ((p = strsep(&c, " \t"))) { if (*p == '\0' || *p == ' ' || *p == '\t') continue; token = match_token(p, key_tokens, args); switch (token) { case Opt_pcrinfo: opt->pcrinfo_len = strlen(args[0].from) / 2; if (opt->pcrinfo_len > MAX_PCRINFO_SIZE) return -EINVAL; res = hex2bin(opt->pcrinfo, args[0].from, opt->pcrinfo_len); if (res < 0) return -EINVAL; break; case Opt_keyhandle: res = strict_strtoul(args[0].from, 16, &handle); if (res < 0) return -EINVAL; opt->keytype = SEAL_keytype; opt->keyhandle = handle; break; case Opt_keyauth: if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) return -EINVAL; res = 
hex2bin(opt->keyauth, args[0].from, SHA1_DIGEST_SIZE); if (res < 0) return -EINVAL; break; case Opt_blobauth: if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) return -EINVAL; res = hex2bin(opt->blobauth, args[0].from, SHA1_DIGEST_SIZE); if (res < 0) return -EINVAL; break; case Opt_migratable: if (*args[0].from == '0') pay->migratable = 0; else return -EINVAL; break; case Opt_pcrlock: res = strict_strtoul(args[0].from, 10, &lock); if (res < 0) return -EINVAL; opt->pcrlock = lock; break; default: return -EINVAL; } } return 0; } /* * datablob_parse - parse the keyctl data and fill in the * payload and options structures * * On success returns 0, otherwise -EINVAL. */ static int datablob_parse(char *datablob, struct trusted_key_payload *p, struct trusted_key_options *o) { substring_t args[MAX_OPT_ARGS]; long keylen; int ret = -EINVAL; int key_cmd; char *c; /* main command */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; key_cmd = match_token(c, key_tokens, args); switch (key_cmd) { case Opt_new: /* first argument is key size */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; ret = strict_strtol(c, 10, &keylen); if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE) return -EINVAL; p->key_len = keylen; ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_new; break; case Opt_load: /* first argument is sealed blob */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; p->blob_len = strlen(c) / 2; if (p->blob_len > MAX_BLOB_SIZE) return -EINVAL; ret = hex2bin(p->blob, c, p->blob_len); if (ret < 0) return -EINVAL; ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_load; break; case Opt_update: /* all arguments are options */ ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_update; break; case Opt_err: return -EINVAL; break; } return ret; } static struct trusted_key_options *trusted_options_alloc(void) { struct trusted_key_options *options; options = kzalloc(sizeof *options, GFP_KERNEL); if 
(options) { /* set any non-zero defaults */ options->keytype = SRK_keytype; options->keyhandle = SRKHANDLE; } return options; } static struct trusted_key_payload *trusted_payload_alloc(struct key *key) { struct trusted_key_payload *p = NULL; int ret; ret = key_payload_reserve(key, sizeof *p); if (ret < 0) return p; p = kzalloc(sizeof *p, GFP_KERNEL); if (p) p->migratable = 1; /* migratable by default */ return p; } /* * trusted_instantiate - create a new trusted key * * Unseal an existing trusted blob or, for a new key, get a * random key, then seal and create a trusted key-type key, * adding it to the specified keyring. * * On success, return 0. Otherwise return errno. */ static int trusted_instantiate(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *payload = NULL; struct trusted_key_options *options = NULL; size_t datalen = prep->datalen; char *datablob; int ret = 0; int key_cmd; size_t key_len; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; options = trusted_options_alloc(); if (!options) { ret = -ENOMEM; goto out; } payload = trusted_payload_alloc(key); if (!payload) { ret = -ENOMEM; goto out; } key_cmd = datablob_parse(datablob, payload, options); if (key_cmd < 0) { ret = key_cmd; goto out; } dump_payload(payload); dump_options(options); switch (key_cmd) { case Opt_load: ret = key_unseal(payload, options); dump_payload(payload); dump_options(options); if (ret < 0) pr_info("trusted_key: key_unseal failed (%d)\n", ret); break; case Opt_new: key_len = payload->key_len; ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len); if (ret != key_len) { pr_info("trusted_key: key_create failed (%d)\n", ret); goto out; } ret = key_seal(payload, options); if (ret < 0) pr_info("trusted_key: key_seal failed (%d)\n", ret); break; default: ret = -EINVAL; goto out; } if (!ret && 
options->pcrlock) ret = pcrlock(options->pcrlock); out: kfree(datablob); kfree(options); if (!ret) rcu_assign_keypointer(key, payload); else kfree(payload); return ret; } static void trusted_rcu_free(struct rcu_head *rcu) { struct trusted_key_payload *p; p = container_of(rcu, struct trusted_key_payload, rcu); memset(p->key, 0, p->key_len); kfree(p); } /* * trusted_update - reseal an existing key with new PCR values */ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *p = key->payload.data; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; size_t datalen = prep->datalen; char *datablob; int ret = 0; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; new_o = trusted_options_alloc(); if (!new_o) { ret = -ENOMEM; goto out; } new_p = trusted_payload_alloc(key); if (!new_p) { ret = -ENOMEM; goto out; } memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; kfree(new_p); goto out; } /* copy old key values, and reseal with new pcrs */ new_p->migratable = p->migratable; new_p->key_len = p->key_len; memcpy(new_p->key, p->key, p->key_len); dump_payload(p); dump_payload(new_p); ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); kfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); kfree(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: kfree(datablob); kfree(new_o); return ret; } /* * trusted_read - copy the sealed blob data to userspace in hex. * On success, return to userspace the trusted key datablob size. 
*/ static long trusted_read(const struct key *key, char __user *buffer, size_t buflen) { struct trusted_key_payload *p; char *ascii_buf; char *bufp; int i; p = rcu_dereference_key(key); if (!p) return -EINVAL; if (!buffer || buflen <= 0) return 2 * p->blob_len; ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL); if (!ascii_buf) return -ENOMEM; bufp = ascii_buf; for (i = 0; i < p->blob_len; i++) bufp = hex_byte_pack(bufp, p->blob[i]); if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) { kfree(ascii_buf); return -EFAULT; } kfree(ascii_buf); return 2 * p->blob_len; } /* * trusted_destroy - before freeing the key, clear the decrypted data */ static void trusted_destroy(struct key *key) { struct trusted_key_payload *p = key->payload.data; if (!p) return; memset(p->key, 0, p->key_len); kfree(key->payload.data); } struct key_type key_type_trusted = { .name = "trusted", .instantiate = trusted_instantiate, .update = trusted_update, .match = user_match, .destroy = trusted_destroy, .describe = user_describe, .read = trusted_read, }; EXPORT_SYMBOL_GPL(key_type_trusted); static void trusted_shash_release(void) { if (hashalg) crypto_free_shash(hashalg); if (hmacalg) crypto_free_shash(hmacalg); } static int __init trusted_shash_alloc(void) { int ret; hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hmacalg)) { pr_info("trusted_key: could not allocate crypto %s\n", hmac_alg); return PTR_ERR(hmacalg); } hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hashalg)) { pr_info("trusted_key: could not allocate crypto %s\n", hash_alg); ret = PTR_ERR(hashalg); goto hashalg_fail; } return 0; hashalg_fail: crypto_free_shash(hmacalg); return ret; } static int __init init_trusted(void) { int ret; ret = trusted_shash_alloc(); if (ret < 0) return ret; ret = register_key_type(&key_type_trusted); if (ret < 0) trusted_shash_release(); return ret; } static void __exit cleanup_trusted(void) { trusted_shash_release(); 
unregister_key_type(&key_type_trusted); } late_initcall(init_trusted); module_exit(cleanup_trusted); MODULE_LICENSE("GPL");
gpl-2.0
MatiasBjorling/linux
drivers/block/drbd/drbd_proc.c
2551
10024
/* drbd_proc.c This file is part of DRBD by Philipp Reisner and Lars Ellenberg. Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. drbd is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. drbd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with drbd; see the file COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <asm/uaccess.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/drbd.h> #include "drbd_int.h" static int drbd_proc_open(struct inode *inode, struct file *file); static int drbd_proc_release(struct inode *inode, struct file *file); struct proc_dir_entry *drbd_proc; const struct file_operations drbd_proc_fops = { .owner = THIS_MODULE, .open = drbd_proc_open, .read = seq_read, .llseek = seq_lseek, .release = drbd_proc_release, }; void seq_printf_with_thousands_grouping(struct seq_file *seq, long v) { /* v is in kB/sec. We don't expect TiByte/sec yet. */ if (unlikely(v >= 1000000)) { /* cool: > GiByte/s */ seq_printf(seq, "%ld,", v / 1000000); v %= 1000000; seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000); } else if (likely(v >= 1000)) seq_printf(seq, "%ld,%03ld", v/1000, v % 1000); else seq_printf(seq, "%ld", v); } /*lge * progress bars shamelessly adapted from driver/md/md.c * output looks like * [=====>..............] 
33.5% (23456/123456) * finish: 2:20:20 speed: 6,345 (6,456) K/sec */ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) { unsigned long db, dt, dbdt, rt, rs_left; unsigned int res; int i, x, y; int stalled = 0; drbd_get_syncer_progress(mdev, &rs_left, &res); x = res/50; y = 20-x; seq_printf(seq, "\t["); for (i = 1; i < x; i++) seq_printf(seq, "="); seq_printf(seq, ">"); for (i = 0; i < y; i++) seq_printf(seq, "."); seq_printf(seq, "] "); if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) seq_printf(seq, "verified:"); else seq_printf(seq, "sync'ed:"); seq_printf(seq, "%3u.%u%% ", res / 10, res % 10); /* if more than a few GB, display in MB */ if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT))) seq_printf(seq, "(%lu/%lu)M", (unsigned long) Bit2KB(rs_left >> 10), (unsigned long) Bit2KB(mdev->rs_total >> 10)); else seq_printf(seq, "(%lu/%lu)K\n\t", (unsigned long) Bit2KB(rs_left), (unsigned long) Bit2KB(mdev->rs_total)); /* see drivers/md/md.c * We do not want to overflow, so the order of operands and * the * 100 / 100 trick are important. We do a +1 to be * safe against division by zero. We only estimate anyway. * * dt: time from mark until now * db: blocks written from mark until now * rt: remaining time */ /* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at * least DRBD_SYNC_MARK_STEP time before it will be modified. 
*/ /* ------------------------ ~18s average ------------------------ */ i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS; dt = (jiffies - mdev->rs_mark_time[i]) / HZ; if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) stalled = 1; if (!dt) dt++; db = mdev->rs_mark_left[i] - rs_left; rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */ seq_printf(seq, "finish: %lu:%02lu:%02lu", rt / 3600, (rt % 3600) / 60, rt % 60); dbdt = Bit2KB(db/dt); seq_printf(seq, " speed: "); seq_printf_with_thousands_grouping(seq, dbdt); seq_printf(seq, " ("); /* ------------------------- ~3s average ------------------------ */ if (proc_details >= 1) { /* this is what drbd_rs_should_slow_down() uses */ i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; dt = (jiffies - mdev->rs_mark_time[i]) / HZ; if (!dt) dt++; db = mdev->rs_mark_left[i] - rs_left; dbdt = Bit2KB(db/dt); seq_printf_with_thousands_grouping(seq, dbdt); seq_printf(seq, " -- "); } /* --------------------- long term average ---------------------- */ /* mean speed since syncer started * we do account for PausedSync periods */ dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ; if (dt == 0) dt = 1; db = mdev->rs_total - rs_left; dbdt = Bit2KB(db/dt); seq_printf_with_thousands_grouping(seq, dbdt); seq_printf(seq, ")"); if (mdev->state.conn == C_SYNC_TARGET || mdev->state.conn == C_VERIFY_S) { seq_printf(seq, " want: "); seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate); } seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : ""); if (proc_details >= 1) { /* 64 bit: * we convert to sectors in the display below. */ unsigned long bm_bits = drbd_bm_bits(mdev); unsigned long bit_pos; unsigned long long stop_sector = 0; if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) { bit_pos = bm_bits - mdev->ov_left; if (verify_can_do_stop_sector(mdev)) stop_sector = mdev->ov_stop_sector; } else bit_pos = mdev->bm_resync_fo; /* Total sectors may be slightly off for oddly * sized devices. So what. 
*/ seq_printf(seq, "\t%3d%% sector pos: %llu/%llu", (int)(bit_pos / (bm_bits/100+1)), (unsigned long long)bit_pos * BM_SECT_PER_BIT, (unsigned long long)bm_bits * BM_SECT_PER_BIT); if (stop_sector != 0 && stop_sector != ULLONG_MAX) seq_printf(seq, " stop sector: %llu", stop_sector); seq_printf(seq, "\n"); } } static void resync_dump_detail(struct seq_file *seq, struct lc_element *e) { struct bm_extent *bme = lc_entry(e, struct bm_extent, lce); seq_printf(seq, "%5d %s %s\n", bme->rs_left, bme->flags & BME_NO_WRITES ? "NO_WRITES" : "---------", bme->flags & BME_LOCKED ? "LOCKED" : "------" ); } static int drbd_seq_show(struct seq_file *seq, void *v) { int i, prev_i = -1; const char *sn; struct drbd_conf *mdev; struct net_conf *nc; char wp; static char write_ordering_chars[] = { [WO_none] = 'n', [WO_drain_io] = 'd', [WO_bdev_flush] = 'f', }; seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n", API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX, drbd_buildtag()); /* cs .. connection state ro .. node role (local/remote) ds .. disk state (local/remote) protocol various flags ns .. network send nr .. network receive dw .. disk write dr .. disk read al .. activity log write count bm .. bitmap update write count pe .. pending (waiting for ack or data reply) ua .. unack'd (still need to send ack or data reply) ap .. application requests accepted, but not yet completed ep .. number of epochs currently "on the fly", P_BARRIER_ACK pending wo .. write ordering mode currently in use oos .. known out-of-sync kB */ rcu_read_lock(); idr_for_each_entry(&minors, mdev, i) { if (prev_i != i - 1) seq_printf(seq, "\n"); prev_i = i; sn = drbd_conn_str(mdev->state.conn); if (mdev->state.conn == C_STANDALONE && mdev->state.disk == D_DISKLESS && mdev->state.role == R_SECONDARY) { seq_printf(seq, "%2d: cs:Unconfigured\n", i); } else { /* reset mdev->congestion_reason */ bdi_rw_congested(&mdev->rq_queue->backing_dev_info); nc = rcu_dereference(mdev->tconn->net_conf); wp = nc ? 
nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
			/* One status line per configured minor: connection
			 * state, roles, disk states, the flag characters and
			 * the I/O counters described in the legend comment
			 * earlier in this function. */
			seq_printf(seq,
			   "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
			   " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
			   "lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c",
			   i, sn,
			   drbd_role_str(mdev->state.role),
			   drbd_role_str(mdev->state.peer),
			   drbd_disk_str(mdev->state.disk),
			   drbd_disk_str(mdev->state.pdsk),
			   wp,
			   drbd_suspended(mdev) ? 's' : 'r',
			   mdev->state.aftr_isp ? 'a' : '-',
			   mdev->state.peer_isp ? 'p' : '-',
			   mdev->state.user_isp ? 'u' : '-',
			   mdev->congestion_reason ?: '-',
			   test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
			   mdev->send_cnt/2,
			   mdev->recv_cnt/2,
			   mdev->writ_cnt/2,
			   mdev->read_cnt/2,
			   mdev->al_writ_cnt,
			   mdev->bm_writ_cnt,
			   atomic_read(&mdev->local_cnt),
			   atomic_read(&mdev->ap_pending_cnt) +
			   atomic_read(&mdev->rs_pending_cnt),
			   atomic_read(&mdev->unacked_cnt),
			   atomic_read(&mdev->ap_bio_cnt),
			   mdev->tconn->epochs,
			   write_ordering_chars[mdev->tconn->write_ordering]
			);
			/* known out-of-sync amount, converted to KiB */
			seq_printf(seq, " oos:%llu\n",
				   Bit2KB((unsigned long long)
					   drbd_bm_total_weight(mdev)));
		}
		/* Append resync/verify progress only while one is running */
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T)
			drbd_syncer_progress(mdev, seq);

		if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) {
			/* lru-cache statistics for the resync extents and
			 * the activity log; requires holding a local-disk
			 * reference, released via put_ldev() below */
			lc_seq_printf_stats(seq, mdev->resync);
			lc_seq_printf_stats(seq, mdev->act_log);
			put_ldev(mdev);
		}

		if (proc_details >= 2) {
			if (mdev->resync) {
				lc_seq_dump_details(seq, mdev->resync, "rs_left",
						    resync_dump_detail);
			}
		}
	}
	rcu_read_unlock();

	return 0;
}

/*
 * Open handler for /proc/drbd. Takes a module reference for the lifetime
 * of the open file so the module cannot be unloaded while the seq_file
 * is in use; the reference is dropped again in drbd_proc_release() (or
 * immediately, if single_open() fails).
 */
static int drbd_proc_open(struct inode *inode, struct file *file)
{
	int err;

	if (try_module_get(THIS_MODULE)) {
		err = single_open(file, drbd_seq_show, PDE_DATA(inode));
		if (err)
			module_put(THIS_MODULE);
		return err;
	}
	return -ENODEV;
}

/* Release counterpart of drbd_proc_open(): drop the module reference. */
static int drbd_proc_release(struct inode *inode, struct file *file)
{
	module_put(THIS_MODULE);
	return single_release(inode, file);
}

/* PROC FS stuff end */
gpl-2.0
weizhenwei/mi1_kernel
drivers/acpi/acpica/tbxface.c
3319
21955
/****************************************************************************** * * Module Name: tbxface - Public interfaces to the ACPI subsystem * ACPI table oriented interfaces * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <linux/export.h> #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "actables.h" #define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbxface") /* Local prototypes */ static acpi_status acpi_tb_load_namespace(void); static int no_auto_ssdt; /******************************************************************************* * * FUNCTION: acpi_allocate_root_table * * PARAMETERS: initial_table_count - Size of initial_table_array, in number of * struct acpi_table_desc structures * * RETURN: Status * * DESCRIPTION: Allocate a root table array. Used by i_aSL compiler and * acpi_initialize_tables. * ******************************************************************************/ acpi_status acpi_allocate_root_table(u32 initial_table_count) { acpi_gbl_root_table_list.max_table_count = initial_table_count; acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE; return (acpi_tb_resize_root_table_list()); } /******************************************************************************* * * FUNCTION: acpi_initialize_tables * * PARAMETERS: initial_table_array - Pointer to an array of pre-allocated * struct acpi_table_desc structures. If NULL, the * array is dynamically allocated. * initial_table_count - Size of initial_table_array, in number of * struct acpi_table_desc structures * allow_realloc - Flag to tell Table Manager if resize of * pre-allocated array is allowed. Ignored * if initial_table_array is NULL. 
* * RETURN: Status * * DESCRIPTION: Initialize the table manager, get the RSDP and RSDT/XSDT. * * NOTE: Allows static allocation of the initial table array in order * to avoid the use of dynamic memory in confined environments * such as the kernel boot sequence where it may not be available. * * If the host OS memory managers are initialized, use NULL for * initial_table_array, and the table will be dynamically allocated. * ******************************************************************************/ acpi_status __init acpi_initialize_tables(struct acpi_table_desc * initial_table_array, u32 initial_table_count, u8 allow_resize) { acpi_physical_address rsdp_address; acpi_status status; ACPI_FUNCTION_TRACE(acpi_initialize_tables); /* * Set up the Root Table Array * Allocate the table array if requested */ if (!initial_table_array) { status = acpi_allocate_root_table(initial_table_count); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } else { /* Root Table Array has been statically allocated by the host */ ACPI_MEMSET(initial_table_array, 0, (acpi_size) initial_table_count * sizeof(struct acpi_table_desc)); acpi_gbl_root_table_list.tables = initial_table_array; acpi_gbl_root_table_list.max_table_count = initial_table_count; acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN; if (allow_resize) { acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE; } } /* Get the address of the RSDP */ rsdp_address = acpi_os_get_root_pointer(); if (!rsdp_address) { return_ACPI_STATUS(AE_NOT_FOUND); } /* * Get the root table (RSDT or XSDT) and extract all entries to the local * Root Table Array. This array contains the information of the RSDT/XSDT * in a common, more useable format. 
*/ status = acpi_tb_parse_root_table(rsdp_address); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_reallocate_root_table * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the * root list from the previously provided scratch area. Should * be called once dynamic memory allocation is available in the * kernel * ******************************************************************************/ acpi_status acpi_reallocate_root_table(void) { struct acpi_table_desc *tables; acpi_size new_size; acpi_size current_size; ACPI_FUNCTION_TRACE(acpi_reallocate_root_table); /* * Only reallocate the root table if the host provided a static buffer * for the table array in the call to acpi_initialize_tables. */ if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { return_ACPI_STATUS(AE_SUPPORT); } /* * Get the current size of the root table and add the default * increment to create the new table size. */ current_size = (acpi_size) acpi_gbl_root_table_list.current_table_count * sizeof(struct acpi_table_desc); new_size = current_size + (ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc)); /* Create new array and copy the old array */ tables = ACPI_ALLOCATE_ZEROED(new_size); if (!tables) { return_ACPI_STATUS(AE_NO_MEMORY); } ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size); /* * Update the root table descriptor. The new size will be the current * number of tables plus the increment, independent of the reserved * size of the original table list. 
*/ acpi_gbl_root_table_list.tables = tables; acpi_gbl_root_table_list.max_table_count = acpi_gbl_root_table_list.current_table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT; acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE; return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_load_table * * PARAMETERS: table_ptr - pointer to a buffer containing the entire * table to be loaded * * RETURN: Status * * DESCRIPTION: This function is called to load a table from the caller's * buffer. The buffer must contain an entire ACPI Table including * a valid header. The header fields will be verified, and if it * is determined that the table is invalid, the call will fail. * ******************************************************************************/ acpi_status acpi_load_table(struct acpi_table_header *table_ptr) { acpi_status status; u32 table_index; struct acpi_table_desc table_desc; if (!table_ptr) return AE_BAD_PARAMETER; ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc)); table_desc.pointer = table_ptr; table_desc.length = table_ptr->length; table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN; /* * Install the new table into the local data structures */ status = acpi_tb_add_table(&table_desc, &table_index); if (ACPI_FAILURE(status)) { return status; } status = acpi_ns_load_table(table_index, acpi_gbl_root_node); return status; } ACPI_EXPORT_SYMBOL(acpi_load_table) /******************************************************************************* * * FUNCTION: acpi_get_table_header * * PARAMETERS: Signature - ACPI signature of needed table * Instance - Which instance (for SSDTs) * out_table_header - The pointer to the table header to fill * * RETURN: Status and pointer to mapped table header * * DESCRIPTION: Finds an ACPI table header. 
 *
 * NOTE: Caller is responsible for unmapping the header with
 * acpi_os_unmap_memory
 *
 ******************************************************************************/
acpi_status
acpi_get_table_header(char *signature,
		      u32 instance, struct acpi_table_header *out_table_header)
{
	u32 i;
	u32 j;
	struct acpi_table_header *header;

	/* Parameter validation */

	if (!signature || !out_table_header) {
		return (AE_BAD_PARAMETER);
	}

	/* Walk the root table list */

	for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
	     i++) {
		if (!ACPI_COMPARE_NAME
		    (&(acpi_gbl_root_table_list.tables[i].signature),
		     signature)) {
			continue;
		}

		/* Instance is 1-based: skip earlier tables that carry the
		 * same signature (e.g. multiple SSDTs) */
		if (++j < instance) {
			continue;
		}

		if (!acpi_gbl_root_table_list.tables[i].pointer) {
			/* Table not resident: temporarily map just the
			 * header, copy it into the caller's buffer, and
			 * unmap again so no mapping is leaked here */
			if ((acpi_gbl_root_table_list.tables[i].flags &
			     ACPI_TABLE_ORIGIN_MASK) ==
			    ACPI_TABLE_ORIGIN_MAPPED) {
				header =
				    acpi_os_map_memory(acpi_gbl_root_table_list.
						       tables[i].address,
						       sizeof(struct
							      acpi_table_header));
				if (!header) {
					return AE_NO_MEMORY;
				}
				ACPI_MEMCPY(out_table_header, header,
					    sizeof(struct acpi_table_header));
				acpi_os_unmap_memory(header,
						     sizeof(struct
							    acpi_table_header));
			} else {
				return AE_NOT_FOUND;
			}
		} else {
			/* Table already mapped: copy the header directly */
			ACPI_MEMCPY(out_table_header,
				    acpi_gbl_root_table_list.tables[i].pointer,
				    sizeof(struct acpi_table_header));
		}
		return (AE_OK);
	}

	return (AE_NOT_FOUND);
}

ACPI_EXPORT_SYMBOL(acpi_get_table_header)

/*******************************************************************************
 *
 * FUNCTION:    acpi_unload_table_id
 *
 * PARAMETERS:  id              - Owner ID of the table to be removed.
* * RETURN: Status * * DESCRIPTION: This routine is used to force the unload of a table (by id) * ******************************************************************************/ acpi_status acpi_unload_table_id(acpi_owner_id id) { int i; acpi_status status = AE_NOT_EXIST; ACPI_FUNCTION_TRACE(acpi_unload_table_id); /* Find table in the global table list */ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { if (id != acpi_gbl_root_table_list.tables[i].owner_id) { continue; } /* * Delete all namespace objects owned by this table. Note that these * objects can appear anywhere in the namespace by virtue of the AML * "Scope" operator. Thus, we need to track ownership by an ID, not * simply a position within the hierarchy */ acpi_tb_delete_namespace_by_owner(i); status = acpi_tb_release_owner_id(i); acpi_tb_set_table_loaded_flag(i, FALSE); break; } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_unload_table_id) /******************************************************************************* * * FUNCTION: acpi_get_table_with_size * * PARAMETERS: Signature - ACPI signature of needed table * Instance - Which instance (for SSDTs) * out_table - Where the pointer to the table is returned * * RETURN: Status and pointer to table * * DESCRIPTION: Finds and verifies an ACPI table. 
 *
 ******************************************************************************/
acpi_status
acpi_get_table_with_size(char *signature,
	       u32 instance, struct acpi_table_header **out_table,
	       acpi_size *tbl_size)
{
	u32 i;
	u32 j;
	acpi_status status;

	/* Parameter validation. NOTE(review): tbl_size is not checked for
	 * NULL here -- callers (e.g. acpi_get_table below) always supply a
	 * valid pointer; confirm before adding new callers. */

	if (!signature || !out_table) {
		return (AE_BAD_PARAMETER);
	}

	/* Walk the root table list */

	for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
	     i++) {
		if (!ACPI_COMPARE_NAME
		    (&(acpi_gbl_root_table_list.tables[i].signature),
		     signature)) {
			continue;
		}

		/* Instance is 1-based: skip earlier tables with the same
		 * signature (multiple SSDTs, for example) */
		if (++j < instance) {
			continue;
		}

		/* Verify (and map if needed) the table; on success return
		 * the mapped pointer and its length to the caller */
		status =
		    acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
		if (ACPI_SUCCESS(status)) {
			*out_table = acpi_gbl_root_table_list.tables[i].pointer;
			*tbl_size = acpi_gbl_root_table_list.tables[i].length;
		}

		/* Before the permanent memory map exists (early boot),
		 * drop the cached pointer so the table is re-mapped on
		 * the next lookup instead of using a stale mapping */
		if (!acpi_gbl_permanent_mmap) {
			acpi_gbl_root_table_list.tables[i].pointer = NULL;
		}

		return (status);
	}

	return (AE_NOT_FOUND);
}

/* Convenience wrapper around acpi_get_table_with_size() for callers that
 * do not need the table length. */
acpi_status
acpi_get_table(char *signature,
	       u32 instance, struct acpi_table_header **out_table)
{
	acpi_size tbl_size;

	return acpi_get_table_with_size(signature, instance, out_table,
					&tbl_size);
}

ACPI_EXPORT_SYMBOL(acpi_get_table)

/*******************************************************************************
 *
 * FUNCTION:    acpi_get_table_by_index
 *
 * PARAMETERS:  table_index     - Table index
 *              Table           - Where the pointer to the table is returned
 *
 * RETURN:      Status and pointer to the table
 *
 * DESCRIPTION: Obtain a table by an index into the global table list.
* ******************************************************************************/ acpi_status acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_get_table_by_index); /* Parameter validation */ if (!table) { return_ACPI_STATUS(AE_BAD_PARAMETER); } (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); /* Validate index */ if (table_index >= acpi_gbl_root_table_list.current_table_count) { (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); return_ACPI_STATUS(AE_BAD_PARAMETER); } if (!acpi_gbl_root_table_list.tables[table_index].pointer) { /* Table is not mapped, map it */ status = acpi_tb_verify_table(&acpi_gbl_root_table_list. tables[table_index]); if (ACPI_FAILURE(status)) { (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); return_ACPI_STATUS(status); } } *table = acpi_gbl_root_table_list.tables[table_index].pointer; (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); return_ACPI_STATUS(AE_OK); } ACPI_EXPORT_SYMBOL(acpi_get_table_by_index) /******************************************************************************* * * FUNCTION: acpi_tb_load_namespace * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in * the RSDT/XSDT. * ******************************************************************************/ static acpi_status acpi_tb_load_namespace(void) { acpi_status status; u32 i; struct acpi_table_header *new_dsdt; ACPI_FUNCTION_TRACE(tb_load_namespace); (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); /* * Load the namespace. The DSDT is required, but any SSDT and * PSDT tables are optional. Verify the DSDT. */ if (!acpi_gbl_root_table_list.current_table_count || !ACPI_COMPARE_NAME(& (acpi_gbl_root_table_list. tables[ACPI_TABLE_INDEX_DSDT].signature), ACPI_SIG_DSDT) || ACPI_FAILURE(acpi_tb_verify_table (&acpi_gbl_root_table_list. 
tables[ACPI_TABLE_INDEX_DSDT]))) { status = AE_NO_ACPI_TABLES; goto unlock_and_exit; } /* * Save the DSDT pointer for simple access. This is the mapped memory * address. We must take care here because the address of the .Tables * array can change dynamically as tables are loaded at run-time. Note: * .Pointer field is not validated until after call to acpi_tb_verify_table. */ acpi_gbl_DSDT = acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer; /* * Optionally copy the entire DSDT to local memory (instead of simply * mapping it.) There are some BIOSs that corrupt or replace the original * DSDT, creating the need for this option. Default is FALSE, do not copy * the DSDT. */ if (acpi_gbl_copy_dsdt_locally) { new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT); if (new_dsdt) { acpi_gbl_DSDT = new_dsdt; } } /* * Save the original DSDT header for detection of table corruption * and/or replacement of the DSDT from outside the OS. */ ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT, sizeof(struct acpi_table_header)); (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); /* Load and parse tables */ status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { if ((!ACPI_COMPARE_NAME (&(acpi_gbl_root_table_list.tables[i].signature), ACPI_SIG_SSDT) && !ACPI_COMPARE_NAME(& (acpi_gbl_root_table_list.tables[i]. 
signature), ACPI_SIG_PSDT)) || ACPI_FAILURE(acpi_tb_verify_table (&acpi_gbl_root_table_list.tables[i]))) { continue; } if (no_auto_ssdt) { printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n"); continue; } /* Ignore errors while loading tables, get as many as possible */ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); (void)acpi_ns_load_table(i, acpi_gbl_root_node); (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); } ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n")); unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_load_tables * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT * ******************************************************************************/ acpi_status acpi_load_tables(void) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_load_tables); /* Load the namespace from the tables */ status = acpi_tb_load_namespace(); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "While loading namespace from ACPI tables")); } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_load_tables) /******************************************************************************* * * FUNCTION: acpi_install_table_handler * * PARAMETERS: Handler - Table event handler * Context - Value passed to the handler on each event * * RETURN: Status * * DESCRIPTION: Install table event handler * ******************************************************************************/ acpi_status acpi_install_table_handler(acpi_tbl_handler handler, void *context) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_install_table_handler); if (!handler) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Don't allow more than one handler */ if (acpi_gbl_table_handler) { status = 
AE_ALREADY_EXISTS;
		goto cleanup;
	}

	/* Install the handler */

	acpi_gbl_table_handler = handler;
	acpi_gbl_table_handler_context = context;

cleanup:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_install_table_handler)

/*******************************************************************************
 *
 * FUNCTION:    acpi_remove_table_handler
 *
 * PARAMETERS:  Handler         - Table event handler that was installed
 *                                previously.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove table event handler. Fails with AE_BAD_PARAMETER
 *              unless the given handler is the one currently installed.
 *
 ******************************************************************************/
acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_remove_table_handler);

	/* Serialize against install/notify via the events mutex */

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Make sure that the installed handler is the same */

	if (!handler || handler != acpi_gbl_table_handler) {
		status = AE_BAD_PARAMETER;
		goto cleanup;
	}

	/* Remove the handler */

	acpi_gbl_table_handler = NULL;

cleanup:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_remove_table_handler)

/* Kernel boot parameter "acpi_no_auto_ssdt": sets the flag consulted by
 * acpi_tb_load_namespace() to suppress automatic SSDT loading. */
static int __init acpi_no_auto_ssdt_setup(char *s)
{
	printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
	no_auto_ssdt = 1;
	return 1;
}

__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
gpl-2.0
civato/SneakyKat-N9005_900T
arch/arm/mach-msm/pmic_debugfs.c
3319
25389
/* Copyright (c) 2009, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/debugfs.h> #include <linux/err.h> #include <linux/uaccess.h> #include <mach/pmic.h> static int debug_lp_mode_control(char *buf, int size) { enum switch_cmd cmd; enum vreg_lp_id id; int cnt; cnt = sscanf(buf, "%u %u", &cmd, &id); if (cnt < 2) { printk(KERN_ERR "%s: sscanf failed cnt=%d", __func__, cnt); return -EINVAL; } if (pmic_lp_mode_control(cmd, id) < 0) return -EFAULT; return size; } static int debug_vreg_set_level(char *buf, int size) { enum vreg_id vreg; int level; int cnt; cnt = sscanf(buf, "%u %u", &vreg, &level); if (cnt < 2) { printk(KERN_ERR "%s: sscanf failed cnt=%d", __func__, cnt); return -EINVAL; } if (pmic_vreg_set_level(vreg, level) < 0) return -EFAULT; return size; } static int debug_vreg_pull_down_switch(char *buf, int size) { enum switch_cmd cmd; enum vreg_pdown_id id; int cnt; cnt = sscanf(buf, "%u %u", &cmd, &id); if (cnt < 2) { printk(KERN_ERR "%s: sscanf failed cnt=%d", __func__, cnt); return -EINVAL; } if (pmic_vreg_pull_down_switch(cmd, id) < 0) return -EFAULT; return size; } static int debug_secure_mpp_control_digital_output(char *buf, int size) { enum mpp_which which; enum mpp_dlogic_level level; enum mpp_dlogic_out_ctrl out; int cnt; cnt = sscanf(buf, "%u %u %u", &which, &level, &out); if (cnt < 3) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_secure_mpp_control_digital_output(which, level, out) < 0) return -EFAULT; 
return size; } static int debug_secure_mpp_config_i_sink(char *buf, int size) { enum mpp_which which; enum mpp_i_sink_level level; enum mpp_i_sink_switch onoff; int cnt; cnt = sscanf(buf, "%u %u %u", &which, &level, &onoff); if (cnt < 3) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_secure_mpp_config_i_sink(which, level, onoff) < 0) return -EFAULT; return size; } static int debug_secure_mpp_config_digital_input(char *buf, int size) { enum mpp_which which; enum mpp_dlogic_level level; enum mpp_dlogic_in_dbus dbus; int cnt; cnt = sscanf(buf, "%u %u %u", &which, &level, &dbus); if (cnt < 3) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_secure_mpp_config_digital_input(which, level, dbus) < 0) return -EFAULT; return size; } static int debug_rtc_start(char *buf, int size) { uint time; struct rtc_time *hal; int cnt; cnt = sscanf(buf, "%d", &time); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } hal = (struct rtc_time *)&time; if (pmic_rtc_start(hal) < 0) return -EFAULT; return size; } static int debug_rtc_stop(char *buf, int size) { if (pmic_rtc_stop() < 0) return -EFAULT; return size; } static int debug_rtc_get_time(char *buf, int size) { uint time; struct rtc_time *hal; hal = (struct rtc_time *)&time; if (pmic_rtc_get_time(hal) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", time); } static int debug_rtc_alarm_ndx; int debug_rtc_enable_alarm(char *buf, int size) { enum rtc_alarm alarm; struct rtc_time *hal; uint time; int cnt; cnt = sscanf(buf, "%u %u", &alarm, &time); if (cnt < 2) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } hal = (struct rtc_time *)&time; if (pmic_rtc_enable_alarm(alarm, hal) < 0) return -EFAULT; debug_rtc_alarm_ndx = alarm; return size; } static int debug_rtc_disable_alarm(char *buf, int size) { enum rtc_alarm alarm; int cnt; cnt = sscanf(buf, "%u", &alarm); if (cnt < 1) { 
printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_rtc_disable_alarm(alarm) < 0) return -EFAULT; return size; } static int debug_rtc_get_alarm_time(char *buf, int size) { uint time; struct rtc_time *hal; hal = (struct rtc_time *)&time; if (pmic_rtc_get_alarm_time(debug_rtc_alarm_ndx, hal) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", time); } static int debug_rtc_get_alarm_status(char *buf, int size) { int status;; if (pmic_rtc_get_alarm_status(&status) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", status); } static int debug_rtc_set_time_adjust(char *buf, int size) { uint adjust; int cnt; cnt = sscanf(buf, "%d", &adjust); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_rtc_set_time_adjust(adjust) < 0) return -EFAULT; return size; } static int debug_rtc_get_time_adjust(char *buf, int size) { int adjust;; if (pmic_rtc_get_time_adjust(&adjust) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", adjust); } static int debug_set_led_intensity(char *buf, int size) { enum ledtype type; int level; int cnt; cnt = sscanf(buf, "%u %d", &type, &level); if (cnt < 2) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_set_led_intensity(type, level) < 0) return -EFAULT; return size; } static int debug_flash_led_set_current(char *buf, int size) { int milliamps; int cnt; cnt = sscanf(buf, "%d", &milliamps); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_flash_led_set_current(milliamps) < 0) return -EFAULT; return size; } static int debug_flash_led_set_mode(char *buf, int size) { uint mode; int cnt; cnt = sscanf(buf, "%d", &mode); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_flash_led_set_mode(mode) < 0) return -EFAULT; return size; } static int debug_flash_led_set_polarity(char *buf, int size) { int pol; int cnt; cnt = 
sscanf(buf, "%d", &pol); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_flash_led_set_polarity(pol) < 0) return -EFAULT; return size; } static int debug_speaker_cmd(char *buf, int size) { int cmd; int cnt; cnt = sscanf(buf, "%d", &cmd); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_speaker_cmd(cmd) < 0) return -EFAULT; return size; } static int debug_set_speaker_gain(char *buf, int size) { int gain; int cnt; cnt = sscanf(buf, "%d", &gain); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_set_speaker_gain(gain) < 0) return -EFAULT; return size; } static int debug_mic_en(char *buf, int size) { int enable; int cnt; cnt = sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_mic_en(enable) < 0) return -EFAULT; return size; } static int debug_mic_is_en(char *buf, int size) { int enabled; if (pmic_mic_is_en(&enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } static int debug_mic_set_volt(char *buf, int size) { int vol; int cnt; cnt = sscanf(buf, "%d", &vol); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_mic_set_volt(vol) < 0) return -EFAULT; return size; } static int debug_mic_get_volt(char *buf, int size) { uint vol; if (pmic_mic_get_volt(&vol) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", vol); } static int debug_spkr_en_right_chan(char *buf, int size) { int enable; int cnt; cnt = sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_en_right_chan(enable) < 0) return -EFAULT; return size; } static int debug_spkr_is_right_chan_en(char *buf, int size) { int enabled; if (pmic_spkr_is_right_chan_en(&enabled) < 0) return -EFAULT; return snprintf(buf, size, 
"%d\n", enabled); } static int debug_spkr_en_left_chan(char *buf, int size) { int enable; int cnt; cnt = sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_en_left_chan(enable) < 0) return -EFAULT; return size; } static int debug_spkr_is_left_chan_en(char *buf, int size) { int enabled; if (pmic_spkr_is_left_chan_en(&enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } static int debug_set_spkr_configuration(char *buf, int size) { struct spkr_config_mode cfg; int cnt; cnt = sscanf(buf, "%d %d %d %d %d %d %d %d", &cfg.is_right_chan_en, &cfg.is_left_chan_en, &cfg.is_right_left_chan_added, &cfg.is_stereo_en, &cfg.is_usb_with_hpf_20hz, &cfg.is_mux_bypassed, &cfg.is_hpf_en, &cfg.is_sink_curr_from_ref_volt_cir_en); if (cnt < 8) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_set_spkr_configuration(&cfg) < 0) return -EFAULT; return size; } static int debug_get_spkr_configuration(char *buf, int size) { struct spkr_config_mode cfg; if (pmic_get_spkr_configuration(&cfg) < 0) return -EFAULT; return snprintf(buf, size, "%d %d %d %d %d %d %d %d\n", cfg.is_right_chan_en, cfg.is_left_chan_en, cfg.is_right_left_chan_added, cfg.is_stereo_en, cfg.is_usb_with_hpf_20hz, cfg.is_mux_bypassed, cfg.is_hpf_en, cfg.is_sink_curr_from_ref_volt_cir_en); } static int debug_set_speaker_delay(char *buf, int size) { int delay; int cnt; cnt = sscanf(buf, "%d", &delay); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_set_speaker_delay(delay) < 0) return -EFAULT; return size; } static int debug_speaker_1k6_zin_enable(char *buf, int size) { uint enable; int cnt; cnt = sscanf(buf, "%u", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_speaker_1k6_zin_enable(enable) < 0) return -EFAULT; return size; } static int 
debug_spkr_set_mux_hpf_corner_freq(char *buf, int size) { int freq; int cnt; cnt = sscanf(buf, "%d", &freq); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_set_mux_hpf_corner_freq(freq) < 0) return -EFAULT; return size; } static int debug_spkr_get_mux_hpf_corner_freq(char *buf, int size) { uint freq; if (pmic_spkr_get_mux_hpf_corner_freq(&freq) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", freq); } static int debug_spkr_add_right_left_chan(char *buf, int size) { int enable; int cnt; cnt = sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_add_right_left_chan(enable) < 0) return -EFAULT; return size; } static int debug_spkr_is_right_left_chan_added(char *buf, int size) { int enabled; if (pmic_spkr_is_right_left_chan_added(&enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } static int debug_spkr_en_stereo(char *buf, int size) { int enable; int cnt; cnt = sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_en_stereo(enable) < 0) return -EFAULT; return size; } static int debug_spkr_is_stereo_en(char *buf, int size) { int enabled; if (pmic_spkr_is_stereo_en(&enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } static int debug_spkr_select_usb_with_hpf_20hz(char *buf, int size) { int enable; int cnt; cnt = sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_select_usb_with_hpf_20hz(enable) < 0) return -EFAULT; return size; } static int debug_spkr_is_usb_with_hpf_20hz(char *buf, int size) { int enabled; if (pmic_spkr_is_usb_with_hpf_20hz(&enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } static int debug_spkr_bypass_mux(char *buf, int size) { int enable; int cnt; cnt = 
sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_bypass_mux(enable) < 0) return -EFAULT; return size; } static int debug_spkr_is_mux_bypassed(char *buf, int size) { int enabled; if (pmic_spkr_is_mux_bypassed(&enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } static int debug_spkr_en_hpf(char *buf, int size) { int enable; int cnt; cnt = sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_en_hpf(enable) < 0) return -EFAULT; return size; } static int debug_spkr_is_hpf_en(char *buf, int size) { int enabled; if (pmic_spkr_is_hpf_en(&enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } static int debug_spkr_en_sink_curr_from_ref_volt_cir(char *buf, int size) { int enable; int cnt; cnt = sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_en_sink_curr_from_ref_volt_cir(enable) < 0) return -EFAULT; return size; } static int debug_spkr_is_sink_curr_from_ref_volt_cir_en(char *buf, int size) { int enabled; if (pmic_spkr_is_sink_curr_from_ref_volt_cir_en(&enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } static int debug_vib_mot_set_volt(char *buf, int size) { int vol; int cnt; cnt = sscanf(buf, "%d", &vol); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_vib_mot_set_volt(vol) < 0) return -EFAULT; return size; } static int debug_vib_mot_set_mode(char *buf, int size) { int mode; int cnt; cnt = sscanf(buf, "%d", &mode); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_vib_mot_set_mode(mode) < 0) return -EFAULT; return size; } static int debug_vib_mot_set_polarity(char *buf, int size) { int pol; int cnt; cnt = sscanf(buf, "%d", &pol); if (cnt < 
1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_vib_mot_set_polarity(pol) < 0) return -EFAULT; return size; } static int debug_vid_en(char *buf, int size) { int enable; int cnt; cnt = sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_vid_en(enable) < 0) return -EFAULT; return size; } static int debug_vid_is_en(char *buf, int size) { int enabled; if (pmic_vid_is_en(&enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } static int debug_vid_load_detect_en(char *buf, int size) { int enable; int cnt; cnt = sscanf(buf, "%d", &enable); if (cnt < 1) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_vid_load_detect_en(enable) < 0) return -EFAULT; return size; } /************************************************** * speaker indexed by left_right **************************************************/ static enum spkr_left_right debug_spkr_left_right = LEFT_SPKR; static int debug_spkr_en(char *buf, int size) { int left_right; int enable; int cnt; cnt = sscanf(buf, "%d %d", &left_right, &enable); if (cnt < 2) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_en(left_right, enable) >= 0) { debug_spkr_left_right = left_right; return size; } return -EFAULT; } static int debug_spkr_is_en(char *buf, int size) { int enabled; if (pmic_spkr_is_en(debug_spkr_left_right, &enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } static int debug_spkr_set_gain(char *buf, int size) { int left_right; int enable; int cnt; cnt = sscanf(buf, "%d %d", &left_right, &enable); if (cnt < 2) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_set_gain(left_right, enable) >= 0) { debug_spkr_left_right = left_right; return size; } return -EFAULT; } static int debug_spkr_get_gain(char *buf, int size) { uint gain; 
if (pmic_spkr_get_gain(debug_spkr_left_right, &gain) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", gain); } static int debug_spkr_set_delay(char *buf, int size) { int left_right; int delay; int cnt; cnt = sscanf(buf, "%d %d", &left_right, &delay); if (cnt < 2) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_set_delay(left_right, delay) >= 0) { debug_spkr_left_right = left_right; return size; } return -EFAULT; } static int debug_spkr_get_delay(char *buf, int size) { uint delay; if (pmic_spkr_get_delay(debug_spkr_left_right, &delay) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", delay); } static int debug_spkr_en_mute(char *buf, int size) { int left_right; int enable; int cnt; cnt = sscanf(buf, "%d %d", &left_right, &enable); if (cnt < 2) { printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt); return -EINVAL; } if (pmic_spkr_en_mute(left_right, enable) >= 0) { debug_spkr_left_right = left_right; return size; } return -EFAULT; } static int debug_spkr_is_mute_en(char *buf, int size) { int enabled; if (pmic_spkr_is_mute_en(debug_spkr_left_right, &enabled) < 0) return -EFAULT; return snprintf(buf, size, "%d\n", enabled); } /******************************************************************* * debug function table *******************************************************************/ struct pmic_debug_desc { int (*get) (char *, int); int (*set) (char *, int); }; struct pmic_debug_desc pmic_debug[] = { {NULL, NULL}, /*LIB_NULL_PROC */ {NULL, NULL}, /* LIB_RPC_GLUE_CODE_INFO_REMOTE_PROC */ {NULL, debug_lp_mode_control}, /* LP_MODE_CONTROL_PROC */ {NULL, debug_vreg_set_level}, /*VREG_SET_LEVEL_PROC */ {NULL, debug_vreg_pull_down_switch}, /*VREG_PULL_DOWN_SWITCH_PROC */ {NULL, debug_secure_mpp_control_digital_output}, /* SECURE_MPP_CONFIG_DIGITAL_OUTPUT_PROC */ /*SECURE_MPP_CONFIG_I_SINK_PROC */ {NULL, debug_secure_mpp_config_i_sink}, {NULL, debug_rtc_start}, /*RTC_START_PROC */ {NULL, debug_rtc_stop}, /* 
RTC_STOP_PROC */ {debug_rtc_get_time, NULL}, /* RTC_GET_TIME_PROC */ {NULL, debug_rtc_enable_alarm}, /* RTC_ENABLE_ALARM_PROC */ {NULL , debug_rtc_disable_alarm}, /*RTC_DISABLE_ALARM_PROC */ {debug_rtc_get_alarm_time, NULL}, /* RTC_GET_ALARM_TIME_PROC */ {debug_rtc_get_alarm_status, NULL}, /* RTC_GET_ALARM_STATUS_PROC */ {NULL, debug_rtc_set_time_adjust}, /* RTC_SET_TIME_ADJUST_PROC */ {debug_rtc_get_time_adjust, NULL}, /* RTC_GET_TIME_ADJUST_PROC */ {NULL, debug_set_led_intensity}, /* SET_LED_INTENSITY_PROC */ {NULL, debug_flash_led_set_current}, /* FLASH_LED_SET_CURRENT_PROC */ {NULL, debug_flash_led_set_mode}, /* FLASH_LED_SET_MODE_PROC */ {NULL, debug_flash_led_set_polarity}, /* FLASH_LED_SET_POLARITY_PROC */ {NULL, debug_speaker_cmd}, /* SPEAKER_CMD_PROC */ {NULL, debug_set_speaker_gain}, /* SET_SPEAKER_GAIN_PROC */ {NULL, debug_vib_mot_set_volt}, /* VIB_MOT_SET_VOLT_PROC */ {NULL, debug_vib_mot_set_mode}, /* VIB_MOT_SET_MODE_PROC */ {NULL, debug_vib_mot_set_polarity}, /* VIB_MOT_SET_POLARITY_PROC */ {NULL, debug_vid_en}, /* VID_EN_PROC */ {debug_vid_is_en, NULL}, /* VID_IS_EN_PROC */ {NULL, debug_vid_load_detect_en}, /* VID_LOAD_DETECT_EN_PROC */ {NULL, debug_mic_en}, /* MIC_EN_PROC */ {debug_mic_is_en, NULL}, /* MIC_IS_EN_PROC */ {NULL, debug_mic_set_volt}, /* MIC_SET_VOLT_PROC */ {debug_mic_get_volt, NULL}, /* MIC_GET_VOLT_PROC */ {NULL, debug_spkr_en_right_chan}, /* SPKR_EN_RIGHT_CHAN_PROC */ {debug_spkr_is_right_chan_en, NULL}, /* SPKR_IS_RIGHT_CHAN_EN_PROC */ {NULL, debug_spkr_en_left_chan}, /* SPKR_EN_LEFT_CHAN_PROC */ {debug_spkr_is_left_chan_en, NULL}, /* SPKR_IS_LEFT_CHAN_EN_PROC */ {NULL, debug_set_spkr_configuration}, /* SET_SPKR_CONFIGURATION_PROC */ {debug_get_spkr_configuration, NULL}, /* GET_SPKR_CONFIGURATION_PROC */ {debug_spkr_get_gain, NULL}, /* SPKR_GET_GAIN_PROC */ {debug_spkr_is_en, NULL}, /* SPKR_IS_EN_PROC */ {NULL, debug_spkr_en_mute}, /* SPKR_EN_MUTE_PROC */ {debug_spkr_is_mute_en, NULL}, /* SPKR_IS_MUTE_EN_PROC */ {NULL, 
debug_spkr_set_delay}, /* SPKR_SET_DELAY_PROC */ {debug_spkr_get_delay, NULL}, /* SPKR_GET_DELAY_PROC */ /* SECURE_MPP_CONFIG_DIGITAL_INPUT_PROC */ {NULL, debug_secure_mpp_config_digital_input}, {NULL, debug_set_speaker_delay}, /* SET_SPEAKER_DELAY_PROC */ {NULL, debug_speaker_1k6_zin_enable}, /* SPEAKER_1K6_ZIN_ENABLE_PROC */ /* SPKR_SET_MUX_HPF_CORNER_FREQ_PROC */ {NULL, debug_spkr_set_mux_hpf_corner_freq}, /* SPKR_GET_MUX_HPF_CORNER_FREQ_PROC */ {debug_spkr_get_mux_hpf_corner_freq, NULL}, /* SPKR_IS_RIGHT_LEFT_CHAN_ADDED_PROC */ {debug_spkr_is_right_left_chan_added, NULL}, {NULL, debug_spkr_en_stereo}, /* SPKR_EN_STEREO_PROC */ {debug_spkr_is_stereo_en, NULL}, /* SPKR_IS_STEREO_EN_PROC */ /* SPKR_SELECT_USB_WITH_HPF_20HZ_PROC */ {NULL, debug_spkr_select_usb_with_hpf_20hz}, /* SPKR_IS_USB_WITH_HPF_20HZ_PROC */ {debug_spkr_is_usb_with_hpf_20hz, NULL}, {NULL, debug_spkr_bypass_mux}, /* SPKR_BYPASS_MUX_PROC */ {debug_spkr_is_mux_bypassed, NULL}, /* SPKR_IS_MUX_BYPASSED_PROC */ {NULL, debug_spkr_en_hpf}, /* SPKR_EN_HPF_PROC */ { debug_spkr_is_hpf_en, NULL}, /* SPKR_IS_HPF_EN_PROC */ /* SPKR_EN_SINK_CURR_FROM_REF_VOLT_CIR_PROC */ {NULL, debug_spkr_en_sink_curr_from_ref_volt_cir}, /* SPKR_IS_SINK_CURR_FROM_REF_VOLT_CIR_EN_PROC */ {debug_spkr_is_sink_curr_from_ref_volt_cir_en, NULL}, /* SPKR_ADD_RIGHT_LEFT_CHAN_PROC */ {NULL, debug_spkr_add_right_left_chan}, {NULL, debug_spkr_set_gain}, /* SPKR_SET_GAIN_PROC */ {NULL , debug_spkr_en}, /* SPKR_EN_PROC */ }; /***********************************************************************/ #define PROC_END (sizeof(pmic_debug)/sizeof(struct pmic_debug_desc)) #define PMIC_DEBUG_BUF 512 static int debug_proc; /* PROC's index */ static char debug_buf[PMIC_DEBUG_BUF]; static int proc_index_set(void *data, u64 val) { int ndx; ndx = (int)val; if (ndx >= 0 && ndx <= PROC_END) debug_proc = ndx; return 0; } static int proc_index_get(void *data, u64 *val) { *val = (u64)debug_proc; return 0; } DEFINE_SIMPLE_ATTRIBUTE( proc_index_fops, 
proc_index_get, proc_index_set, "%llu\n"); static int pmic_debugfs_open(struct inode *inode, struct file *file) { /* non-seekable */ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); return 0; } static int pmic_debugfs_release(struct inode *inode, struct file *file) { return 0; } static ssize_t pmic_debugfs_write( struct file *file, const char __user *buff, size_t count, loff_t *ppos) { struct pmic_debug_desc *pd; int len = 0; printk(KERN_INFO "%s: proc=%d count=%d *ppos=%d\n", __func__, debug_proc, count, (uint)*ppos); if (count > sizeof(debug_buf)) return -EFAULT; if (copy_from_user(debug_buf, buff, count)) return -EFAULT; debug_buf[count] = 0; /* end of string */ pd = &pmic_debug[debug_proc]; if (pd->set) { len = pd->set(debug_buf, count); printk(KERN_INFO "%s: len=%d\n", __func__, len); return len; } return 0; } static ssize_t pmic_debugfs_read( struct file *file, char __user *buff, size_t count, loff_t *ppos) { struct pmic_debug_desc *pd; int len = 0; printk(KERN_INFO "%s: proc=%d count=%d *ppos=%d\n", __func__, debug_proc, count, (uint)*ppos); pd = &pmic_debug[debug_proc]; if (*ppos) return 0; /* the end */ if (pd->get) { len = pd->get(debug_buf, sizeof(debug_buf)); if (len > 0) { if (len > count) len = count; if (copy_to_user(buff, debug_buf, len)) return -EFAULT; } } printk(KERN_INFO "%s: len=%d\n", __func__, len); if (len < 0) return 0; *ppos += len; /* increase offset */ return len; } static const struct file_operations pmic_debugfs_fops = { .open = pmic_debugfs_open, .release = pmic_debugfs_release, .read = pmic_debugfs_read, .write = pmic_debugfs_write, }; static int __init pmic_debugfs_init(void) { struct dentry *dent = debugfs_create_dir("pmic", NULL); if (IS_ERR(dent)) { printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n", __FILE__, __LINE__, PTR_ERR(dent)); return -1; } if (debugfs_create_file("index", 0644, dent, 0, &proc_index_fops) == NULL) { printk(KERN_ERR "%s(%d): debugfs_create_file: index fail\n", __FILE__, __LINE__); 
return -1; } if (debugfs_create_file("debug", 0644, dent, 0, &pmic_debugfs_fops) == NULL) { printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n", __FILE__, __LINE__); return -1; } debug_proc = 0; debug_rtc_alarm_ndx = 0; return 0; } late_initcall(pmic_debugfs_init);
gpl-2.0
crazyquark/linux
drivers/media/video/tlg2300/pd-dvb.c
3575
13792
#include "pd-common.h" #include <linux/kernel.h> #include <linux/usb.h> #include <linux/dvb/dmx.h> #include <linux/delay.h> #include <linux/gfp.h> #include "vendorcmds.h" #include <linux/sched.h> #include <asm/atomic.h> static void dvb_urb_cleanup(struct pd_dvb_adapter *pd_dvb); static int dvb_bandwidth[][2] = { { TLG_BW_8, BANDWIDTH_8_MHZ }, { TLG_BW_7, BANDWIDTH_7_MHZ }, { TLG_BW_6, BANDWIDTH_6_MHZ } }; static int dvb_bandwidth_length = ARRAY_SIZE(dvb_bandwidth); static s32 dvb_start_streaming(struct pd_dvb_adapter *pd_dvb); static int poseidon_check_mode_dvbt(struct poseidon *pd) { s32 ret = 0, cmd_status = 0; set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ/4); ret = usb_set_interface(pd->udev, 0, BULK_ALTERNATE_IFACE); if (ret != 0) return ret; ret = set_tuner_mode(pd, TLG_MODE_CAPS_DVB_T); if (ret) return ret; /* signal source */ ret = send_set_req(pd, SGNL_SRC_SEL, TLG_SIG_SRC_ANTENNA, &cmd_status); if (ret|cmd_status) return ret; return 0; } /* acquire : * 1 == open * 0 == release */ static int poseidon_ts_bus_ctrl(struct dvb_frontend *fe, int acquire) { struct poseidon *pd = fe->demodulator_priv; struct pd_dvb_adapter *pd_dvb; int ret = 0; if (!pd) return -ENODEV; pd_dvb = container_of(fe, struct pd_dvb_adapter, dvb_fe); if (acquire) { mutex_lock(&pd->lock); if (pd->state & POSEIDON_STATE_DISCONNECT) { ret = -ENODEV; goto open_out; } if (pd->state && !(pd->state & POSEIDON_STATE_DVBT)) { ret = -EBUSY; goto open_out; } usb_autopm_get_interface(pd->interface); if (0 == pd->state) { ret = poseidon_check_mode_dvbt(pd); if (ret < 0) { usb_autopm_put_interface(pd->interface); goto open_out; } pd->state |= POSEIDON_STATE_DVBT; pd_dvb->bandwidth = 0; pd_dvb->prev_freq = 0; } atomic_inc(&pd_dvb->users); kref_get(&pd->kref); open_out: mutex_unlock(&pd->lock); } else { dvb_stop_streaming(pd_dvb); if (atomic_dec_and_test(&pd_dvb->users)) { mutex_lock(&pd->lock); pd->state &= ~POSEIDON_STATE_DVBT; mutex_unlock(&pd->lock); } kref_put(&pd->kref, 
poseidon_delete); usb_autopm_put_interface(pd->interface); } return ret; } #ifdef CONFIG_PM static void poseidon_fe_release(struct dvb_frontend *fe) { struct poseidon *pd = fe->demodulator_priv; pd->pm_suspend = NULL; pd->pm_resume = NULL; } #else #define poseidon_fe_release NULL #endif static s32 poseidon_fe_sleep(struct dvb_frontend *fe) { return 0; } /* * return true if we can satisfy the conditions, else return false. */ static bool check_scan_ok(__u32 freq, int bandwidth, struct pd_dvb_adapter *adapter) { if (bandwidth < 0) return false; if (adapter->prev_freq == freq && adapter->bandwidth == bandwidth) { long nl = jiffies - adapter->last_jiffies; unsigned int msec ; msec = jiffies_to_msecs(abs(nl)); return msec > 15000 ? true : false; } return true; } /* * Check if the firmware delays too long for an invalid frequency. */ static int fw_delay_overflow(struct pd_dvb_adapter *adapter) { long nl = jiffies - adapter->last_jiffies; unsigned int msec ; msec = jiffies_to_msecs(abs(nl)); return msec > 800 ? 
true : false; } static int poseidon_set_fe(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep) { s32 ret = 0, cmd_status = 0; s32 i, bandwidth = -1; struct poseidon *pd = fe->demodulator_priv; struct pd_dvb_adapter *pd_dvb = &pd->dvb_data; if (in_hibernation(pd)) return -EBUSY; mutex_lock(&pd->lock); for (i = 0; i < dvb_bandwidth_length; i++) if (fep->u.ofdm.bandwidth == dvb_bandwidth[i][1]) bandwidth = dvb_bandwidth[i][0]; if (check_scan_ok(fep->frequency, bandwidth, pd_dvb)) { ret = send_set_req(pd, TUNE_FREQ_SELECT, fep->frequency / 1000, &cmd_status); if (ret | cmd_status) { log("error line"); goto front_out; } ret = send_set_req(pd, DVBT_BANDW_SEL, bandwidth, &cmd_status); if (ret | cmd_status) { log("error line"); goto front_out; } ret = send_set_req(pd, TAKE_REQUEST, 0, &cmd_status); if (ret | cmd_status) { log("error line"); goto front_out; } /* save the context for future */ memcpy(&pd_dvb->fe_param, fep, sizeof(*fep)); pd_dvb->bandwidth = bandwidth; pd_dvb->prev_freq = fep->frequency; pd_dvb->last_jiffies = jiffies; } front_out: mutex_unlock(&pd->lock); return ret; } #ifdef CONFIG_PM static int pm_dvb_suspend(struct poseidon *pd) { struct pd_dvb_adapter *pd_dvb = &pd->dvb_data; dvb_stop_streaming(pd_dvb); dvb_urb_cleanup(pd_dvb); msleep(500); return 0; } static int pm_dvb_resume(struct poseidon *pd) { struct pd_dvb_adapter *pd_dvb = &pd->dvb_data; poseidon_check_mode_dvbt(pd); msleep(300); poseidon_set_fe(&pd_dvb->dvb_fe, &pd_dvb->fe_param); dvb_start_streaming(pd_dvb); return 0; } #endif static s32 poseidon_fe_init(struct dvb_frontend *fe) { struct poseidon *pd = fe->demodulator_priv; struct pd_dvb_adapter *pd_dvb = &pd->dvb_data; #ifdef CONFIG_PM pd->pm_suspend = pm_dvb_suspend; pd->pm_resume = pm_dvb_resume; #endif memset(&pd_dvb->fe_param, 0, sizeof(struct dvb_frontend_parameters)); return 0; } static int poseidon_get_fe(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep) { struct poseidon *pd = fe->demodulator_priv; struct 
pd_dvb_adapter *pd_dvb = &pd->dvb_data; memcpy(fep, &pd_dvb->fe_param, sizeof(*fep)); return 0; } static int poseidon_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static int poseidon_read_status(struct dvb_frontend *fe, fe_status_t *stat) { struct poseidon *pd = fe->demodulator_priv; s32 ret = -1, cmd_status; struct tuner_dtv_sig_stat_s status = {}; if (in_hibernation(pd)) return -EBUSY; mutex_lock(&pd->lock); ret = send_get_req(pd, TUNER_STATUS, TLG_MODE_DVB_T, &status, &cmd_status, sizeof(status)); if (ret | cmd_status) { log("get tuner status error"); goto out; } if (debug_mode) log("P : %d, L %d, LB :%d", status.sig_present, status.sig_locked, status.sig_lock_busy); if (status.sig_lock_busy) { goto out; } else if (status.sig_present || status.sig_locked) { *stat |= FE_HAS_LOCK | FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC | FE_HAS_VITERBI; } else { if (fw_delay_overflow(&pd->dvb_data)) *stat |= FE_TIMEDOUT; } out: mutex_unlock(&pd->lock); return ret; } static int poseidon_read_ber(struct dvb_frontend *fe, u32 *ber) { struct poseidon *pd = fe->demodulator_priv; struct tuner_ber_rate_s tlg_ber = {}; s32 ret = -1, cmd_status; mutex_lock(&pd->lock); ret = send_get_req(pd, TUNER_BER_RATE, 0, &tlg_ber, &cmd_status, sizeof(tlg_ber)); if (ret | cmd_status) goto out; *ber = tlg_ber.ber_rate; out: mutex_unlock(&pd->lock); return ret; } static s32 poseidon_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct poseidon *pd = fe->demodulator_priv; struct tuner_dtv_sig_stat_s status = {}; s32 ret = 0, cmd_status; mutex_lock(&pd->lock); ret = send_get_req(pd, TUNER_STATUS, TLG_MODE_DVB_T, &status, &cmd_status, sizeof(status)); if (ret | cmd_status) goto out; if ((status.sig_present || status.sig_locked) && !status.sig_strength) *strength = 0xFFFF; else *strength = status.sig_strength; out: mutex_unlock(&pd->lock); return ret; } static int poseidon_read_snr(struct dvb_frontend 
*fe, u16 *snr) { return 0; } static int poseidon_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) { *unc = 0; return 0; } static struct dvb_frontend_ops poseidon_frontend_ops = { .info = { .name = "Poseidon DVB-T", .type = FE_OFDM, .frequency_min = 174000000, .frequency_max = 862000000, .frequency_stepsize = 62500,/* FIXME */ .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO, }, .release = poseidon_fe_release, .init = poseidon_fe_init, .sleep = poseidon_fe_sleep, .set_frontend = poseidon_set_fe, .get_frontend = poseidon_get_fe, .get_tune_settings = poseidon_fe_get_tune_settings, .read_status = poseidon_read_status, .read_ber = poseidon_read_ber, .read_signal_strength = poseidon_read_signal_strength, .read_snr = poseidon_read_snr, .read_ucblocks = poseidon_read_unc_blocks, .ts_bus_ctrl = poseidon_ts_bus_ctrl, }; static void dvb_urb_irq(struct urb *urb) { struct pd_dvb_adapter *pd_dvb = urb->context; int len = urb->transfer_buffer_length; struct dvb_demux *demux = &pd_dvb->demux; s32 ret; if (!pd_dvb->is_streaming || urb->status) { if (urb->status == -EPROTO) goto resend; return; } if (urb->actual_length == len) dvb_dmx_swfilter(demux, urb->transfer_buffer, len); else if (urb->actual_length == len - 4) { int offset; u8 *buf = urb->transfer_buffer; /* * The packet size is 512, * last packet contains 456 bytes tsp data */ for (offset = 456; offset < len; offset += 512) { if (!strncmp(buf + offset, "DVHS", 4)) { dvb_dmx_swfilter(demux, buf, offset); if (len > offset + 52 + 4) { /*16 bytes trailer + 36 bytes padding */ buf += offset + 52; len -= offset + 52 + 4; dvb_dmx_swfilter(demux, buf, len); } break; } } } resend: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) log(" usb_submit_urb failed: error %d", ret); } static int 
dvb_urb_init(struct pd_dvb_adapter *pd_dvb) { if (pd_dvb->urb_array[0]) return 0; alloc_bulk_urbs_generic(pd_dvb->urb_array, DVB_SBUF_NUM, pd_dvb->pd_device->udev, pd_dvb->ep_addr, DVB_URB_BUF_SIZE, GFP_KERNEL, dvb_urb_irq, pd_dvb); return 0; } static void dvb_urb_cleanup(struct pd_dvb_adapter *pd_dvb) { free_all_urb_generic(pd_dvb->urb_array, DVB_SBUF_NUM); } static s32 dvb_start_streaming(struct pd_dvb_adapter *pd_dvb) { struct poseidon *pd = pd_dvb->pd_device; int ret = 0; if (pd->state & POSEIDON_STATE_DISCONNECT) return -ENODEV; mutex_lock(&pd->lock); if (!pd_dvb->is_streaming) { s32 i, cmd_status = 0; /* * Once upon a time, there was a difficult bug lying here. * ret = send_set_req(pd, TAKE_REQUEST, 0, &cmd_status); */ ret = send_set_req(pd, PLAY_SERVICE, 1, &cmd_status); if (ret | cmd_status) goto out; ret = dvb_urb_init(pd_dvb); if (ret < 0) goto out; pd_dvb->is_streaming = 1; for (i = 0; i < DVB_SBUF_NUM; i++) { ret = usb_submit_urb(pd_dvb->urb_array[i], GFP_KERNEL); if (ret) { log(" submit urb error %d", ret); goto out; } } } out: mutex_unlock(&pd->lock); return ret; } void dvb_stop_streaming(struct pd_dvb_adapter *pd_dvb) { struct poseidon *pd = pd_dvb->pd_device; mutex_lock(&pd->lock); if (pd_dvb->is_streaming) { s32 i, ret, cmd_status = 0; pd_dvb->is_streaming = 0; for (i = 0; i < DVB_SBUF_NUM; i++) if (pd_dvb->urb_array[i]) usb_kill_urb(pd_dvb->urb_array[i]); ret = send_set_req(pd, PLAY_SERVICE, TLG_TUNE_PLAY_SVC_STOP, &cmd_status); if (ret | cmd_status) log("error"); } mutex_unlock(&pd->lock); } static int pd_start_feed(struct dvb_demux_feed *feed) { struct pd_dvb_adapter *pd_dvb = feed->demux->priv; int ret = 0; if (!pd_dvb) return -1; if (atomic_inc_return(&pd_dvb->active_feed) == 1) ret = dvb_start_streaming(pd_dvb); return ret; } static int pd_stop_feed(struct dvb_demux_feed *feed) { struct pd_dvb_adapter *pd_dvb = feed->demux->priv; if (!pd_dvb) return -1; if (atomic_dec_and_test(&pd_dvb->active_feed)) dvb_stop_streaming(pd_dvb); return 0; } 
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); int pd_dvb_usb_device_init(struct poseidon *pd) { struct pd_dvb_adapter *pd_dvb = &pd->dvb_data; struct dvb_demux *dvbdemux; int ret = 0; pd_dvb->ep_addr = 0x82; atomic_set(&pd_dvb->users, 0); atomic_set(&pd_dvb->active_feed, 0); pd_dvb->pd_device = pd; ret = dvb_register_adapter(&pd_dvb->dvb_adap, "Poseidon dvbt adapter", THIS_MODULE, NULL /* for hibernation correctly*/, adapter_nr); if (ret < 0) goto error1; /* register frontend */ pd_dvb->dvb_fe.demodulator_priv = pd; memcpy(&pd_dvb->dvb_fe.ops, &poseidon_frontend_ops, sizeof(struct dvb_frontend_ops)); ret = dvb_register_frontend(&pd_dvb->dvb_adap, &pd_dvb->dvb_fe); if (ret < 0) goto error2; /* register demux device */ dvbdemux = &pd_dvb->demux; dvbdemux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING; dvbdemux->priv = pd_dvb; dvbdemux->feednum = dvbdemux->filternum = 64; dvbdemux->start_feed = pd_start_feed; dvbdemux->stop_feed = pd_stop_feed; dvbdemux->write_to_decoder = NULL; ret = dvb_dmx_init(dvbdemux); if (ret < 0) goto error3; pd_dvb->dmxdev.filternum = pd_dvb->demux.filternum; pd_dvb->dmxdev.demux = &pd_dvb->demux.dmx; pd_dvb->dmxdev.capabilities = 0; ret = dvb_dmxdev_init(&pd_dvb->dmxdev, &pd_dvb->dvb_adap); if (ret < 0) goto error3; return 0; error3: dvb_unregister_frontend(&pd_dvb->dvb_fe); error2: dvb_unregister_adapter(&pd_dvb->dvb_adap); error1: return ret; } void pd_dvb_usb_device_exit(struct poseidon *pd) { struct pd_dvb_adapter *pd_dvb = &pd->dvb_data; while (atomic_read(&pd_dvb->users) != 0 || atomic_read(&pd_dvb->active_feed) != 0) { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ); } dvb_dmxdev_release(&pd_dvb->dmxdev); dvb_unregister_frontend(&pd_dvb->dvb_fe); dvb_unregister_adapter(&pd_dvb->dvb_adap); pd_dvb_usb_device_cleanup(pd); } void pd_dvb_usb_device_cleanup(struct poseidon *pd) { struct pd_dvb_adapter *pd_dvb = &pd->dvb_data; dvb_urb_cleanup(pd_dvb); } int pd_dvb_get_adapter_num(struct pd_dvb_adapter *pd_dvb) { return 
pd_dvb->dvb_adap.num; }
gpl-2.0
Loller79/Solid_Kernel-GPROJ
arch/arm/mm/fault-armv.c
4855
6880
/* * linux/arch/arm/mm/fault-armv.c * * Copyright (C) 1995 Linus Torvalds * Modifications for ARM processor (c) 1995-2002 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bitops.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/gfp.h> #include <asm/bugs.h> #include <asm/cacheflush.h> #include <asm/cachetype.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include "mm.h" static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE; #if __LINUX_ARM_ARCH__ < 6 /* * We take the easy way out of this problem - we make the * PTE uncacheable. However, we leave the write buffer on. * * Note that the pte lock held when calling update_mmu_cache must also * guard the pte (somewhere else in the same mm) that we modify here. * Therefore those configurations which might call adjust_pte (those * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. */ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn, pte_t *ptep) { pte_t entry = *ptep; int ret; /* * If this page is present, it's actually being shared. */ ret = pte_present(entry); /* * If this page isn't present, or is already setup to * fault (ie, is old), we can safely ignore any issues. */ if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { flush_cache_page(vma, address, pfn); outer_flush_range((pfn << PAGE_SHIFT), (pfn << PAGE_SHIFT) + PAGE_SIZE); pte_val(entry) &= ~L_PTE_MT_MASK; pte_val(entry) |= shared_pte_mask; set_pte_at(vma->vm_mm, address, ptep, entry); flush_tlb_page(vma, address); } return ret; } #if USE_SPLIT_PTLOCKS /* * If we are using split PTE locks, then we need to take the page * lock here. 
Otherwise we are using shared mm->page_table_lock * which is already locked, thus cannot take it. */ static inline void do_pte_lock(spinlock_t *ptl) { /* * Use nested version here to indicate that we are already * holding one similar spinlock. */ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); } static inline void do_pte_unlock(spinlock_t *ptl) { spin_unlock(ptl); } #else /* !USE_SPLIT_PTLOCKS */ static inline void do_pte_lock(spinlock_t *ptl) {} static inline void do_pte_unlock(spinlock_t *ptl) {} #endif /* USE_SPLIT_PTLOCKS */ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) { spinlock_t *ptl; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; int ret; pgd = pgd_offset(vma->vm_mm, address); if (pgd_none_or_clear_bad(pgd)) return 0; pud = pud_offset(pgd, address); if (pud_none_or_clear_bad(pud)) return 0; pmd = pmd_offset(pud, address); if (pmd_none_or_clear_bad(pmd)) return 0; /* * This is called while another page table is mapped, so we * must use the nested version. This also means we need to * open-code the spin-locking. */ ptl = pte_lockptr(vma->vm_mm, pmd); pte = pte_offset_map(pmd, address); do_pte_lock(ptl); ret = do_adjust_pte(vma, address, pfn, pte); do_pte_unlock(ptl); pte_unmap(pte); return ret; } static void make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, unsigned long pfn) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *mpnt; struct prio_tree_iter iter; unsigned long offset; pgoff_t pgoff; int aliases = 0; pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); /* * If we have any shared mappings that are in the same mm * space, then we need to handle them specially to maintain * cache coherency. */ flush_dcache_mmap_lock(mapping); vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) { /* * If this VMA is not in our MM, we can ignore it. * Note that we intentionally mask out the VMA * that we are fixing up. 
*/ if (mpnt->vm_mm != mm || mpnt == vma) continue; if (!(mpnt->vm_flags & VM_MAYSHARE)) continue; offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn); } flush_dcache_mmap_unlock(mapping); if (aliases) do_adjust_pte(vma, addr, pfn, ptep); } /* * Take care of architecture specific things when placing a new PTE into * a page table, or changing an existing PTE. Basically, there are two * things that we need to take care of: * * 1. If PG_dcache_clean is not set for the page, we need to ensure * that any cache entries for the kernels virtual memory * range are written back to the page. * 2. If we have multiple shared mappings of the same space in * an object, we need to deal with the cache aliasing issues. * * Note that the pte lock will be held. */ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long pfn = pte_pfn(*ptep); struct address_space *mapping; struct page *page; if (!pfn_valid(pfn)) return; /* * The zero page is never written to, so never has any dirty * cache lines, and therefore never needs to be flushed. */ page = pfn_to_page(pfn); if (page == ZERO_PAGE(0)) return; mapping = page_mapping(page); if (!test_and_set_bit(PG_dcache_clean, &page->flags)) __flush_dcache_page(mapping, page); if (mapping) { if (cache_is_vivt()) make_coherent(mapping, vma, addr, ptep, pfn); else if (vma->vm_flags & VM_EXEC) __flush_icache_all(); } } #endif /* __LINUX_ARM_ARCH__ < 6 */ /* * Check whether the write buffer has physical address aliasing * issues. If it has, we need to avoid them for the case where * we have several shared mappings of the same object in user * space. 
*/ static int __init check_writebuffer(unsigned long *p1, unsigned long *p2) { register unsigned long zero = 0, one = 1, val; local_irq_disable(); mb(); *p1 = one; mb(); *p2 = zero; mb(); val = *p1; mb(); local_irq_enable(); return val != zero; } void __init check_writebuffer_bugs(void) { struct page *page; const char *reason; unsigned long v = 1; printk(KERN_INFO "CPU: Testing write buffer coherency: "); page = alloc_page(GFP_KERNEL); if (page) { unsigned long *p1, *p2; pgprot_t prot = __pgprot_modify(PAGE_KERNEL, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE); p1 = vmap(&page, 1, VM_IOREMAP, prot); p2 = vmap(&page, 1, VM_IOREMAP, prot); if (p1 && p2) { v = check_writebuffer(p1, p2); reason = "enabling work-around"; } else { reason = "unable to map memory\n"; } vunmap(p1); vunmap(p2); put_page(page); } else { reason = "unable to grab page\n"; } if (v) { printk("failed, %s\n", reason); shared_pte_mask = L_PTE_MT_UNCACHED; } else { printk("ok\n"); } }
gpl-2.0
MattCrystal/sarin-gas-aosp-jewel
arch/arm/mach-pxa/colibri-pxa320.c
4855
6488
/* * arch/arm/mach-pxa/colibri-pxa320.c * * Support for Toradex PXA320/310 based Colibri module * * Daniel Mack <daniel@caiaq.de> * Matthias Meier <matthias.j.meier@gmx.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/usb/gpio_vbus.h> #include <asm/mach-types.h> #include <asm/sizes.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <mach/pxa320.h> #include <mach/colibri.h> #include <mach/pxafb.h> #include <mach/ohci.h> #include <mach/audio.h> #include <mach/pxa27x-udc.h> #include <mach/udc.h> #include "generic.h" #include "devices.h" #ifdef CONFIG_MACH_COLIBRI_EVALBOARD static mfp_cfg_t colibri_pxa320_evalboard_pin_config[] __initdata = { /* MMC */ GPIO22_MMC1_CLK, GPIO23_MMC1_CMD, GPIO18_MMC1_DAT0, GPIO19_MMC1_DAT1, GPIO20_MMC1_DAT2, GPIO21_MMC1_DAT3, GPIO28_GPIO, /* SD detect */ /* UART 1 configuration (may be set by bootloader) */ GPIO99_UART1_CTS, GPIO104_UART1_RTS, GPIO97_UART1_RXD, GPIO98_UART1_TXD, GPIO101_UART1_DTR, GPIO103_UART1_DSR, GPIO100_UART1_DCD, GPIO102_UART1_RI, /* UART 2 configuration */ GPIO109_UART2_CTS, GPIO112_UART2_RTS, GPIO110_UART2_RXD, GPIO111_UART2_TXD, /* UART 3 configuration */ GPIO30_UART3_RXD, GPIO31_UART3_TXD, /* UHC */ GPIO2_2_USBH_PEN, GPIO3_2_USBH_PWR, /* I2C */ GPIO32_I2C_SCL, GPIO33_I2C_SDA, /* PCMCIA */ MFP_CFG(GPIO59, AF7), /* PRST ; AF7 to tristate */ MFP_CFG(GPIO61, AF7), /* PCE1 ; AF7 to tristate */ MFP_CFG(GPIO60, AF7), /* PCE2 ; AF7 to tristate */ MFP_CFG(GPIO62, AF7), /* PCD ; AF7 to tristate */ MFP_CFG(GPIO56, AF7), /* PSKTSEL ; AF7 to tristate */ GPIO27_GPIO, /* RDnWR ; input/tristate */ GPIO50_GPIO, /* PREG ; input/tristate */ GPIO2_RDY, GPIO5_NPIOR, GPIO6_NPIOW, GPIO7_NPIOS16, GPIO8_NPWAIT, GPIO29_GPIO, /* 
PRDY (READY GPIO) */ GPIO57_GPIO, /* PPEN (POWER GPIO) */ GPIO81_GPIO, /* PCD (DETECT GPIO) */ GPIO77_GPIO, /* PRST (RESET GPIO) */ GPIO53_GPIO, /* PBVD1 */ GPIO79_GPIO, /* PBVD2 */ GPIO54_GPIO, /* POE */ }; #else static mfp_cfg_t colibri_pxa320_evalboard_pin_config[] __initdata = {}; #endif #if defined(CONFIG_AX88796) #define COLIBRI_ETH_IRQ_GPIO mfp_to_gpio(GPIO36_GPIO) /* * Asix AX88796 Ethernet */ static struct ax_plat_data colibri_asix_platdata = { .flags = 0, /* defined later */ .wordlength = 2, }; static struct resource colibri_asix_resource[] = { [0] = { .start = PXA3xx_CS2_PHYS, .end = PXA3xx_CS2_PHYS + (0x20 * 2) - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO), .end = PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO), .flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING, } }; static struct platform_device asix_device = { .name = "ax88796", .id = 0, .num_resources = ARRAY_SIZE(colibri_asix_resource), .resource = colibri_asix_resource, .dev = { .platform_data = &colibri_asix_platdata } }; static mfp_cfg_t colibri_pxa320_eth_pin_config[] __initdata = { GPIO3_nCS2, /* AX88796 chip select */ GPIO36_GPIO | MFP_PULL_HIGH /* AX88796 IRQ */ }; static void __init colibri_pxa320_init_eth(void) { colibri_pxa3xx_init_eth(&colibri_asix_platdata); pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_eth_pin_config)); platform_device_register(&asix_device); } #else static inline void __init colibri_pxa320_init_eth(void) {} #endif /* CONFIG_AX88796 */ #if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE) static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = { .gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96), .gpio_pullup = -1, }; static struct platform_device colibri_pxa320_gpio_vbus = { .name = "gpio-vbus", .id = -1, .dev = { .platform_data = &colibri_pxa320_gpio_vbus_info, }, }; static void colibri_pxa320_udc_command(int cmd) { if (cmd == PXA2XX_UDC_CMD_CONNECT) UP2OCR = UP2OCR_HXOE | UP2OCR_DPPUE; else if (cmd == 
PXA2XX_UDC_CMD_DISCONNECT) UP2OCR = UP2OCR_HXOE; } static struct pxa2xx_udc_mach_info colibri_pxa320_udc_info __initdata = { .udc_command = colibri_pxa320_udc_command, .gpio_pullup = -1, }; static void __init colibri_pxa320_init_udc(void) { pxa_set_udc_info(&colibri_pxa320_udc_info); platform_device_register(&colibri_pxa320_gpio_vbus); } #else static inline void colibri_pxa320_init_udc(void) {} #endif #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static mfp_cfg_t colibri_pxa320_lcd_pin_config[] __initdata = { GPIO6_2_LCD_LDD_0, GPIO7_2_LCD_LDD_1, GPIO8_2_LCD_LDD_2, GPIO9_2_LCD_LDD_3, GPIO10_2_LCD_LDD_4, GPIO11_2_LCD_LDD_5, GPIO12_2_LCD_LDD_6, GPIO13_2_LCD_LDD_7, GPIO63_LCD_LDD_8, GPIO64_LCD_LDD_9, GPIO65_LCD_LDD_10, GPIO66_LCD_LDD_11, GPIO67_LCD_LDD_12, GPIO68_LCD_LDD_13, GPIO69_LCD_LDD_14, GPIO70_LCD_LDD_15, GPIO71_LCD_LDD_16, GPIO72_LCD_LDD_17, GPIO73_LCD_CS_N, GPIO74_LCD_VSYNC, GPIO14_2_LCD_FCLK, GPIO15_2_LCD_LCLK, GPIO16_2_LCD_PCLK, GPIO17_2_LCD_BIAS, }; static void __init colibri_pxa320_init_lcd(void) { pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_lcd_pin_config)); } #else static inline void colibri_pxa320_init_lcd(void) {} #endif #if defined(CONFIG_SND_AC97_CODEC) || \ defined(CONFIG_SND_AC97_CODEC_MODULE) static mfp_cfg_t colibri_pxa320_ac97_pin_config[] __initdata = { GPIO34_AC97_SYSCLK, GPIO35_AC97_SDATA_IN_0, GPIO37_AC97_SDATA_OUT, GPIO38_AC97_SYNC, GPIO39_AC97_BITCLK, GPIO40_AC97_nACRESET }; static inline void __init colibri_pxa320_init_ac97(void) { pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_ac97_pin_config)); pxa_set_ac97_info(NULL); } #else static inline void colibri_pxa320_init_ac97(void) {} #endif void __init colibri_pxa320_init(void) { colibri_pxa320_init_eth(); colibri_pxa3xx_init_nand(); colibri_pxa320_init_lcd(); colibri_pxa3xx_init_lcd(mfp_to_gpio(GPIO49_GPIO)); colibri_pxa320_init_ac97(); colibri_pxa320_init_udc(); /* Evalboard init */ pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_evalboard_pin_config)); 
colibri_evalboard_init(); } MACHINE_START(COLIBRI320, "Toradex Colibri PXA320") .atag_offset = 0x100, .init_machine = colibri_pxa320_init, .map_io = pxa3xx_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa3xx_init_irq, .handle_irq = pxa3xx_handle_irq, .timer = &pxa_timer, .restart = pxa_restart, MACHINE_END
gpl-2.0
schqiushui/kernel_kk442_sense_dlx
drivers/acpi/acpica/hwesleep.c
4855
8078
/****************************************************************************** * * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the * extended FADT-V5 sleep registers. * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_HARDWARE ACPI_MODULE_NAME("hwesleep") /******************************************************************************* * * FUNCTION: acpi_hw_execute_sleep_method * * PARAMETERS: method_pathname - Pathname of method to execute * integer_argument - Argument to pass to the method * * RETURN: None * * DESCRIPTION: Execute a sleep/wake related method with one integer argument * and no return value. * ******************************************************************************/ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument) { struct acpi_object_list arg_list; union acpi_object arg; acpi_status status; ACPI_FUNCTION_TRACE(hw_execute_sleep_method); /* One argument, integer_argument; No return value expected */ arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; arg.integer.value = (u64)integer_argument; status = acpi_evaluate_object(NULL, method_pathname, &arg_list, NULL); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { ACPI_EXCEPTION((AE_INFO, status, "While executing method %s", method_pathname)); } return_VOID; } /******************************************************************************* * * FUNCTION: acpi_hw_extended_sleep * * PARAMETERS: sleep_state - Which sleep state to enter * Flags - ACPI_EXECUTE_GTS to run optional method * * RETURN: Status * * DESCRIPTION: Enter a system sleep state via the extended FADT sleep * registers (V5 FADT). 
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED * ******************************************************************************/ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) { acpi_status status; u8 sleep_type_value; u64 sleep_status; ACPI_FUNCTION_TRACE(hw_extended_sleep); /* Extended sleep registers must be valid */ if (!acpi_gbl_FADT.sleep_control.address || !acpi_gbl_FADT.sleep_status.address) { return_ACPI_STATUS(AE_NOT_EXIST); } /* Clear wake status (WAK_STS) */ status = acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } acpi_gbl_system_awake_and_running = FALSE; /* Optionally execute _GTS (Going To Sleep) */ if (flags & ACPI_EXECUTE_GTS) { acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state); } /* Flush caches, as per ACPI specification */ ACPI_FLUSH_CPU_CACHE(); /* * Set the SLP_TYP and SLP_EN bits. * * Note: We only use the first value returned by the \_Sx method * (acpi_gbl_sleep_type_a) - As per ACPI specification. */ ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Entering sleep state [S%u]\n", sleep_state)); sleep_type_value = ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) & ACPI_X_SLEEP_TYPE_MASK); status = acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE), &acpi_gbl_FADT.sleep_control); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Wait for transition back to Working State */ do { status = acpi_read(&sleep_status, &acpi_gbl_FADT.sleep_status); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } while (!(((u8)sleep_status) & ACPI_X_WAKE_STATUS)); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_hw_extended_wake_prep * * PARAMETERS: sleep_state - Which sleep state we just exited * Flags - ACPI_EXECUTE_BFS to run optional method * * RETURN: Status * * DESCRIPTION: Perform first part of OS-independent ACPI cleanup after * a sleep. 
Called with interrupts ENABLED. * ******************************************************************************/ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) { acpi_status status; u8 sleep_type_value; ACPI_FUNCTION_TRACE(hw_extended_wake_prep); status = acpi_get_sleep_type_data(ACPI_STATE_S0, &acpi_gbl_sleep_type_a, &acpi_gbl_sleep_type_b); if (ACPI_SUCCESS(status)) { sleep_type_value = ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) & ACPI_X_SLEEP_TYPE_MASK); (void)acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE), &acpi_gbl_FADT.sleep_control); } /* Optionally execute _BFS (Back From Sleep) */ if (flags & ACPI_EXECUTE_BFS) { acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state); } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_hw_extended_wake * * PARAMETERS: sleep_state - Which sleep state we just exited * Flags - Reserved, set to zero * * RETURN: Status * * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep * Called with interrupts ENABLED. * ******************************************************************************/ acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags) { ACPI_FUNCTION_TRACE(hw_extended_wake); /* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */ acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID; /* Execute the wake methods */ acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WAKING); acpi_hw_execute_sleep_method(METHOD_PATHNAME__WAK, sleep_state); /* * Some BIOS code assumes that WAK_STS will be cleared on resume * and use it to determine whether the system is rebooting or * resuming. Clear WAK_STS for compatibility. */ (void)acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status); acpi_gbl_system_awake_and_running = TRUE; acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING); return_ACPI_STATUS(AE_OK); }
gpl-2.0
AndrewDB/rk3066-kernel
arch/xtensa/mm/tlb.c
4855
3174
/* * arch/xtensa/mm/tlb.c * * Logic that manipulates the Xtensa MMU. Derived from MIPS. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2003 Tensilica Inc. * * Joe Taylor * Chris Zankel <chris@zankel.net> * Marc Gauthier */ #include <linux/mm.h> #include <asm/processor.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/system.h> #include <asm/cacheflush.h> static inline void __flush_itlb_all (void) { int w, i; for (w = 0; w < ITLB_ARF_WAYS; w++) { for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) { int e = w + (i << PAGE_SHIFT); invalidate_itlb_entry_no_isync(e); } } asm volatile ("isync\n"); } static inline void __flush_dtlb_all (void) { int w, i; for (w = 0; w < DTLB_ARF_WAYS; w++) { for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) { int e = w + (i << PAGE_SHIFT); invalidate_dtlb_entry_no_isync(e); } } asm volatile ("isync\n"); } void flush_tlb_all (void) { __flush_itlb_all(); __flush_dtlb_all(); } /* If mm is current, we simply assign the current task a new ASID, thus, * invalidating all previous tlb entries. If mm is someone else's user mapping, * wie invalidate the context, thus, when that user mapping is swapped in, * a new context will be assigned to it. 
*/ void flush_tlb_mm(struct mm_struct *mm) { if (mm == current->active_mm) { int flags; local_save_flags(flags); __get_new_mmu_context(mm); __load_mmu_context(mm); local_irq_restore(flags); } else mm->context = 0; } #define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2) #define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2) #if _ITLB_ENTRIES > _DTLB_ENTRIES # define _TLB_ENTRIES _ITLB_ENTRIES #else # define _TLB_ENTRIES _DTLB_ENTRIES #endif void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; unsigned long flags; if (mm->context == NO_CONTEXT) return; #if 0 printk("[tlbrange<%02lx,%08lx,%08lx>]\n", (unsigned long)mm->context, start, end); #endif local_save_flags(flags); if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { int oldpid = get_rasid_register(); set_rasid_register (ASID_INSERT(mm->context)); start &= PAGE_MASK; if (vma->vm_flags & VM_EXEC) while(start < end) { invalidate_itlb_mapping(start); invalidate_dtlb_mapping(start); start += PAGE_SIZE; } else while(start < end) { invalidate_dtlb_mapping(start); start += PAGE_SIZE; } set_rasid_register(oldpid); } else { flush_tlb_mm(mm); } local_irq_restore(flags); } void flush_tlb_page (struct vm_area_struct *vma, unsigned long page) { struct mm_struct* mm = vma->vm_mm; unsigned long flags; int oldpid; if(mm->context == NO_CONTEXT) return; local_save_flags(flags); oldpid = get_rasid_register(); if (vma->vm_flags & VM_EXEC) invalidate_itlb_mapping(page); invalidate_dtlb_mapping(page); set_rasid_register(oldpid); local_irq_restore(flags); }
gpl-2.0
talnoah/Kangaroo_Kernel
drivers/video/cg6.c
8183
22315
/* cg6.c: CGSIX (GX, GXplus, TGX) frame buffer driver * * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net) * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * * Driver layout based loosely on tgafb.c, see that file for credits. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/fb.h> #include <linux/mm.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/fbio.h> #include "sbuslib.h" /* * Local functions. */ static int cg6_setcolreg(unsigned, unsigned, unsigned, unsigned, unsigned, struct fb_info *); static int cg6_blank(int, struct fb_info *); static void cg6_imageblit(struct fb_info *, const struct fb_image *); static void cg6_fillrect(struct fb_info *, const struct fb_fillrect *); static void cg6_copyarea(struct fb_info *info, const struct fb_copyarea *area); static int cg6_sync(struct fb_info *); static int cg6_mmap(struct fb_info *, struct vm_area_struct *); static int cg6_ioctl(struct fb_info *, unsigned int, unsigned long); static int cg6_pan_display(struct fb_var_screeninfo *, struct fb_info *); /* * Frame buffer operations */ static struct fb_ops cg6_ops = { .owner = THIS_MODULE, .fb_setcolreg = cg6_setcolreg, .fb_blank = cg6_blank, .fb_pan_display = cg6_pan_display, .fb_fillrect = cg6_fillrect, .fb_copyarea = cg6_copyarea, .fb_imageblit = cg6_imageblit, .fb_sync = cg6_sync, .fb_mmap = cg6_mmap, .fb_ioctl = cg6_ioctl, #ifdef CONFIG_COMPAT .fb_compat_ioctl = sbusfb_compat_ioctl, #endif }; /* Offset of interesting structures in the OBIO space */ /* * Brooktree is the video dac and is funny to program on the cg6. * (it's even funnier on the cg3) * The FBC could be the frame buffer control * The FHC could is the frame buffer hardware control. 
*/ #define CG6_ROM_OFFSET 0x0UL #define CG6_BROOKTREE_OFFSET 0x200000UL #define CG6_DHC_OFFSET 0x240000UL #define CG6_ALT_OFFSET 0x280000UL #define CG6_FHC_OFFSET 0x300000UL #define CG6_THC_OFFSET 0x301000UL #define CG6_FBC_OFFSET 0x700000UL #define CG6_TEC_OFFSET 0x701000UL #define CG6_RAM_OFFSET 0x800000UL /* FHC definitions */ #define CG6_FHC_FBID_SHIFT 24 #define CG6_FHC_FBID_MASK 255 #define CG6_FHC_REV_SHIFT 20 #define CG6_FHC_REV_MASK 15 #define CG6_FHC_FROP_DISABLE (1 << 19) #define CG6_FHC_ROW_DISABLE (1 << 18) #define CG6_FHC_SRC_DISABLE (1 << 17) #define CG6_FHC_DST_DISABLE (1 << 16) #define CG6_FHC_RESET (1 << 15) #define CG6_FHC_LITTLE_ENDIAN (1 << 13) #define CG6_FHC_RES_MASK (3 << 11) #define CG6_FHC_1024 (0 << 11) #define CG6_FHC_1152 (1 << 11) #define CG6_FHC_1280 (2 << 11) #define CG6_FHC_1600 (3 << 11) #define CG6_FHC_CPU_MASK (3 << 9) #define CG6_FHC_CPU_SPARC (0 << 9) #define CG6_FHC_CPU_68020 (1 << 9) #define CG6_FHC_CPU_386 (2 << 9) #define CG6_FHC_TEST (1 << 8) #define CG6_FHC_TEST_X_SHIFT 4 #define CG6_FHC_TEST_X_MASK 15 #define CG6_FHC_TEST_Y_SHIFT 0 #define CG6_FHC_TEST_Y_MASK 15 /* FBC mode definitions */ #define CG6_FBC_BLIT_IGNORE 0x00000000 #define CG6_FBC_BLIT_NOSRC 0x00100000 #define CG6_FBC_BLIT_SRC 0x00200000 #define CG6_FBC_BLIT_ILLEGAL 0x00300000 #define CG6_FBC_BLIT_MASK 0x00300000 #define CG6_FBC_VBLANK 0x00080000 #define CG6_FBC_MODE_IGNORE 0x00000000 #define CG6_FBC_MODE_COLOR8 0x00020000 #define CG6_FBC_MODE_COLOR1 0x00040000 #define CG6_FBC_MODE_HRMONO 0x00060000 #define CG6_FBC_MODE_MASK 0x00060000 #define CG6_FBC_DRAW_IGNORE 0x00000000 #define CG6_FBC_DRAW_RENDER 0x00008000 #define CG6_FBC_DRAW_PICK 0x00010000 #define CG6_FBC_DRAW_ILLEGAL 0x00018000 #define CG6_FBC_DRAW_MASK 0x00018000 #define CG6_FBC_BWRITE0_IGNORE 0x00000000 #define CG6_FBC_BWRITE0_ENABLE 0x00002000 #define CG6_FBC_BWRITE0_DISABLE 0x00004000 #define CG6_FBC_BWRITE0_ILLEGAL 0x00006000 #define CG6_FBC_BWRITE0_MASK 0x00006000 #define 
CG6_FBC_BWRITE1_IGNORE 0x00000000 #define CG6_FBC_BWRITE1_ENABLE 0x00000800 #define CG6_FBC_BWRITE1_DISABLE 0x00001000 #define CG6_FBC_BWRITE1_ILLEGAL 0x00001800 #define CG6_FBC_BWRITE1_MASK 0x00001800 #define CG6_FBC_BREAD_IGNORE 0x00000000 #define CG6_FBC_BREAD_0 0x00000200 #define CG6_FBC_BREAD_1 0x00000400 #define CG6_FBC_BREAD_ILLEGAL 0x00000600 #define CG6_FBC_BREAD_MASK 0x00000600 #define CG6_FBC_BDISP_IGNORE 0x00000000 #define CG6_FBC_BDISP_0 0x00000080 #define CG6_FBC_BDISP_1 0x00000100 #define CG6_FBC_BDISP_ILLEGAL 0x00000180 #define CG6_FBC_BDISP_MASK 0x00000180 #define CG6_FBC_INDEX_MOD 0x00000040 #define CG6_FBC_INDEX_MASK 0x00000030 /* THC definitions */ #define CG6_THC_MISC_REV_SHIFT 16 #define CG6_THC_MISC_REV_MASK 15 #define CG6_THC_MISC_RESET (1 << 12) #define CG6_THC_MISC_VIDEO (1 << 10) #define CG6_THC_MISC_SYNC (1 << 9) #define CG6_THC_MISC_VSYNC (1 << 8) #define CG6_THC_MISC_SYNC_ENAB (1 << 7) #define CG6_THC_MISC_CURS_RES (1 << 6) #define CG6_THC_MISC_INT_ENAB (1 << 5) #define CG6_THC_MISC_INT (1 << 4) #define CG6_THC_MISC_INIT 0x9f #define CG6_THC_CURSOFF ((65536-32) | ((65536-32) << 16)) /* The contents are unknown */ struct cg6_tec { int tec_matrix; int tec_clip; int tec_vdc; }; struct cg6_thc { u32 thc_pad0[512]; u32 thc_hs; /* hsync timing */ u32 thc_hsdvs; u32 thc_hd; u32 thc_vs; /* vsync timing */ u32 thc_vd; u32 thc_refresh; u32 thc_misc; u32 thc_pad1[56]; u32 thc_cursxy; /* cursor x,y position (16 bits each) */ u32 thc_cursmask[32]; /* cursor mask bits */ u32 thc_cursbits[32]; /* what to show where mask enabled */ }; struct cg6_fbc { u32 xxx0[1]; u32 mode; u32 clip; u32 xxx1[1]; u32 s; u32 draw; u32 blit; u32 font; u32 xxx2[24]; u32 x0, y0, z0, color0; u32 x1, y1, z1, color1; u32 x2, y2, z2, color2; u32 x3, y3, z3, color3; u32 offx, offy; u32 xxx3[2]; u32 incx, incy; u32 xxx4[2]; u32 clipminx, clipminy; u32 xxx5[2]; u32 clipmaxx, clipmaxy; u32 xxx6[2]; u32 fg; u32 bg; u32 alu; u32 pm; u32 pixelm; u32 xxx7[2]; u32 patalign; u32 
pattern[8]; u32 xxx8[432]; u32 apointx, apointy, apointz; u32 xxx9[1]; u32 rpointx, rpointy, rpointz; u32 xxx10[5]; u32 pointr, pointg, pointb, pointa; u32 alinex, aliney, alinez; u32 xxx11[1]; u32 rlinex, rliney, rlinez; u32 xxx12[5]; u32 liner, lineg, lineb, linea; u32 atrix, atriy, atriz; u32 xxx13[1]; u32 rtrix, rtriy, rtriz; u32 xxx14[5]; u32 trir, trig, trib, tria; u32 aquadx, aquady, aquadz; u32 xxx15[1]; u32 rquadx, rquady, rquadz; u32 xxx16[5]; u32 quadr, quadg, quadb, quada; u32 arectx, arecty, arectz; u32 xxx17[1]; u32 rrectx, rrecty, rrectz; u32 xxx18[5]; u32 rectr, rectg, rectb, recta; }; struct bt_regs { u32 addr; u32 color_map; u32 control; u32 cursor; }; struct cg6_par { spinlock_t lock; struct bt_regs __iomem *bt; struct cg6_fbc __iomem *fbc; struct cg6_thc __iomem *thc; struct cg6_tec __iomem *tec; u32 __iomem *fhc; u32 flags; #define CG6_FLAG_BLANKED 0x00000001 unsigned long which_io; }; static int cg6_sync(struct fb_info *info) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_fbc __iomem *fbc = par->fbc; int limit = 10000; do { if (!(sbus_readl(&fbc->s) & 0x10000000)) break; udelay(10); } while (--limit > 0); return 0; } static void cg6_switch_from_graph(struct cg6_par *par) { struct cg6_thc __iomem *thc = par->thc; unsigned long flags; spin_lock_irqsave(&par->lock, flags); /* Hide the cursor. */ sbus_writel(CG6_THC_CURSOFF, &thc->thc_cursxy); spin_unlock_irqrestore(&par->lock, flags); } static int cg6_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct cg6_par *par = (struct cg6_par *)info->par; /* We just use this to catch switches out of * graphics mode. */ cg6_switch_from_graph(par); if (var->xoffset || var->yoffset || var->vmode) return -EINVAL; return 0; } /** * cg6_fillrect - Draws a rectangle on the screen. * * @info: frame buffer structure that represents a single frame buffer * @rect: structure defining the rectagle and operation. 
*/ static void cg6_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_fbc __iomem *fbc = par->fbc; unsigned long flags; s32 val; /* CG6 doesn't handle ROP_XOR */ spin_lock_irqsave(&par->lock, flags); cg6_sync(info); sbus_writel(rect->color, &fbc->fg); sbus_writel(~(u32)0, &fbc->pixelm); sbus_writel(0xea80ff00, &fbc->alu); sbus_writel(0, &fbc->s); sbus_writel(0, &fbc->clip); sbus_writel(~(u32)0, &fbc->pm); sbus_writel(rect->dy, &fbc->arecty); sbus_writel(rect->dx, &fbc->arectx); sbus_writel(rect->dy + rect->height, &fbc->arecty); sbus_writel(rect->dx + rect->width, &fbc->arectx); do { val = sbus_readl(&fbc->draw); } while (val < 0 && (val & 0x20000000)); spin_unlock_irqrestore(&par->lock, flags); } /** * cg6_copyarea - Copies one area of the screen to another area. * * @info: frame buffer structure that represents a single frame buffer * @area: Structure providing the data to copy the framebuffer contents * from one region to another. * * This drawing operation copies a rectangular area from one area of the * screen to another area. 
*/ static void cg6_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_fbc __iomem *fbc = par->fbc; unsigned long flags; int i; spin_lock_irqsave(&par->lock, flags); cg6_sync(info); sbus_writel(0xff, &fbc->fg); sbus_writel(0x00, &fbc->bg); sbus_writel(~0, &fbc->pixelm); sbus_writel(0xe880cccc, &fbc->alu); sbus_writel(0, &fbc->s); sbus_writel(0, &fbc->clip); sbus_writel(area->sy, &fbc->y0); sbus_writel(area->sx, &fbc->x0); sbus_writel(area->sy + area->height - 1, &fbc->y1); sbus_writel(area->sx + area->width - 1, &fbc->x1); sbus_writel(area->dy, &fbc->y2); sbus_writel(area->dx, &fbc->x2); sbus_writel(area->dy + area->height - 1, &fbc->y3); sbus_writel(area->dx + area->width - 1, &fbc->x3); do { i = sbus_readl(&fbc->blit); } while (i < 0 && (i & 0x20000000)); spin_unlock_irqrestore(&par->lock, flags); } /** * cg6_imageblit - Copies a image from system memory to the screen. * * @info: frame buffer structure that represents a single frame buffer * @image: structure defining the image. 
*/ static void cg6_imageblit(struct fb_info *info, const struct fb_image *image) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_fbc __iomem *fbc = par->fbc; const u8 *data = image->data; unsigned long flags; u32 x, y; int i, width; if (image->depth > 1) { cfb_imageblit(info, image); return; } spin_lock_irqsave(&par->lock, flags); cg6_sync(info); sbus_writel(image->fg_color, &fbc->fg); sbus_writel(image->bg_color, &fbc->bg); sbus_writel(0x140000, &fbc->mode); sbus_writel(0xe880fc30, &fbc->alu); sbus_writel(~(u32)0, &fbc->pixelm); sbus_writel(0, &fbc->s); sbus_writel(0, &fbc->clip); sbus_writel(0xff, &fbc->pm); sbus_writel(32, &fbc->incx); sbus_writel(0, &fbc->incy); x = image->dx; y = image->dy; for (i = 0; i < image->height; i++) { width = image->width; while (width >= 32) { u32 val; sbus_writel(y, &fbc->y0); sbus_writel(x, &fbc->x0); sbus_writel(x + 32 - 1, &fbc->x1); val = ((u32)data[0] << 24) | ((u32)data[1] << 16) | ((u32)data[2] << 8) | ((u32)data[3] << 0); sbus_writel(val, &fbc->font); data += 4; x += 32; width -= 32; } if (width) { u32 val; sbus_writel(y, &fbc->y0); sbus_writel(x, &fbc->x0); sbus_writel(x + width - 1, &fbc->x1); if (width <= 8) { val = (u32) data[0] << 24; data += 1; } else if (width <= 16) { val = ((u32) data[0] << 24) | ((u32) data[1] << 16); data += 2; } else { val = ((u32) data[0] << 24) | ((u32) data[1] << 16) | ((u32) data[2] << 8); data += 3; } sbus_writel(val, &fbc->font); } y += 1; x = image->dx; } spin_unlock_irqrestore(&par->lock, flags); } /** * cg6_setcolreg - Sets a color register. * * @regno: boolean, 0 copy local, 1 get_user() function * @red: frame buffer colormap structure * @green: The green value which can be up to 16 bits wide * @blue: The blue value which can be up to 16 bits wide. * @transp: If supported the alpha value which can be up to 16 bits wide. 
* @info: frame buffer info structure */ static int cg6_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct cg6_par *par = (struct cg6_par *)info->par; struct bt_regs __iomem *bt = par->bt; unsigned long flags; if (regno >= 256) return 1; red >>= 8; green >>= 8; blue >>= 8; spin_lock_irqsave(&par->lock, flags); sbus_writel((u32)regno << 24, &bt->addr); sbus_writel((u32)red << 24, &bt->color_map); sbus_writel((u32)green << 24, &bt->color_map); sbus_writel((u32)blue << 24, &bt->color_map); spin_unlock_irqrestore(&par->lock, flags); return 0; } /** * cg6_blank - Blanks the display. * * @blank_mode: the blank mode we want. * @info: frame buffer structure that represents a single frame buffer */ static int cg6_blank(int blank, struct fb_info *info) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_thc __iomem *thc = par->thc; unsigned long flags; u32 val; spin_lock_irqsave(&par->lock, flags); val = sbus_readl(&thc->thc_misc); switch (blank) { case FB_BLANK_UNBLANK: /* Unblanking */ val |= CG6_THC_MISC_VIDEO; par->flags &= ~CG6_FLAG_BLANKED; break; case FB_BLANK_NORMAL: /* Normal blanking */ case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ case FB_BLANK_POWERDOWN: /* Poweroff */ val &= ~CG6_THC_MISC_VIDEO; par->flags |= CG6_FLAG_BLANKED; break; } sbus_writel(val, &thc->thc_misc); spin_unlock_irqrestore(&par->lock, flags); return 0; } static struct sbus_mmap_map cg6_mmap_map[] = { { .voff = CG6_FBC, .poff = CG6_FBC_OFFSET, .size = PAGE_SIZE }, { .voff = CG6_TEC, .poff = CG6_TEC_OFFSET, .size = PAGE_SIZE }, { .voff = CG6_BTREGS, .poff = CG6_BROOKTREE_OFFSET, .size = PAGE_SIZE }, { .voff = CG6_FHC, .poff = CG6_FHC_OFFSET, .size = PAGE_SIZE }, { .voff = CG6_THC, .poff = CG6_THC_OFFSET, .size = PAGE_SIZE }, { .voff = CG6_ROM, .poff = CG6_ROM_OFFSET, .size = 0x10000 }, { .voff = CG6_RAM, .poff = CG6_RAM_OFFSET, .size = 
SBUS_MMAP_FBSIZE(1) }, { .voff = CG6_DHC, .poff = CG6_DHC_OFFSET, .size = 0x40000 }, { .size = 0 } }; static int cg6_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct cg6_par *par = (struct cg6_par *)info->par; return sbusfb_mmap_helper(cg6_mmap_map, info->fix.smem_start, info->fix.smem_len, par->which_io, vma); } static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { return sbusfb_ioctl_helper(cmd, arg, info, FBTYPE_SUNFAST_COLOR, 8, info->fix.smem_len); } /* * Initialisation */ static void __devinit cg6_init_fix(struct fb_info *info, int linebytes) { struct cg6_par *par = (struct cg6_par *)info->par; const char *cg6_cpu_name, *cg6_card_name; u32 conf; conf = sbus_readl(par->fhc); switch (conf & CG6_FHC_CPU_MASK) { case CG6_FHC_CPU_SPARC: cg6_cpu_name = "sparc"; break; case CG6_FHC_CPU_68020: cg6_cpu_name = "68020"; break; default: cg6_cpu_name = "i386"; break; }; if (((conf >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK) >= 11) { if (info->fix.smem_len <= 0x100000) cg6_card_name = "TGX"; else cg6_card_name = "TGX+"; } else { if (info->fix.smem_len <= 0x100000) cg6_card_name = "GX"; else cg6_card_name = "GX+"; } sprintf(info->fix.id, "%s %s", cg6_card_name, cg6_cpu_name); info->fix.id[sizeof(info->fix.id) - 1] = 0; info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->fix.line_length = linebytes; info->fix.accel = FB_ACCEL_SUN_CGSIX; } /* Initialize Brooktree DAC */ static void __devinit cg6_bt_init(struct cg6_par *par) { struct bt_regs __iomem *bt = par->bt; sbus_writel(0x04 << 24, &bt->addr); /* color planes */ sbus_writel(0xff << 24, &bt->control); sbus_writel(0x05 << 24, &bt->addr); sbus_writel(0x00 << 24, &bt->control); sbus_writel(0x06 << 24, &bt->addr); /* overlay plane */ sbus_writel(0x73 << 24, &bt->control); sbus_writel(0x07 << 24, &bt->addr); sbus_writel(0x00 << 24, &bt->control); } static void __devinit cg6_chip_init(struct fb_info *info) { struct cg6_par *par = (struct cg6_par 
*)info->par; struct cg6_tec __iomem *tec = par->tec; struct cg6_fbc __iomem *fbc = par->fbc; struct cg6_thc __iomem *thc = par->thc; u32 rev, conf, mode; int i; /* Hide the cursor. */ sbus_writel(CG6_THC_CURSOFF, &thc->thc_cursxy); /* Turn off stuff in the Transform Engine. */ sbus_writel(0, &tec->tec_matrix); sbus_writel(0, &tec->tec_clip); sbus_writel(0, &tec->tec_vdc); /* Take care of bugs in old revisions. */ rev = (sbus_readl(par->fhc) >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK; if (rev < 5) { conf = (sbus_readl(par->fhc) & CG6_FHC_RES_MASK) | CG6_FHC_CPU_68020 | CG6_FHC_TEST | (11 << CG6_FHC_TEST_X_SHIFT) | (11 << CG6_FHC_TEST_Y_SHIFT); if (rev < 2) conf |= CG6_FHC_DST_DISABLE; sbus_writel(conf, par->fhc); } /* Set things in the FBC. Bad things appear to happen if we do * back to back store/loads on the mode register, so copy it * out instead. */ mode = sbus_readl(&fbc->mode); do { i = sbus_readl(&fbc->s); } while (i & 0x10000000); mode &= ~(CG6_FBC_BLIT_MASK | CG6_FBC_MODE_MASK | CG6_FBC_DRAW_MASK | CG6_FBC_BWRITE0_MASK | CG6_FBC_BWRITE1_MASK | CG6_FBC_BREAD_MASK | CG6_FBC_BDISP_MASK); mode |= (CG6_FBC_BLIT_SRC | CG6_FBC_MODE_COLOR8 | CG6_FBC_DRAW_RENDER | CG6_FBC_BWRITE0_ENABLE | CG6_FBC_BWRITE1_DISABLE | CG6_FBC_BREAD_0 | CG6_FBC_BDISP_0); sbus_writel(mode, &fbc->mode); sbus_writel(0, &fbc->clip); sbus_writel(0, &fbc->offx); sbus_writel(0, &fbc->offy); sbus_writel(0, &fbc->clipminx); sbus_writel(0, &fbc->clipminy); sbus_writel(info->var.xres - 1, &fbc->clipmaxx); sbus_writel(info->var.yres - 1, &fbc->clipmaxy); } static void cg6_unmap_regs(struct platform_device *op, struct fb_info *info, struct cg6_par *par) { if (par->fbc) of_iounmap(&op->resource[0], par->fbc, 4096); if (par->tec) of_iounmap(&op->resource[0], par->tec, sizeof(struct cg6_tec)); if (par->thc) of_iounmap(&op->resource[0], par->thc, sizeof(struct cg6_thc)); if (par->bt) of_iounmap(&op->resource[0], par->bt, sizeof(struct bt_regs)); if (par->fhc) of_iounmap(&op->resource[0], par->fhc, 
sizeof(u32)); if (info->screen_base) of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len); } static int __devinit cg6_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; struct fb_info *info; struct cg6_par *par; int linebytes, err; int dblbuf; info = framebuffer_alloc(sizeof(struct cg6_par), &op->dev); err = -ENOMEM; if (!info) goto out_err; par = info->par; spin_lock_init(&par->lock); info->fix.smem_start = op->resource[0].start; par->which_io = op->resource[0].flags & IORESOURCE_BITS; sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; linebytes = of_getintprop_default(dp, "linebytes", info->var.xres); info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres); dblbuf = of_getintprop_default(dp, "dblbuf", 0); if (dblbuf) info->fix.smem_len *= 4; par->fbc = of_ioremap(&op->resource[0], CG6_FBC_OFFSET, 4096, "cgsix fbc"); par->tec = of_ioremap(&op->resource[0], CG6_TEC_OFFSET, sizeof(struct cg6_tec), "cgsix tec"); par->thc = of_ioremap(&op->resource[0], CG6_THC_OFFSET, sizeof(struct cg6_thc), "cgsix thc"); par->bt = of_ioremap(&op->resource[0], CG6_BROOKTREE_OFFSET, sizeof(struct bt_regs), "cgsix dac"); par->fhc = of_ioremap(&op->resource[0], CG6_FHC_OFFSET, sizeof(u32), "cgsix fhc"); info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_READS_FAST; info->fbops = &cg6_ops; info->screen_base = of_ioremap(&op->resource[0], CG6_RAM_OFFSET, info->fix.smem_len, "cgsix ram"); if (!par->fbc || !par->tec || !par->thc || !par->bt || !par->fhc || !info->screen_base) goto out_unmap_regs; info->var.accel_flags = FB_ACCELF_TEXT; cg6_bt_init(par); cg6_chip_init(info); cg6_blank(FB_BLANK_UNBLANK, info); if (fb_alloc_cmap(&info->cmap, 256, 0)) goto out_unmap_regs; fb_set_cmap(&info->cmap, info); cg6_init_fix(info, linebytes); err = register_framebuffer(info); if (err < 0) goto out_dealloc_cmap; 
dev_set_drvdata(&op->dev, info); printk(KERN_INFO "%s: CGsix [%s] at %lx:%lx\n", dp->full_name, info->fix.id, par->which_io, info->fix.smem_start); return 0; out_dealloc_cmap: fb_dealloc_cmap(&info->cmap); out_unmap_regs: cg6_unmap_regs(op, info, par); framebuffer_release(info); out_err: return err; } static int __devexit cg6_remove(struct platform_device *op) { struct fb_info *info = dev_get_drvdata(&op->dev); struct cg6_par *par = info->par; unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); cg6_unmap_regs(op, info, par); framebuffer_release(info); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id cg6_match[] = { { .name = "cgsix", }, { .name = "cgthree+", }, {}, }; MODULE_DEVICE_TABLE(of, cg6_match); static struct platform_driver cg6_driver = { .driver = { .name = "cg6", .owner = THIS_MODULE, .of_match_table = cg6_match, }, .probe = cg6_probe, .remove = __devexit_p(cg6_remove), }; static int __init cg6_init(void) { if (fb_get_options("cg6fb", NULL)) return -ENODEV; return platform_driver_register(&cg6_driver); } static void __exit cg6_exit(void) { platform_driver_unregister(&cg6_driver); } module_init(cg6_init); module_exit(cg6_exit); MODULE_DESCRIPTION("framebuffer driver for CGsix chipsets"); MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
omnirom/android_kernel_oppo_apq8064
drivers/media/common/tuners/mt2131.c
8439
7983
/* * Driver for Microtune MT2131 "QAM/8VSB single chip tuner" * * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/dvb/frontend.h> #include <linux/i2c.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "mt2131.h" #include "mt2131_priv.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); #define dprintk(level,fmt, arg...) 
if (debug >= level) \ printk(KERN_INFO "%s: " fmt, "mt2131", ## arg) static u8 mt2131_config1[] = { 0x01, 0x50, 0x00, 0x50, 0x80, 0x00, 0x49, 0xfa, 0x88, 0x08, 0x77, 0x41, 0x04, 0x00, 0x00, 0x00, 0x32, 0x7f, 0xda, 0x4c, 0x00, 0x10, 0xaa, 0x78, 0x80, 0xff, 0x68, 0xa0, 0xff, 0xdd, 0x00, 0x00 }; static u8 mt2131_config2[] = { 0x10, 0x7f, 0xc8, 0x0a, 0x5f, 0x00, 0x04 }; static int mt2131_readreg(struct mt2131_priv *priv, u8 reg, u8 *val) { struct i2c_msg msg[2] = { { .addr = priv->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 }, { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 }, }; if (i2c_transfer(priv->i2c, msg, 2) != 2) { printk(KERN_WARNING "mt2131 I2C read failed\n"); return -EREMOTEIO; } return 0; } static int mt2131_writereg(struct mt2131_priv *priv, u8 reg, u8 val) { u8 buf[2] = { reg, val }; struct i2c_msg msg = { .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2 }; if (i2c_transfer(priv->i2c, &msg, 1) != 1) { printk(KERN_WARNING "mt2131 I2C write failed\n"); return -EREMOTEIO; } return 0; } static int mt2131_writeregs(struct mt2131_priv *priv,u8 *buf, u8 len) { struct i2c_msg msg = { .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = len }; if (i2c_transfer(priv->i2c, &msg, 1) != 1) { printk(KERN_WARNING "mt2131 I2C write failed (len=%i)\n", (int)len); return -EREMOTEIO; } return 0; } static int mt2131_set_params(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct mt2131_priv *priv; int ret=0, i; u32 freq; u8 if_band_center; u32 f_lo1, f_lo2; u32 div1, num1, div2, num2; u8 b[8]; u8 lockval = 0; priv = fe->tuner_priv; freq = c->frequency / 1000; /* Hz -> kHz */ dprintk(1, "%s() freq=%d\n", __func__, freq); f_lo1 = freq + MT2131_IF1 * 1000; f_lo1 = (f_lo1 / 250) * 250; f_lo2 = f_lo1 - freq - MT2131_IF2; priv->frequency = (f_lo1 - f_lo2 - MT2131_IF2) * 1000; /* Frequency LO1 = 16MHz * (DIV1 + NUM1/8192 ) */ num1 = f_lo1 * 64 / (MT2131_FREF / 128); div1 = num1 / 
8192; num1 &= 0x1fff; /* Frequency LO2 = 16MHz * (DIV2 + NUM2/8192 ) */ num2 = f_lo2 * 64 / (MT2131_FREF / 128); div2 = num2 / 8192; num2 &= 0x1fff; if (freq <= 82500) if_band_center = 0x00; else if (freq <= 137500) if_band_center = 0x01; else if (freq <= 192500) if_band_center = 0x02; else if (freq <= 247500) if_band_center = 0x03; else if (freq <= 302500) if_band_center = 0x04; else if (freq <= 357500) if_band_center = 0x05; else if (freq <= 412500) if_band_center = 0x06; else if (freq <= 467500) if_band_center = 0x07; else if (freq <= 522500) if_band_center = 0x08; else if (freq <= 577500) if_band_center = 0x09; else if (freq <= 632500) if_band_center = 0x0A; else if (freq <= 687500) if_band_center = 0x0B; else if (freq <= 742500) if_band_center = 0x0C; else if (freq <= 797500) if_band_center = 0x0D; else if (freq <= 852500) if_band_center = 0x0E; else if (freq <= 907500) if_band_center = 0x0F; else if (freq <= 962500) if_band_center = 0x10; else if (freq <= 1017500) if_band_center = 0x11; else if (freq <= 1072500) if_band_center = 0x12; else if_band_center = 0x13; b[0] = 1; b[1] = (num1 >> 5) & 0xFF; b[2] = (num1 & 0x1F); b[3] = div1; b[4] = (num2 >> 5) & 0xFF; b[5] = num2 & 0x1F; b[6] = div2; dprintk(1, "IF1: %dMHz IF2: %dMHz\n", MT2131_IF1, MT2131_IF2); dprintk(1, "PLL freq=%dkHz band=%d\n", (int)freq, (int)if_band_center); dprintk(1, "PLL f_lo1=%dkHz f_lo2=%dkHz\n", (int)f_lo1, (int)f_lo2); dprintk(1, "PLL div1=%d num1=%d div2=%d num2=%d\n", (int)div1, (int)num1, (int)div2, (int)num2); dprintk(1, "PLL [1..6]: %2x %2x %2x %2x %2x %2x\n", (int)b[1], (int)b[2], (int)b[3], (int)b[4], (int)b[5], (int)b[6]); ret = mt2131_writeregs(priv,b,7); if (ret < 0) return ret; mt2131_writereg(priv, 0x0b, if_band_center); /* Wait for lock */ i = 0; do { mt2131_readreg(priv, 0x08, &lockval); if ((lockval & 0x88) == 0x88) break; msleep(4); i++; } while (i < 10); return ret; } static int mt2131_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct mt2131_priv *priv = 
fe->tuner_priv; dprintk(1, "%s()\n", __func__); *frequency = priv->frequency; return 0; } static int mt2131_get_status(struct dvb_frontend *fe, u32 *status) { struct mt2131_priv *priv = fe->tuner_priv; u8 lock_status = 0; u8 afc_status = 0; *status = 0; mt2131_readreg(priv, 0x08, &lock_status); if ((lock_status & 0x88) == 0x88) *status = TUNER_STATUS_LOCKED; mt2131_readreg(priv, 0x09, &afc_status); dprintk(1, "%s() - LO Status = 0x%x, AFC Status = 0x%x\n", __func__, lock_status, afc_status); return 0; } static int mt2131_init(struct dvb_frontend *fe) { struct mt2131_priv *priv = fe->tuner_priv; int ret; dprintk(1, "%s()\n", __func__); if ((ret = mt2131_writeregs(priv, mt2131_config1, sizeof(mt2131_config1))) < 0) return ret; mt2131_writereg(priv, 0x0b, 0x09); mt2131_writereg(priv, 0x15, 0x47); mt2131_writereg(priv, 0x07, 0xf2); mt2131_writereg(priv, 0x0b, 0x01); if ((ret = mt2131_writeregs(priv, mt2131_config2, sizeof(mt2131_config2))) < 0) return ret; return ret; } static int mt2131_release(struct dvb_frontend *fe) { dprintk(1, "%s()\n", __func__); kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static const struct dvb_tuner_ops mt2131_tuner_ops = { .info = { .name = "Microtune MT2131", .frequency_min = 48000000, .frequency_max = 860000000, .frequency_step = 50000, }, .release = mt2131_release, .init = mt2131_init, .set_params = mt2131_set_params, .get_frequency = mt2131_get_frequency, .get_status = mt2131_get_status }; struct dvb_frontend * mt2131_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2131_config *cfg, u16 if1) { struct mt2131_priv *priv = NULL; u8 id = 0; dprintk(1, "%s()\n", __func__); priv = kzalloc(sizeof(struct mt2131_priv), GFP_KERNEL); if (priv == NULL) return NULL; priv->cfg = cfg; priv->i2c = i2c; if (mt2131_readreg(priv, 0, &id) != 0) { kfree(priv); return NULL; } if ( (id != 0x3E) && (id != 0x3F) ) { printk(KERN_ERR "MT2131: Device not found at addr 0x%02x\n", cfg->i2c_address); kfree(priv); return NULL; } 
printk(KERN_INFO "MT2131: successfully identified at address 0x%02x\n", cfg->i2c_address); memcpy(&fe->ops.tuner_ops, &mt2131_tuner_ops, sizeof(struct dvb_tuner_ops)); fe->tuner_priv = priv; return fe; } EXPORT_SYMBOL(mt2131_attach); MODULE_AUTHOR("Steven Toth"); MODULE_DESCRIPTION("Microtune MT2131 silicon tuner driver"); MODULE_LICENSE("GPL"); /* * Local variables: * c-basic-offset: 8 */
gpl-2.0
jeboo/kernel_KK_i9505_NB8
drivers/macintosh/via-pmu68k.c
9463
19884
/* * Device driver for the PMU on 68K-based Apple PowerBooks * * The VIA (versatile interface adapter) interfaces to the PMU, * a 6805 microprocessor core whose primary function is to control * battery charging and system power on the PowerBooks. * The PMU also controls the ADB (Apple Desktop Bus) which connects * to the keyboard and mouse, as well as the non-volatile RAM * and the RTC (real time clock) chip. * * Adapted for 68K PMU by Joshua M. Thompson * * Based largely on the PowerMac PMU code by Paul Mackerras and * Fabio Riccardi. * * Also based on the PMU driver from MkLinux by Apple Computer, Inc. * and the Open Software Foundation, Inc. */ #include <stdarg.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/miscdevice.h> #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/cuda.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/uaccess.h> /* Misc minor number allocated for /dev/pmu */ #define PMU_MINOR 154 /* VIA registers - spaced 0x200 bytes apart */ #define RS 0x200 /* skip between registers */ #define B 0 /* B-side data */ #define A RS /* A-side data */ #define DIRB (2*RS) /* B-side direction (1=output) */ #define DIRA (3*RS) /* A-side direction (1=output) */ #define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */ #define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */ #define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */ #define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */ #define T2CL (8*RS) /* Timer 2 ctr/latch (low 8 bits) */ #define T2CH (9*RS) /* Timer 2 counter (high 8 bits) */ #define SR (10*RS) /* Shift register */ #define ACR (11*RS) /* Auxiliary control register */ #define PCR (12*RS) /* Peripheral control register */ #define IFR (13*RS) /* Interrupt flag register */ #define IER 
(14*RS) /* Interrupt enable register */ #define ANH (15*RS) /* A-side data, no handshake */ /* Bits in B data register: both active low */ #define TACK 0x02 /* Transfer acknowledge (input) */ #define TREQ 0x04 /* Transfer request (output) */ /* Bits in ACR */ #define SR_CTRL 0x1c /* Shift register control bits */ #define SR_EXT 0x0c /* Shift on external clock */ #define SR_OUT 0x10 /* Shift out if 1 */ /* Bits in IFR and IER */ #define SR_INT 0x04 /* Shift register full/empty */ #define CB1_INT 0x10 /* transition on CB1 input */ static enum pmu_state { idle, sending, intack, reading, reading_intr, } pmu_state; static struct adb_request *current_req; static struct adb_request *last_req; static struct adb_request *req_awaiting_reply; static unsigned char interrupt_data[32]; static unsigned char *reply_ptr; static int data_index; static int data_len; static int adb_int_pending; static int pmu_adb_flags; static int adb_dev_map; static struct adb_request bright_req_1, bright_req_2, bright_req_3; static int pmu_kind = PMU_UNKNOWN; static int pmu_fully_inited; int asleep; static int pmu_probe(void); static int pmu_init(void); static void pmu_start(void); static irqreturn_t pmu_interrupt(int irq, void *arg); static int pmu_send_request(struct adb_request *req, int sync); static int pmu_autopoll(int devs); void pmu_poll(void); static int pmu_reset_bus(void); static void pmu_start(void); static void send_byte(int x); static void recv_byte(void); static void pmu_done(struct adb_request *req); static void pmu_handle_data(unsigned char *data, int len); static void set_volume(int level); static void pmu_enable_backlight(int on); static void pmu_set_brightness(int level); struct adb_driver via_pmu_driver = { "68K PMU", pmu_probe, pmu_init, pmu_send_request, pmu_autopoll, pmu_poll, pmu_reset_bus }; /* * This table indicates for each PMU opcode: * - the number of data bytes to be sent with the command, or -1 * if a length byte should be sent, * - the number of response bytes which 
the PMU will return, or * -1 if it will send a length byte. */ static s8 pmu_data_len[256][2] = { /* 0 1 2 3 4 5 6 7 */ /*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*10*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*18*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0, 0}, /*20*/ {-1, 0},{ 0, 0},{ 2, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0}, /*28*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0,-1}, /*30*/ { 4, 0},{20, 0},{-1, 0},{ 3, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*38*/ { 0, 4},{ 0,20},{ 2,-1},{ 2, 1},{ 3,-1},{-1,-1},{-1,-1},{ 4, 0}, /*40*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*48*/ { 0, 1},{ 0, 1},{-1,-1},{ 1, 0},{ 1, 0},{-1,-1},{-1,-1},{-1,-1}, /*50*/ { 1, 0},{ 0, 0},{ 2, 0},{ 2, 0},{-1, 0},{ 1, 0},{ 3, 0},{ 1, 0}, /*58*/ { 0, 1},{ 1, 0},{ 0, 2},{ 0, 2},{ 0,-1},{-1,-1},{-1,-1},{-1,-1}, /*60*/ { 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*68*/ { 0, 3},{ 0, 3},{ 0, 2},{ 0, 8},{ 0,-1},{ 0,-1},{-1,-1},{-1,-1}, /*70*/ { 1, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*78*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{ 5, 1},{ 4, 1},{ 4, 1}, /*80*/ { 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*88*/ { 0, 5},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*90*/ { 1, 0},{ 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*98*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*a0*/ { 2, 0},{ 2, 0},{ 2, 0},{ 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0}, /*a8*/ { 1, 1},{ 1, 0},{ 3, 0},{ 2, 0},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*b0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*b8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*c0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*c8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*d0*/ 
{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*d8*/ { 1, 1},{ 1, 1},{-1,-1},{-1,-1},{ 0, 1},{ 0,-1},{-1,-1},{-1,-1}, /*e0*/ {-1, 0},{ 4, 0},{ 0, 1},{-1, 0},{-1, 0},{ 4, 0},{-1, 0},{-1, 0}, /*e8*/ { 3,-1},{-1,-1},{ 0, 1},{-1,-1},{ 0,-1},{-1,-1},{-1,-1},{ 0, 0}, /*f0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, }; int pmu_probe(void) { if (macintosh_config->adb_type == MAC_ADB_PB1) { pmu_kind = PMU_68K_V1; } else if (macintosh_config->adb_type == MAC_ADB_PB2) { pmu_kind = PMU_68K_V2; } else { return -ENODEV; } pmu_state = idle; return 0; } static int pmu_init(void) { int timeout; volatile struct adb_request req; via2[B] |= TREQ; /* negate TREQ */ via2[DIRB] = (via2[DIRB] | TREQ) & ~TACK; /* TACK in, TREQ out */ pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB); timeout = 100000; while (!req.complete) { if (--timeout < 0) { printk(KERN_ERR "pmu_init: no response from PMU\n"); return -EAGAIN; } udelay(10); pmu_poll(); } /* ack all pending interrupts */ timeout = 100000; interrupt_data[0] = 1; while (interrupt_data[0] || pmu_state != idle) { if (--timeout < 0) { printk(KERN_ERR "pmu_init: timed out acking intrs\n"); return -EAGAIN; } if (pmu_state == idle) { adb_int_pending = 1; pmu_interrupt(0, NULL); } pmu_poll(); udelay(10); } pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB_AUTO|PMU_INT_SNDBRT|PMU_INT_ADB); timeout = 100000; while (!req.complete) { if (--timeout < 0) { printk(KERN_ERR "pmu_init: no response from PMU\n"); return -EAGAIN; } udelay(10); pmu_poll(); } bright_req_1.complete = 1; bright_req_2.complete = 1; bright_req_3.complete = 1; if (request_irq(IRQ_MAC_ADB_SR, pmu_interrupt, 0, "pmu-shift", pmu_interrupt)) { printk(KERN_ERR "pmu_init: can't get irq %d\n", IRQ_MAC_ADB_SR); return -EAGAIN; } if (request_irq(IRQ_MAC_ADB_CL, pmu_interrupt, 0, "pmu-clock", pmu_interrupt)) { 
printk(KERN_ERR "pmu_init: can't get irq %d\n", IRQ_MAC_ADB_CL); free_irq(IRQ_MAC_ADB_SR, pmu_interrupt); return -EAGAIN; } pmu_fully_inited = 1; /* Enable backlight */ pmu_enable_backlight(1); printk("adb: PMU 68K driver v0.5 for Unified ADB.\n"); return 0; } int pmu_get_model(void) { return pmu_kind; } /* Send an ADB command */ static int pmu_send_request(struct adb_request *req, int sync) { int i, ret; if (!pmu_fully_inited) { req->complete = 1; return -ENXIO; } ret = -EINVAL; switch (req->data[0]) { case PMU_PACKET: for (i = 0; i < req->nbytes - 1; ++i) req->data[i] = req->data[i+1]; --req->nbytes; if (pmu_data_len[req->data[0]][1] != 0) { req->reply[0] = ADB_RET_OK; req->reply_len = 1; } else req->reply_len = 0; ret = pmu_queue_request(req); break; case CUDA_PACKET: switch (req->data[1]) { case CUDA_GET_TIME: if (req->nbytes != 2) break; req->data[0] = PMU_READ_RTC; req->nbytes = 1; req->reply_len = 3; req->reply[0] = CUDA_PACKET; req->reply[1] = 0; req->reply[2] = CUDA_GET_TIME; ret = pmu_queue_request(req); break; case CUDA_SET_TIME: if (req->nbytes != 6) break; req->data[0] = PMU_SET_RTC; req->nbytes = 5; for (i = 1; i <= 4; ++i) req->data[i] = req->data[i+1]; req->reply_len = 3; req->reply[0] = CUDA_PACKET; req->reply[1] = 0; req->reply[2] = CUDA_SET_TIME; ret = pmu_queue_request(req); break; case CUDA_GET_PRAM: if (req->nbytes != 4) break; req->data[0] = PMU_READ_NVRAM; req->data[1] = req->data[2]; req->data[2] = req->data[3]; req->nbytes = 3; req->reply_len = 3; req->reply[0] = CUDA_PACKET; req->reply[1] = 0; req->reply[2] = CUDA_GET_PRAM; ret = pmu_queue_request(req); break; case CUDA_SET_PRAM: if (req->nbytes != 5) break; req->data[0] = PMU_WRITE_NVRAM; req->data[1] = req->data[2]; req->data[2] = req->data[3]; req->data[3] = req->data[4]; req->nbytes = 4; req->reply_len = 3; req->reply[0] = CUDA_PACKET; req->reply[1] = 0; req->reply[2] = CUDA_SET_PRAM; ret = pmu_queue_request(req); break; } break; case ADB_PACKET: for (i = req->nbytes - 1; i > 1; --i) 
req->data[i+2] = req->data[i]; req->data[3] = req->nbytes - 2; req->data[2] = pmu_adb_flags; /*req->data[1] = req->data[1];*/ req->data[0] = PMU_ADB_CMD; req->nbytes += 2; req->reply_expected = 1; req->reply_len = 0; ret = pmu_queue_request(req); break; } if (ret) { req->complete = 1; return ret; } if (sync) { while (!req->complete) pmu_poll(); } return 0; } /* Enable/disable autopolling */ static int pmu_autopoll(int devs) { struct adb_request req; if (!pmu_fully_inited) return -ENXIO; if (devs) { adb_dev_map = devs; pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86, adb_dev_map >> 8, adb_dev_map); pmu_adb_flags = 2; } else { pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF); pmu_adb_flags = 0; } while (!req.complete) pmu_poll(); return 0; } /* Reset the ADB bus */ static int pmu_reset_bus(void) { struct adb_request req; long timeout; int save_autopoll = adb_dev_map; if (!pmu_fully_inited) return -ENXIO; /* anyone got a better idea?? */ pmu_autopoll(0); req.nbytes = 5; req.done = NULL; req.data[0] = PMU_ADB_CMD; req.data[1] = 0; req.data[2] = 3; /* ADB_BUSRESET ??? */ req.data[3] = 0; req.data[4] = 0; req.reply_len = 0; req.reply_expected = 1; if (pmu_queue_request(&req) != 0) { printk(KERN_ERR "pmu_adb_reset_bus: pmu_queue_request failed\n"); return -EIO; } while (!req.complete) pmu_poll(); timeout = 100000; while (!req.complete) { if (--timeout < 0) { printk(KERN_ERR "pmu_adb_reset_bus (reset): no response from PMU\n"); return -EIO; } udelay(10); pmu_poll(); } if (save_autopoll != 0) pmu_autopoll(save_autopoll); return 0; } /* Construct and send a pmu request */ int pmu_request(struct adb_request *req, void (*done)(struct adb_request *), int nbytes, ...) 
{ va_list list; int i; if (nbytes < 0 || nbytes > 32) { printk(KERN_ERR "pmu_request: bad nbytes (%d)\n", nbytes); req->complete = 1; return -EINVAL; } req->nbytes = nbytes; req->done = done; va_start(list, nbytes); for (i = 0; i < nbytes; ++i) req->data[i] = va_arg(list, int); va_end(list); if (pmu_data_len[req->data[0]][1] != 0) { req->reply[0] = ADB_RET_OK; req->reply_len = 1; } else req->reply_len = 0; req->reply_expected = 0; return pmu_queue_request(req); } int pmu_queue_request(struct adb_request *req) { unsigned long flags; int nsend; if (req->nbytes <= 0) { req->complete = 1; return 0; } nsend = pmu_data_len[req->data[0]][0]; if (nsend >= 0 && req->nbytes != nsend + 1) { req->complete = 1; return -EINVAL; } req->next = NULL; req->sent = 0; req->complete = 0; local_irq_save(flags); if (current_req != 0) { last_req->next = req; last_req = req; } else { current_req = req; last_req = req; if (pmu_state == idle) pmu_start(); } local_irq_restore(flags); return 0; } static void send_byte(int x) { via1[ACR] |= SR_CTRL; via1[SR] = x; via2[B] &= ~TREQ; /* assert TREQ */ } static void recv_byte(void) { char c; via1[ACR] = (via1[ACR] | SR_EXT) & ~SR_OUT; c = via1[SR]; /* resets SR */ via2[B] &= ~TREQ; } static void pmu_start(void) { unsigned long flags; struct adb_request *req; /* assert pmu_state == idle */ /* get the packet to send */ local_irq_save(flags); req = current_req; if (req == 0 || pmu_state != idle || (req->reply_expected && req_awaiting_reply)) goto out; pmu_state = sending; data_index = 1; data_len = pmu_data_len[req->data[0]][0]; /* set the shift register to shift out and send a byte */ send_byte(req->data[0]); out: local_irq_restore(flags); } void pmu_poll(void) { unsigned long flags; local_irq_save(flags); if (via1[IFR] & SR_INT) { via1[IFR] = SR_INT; pmu_interrupt(IRQ_MAC_ADB_SR, NULL); } if (via1[IFR] & CB1_INT) { via1[IFR] = CB1_INT; pmu_interrupt(IRQ_MAC_ADB_CL, NULL); } local_irq_restore(flags); } static irqreturn_t pmu_interrupt(int irq, void 
*dev_id) { struct adb_request *req; int timeout, bite = 0; /* to prevent compiler warning */ #if 0 printk("pmu_interrupt: irq %d state %d acr %02X, b %02X data_index %d/%d adb_int_pending %d\n", irq, pmu_state, (uint) via1[ACR], (uint) via2[B], data_index, data_len, adb_int_pending); #endif if (irq == IRQ_MAC_ADB_CL) { /* CB1 interrupt */ adb_int_pending = 1; } else if (irq == IRQ_MAC_ADB_SR) { /* SR interrupt */ if (via2[B] & TACK) { printk(KERN_DEBUG "PMU: SR_INT but ack still high! (%x)\n", via2[B]); } /* if reading grab the byte */ if ((via1[ACR] & SR_OUT) == 0) bite = via1[SR]; /* reset TREQ and wait for TACK to go high */ via2[B] |= TREQ; timeout = 3200; while (!(via2[B] & TACK)) { if (--timeout < 0) { printk(KERN_ERR "PMU not responding (!ack)\n"); goto finish; } udelay(10); } switch (pmu_state) { case sending: req = current_req; if (data_len < 0) { data_len = req->nbytes - 1; send_byte(data_len); break; } if (data_index <= data_len) { send_byte(req->data[data_index++]); break; } req->sent = 1; data_len = pmu_data_len[req->data[0]][1]; if (data_len == 0) { pmu_state = idle; current_req = req->next; if (req->reply_expected) req_awaiting_reply = req; else pmu_done(req); } else { pmu_state = reading; data_index = 0; reply_ptr = req->reply + req->reply_len; recv_byte(); } break; case intack: data_index = 0; data_len = -1; pmu_state = reading_intr; reply_ptr = interrupt_data; recv_byte(); break; case reading: case reading_intr: if (data_len == -1) { data_len = bite; if (bite > 32) printk(KERN_ERR "PMU: bad reply len %d\n", bite); } else { reply_ptr[data_index++] = bite; } if (data_index < data_len) { recv_byte(); break; } if (pmu_state == reading_intr) { pmu_handle_data(interrupt_data, data_index); } else { req = current_req; current_req = req->next; req->reply_len += data_index; pmu_done(req); } pmu_state = idle; break; default: printk(KERN_ERR "pmu_interrupt: unknown state %d?\n", pmu_state); } } finish: if (pmu_state == idle) { if (adb_int_pending) { pmu_state 
= intack; send_byte(PMU_INT_ACK); adb_int_pending = 0; } else if (current_req) { pmu_start(); } } #if 0 printk("pmu_interrupt: exit state %d acr %02X, b %02X data_index %d/%d adb_int_pending %d\n", pmu_state, (uint) via1[ACR], (uint) via2[B], data_index, data_len, adb_int_pending); #endif return IRQ_HANDLED; } static void pmu_done(struct adb_request *req) { req->complete = 1; if (req->done) (*req->done)(req); } /* Interrupt data could be the result data from an ADB cmd */ static void pmu_handle_data(unsigned char *data, int len) { static int show_pmu_ints = 1; asleep = 0; if (len < 1) { adb_int_pending = 0; return; } if (data[0] & PMU_INT_ADB) { if ((data[0] & PMU_INT_ADB_AUTO) == 0) { struct adb_request *req = req_awaiting_reply; if (req == 0) { printk(KERN_ERR "PMU: extra ADB reply\n"); return; } req_awaiting_reply = NULL; if (len <= 2) req->reply_len = 0; else { memcpy(req->reply, data + 1, len - 1); req->reply_len = len - 1; } pmu_done(req); } else { adb_input(data+1, len-1, 1); } } else { if (data[0] == 0x08 && len == 3) { /* sound/brightness buttons pressed */ pmu_set_brightness(data[1] >> 3); set_volume(data[2]); } else if (show_pmu_ints && !(data[0] == PMU_INT_TICK && len == 1)) { int i; printk(KERN_DEBUG "pmu intr"); for (i = 0; i < len; ++i) printk(" %.2x", data[i]); printk("\n"); } } } static int backlight_level = -1; static int backlight_enabled = 0; #define LEVEL_TO_BRIGHT(lev) ((lev) < 1? 
0x7f: 0x4a - ((lev) << 1)) static void pmu_enable_backlight(int on) { struct adb_request req; if (on) { /* first call: get current backlight value */ if (backlight_level < 0) { switch(pmu_kind) { case PMU_68K_V1: case PMU_68K_V2: pmu_request(&req, NULL, 3, PMU_READ_NVRAM, 0x14, 0xe); while (!req.complete) pmu_poll(); printk(KERN_DEBUG "pmu: nvram returned bright: %d\n", (int)req.reply[1]); backlight_level = req.reply[1]; break; default: backlight_enabled = 0; return; } } pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, LEVEL_TO_BRIGHT(backlight_level)); while (!req.complete) pmu_poll(); } pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_BACKLIGHT | (on ? PMU_POW_ON : PMU_POW_OFF)); while (!req.complete) pmu_poll(); backlight_enabled = on; } static void pmu_set_brightness(int level) { int bright; backlight_level = level; bright = LEVEL_TO_BRIGHT(level); if (!backlight_enabled) return; if (bright_req_1.complete) pmu_request(&bright_req_1, NULL, 2, PMU_BACKLIGHT_BRIGHT, bright); if (bright_req_2.complete) pmu_request(&bright_req_2, NULL, 2, PMU_POWER_CTRL, PMU_POW_BACKLIGHT | (bright < 0x7f ? PMU_POW_ON : PMU_POW_OFF)); } void pmu_enable_irled(int on) { struct adb_request req; pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED | (on ? PMU_POW_ON : PMU_POW_OFF)); while (!req.complete) pmu_poll(); } static void set_volume(int level) { } int pmu_present(void) { return (pmu_kind != PMU_UNKNOWN); }
gpl-2.0
Split-Screen/android_kernel_htc_msm8960
arch/x86/kernel/cpu/sched.c
9463
1144
#include <linux/sched.h> #include <linux/math64.h> #include <linux/percpu.h> #include <linux/irqflags.h> #include <asm/cpufeature.h> #include <asm/processor.h> #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct aperfmperf, old_perf_sched); static unsigned long scale_aperfmperf(void) { struct aperfmperf val, *old = &__get_cpu_var(old_perf_sched); unsigned long ratio, flags; local_irq_save(flags); get_aperfmperf(&val); local_irq_restore(flags); ratio = calc_aperfmperf_ratio(old, &val); *old = val; return ratio; } unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu) { /* * do aperf/mperf on the cpu level because it includes things * like turbo mode, which are relevant to full cores. */ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) return scale_aperfmperf(); /* * maybe have something cpufreq here */ return default_scale_freq_power(sd, cpu); } unsigned long arch_scale_smt_power(struct sched_domain *sd, int cpu) { /* * aperf/mperf already includes the smt gain */ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) return SCHED_LOAD_SCALE; return default_scale_smt_power(sd, cpu); } #endif
gpl-2.0
dohclude/android_kernel_htc_msm8960
drivers/net/usb/net1080.c
248
15783
/* * Net1080 based USB host-to-host cables * Copyright (C) 2000-2005 by David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ // #define DEBUG // error path messages, extra info // #define VERBOSE // more; success messages #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> #include <linux/slab.h> #include <asm/unaligned.h> /* * Netchip 1080 driver ... http://www.netchip.com * (Sept 2004: End-of-life announcement has been sent.) * Used in (some) LapLink cables */ #define frame_errors data[1] /* * NetChip framing of ethernet packets, supporting additional error * checks for links that may drop bulk packets from inside messages. * Odd USB length == always short read for last usb packet. * - nc_header * - Ethernet header (14 bytes) * - payload * - (optional padding byte, if needed so length becomes odd) * - nc_trailer * * This framing is to be avoided for non-NetChip devices. 
*/ struct nc_header { // packed: __le16 hdr_len; // sizeof nc_header (LE, all) __le16 packet_len; // payload size (including ethhdr) __le16 packet_id; // detects dropped packets #define MIN_HEADER 6 // all else is optional, and must start with: // __le16 vendorId; // from usb-if // __le16 productId; } __packed; #define PAD_BYTE ((unsigned char)0xAC) struct nc_trailer { __le16 packet_id; } __packed; // packets may use FLAG_FRAMING_NC and optional pad #define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \ + sizeof (struct ethhdr) \ + (mtu) \ + 1 \ + sizeof (struct nc_trailer)) #define MIN_FRAMED FRAMED_SIZE(0) /* packets _could_ be up to 64KB... */ #define NC_MAX_PACKET 32767 /* * Zero means no timeout; else, how long a 64 byte bulk packet may be queued * before the hardware drops it. If that's done, the driver will need to * frame network packets to guard against the dropped USB packets. The win32 * driver sets this for both sides of the link. */ #define NC_READ_TTL_MS ((u8)255) // ms /* * We ignore most registers and EEPROM contents. */ #define REG_USBCTL ((u8)0x04) #define REG_TTL ((u8)0x10) #define REG_STATUS ((u8)0x11) /* * Vendor specific requests to read/write data */ #define REQUEST_REGISTER ((u8)0x10) #define REQUEST_EEPROM ((u8)0x11) static int nc_vendor_read(struct usbnet *dev, u8 req, u8 regnum, u16 *retval_ptr) { int status = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, regnum, retval_ptr, sizeof *retval_ptr, USB_CTRL_GET_TIMEOUT); if (status > 0) status = 0; if (!status) le16_to_cpus(retval_ptr); return status; } static inline int nc_register_read(struct usbnet *dev, u8 regnum, u16 *retval_ptr) { return nc_vendor_read(dev, REQUEST_REGISTER, regnum, retval_ptr); } // no retval ... 
can become async, usable in_interrupt() static void nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value) { usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, regnum, NULL, 0, // data is in setup packet USB_CTRL_SET_TIMEOUT); } static inline void nc_register_write(struct usbnet *dev, u8 regnum, u16 value) { nc_vendor_write(dev, REQUEST_REGISTER, regnum, value); } #if 0 static void nc_dump_registers(struct usbnet *dev) { u8 reg; u16 *vp = kmalloc(sizeof (u16)); if (!vp) { dbg("no memory?"); return; } dbg("%s registers:", dev->net->name); for (reg = 0; reg < 0x20; reg++) { int retval; // reading some registers is trouble if (reg >= 0x08 && reg <= 0xf) continue; if (reg >= 0x12 && reg <= 0x1e) continue; retval = nc_register_read(dev, reg, vp); if (retval < 0) dbg("%s reg [0x%x] ==> error %d", dev->net->name, reg, retval); else dbg("%s reg [0x%x] = 0x%x", dev->net->name, reg, *vp); } kfree(vp); } #endif /*-------------------------------------------------------------------------*/ /* * Control register */ #define USBCTL_WRITABLE_MASK 0x1f0f // bits 15-13 reserved, r/o #define USBCTL_ENABLE_LANG (1 << 12) #define USBCTL_ENABLE_MFGR (1 << 11) #define USBCTL_ENABLE_PROD (1 << 10) #define USBCTL_ENABLE_SERIAL (1 << 9) #define USBCTL_ENABLE_DEFAULTS (1 << 8) // bits 7-4 reserved, r/o #define USBCTL_FLUSH_OTHER (1 << 3) #define USBCTL_FLUSH_THIS (1 << 2) #define USBCTL_DISCONN_OTHER (1 << 1) #define USBCTL_DISCONN_THIS (1 << 0) static inline void nc_dump_usbctl(struct usbnet *dev, u16 usbctl) { netif_dbg(dev, link, dev->net, "net1080 %s-%s usbctl 0x%x:%s%s%s%s%s; this%s%s; other%s%s; r/o 0x%x\n", dev->udev->bus->bus_name, dev->udev->devpath, usbctl, (usbctl & USBCTL_ENABLE_LANG) ? " lang" : "", (usbctl & USBCTL_ENABLE_MFGR) ? " mfgr" : "", (usbctl & USBCTL_ENABLE_PROD) ? " prod" : "", (usbctl & USBCTL_ENABLE_SERIAL) ? " serial" : "", (usbctl & USBCTL_ENABLE_DEFAULTS) ? 
" defaults" : "", (usbctl & USBCTL_FLUSH_THIS) ? " FLUSH" : "", (usbctl & USBCTL_DISCONN_THIS) ? " DIS" : "", (usbctl & USBCTL_FLUSH_OTHER) ? " FLUSH" : "", (usbctl & USBCTL_DISCONN_OTHER) ? " DIS" : "", usbctl & ~USBCTL_WRITABLE_MASK); } /*-------------------------------------------------------------------------*/ /* * Status register */ #define STATUS_PORT_A (1 << 15) #define STATUS_CONN_OTHER (1 << 14) #define STATUS_SUSPEND_OTHER (1 << 13) #define STATUS_MAILBOX_OTHER (1 << 12) #define STATUS_PACKETS_OTHER(n) (((n) >> 8) & 0x03) #define STATUS_CONN_THIS (1 << 6) #define STATUS_SUSPEND_THIS (1 << 5) #define STATUS_MAILBOX_THIS (1 << 4) #define STATUS_PACKETS_THIS(n) (((n) >> 0) & 0x03) #define STATUS_UNSPEC_MASK 0x0c8c #define STATUS_NOISE_MASK ((u16)~(0x0303|STATUS_UNSPEC_MASK)) static inline void nc_dump_status(struct usbnet *dev, u16 status) { netif_dbg(dev, link, dev->net, "net1080 %s-%s status 0x%x: this (%c) PKT=%d%s%s%s; other PKT=%d%s%s%s; unspec 0x%x\n", dev->udev->bus->bus_name, dev->udev->devpath, status, // XXX the packet counts don't seem right // (1 at reset, not 0); maybe UNSPEC too (status & STATUS_PORT_A) ? 'A' : 'B', STATUS_PACKETS_THIS(status), (status & STATUS_CONN_THIS) ? " CON" : "", (status & STATUS_SUSPEND_THIS) ? " SUS" : "", (status & STATUS_MAILBOX_THIS) ? " MBOX" : "", STATUS_PACKETS_OTHER(status), (status & STATUS_CONN_OTHER) ? " CON" : "", (status & STATUS_SUSPEND_OTHER) ? " SUS" : "", (status & STATUS_MAILBOX_OTHER) ? 
" MBOX" : "", status & STATUS_UNSPEC_MASK); } /*-------------------------------------------------------------------------*/ /* * TTL register */ #define TTL_THIS(ttl) (0x00ff & ttl) #define TTL_OTHER(ttl) (0x00ff & (ttl >> 8)) #define MK_TTL(this,other) ((u16)(((other)<<8)|(0x00ff&(this)))) static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl) { netif_dbg(dev, link, dev->net, "net1080 %s-%s ttl 0x%x this = %d, other = %d\n", dev->udev->bus->bus_name, dev->udev->devpath, ttl, TTL_THIS(ttl), TTL_OTHER(ttl)); } /*-------------------------------------------------------------------------*/ static int net1080_reset(struct usbnet *dev) { u16 usbctl, status, ttl; u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL); int retval; if (!vp) return -ENOMEM; // nc_dump_registers(dev); if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) { dbg("can't read %s-%s status: %d", dev->udev->bus->bus_name, dev->udev->devpath, retval); goto done; } status = *vp; nc_dump_status(dev, status); if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) { dbg("can't read USBCTL, %d", retval); goto done; } usbctl = *vp; nc_dump_usbctl(dev, usbctl); nc_register_write(dev, REG_USBCTL, USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER); if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) { dbg("can't read TTL, %d", retval); goto done; } ttl = *vp; // nc_dump_ttl(dev, ttl); nc_register_write(dev, REG_TTL, MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) ); dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS); netif_info(dev, link, dev->net, "port %c, peer %sconnected\n", (status & STATUS_PORT_A) ? 'A' : 'B', (status & STATUS_CONN_OTHER) ? 
"" : "dis"); retval = 0; done: kfree(vp); return retval; } static int net1080_check_connect(struct usbnet *dev) { int retval; u16 status; u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL); if (!vp) return -ENOMEM; retval = nc_register_read(dev, REG_STATUS, vp); status = *vp; kfree(vp); if (retval != 0) { dbg("%s net1080_check_conn read - %d", dev->net->name, retval); return retval; } if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER) return -ENOLINK; return 0; } static void nc_flush_complete(struct urb *urb) { kfree(urb->context); usb_free_urb(urb); } static void nc_ensure_sync(struct usbnet *dev) { dev->frame_errors++; if (dev->frame_errors > 5) { struct urb *urb; struct usb_ctrlrequest *req; int status; /* Send a flush */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return; req = kmalloc(sizeof *req, GFP_ATOMIC); if (!req) { usb_free_urb(urb); return; } req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE; req->bRequest = REQUEST_REGISTER; req->wValue = cpu_to_le16(USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER); req->wIndex = cpu_to_le16(REG_USBCTL); req->wLength = cpu_to_le16(0); /* queue an async control request, we don't need * to do anything when it finishes except clean up. 
*/ usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), (unsigned char *) req, NULL, 0, nc_flush_complete, req); status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { kfree(req); usb_free_urb(urb); return; } netif_dbg(dev, rx_err, dev->net, "flush net1080; too many framing errors\n"); dev->frame_errors = 0; } } static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { struct nc_header *header; struct nc_trailer *trailer; u16 hdr_len, packet_len; if (!(skb->len & 0x01)) { #ifdef DEBUG struct net_device *net = dev->net; dbg("rx framesize %d range %d..%d mtu %d", skb->len, net->hard_header_len, dev->hard_mtu, net->mtu); #endif dev->net->stats.rx_frame_errors++; nc_ensure_sync(dev); return 0; } header = (struct nc_header *) skb->data; hdr_len = le16_to_cpup(&header->hdr_len); packet_len = le16_to_cpup(&header->packet_len); if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { dev->net->stats.rx_frame_errors++; dbg("packet too big, %d", packet_len); nc_ensure_sync(dev); return 0; } else if (hdr_len < MIN_HEADER) { dev->net->stats.rx_frame_errors++; dbg("header too short, %d", hdr_len); nc_ensure_sync(dev); return 0; } else if (hdr_len > MIN_HEADER) { // out of band data for us? dbg("header OOB, %d bytes", hdr_len - MIN_HEADER); nc_ensure_sync(dev); // switch (vendor/product ids) { ... 
} } skb_pull(skb, hdr_len); trailer = (struct nc_trailer *) (skb->data + skb->len - sizeof *trailer); skb_trim(skb, skb->len - sizeof *trailer); if ((packet_len & 0x01) == 0) { if (skb->data [packet_len] != PAD_BYTE) { dev->net->stats.rx_frame_errors++; dbg("bad pad"); return 0; } skb_trim(skb, skb->len - 1); } if (skb->len != packet_len) { dev->net->stats.rx_frame_errors++; dbg("bad packet len %d (expected %d)", skb->len, packet_len); nc_ensure_sync(dev); return 0; } if (header->packet_id != get_unaligned(&trailer->packet_id)) { dev->net->stats.rx_fifo_errors++; dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", le16_to_cpu(header->packet_id), le16_to_cpu(trailer->packet_id)); return 0; } #if 0 netdev_dbg(dev->net, "frame <rx h %d p %d id %d\n", header->hdr_len, header->packet_len, header->packet_id); #endif dev->frame_errors = 0; return 1; } static struct sk_buff * net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { struct sk_buff *skb2; struct nc_header *header = NULL; struct nc_trailer *trailer = NULL; int padlen = sizeof (struct nc_trailer); int len = skb->len; if (!((len + padlen + sizeof (struct nc_header)) & 0x01)) padlen++; if (!skb_cloned(skb)) { int headroom = skb_headroom(skb); int tailroom = skb_tailroom(skb); if (padlen <= tailroom && sizeof(struct nc_header) <= headroom) /* There's enough head and tail room */ goto encapsulate; if ((sizeof (struct nc_header) + padlen) < (headroom + tailroom)) { /* There's enough total room, so just readjust */ skb->data = memmove(skb->head + sizeof (struct nc_header), skb->data, skb->len); skb_set_tail_pointer(skb, len); goto encapsulate; } } /* Create a new skb to use with the correct size */ skb2 = skb_copy_expand(skb, sizeof (struct nc_header), padlen, flags); dev_kfree_skb_any(skb); if (!skb2) return skb2; skb = skb2; encapsulate: /* header first */ header = (struct nc_header *) skb_push(skb, sizeof *header); header->hdr_len = cpu_to_le16(sizeof (*header)); header->packet_len = 
cpu_to_le16(len); header->packet_id = cpu_to_le16((u16)dev->xid++); /* maybe pad; then trailer */ if (!((skb->len + sizeof *trailer) & 0x01)) *skb_put(skb, 1) = PAD_BYTE; trailer = (struct nc_trailer *) skb_put(skb, sizeof *trailer); put_unaligned(header->packet_id, &trailer->packet_id); #if 0 netdev_dbg(dev->net, "frame >tx h %d p %d id %d\n", header->hdr_len, header->packet_len, header->packet_id); #endif return skb; } static int net1080_bind(struct usbnet *dev, struct usb_interface *intf) { unsigned extra = sizeof (struct nc_header) + 1 + sizeof (struct nc_trailer); dev->net->hard_header_len += extra; dev->rx_urb_size = dev->net->hard_header_len + dev->net->mtu; dev->hard_mtu = NC_MAX_PACKET; return usbnet_get_endpoints (dev, intf); } static const struct driver_info net1080_info = { .description = "NetChip TurboCONNECT", .flags = FLAG_FRAMING_NC, .bind = net1080_bind, .reset = net1080_reset, .check_connect = net1080_check_connect, .rx_fixup = net1080_rx_fixup, .tx_fixup = net1080_tx_fixup, }; static const struct usb_device_id products [] = { { USB_DEVICE(0x0525, 0x1080), // NetChip ref design .driver_info = (unsigned long) &net1080_info, }, { USB_DEVICE(0x06D0, 0x0622), // Laplink Gold .driver_info = (unsigned long) &net1080_info, }, { }, // END }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver net1080_driver = { .name = "net1080", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, }; static int __init net1080_init(void) { return usb_register(&net1080_driver); } module_init(net1080_init); static void __exit net1080_exit(void) { usb_deregister(&net1080_driver); } module_exit(net1080_exit); MODULE_AUTHOR("David Brownell"); MODULE_DESCRIPTION("NetChip 1080 based USB Host-to-Host Links"); MODULE_LICENSE("GPL");
gpl-2.0
blindi/LameSung-Kernel
drivers/bluetooth/ath3k.c
248
11012
/* * Copyright (c) 2008-2009 Atheros Communications Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/usb.h> #include <net/bluetooth/bluetooth.h> #define VERSION "1.0" #define ATH3K_DNLOAD 0x01 #define ATH3K_GETSTATE 0x05 #define ATH3K_SET_NORMAL_MODE 0x07 #define ATH3K_GETVERSION 0x09 #define USB_REG_SWITCH_VID_PID 0x0a #define ATH3K_MODE_MASK 0x3F #define ATH3K_NORMAL_MODE 0x0E #define ATH3K_PATCH_UPDATE 0x80 #define ATH3K_SYSCFG_UPDATE 0x40 #define ATH3K_XTAL_FREQ_26M 0x00 #define ATH3K_XTAL_FREQ_40M 0x01 #define ATH3K_XTAL_FREQ_19P2 0x02 #define ATH3K_NAME_LEN 0xFF struct ath3k_version { unsigned int rom_version; unsigned int build_version; unsigned int ram_version; unsigned char ref_clock; unsigned char reserved[0x07]; }; static struct usb_device_id ath3k_table[] = { /* Atheros AR3011 */ { USB_DEVICE(0x0CF3, 0x3000) }, /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x3002) }, { USB_DEVICE(0x13d3, 0x3304) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0489, 0xE03D) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, /* Atheros AR3012 with sflash 
firmware*/ { USB_DEVICE(0x0CF3, 0x3004) }, { USB_DEVICE(0x0CF3, 0x311D) }, { USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x04CA, 0x3005) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE03C) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ath3k_table); #define BTUSB_ATH3012 0x80 /* This table is to load patch and sysconfig files * for AR3012 */ static struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, { } /* Terminating entry */ }; #define USB_REQ_DFU_DNLOAD 1 #define BULK_SIZE 4096 #define FW_HDR_SIZE 20 static int ath3k_load_firmware(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; int err, pipe, len, size, sent = 0; int count = firmware->size; BT_DBG("udev %p", udev); pipe = usb_sndctrlpipe(udev, 0); send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } memcpy(send_buf, firmware->data, 20); if ((err = usb_control_msg(udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR, 0, 0, send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) { BT_ERR("Can't change to loading configuration err"); goto error; } sent += 20; count -= 20; while (count) { size = min_t(uint, count, BULK_SIZE); pipe = usb_sndbulkpipe(udev, 0x02); memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || (len != size)) { BT_ERR("Error in firmware loading err = %d," "len = %d, size = %d", err, len, size); goto error; } sent += size; count -= size; } error: kfree(send_buf); 
return err; } static int ath3k_get_state(struct usb_device *udev, unsigned char *state) { int pipe = 0; pipe = usb_rcvctrlpipe(udev, 0); return usb_control_msg(udev, pipe, ATH3K_GETSTATE, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, state, 0x01, USB_CTRL_SET_TIMEOUT); } static int ath3k_get_version(struct usb_device *udev, struct ath3k_version *version) { int pipe = 0; pipe = usb_rcvctrlpipe(udev, 0); return usb_control_msg(udev, pipe, ATH3K_GETVERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, sizeof(struct ath3k_version), USB_CTRL_SET_TIMEOUT); } static int ath3k_load_fwfile(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; int err, pipe, len, size, count, sent = 0; int ret; count = firmware->size; send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } size = min_t(uint, count, FW_HDR_SIZE); memcpy(send_buf, firmware->data, size); pipe = usb_sndctrlpipe(udev, 0); ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD, USB_TYPE_VENDOR, 0, 0, send_buf, size, USB_CTRL_SET_TIMEOUT); if (ret < 0) { BT_ERR("Can't change to loading configuration err"); kfree(send_buf); return ret; } sent += size; count -= size; while (count) { size = min_t(uint, count, BULK_SIZE); pipe = usb_sndbulkpipe(udev, 0x02); memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || (len != size)) { BT_ERR("Error in firmware loading err = %d," "len = %d, size = %d", err, len, size); kfree(send_buf); return err; } sent += size; count -= size; } kfree(send_buf); return 0; } static int ath3k_switch_pid(struct usb_device *udev) { int pipe = 0; pipe = usb_sndctrlpipe(udev, 0); return usb_control_msg(udev, pipe, USB_REG_SWITCH_VID_PID, USB_TYPE_VENDOR, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int ath3k_set_normal_mode(struct usb_device *udev) { unsigned char fw_state; int pipe = 0, ret; ret = ath3k_get_state(udev, &fw_state); if (ret < 0) { BT_ERR("Can't 
get state to change to normal mode err"); return ret; } if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) { BT_DBG("firmware was already in normal mode"); return 0; } pipe = usb_sndctrlpipe(udev, 0); return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE, USB_TYPE_VENDOR, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int ath3k_load_patch(struct usb_device *udev) { unsigned char fw_state; char filename[ATH3K_NAME_LEN] = {0}; const struct firmware *firmware; struct ath3k_version fw_version, pt_version; int ret; ret = ath3k_get_state(udev, &fw_state); if (ret < 0) { BT_ERR("Can't get state to change to load ram patch err"); return ret; } if (fw_state & ATH3K_PATCH_UPDATE) { BT_DBG("Patch was already downloaded"); return 0; } ret = ath3k_get_version(udev, &fw_version); if (ret < 0) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu", fw_version.rom_version); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { BT_ERR("Patch file not found %s", filename); return ret; } pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8); pt_version.build_version = *(int *) (firmware->data + firmware->size - 4); if ((pt_version.rom_version != fw_version.rom_version) || (pt_version.build_version <= fw_version.build_version)) { BT_ERR("Patch file version did not match with firmware"); release_firmware(firmware); return -EINVAL; } ret = ath3k_load_fwfile(udev, firmware); release_firmware(firmware); return ret; } static int ath3k_load_syscfg(struct usb_device *udev) { unsigned char fw_state; char filename[ATH3K_NAME_LEN] = {0}; const struct firmware *firmware; struct ath3k_version fw_version; int clk_value, ret; ret = ath3k_get_state(udev, &fw_state); if (ret < 0) { BT_ERR("Can't get state to change to load configration err"); return -EBUSY; } ret = ath3k_get_version(udev, &fw_version); if (ret < 0) { BT_ERR("Can't get version to change to load ram patch err"); 
return ret; } switch (fw_version.ref_clock) { case ATH3K_XTAL_FREQ_26M: clk_value = 26; break; case ATH3K_XTAL_FREQ_40M: clk_value = 40; break; case ATH3K_XTAL_FREQ_19P2: clk_value = 19; break; default: clk_value = 0; break; } snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s", fw_version.rom_version, clk_value, ".dfu"); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { BT_ERR("Configuration file not found %s", filename); return ret; } ret = ath3k_load_fwfile(udev, firmware); release_firmware(firmware); return ret; } static int ath3k_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); int ret; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; /* match device ID in ath3k blacklist table */ if (!id->driver_info) { const struct usb_device_id *match; match = usb_match_id(intf, ath3k_blist_tbl); if (match) id = match; } /* load patch and sysconfig files for AR3012 */ if (id->driver_info & BTUSB_ATH3012) { /* New firmware with patch and sysconfig files already loaded */ if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001) return -ENODEV; ret = ath3k_load_patch(udev); if (ret < 0) { BT_ERR("Loading patch file failed"); return ret; } ret = ath3k_load_syscfg(udev); if (ret < 0) { BT_ERR("Loading sysconfig file failed"); return ret; } ret = ath3k_set_normal_mode(udev); if (ret < 0) { BT_ERR("Set normal mode failed"); return ret; } ath3k_switch_pid(udev); return 0; } if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { BT_ERR("Error loading firmware"); return -EIO; } ret = ath3k_load_firmware(udev, firmware); release_firmware(firmware); return ret; } static void ath3k_disconnect(struct usb_interface *intf) { BT_DBG("ath3k_disconnect intf %p", intf); } static struct usb_driver ath3k_driver = { .name = "ath3k", .probe = ath3k_probe, .disconnect = ath3k_disconnect, .id_table = 
ath3k_table, }; static int __init ath3k_init(void) { BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION); return usb_register(&ath3k_driver); } static void __exit ath3k_exit(void) { usb_deregister(&ath3k_driver); } module_init(ath3k_init); module_exit(ath3k_exit); MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Atheros AR30xx firmware driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("ath3k-1.fw");
gpl-2.0
Evervolv/android_kernel_lge_pecan
drivers/hwmon/adt7475.c
504
35308
/* * adt7475 - Thermal sensor driver for the ADT7475 chip and derivatives * Copyright (C) 2007-2008, Advanced Micro Devices, Inc. * Copyright (C) 2008 Jordan Crouse <jordan@cosmicpenguin.net> * Copyright (C) 2008 Hans de Goede <hdegoede@redhat.com> * Derived from the lm83 driver by Jean Delvare * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> /* Indexes for the sysfs hooks */ #define INPUT 0 #define MIN 1 #define MAX 2 #define CONTROL 3 #define OFFSET 3 #define AUTOMIN 4 #define THERM 5 #define HYSTERSIS 6 /* These are unique identifiers for the sysfs functions - unlike the numbers above, these are not also indexes into an array */ #define ALARM 9 #define FAULT 10 /* 7475 Common Registers */ #define REG_VOLTAGE_BASE 0x21 #define REG_TEMP_BASE 0x25 #define REG_TACH_BASE 0x28 #define REG_PWM_BASE 0x30 #define REG_PWM_MAX_BASE 0x38 #define REG_DEVID 0x3D #define REG_VENDID 0x3E #define REG_STATUS1 0x41 #define REG_STATUS2 0x42 #define REG_VOLTAGE_MIN_BASE 0x46 #define REG_VOLTAGE_MAX_BASE 0x47 #define REG_TEMP_MIN_BASE 0x4E #define REG_TEMP_MAX_BASE 0x4F #define REG_TACH_MIN_BASE 0x54 #define REG_PWM_CONFIG_BASE 0x5C #define REG_TEMP_TRANGE_BASE 0x5F #define REG_PWM_MIN_BASE 0x64 #define REG_TEMP_TMIN_BASE 0x67 #define REG_TEMP_THERM_BASE 0x6A #define REG_REMOTE1_HYSTERSIS 0x6D #define REG_REMOTE2_HYSTERSIS 0x6E #define REG_TEMP_OFFSET_BASE 0x70 #define REG_EXTEND1 0x76 #define REG_EXTEND2 0x77 #define REG_CONFIG5 0x7C #define CONFIG5_TWOSCOMP 0x01 #define CONFIG5_TEMPOFFSET 0x02 /* ADT7475 Settings */ #define ADT7475_VOLTAGE_COUNT 2 #define ADT7475_TEMP_COUNT 3 #define ADT7475_TACH_COUNT 4 #define ADT7475_PWM_COUNT 3 /* Macro to read the registers */ 
#define adt7475_read(reg) i2c_smbus_read_byte_data(client, (reg)) /* Macros to easily index the registers */ #define TACH_REG(idx) (REG_TACH_BASE + ((idx) * 2)) #define TACH_MIN_REG(idx) (REG_TACH_MIN_BASE + ((idx) * 2)) #define PWM_REG(idx) (REG_PWM_BASE + (idx)) #define PWM_MAX_REG(idx) (REG_PWM_MAX_BASE + (idx)) #define PWM_MIN_REG(idx) (REG_PWM_MIN_BASE + (idx)) #define PWM_CONFIG_REG(idx) (REG_PWM_CONFIG_BASE + (idx)) #define VOLTAGE_REG(idx) (REG_VOLTAGE_BASE + (idx)) #define VOLTAGE_MIN_REG(idx) (REG_VOLTAGE_MIN_BASE + ((idx) * 2)) #define VOLTAGE_MAX_REG(idx) (REG_VOLTAGE_MAX_BASE + ((idx) * 2)) #define TEMP_REG(idx) (REG_TEMP_BASE + (idx)) #define TEMP_MIN_REG(idx) (REG_TEMP_MIN_BASE + ((idx) * 2)) #define TEMP_MAX_REG(idx) (REG_TEMP_MAX_BASE + ((idx) * 2)) #define TEMP_TMIN_REG(idx) (REG_TEMP_TMIN_BASE + (idx)) #define TEMP_THERM_REG(idx) (REG_TEMP_THERM_BASE + (idx)) #define TEMP_OFFSET_REG(idx) (REG_TEMP_OFFSET_BASE + (idx)) #define TEMP_TRANGE_REG(idx) (REG_TEMP_TRANGE_BASE + (idx)) static unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END }; I2C_CLIENT_INSMOD_1(adt7475); static const struct i2c_device_id adt7475_id[] = { { "adt7475", adt7475 }, { } }; MODULE_DEVICE_TABLE(i2c, adt7475_id); struct adt7475_data { struct device *hwmon_dev; struct mutex lock; unsigned long measure_updated; unsigned long limits_updated; char valid; u8 config5; u16 alarms; u16 voltage[3][3]; u16 temp[7][3]; u16 tach[2][4]; u8 pwm[4][3]; u8 range[3]; u8 pwmctl[3]; u8 pwmchan[3]; }; static struct i2c_driver adt7475_driver; static struct adt7475_data *adt7475_update_device(struct device *dev); static void adt7475_read_hystersis(struct i2c_client *client); static void adt7475_read_pwm(struct i2c_client *client, int index); /* Given a temp value, convert it to register value */ static inline u16 temp2reg(struct adt7475_data *data, long val) { u16 ret; if (!(data->config5 & CONFIG5_TWOSCOMP)) { val = SENSORS_LIMIT(val, -64000, 191000); ret = (val + 64500) / 1000; } else { val = 
SENSORS_LIMIT(val, -128000, 127000); if (val < -500) ret = (256500 + val) / 1000; else ret = (val + 500) / 1000; } return ret << 2; } /* Given a register value, convert it to a real temp value */ static inline int reg2temp(struct adt7475_data *data, u16 reg) { if (data->config5 & CONFIG5_TWOSCOMP) { if (reg >= 512) return (reg - 1024) * 250; else return reg * 250; } else return (reg - 256) * 250; } static inline int tach2rpm(u16 tach) { if (tach == 0 || tach == 0xFFFF) return 0; return (90000 * 60) / tach; } static inline u16 rpm2tach(unsigned long rpm) { if (rpm == 0) return 0; return SENSORS_LIMIT((90000 * 60) / rpm, 1, 0xFFFF); } static inline int reg2vcc(u16 reg) { return (4296 * reg) / 1000; } static inline int reg2vccp(u16 reg) { return (2929 * reg) / 1000; } static inline u16 vcc2reg(long vcc) { vcc = SENSORS_LIMIT(vcc, 0, 4396); return (vcc * 1000) / 4296; } static inline u16 vccp2reg(long vcc) { vcc = SENSORS_LIMIT(vcc, 0, 2998); return (vcc * 1000) / 2929; } static u16 adt7475_read_word(struct i2c_client *client, int reg) { u16 val; val = i2c_smbus_read_byte_data(client, reg); val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8); return val; } static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) { i2c_smbus_write_byte_data(client, reg + 1, val >> 8); i2c_smbus_write_byte_data(client, reg, val & 0xFF); } /* Find the nearest value in a table - used for pwm frequency and auto temp range */ static int find_nearest(long val, const int *array, int size) { int i; if (val < array[0]) return 0; if (val > array[size - 1]) return size - 1; for (i = 0; i < size - 1; i++) { int a, b; if (val > array[i + 1]) continue; a = val - array[i]; b = array[i + 1] - val; return (a <= b) ? 
i : i + 1; } return 0; } static ssize_t show_voltage(struct device *dev, struct device_attribute *attr, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); unsigned short val; switch (sattr->nr) { case ALARM: return sprintf(buf, "%d\n", (data->alarms >> (sattr->index + 1)) & 1); default: val = data->voltage[sattr->nr][sattr->index]; return sprintf(buf, "%d\n", sattr->index == 0 ? reg2vccp(val) : reg2vcc(val)); } } static ssize_t set_voltage(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); struct i2c_client *client = to_i2c_client(dev); struct adt7475_data *data = i2c_get_clientdata(client); unsigned char reg; long val; if (strict_strtol(buf, 10, &val)) return -EINVAL; mutex_lock(&data->lock); data->voltage[sattr->nr][sattr->index] = sattr->index ? vcc2reg(val) : vccp2reg(val); if (sattr->nr == MIN) reg = VOLTAGE_MIN_REG(sattr->index); else reg = VOLTAGE_MAX_REG(sattr->index); i2c_smbus_write_byte_data(client, reg, data->voltage[sattr->nr][sattr->index] >> 2); mutex_unlock(&data->lock); return count; } static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); int out; switch (sattr->nr) { case HYSTERSIS: mutex_lock(&data->lock); out = data->temp[sattr->nr][sattr->index]; if (sattr->index != 1) out = (out >> 4) & 0xF; else out = (out & 0xF); /* Show the value as an absolute number tied to * THERM */ out = reg2temp(data, data->temp[THERM][sattr->index]) - out * 1000; mutex_unlock(&data->lock); break; case OFFSET: /* Offset is always 2's complement, regardless of the * setting in CONFIG5 */ mutex_lock(&data->lock); out = (s8)data->temp[sattr->nr][sattr->index]; if (data->config5 & CONFIG5_TEMPOFFSET) out *= 1000; else out *= 
500; mutex_unlock(&data->lock); break; case ALARM: out = (data->alarms >> (sattr->index + 4)) & 1; break; case FAULT: /* Note - only for remote1 and remote2 */ out = !!(data->alarms & (sattr->index ? 0x8000 : 0x4000)); break; default: /* All other temp values are in the configured format */ out = reg2temp(data, data->temp[sattr->nr][sattr->index]); } return sprintf(buf, "%d\n", out); } static ssize_t set_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); struct i2c_client *client = to_i2c_client(dev); struct adt7475_data *data = i2c_get_clientdata(client); unsigned char reg = 0; u8 out; int temp; long val; if (strict_strtol(buf, 10, &val)) return -EINVAL; mutex_lock(&data->lock); /* We need the config register in all cases for temp <-> reg conv. */ data->config5 = adt7475_read(REG_CONFIG5); switch (sattr->nr) { case OFFSET: if (data->config5 & CONFIG5_TEMPOFFSET) { val = SENSORS_LIMIT(val, -63000, 127000); out = data->temp[OFFSET][sattr->index] = val / 1000; } else { val = SENSORS_LIMIT(val, -63000, 64000); out = data->temp[OFFSET][sattr->index] = val / 500; } break; case HYSTERSIS: /* The value will be given as an absolute value, turn it into an offset based on THERM */ /* Read fresh THERM and HYSTERSIS values from the chip */ data->temp[THERM][sattr->index] = adt7475_read(TEMP_THERM_REG(sattr->index)) << 2; adt7475_read_hystersis(client); temp = reg2temp(data, data->temp[THERM][sattr->index]); val = SENSORS_LIMIT(val, temp - 15000, temp); val = (temp - val) / 1000; if (sattr->index != 1) { data->temp[HYSTERSIS][sattr->index] &= 0xF0; data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4; } else { data->temp[HYSTERSIS][sattr->index] &= 0x0F; data->temp[HYSTERSIS][sattr->index] |= (val & 0xF); } out = data->temp[HYSTERSIS][sattr->index]; break; default: data->temp[sattr->nr][sattr->index] = temp2reg(data, val); /* We maintain an extra 2 digits of precision 
for simplicity * - shift those back off before writing the value */ out = (u8) (data->temp[sattr->nr][sattr->index] >> 2); } switch (sattr->nr) { case MIN: reg = TEMP_MIN_REG(sattr->index); break; case MAX: reg = TEMP_MAX_REG(sattr->index); break; case OFFSET: reg = TEMP_OFFSET_REG(sattr->index); break; case AUTOMIN: reg = TEMP_TMIN_REG(sattr->index); break; case THERM: reg = TEMP_THERM_REG(sattr->index); break; case HYSTERSIS: if (sattr->index != 2) reg = REG_REMOTE1_HYSTERSIS; else reg = REG_REMOTE2_HYSTERSIS; break; } i2c_smbus_write_byte_data(client, reg, out); mutex_unlock(&data->lock); return count; } /* Table of autorange values - the user will write the value in millidegrees, and we'll convert it */ static const int autorange_table[] = { 2000, 2500, 3330, 4000, 5000, 6670, 8000, 10000, 13330, 16000, 20000, 26670, 32000, 40000, 53330, 80000 }; static ssize_t show_point2(struct device *dev, struct device_attribute *attr, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); int out, val; mutex_lock(&data->lock); out = (data->range[sattr->index] >> 4) & 0x0F; val = reg2temp(data, data->temp[AUTOMIN][sattr->index]); mutex_unlock(&data->lock); return sprintf(buf, "%d\n", val + autorange_table[out]); } static ssize_t set_point2(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct adt7475_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); int temp; long val; if (strict_strtol(buf, 10, &val)) return -EINVAL; mutex_lock(&data->lock); /* Get a fresh copy of the needed registers */ data->config5 = adt7475_read(REG_CONFIG5); data->temp[AUTOMIN][sattr->index] = adt7475_read(TEMP_TMIN_REG(sattr->index)) << 2; data->range[sattr->index] = adt7475_read(TEMP_TRANGE_REG(sattr->index)); /* The user will write an absolute value, so subtract the start 
point to figure the range */ temp = reg2temp(data, data->temp[AUTOMIN][sattr->index]); val = SENSORS_LIMIT(val, temp + autorange_table[0], temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]); val -= temp; /* Find the nearest table entry to what the user wrote */ val = find_nearest(val, autorange_table, ARRAY_SIZE(autorange_table)); data->range[sattr->index] &= ~0xF0; data->range[sattr->index] |= val << 4; i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(sattr->index), data->range[sattr->index]); mutex_unlock(&data->lock); return count; } static ssize_t show_tach(struct device *dev, struct device_attribute *attr, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); int out; if (sattr->nr == ALARM) out = (data->alarms >> (sattr->index + 10)) & 1; else out = tach2rpm(data->tach[sattr->nr][sattr->index]); return sprintf(buf, "%d\n", out); } static ssize_t set_tach(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); struct i2c_client *client = to_i2c_client(dev); struct adt7475_data *data = i2c_get_clientdata(client); unsigned long val; if (strict_strtoul(buf, 10, &val)) return -EINVAL; mutex_lock(&data->lock); data->tach[MIN][sattr->index] = rpm2tach(val); adt7475_write_word(client, TACH_MIN_REG(sattr->index), data->tach[MIN][sattr->index]); mutex_unlock(&data->lock); return count; } static ssize_t show_pwm(struct device *dev, struct device_attribute *attr, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); return sprintf(buf, "%d\n", data->pwm[sattr->nr][sattr->index]); } static ssize_t show_pwmchan(struct device *dev, struct device_attribute *attr, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); 
return sprintf(buf, "%d\n", data->pwmchan[sattr->index]); } static ssize_t show_pwmctrl(struct device *dev, struct device_attribute *attr, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); return sprintf(buf, "%d\n", data->pwmctl[sattr->index]); } static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); struct i2c_client *client = to_i2c_client(dev); struct adt7475_data *data = i2c_get_clientdata(client); unsigned char reg = 0; long val; if (strict_strtol(buf, 10, &val)) return -EINVAL; mutex_lock(&data->lock); switch (sattr->nr) { case INPUT: /* Get a fresh value for CONTROL */ data->pwm[CONTROL][sattr->index] = adt7475_read(PWM_CONFIG_REG(sattr->index)); /* If we are not in manual mode, then we shouldn't allow * the user to set the pwm speed */ if (((data->pwm[CONTROL][sattr->index] >> 5) & 7) != 7) { mutex_unlock(&data->lock); return count; } reg = PWM_REG(sattr->index); break; case MIN: reg = PWM_MIN_REG(sattr->index); break; case MAX: reg = PWM_MAX_REG(sattr->index); break; } data->pwm[sattr->nr][sattr->index] = SENSORS_LIMIT(val, 0, 0xFF); i2c_smbus_write_byte_data(client, reg, data->pwm[sattr->nr][sattr->index]); mutex_unlock(&data->lock); return count; } /* Called by set_pwmctrl and set_pwmchan */ static int hw_set_pwm(struct i2c_client *client, int index, unsigned int pwmctl, unsigned int pwmchan) { struct adt7475_data *data = i2c_get_clientdata(client); long val = 0; switch (pwmctl) { case 0: val = 0x03; /* Run at full speed */ break; case 1: val = 0x07; /* Manual mode */ break; case 2: switch (pwmchan) { case 1: /* Remote1 controls PWM */ val = 0x00; break; case 2: /* local controls PWM */ val = 0x01; break; case 4: /* remote2 controls PWM */ val = 0x02; break; case 6: /* local/remote2 control PWM */ val = 0x05; break; case 7: /* All three control 
PWM */ val = 0x06; break; default: return -EINVAL; } break; default: return -EINVAL; } data->pwmctl[index] = pwmctl; data->pwmchan[index] = pwmchan; data->pwm[CONTROL][index] &= ~0xE0; data->pwm[CONTROL][index] |= (val & 7) << 5; i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index), data->pwm[CONTROL][index]); return 0; } static ssize_t set_pwmchan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); struct i2c_client *client = to_i2c_client(dev); struct adt7475_data *data = i2c_get_clientdata(client); int r; long val; if (strict_strtol(buf, 10, &val)) return -EINVAL; mutex_lock(&data->lock); /* Read Modify Write PWM values */ adt7475_read_pwm(client, sattr->index); r = hw_set_pwm(client, sattr->index, data->pwmctl[sattr->index], val); if (r) count = r; mutex_unlock(&data->lock); return count; } static ssize_t set_pwmctrl(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); struct i2c_client *client = to_i2c_client(dev); struct adt7475_data *data = i2c_get_clientdata(client); int r; long val; if (strict_strtol(buf, 10, &val)) return -EINVAL; mutex_lock(&data->lock); /* Read Modify Write PWM values */ adt7475_read_pwm(client, sattr->index); r = hw_set_pwm(client, sattr->index, val, data->pwmchan[sattr->index]); if (r) count = r; mutex_unlock(&data->lock); return count; } /* List of frequencies for the PWM */ static const int pwmfreq_table[] = { 11, 14, 22, 29, 35, 44, 58, 88 }; static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); return sprintf(buf, "%d\n", pwmfreq_table[data->range[sattr->index] & 7]); } static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, const char *buf, 
size_t count) { struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); struct i2c_client *client = to_i2c_client(dev); struct adt7475_data *data = i2c_get_clientdata(client); int out; long val; if (strict_strtol(buf, 10, &val)) return -EINVAL; out = find_nearest(val, pwmfreq_table, ARRAY_SIZE(pwmfreq_table)); mutex_lock(&data->lock); data->range[sattr->index] = adt7475_read(TEMP_TRANGE_REG(sattr->index)); data->range[sattr->index] &= ~7; data->range[sattr->index] |= out; i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(sattr->index), data->range[sattr->index]); mutex_unlock(&data->lock); return count; } static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_voltage, NULL, INPUT, 0); static SENSOR_DEVICE_ATTR_2(in1_max, S_IRUGO | S_IWUSR, show_voltage, set_voltage, MAX, 0); static SENSOR_DEVICE_ATTR_2(in1_min, S_IRUGO | S_IWUSR, show_voltage, set_voltage, MIN, 0); static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, show_voltage, NULL, ALARM, 0); static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_voltage, NULL, INPUT, 1); static SENSOR_DEVICE_ATTR_2(in2_max, S_IRUGO | S_IWUSR, show_voltage, set_voltage, MAX, 1); static SENSOR_DEVICE_ATTR_2(in2_min, S_IRUGO | S_IWUSR, show_voltage, set_voltage, MIN, 1); static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, show_voltage, NULL, ALARM, 1); static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, INPUT, 0); static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, show_temp, NULL, ALARM, 0); static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, show_temp, NULL, FAULT, 0); static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp, MAX, 0); static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp, set_temp, MIN, 0); static SENSOR_DEVICE_ATTR_2(temp1_offset, S_IRUGO | S_IWUSR, show_temp, set_temp, OFFSET, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_point1_temp, S_IRUGO | S_IWUSR, show_temp, set_temp, AUTOMIN, 0); static SENSOR_DEVICE_ATTR_2(temp1_auto_point2_temp, S_IRUGO | S_IWUSR, 
show_point2, set_point2, 0, 0); static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, set_temp, THERM, 0); static SENSOR_DEVICE_ATTR_2(temp1_crit_hyst, S_IRUGO | S_IWUSR, show_temp, set_temp, HYSTERSIS, 0); static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, INPUT, 1); static SENSOR_DEVICE_ATTR_2(temp2_alarm, S_IRUGO, show_temp, NULL, ALARM, 1); static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp, MAX, 1); static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp, set_temp, MIN, 1); static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IRUGO | S_IWUSR, show_temp, set_temp, OFFSET, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_point1_temp, S_IRUGO | S_IWUSR, show_temp, set_temp, AUTOMIN, 1); static SENSOR_DEVICE_ATTR_2(temp2_auto_point2_temp, S_IRUGO | S_IWUSR, show_point2, set_point2, 0, 1); static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, set_temp, THERM, 1); static SENSOR_DEVICE_ATTR_2(temp2_crit_hyst, S_IRUGO | S_IWUSR, show_temp, set_temp, HYSTERSIS, 1); static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, INPUT, 2); static SENSOR_DEVICE_ATTR_2(temp3_alarm, S_IRUGO, show_temp, NULL, ALARM, 2); static SENSOR_DEVICE_ATTR_2(temp3_fault, S_IRUGO, show_temp, NULL, FAULT, 2); static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp, MAX, 2); static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp, set_temp, MIN, 2); static SENSOR_DEVICE_ATTR_2(temp3_offset, S_IRUGO | S_IWUSR, show_temp, set_temp, OFFSET, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_point1_temp, S_IRUGO | S_IWUSR, show_temp, set_temp, AUTOMIN, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_point2_temp, S_IRUGO | S_IWUSR, show_point2, set_point2, 0, 2); static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, set_temp, THERM, 2); static SENSOR_DEVICE_ATTR_2(temp3_crit_hyst, S_IRUGO | S_IWUSR, show_temp, set_temp, HYSTERSIS, 2); static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, 
show_tach, NULL, INPUT, 0); static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_tach, set_tach, MIN, 0); static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, show_tach, NULL, ALARM, 0); static SENSOR_DEVICE_ATTR_2(fan2_input, S_IRUGO, show_tach, NULL, INPUT, 1); static SENSOR_DEVICE_ATTR_2(fan2_min, S_IRUGO | S_IWUSR, show_tach, set_tach, MIN, 1); static SENSOR_DEVICE_ATTR_2(fan2_alarm, S_IRUGO, show_tach, NULL, ALARM, 1); static SENSOR_DEVICE_ATTR_2(fan3_input, S_IRUGO, show_tach, NULL, INPUT, 2); static SENSOR_DEVICE_ATTR_2(fan3_min, S_IRUGO | S_IWUSR, show_tach, set_tach, MIN, 2); static SENSOR_DEVICE_ATTR_2(fan3_alarm, S_IRUGO, show_tach, NULL, ALARM, 2); static SENSOR_DEVICE_ATTR_2(fan4_input, S_IRUGO, show_tach, NULL, INPUT, 3); static SENSOR_DEVICE_ATTR_2(fan4_min, S_IRUGO | S_IWUSR, show_tach, set_tach, MIN, 3); static SENSOR_DEVICE_ATTR_2(fan4_alarm, S_IRUGO, show_tach, NULL, ALARM, 3); static SENSOR_DEVICE_ATTR_2(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT, 0); static SENSOR_DEVICE_ATTR_2(pwm1_freq, S_IRUGO | S_IWUSR, show_pwmfreq, set_pwmfreq, INPUT, 0); static SENSOR_DEVICE_ATTR_2(pwm1_enable, S_IRUGO | S_IWUSR, show_pwmctrl, set_pwmctrl, INPUT, 0); static SENSOR_DEVICE_ATTR_2(pwm1_auto_channels_temp, S_IRUGO | S_IWUSR, show_pwmchan, set_pwmchan, INPUT, 0); static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm, set_pwm, MIN, 0); static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm, set_pwm, MAX, 0); static SENSOR_DEVICE_ATTR_2(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT, 1); static SENSOR_DEVICE_ATTR_2(pwm2_freq, S_IRUGO | S_IWUSR, show_pwmfreq, set_pwmfreq, INPUT, 1); static SENSOR_DEVICE_ATTR_2(pwm2_enable, S_IRUGO | S_IWUSR, show_pwmctrl, set_pwmctrl, INPUT, 1); static SENSOR_DEVICE_ATTR_2(pwm2_auto_channels_temp, S_IRUGO | S_IWUSR, show_pwmchan, set_pwmchan, INPUT, 1); static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm, set_pwm, MIN, 1); static 
SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm, set_pwm, MAX, 1); static SENSOR_DEVICE_ATTR_2(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT, 2); static SENSOR_DEVICE_ATTR_2(pwm3_freq, S_IRUGO | S_IWUSR, show_pwmfreq, set_pwmfreq, INPUT, 2); static SENSOR_DEVICE_ATTR_2(pwm3_enable, S_IRUGO | S_IWUSR, show_pwmctrl, set_pwmctrl, INPUT, 2); static SENSOR_DEVICE_ATTR_2(pwm3_auto_channels_temp, S_IRUGO | S_IWUSR, show_pwmchan, set_pwmchan, INPUT, 2); static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm, set_pwm, MIN, 2); static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm, set_pwm, MAX, 2); static struct attribute *adt7475_attrs[] = { &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp1_fault.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_offset.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_offset.dev_attr.attr, &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp2_crit.dev_attr.attr, &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, 
&sensor_dev_attr_temp3_fault.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_offset.dev_attr.attr, &sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp3_crit.dev_attr.attr, &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &sensor_dev_attr_fan3_input.dev_attr.attr, &sensor_dev_attr_fan3_min.dev_attr.attr, &sensor_dev_attr_fan3_alarm.dev_attr.attr, &sensor_dev_attr_fan4_input.dev_attr.attr, &sensor_dev_attr_fan4_min.dev_attr.attr, &sensor_dev_attr_fan4_alarm.dev_attr.attr, &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm1_freq.dev_attr.attr, &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr, &sensor_dev_attr_pwm2.dev_attr.attr, &sensor_dev_attr_pwm2_freq.dev_attr.attr, &sensor_dev_attr_pwm2_enable.dev_attr.attr, &sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr, &sensor_dev_attr_pwm3.dev_attr.attr, &sensor_dev_attr_pwm3_freq.dev_attr.attr, &sensor_dev_attr_pwm3_enable.dev_attr.attr, &sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr, NULL, }; struct attribute_group adt7475_attr_group = { .attrs = adt7475_attrs }; static int adt7475_detect(struct i2c_client *client, int kind, struct i2c_board_info *info) { struct i2c_adapter *adapter = 
client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; if (kind <= 0) { if (adt7475_read(REG_VENDID) != 0x41 || adt7475_read(REG_DEVID) != 0x75) { dev_err(&adapter->dev, "Couldn't detect a adt7475 part at 0x%02x\n", (unsigned int)client->addr); return -ENODEV; } } strlcpy(info->type, adt7475_id[0].name, I2C_NAME_SIZE); return 0; } static int adt7475_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adt7475_data *data; int i, ret = 0; data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return -ENOMEM; mutex_init(&data->lock); i2c_set_clientdata(client, data); /* Call adt7475_read_pwm for all pwm's as this will reprogram any pwm's which are disabled to manual mode with 0% duty cycle */ for (i = 0; i < ADT7475_PWM_COUNT; i++) adt7475_read_pwm(client, i); ret = sysfs_create_group(&client->dev.kobj, &adt7475_attr_group); if (ret) goto efree; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { ret = PTR_ERR(data->hwmon_dev); goto eremove; } return 0; eremove: sysfs_remove_group(&client->dev.kobj, &adt7475_attr_group); efree: kfree(data); return ret; } static int adt7475_remove(struct i2c_client *client) { struct adt7475_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &adt7475_attr_group); kfree(data); return 0; } static struct i2c_driver adt7475_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "adt7475", }, .probe = adt7475_probe, .remove = adt7475_remove, .id_table = adt7475_id, .detect = adt7475_detect, .address_data = &addr_data, }; static void adt7475_read_hystersis(struct i2c_client *client) { struct adt7475_data *data = i2c_get_clientdata(client); data->temp[HYSTERSIS][0] = (u16) adt7475_read(REG_REMOTE1_HYSTERSIS); data->temp[HYSTERSIS][1] = data->temp[HYSTERSIS][0]; data->temp[HYSTERSIS][2] = (u16) adt7475_read(REG_REMOTE2_HYSTERSIS); } static void adt7475_read_pwm(struct 
i2c_client *client, int index) { struct adt7475_data *data = i2c_get_clientdata(client); unsigned int v; data->pwm[CONTROL][index] = adt7475_read(PWM_CONFIG_REG(index)); /* Figure out the internal value for pwmctrl and pwmchan based on the current settings */ v = (data->pwm[CONTROL][index] >> 5) & 7; if (v == 3) data->pwmctl[index] = 0; else if (v == 7) data->pwmctl[index] = 1; else if (v == 4) { /* The fan is disabled - we don't want to support that, so change to manual mode and set the duty cycle to 0 instead */ data->pwm[INPUT][index] = 0; data->pwm[CONTROL][index] &= ~0xE0; data->pwm[CONTROL][index] |= (7 << 5); i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index), data->pwm[INPUT][index]); i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index), data->pwm[CONTROL][index]); data->pwmctl[index] = 1; } else { data->pwmctl[index] = 2; switch (v) { case 0: data->pwmchan[index] = 1; break; case 1: data->pwmchan[index] = 2; break; case 2: data->pwmchan[index] = 4; break; case 5: data->pwmchan[index] = 6; break; case 6: data->pwmchan[index] = 7; break; } } } static struct adt7475_data *adt7475_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct adt7475_data *data = i2c_get_clientdata(client); u8 ext; int i; mutex_lock(&data->lock); /* Measurement values update every 2 seconds */ if (time_after(jiffies, data->measure_updated + HZ * 2) || !data->valid) { data->alarms = adt7475_read(REG_STATUS2) << 8; data->alarms |= adt7475_read(REG_STATUS1); ext = adt7475_read(REG_EXTEND1); for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) data->voltage[INPUT][i] = (adt7475_read(VOLTAGE_REG(i)) << 2) | ((ext >> ((i + 1) * 2)) & 3); ext = adt7475_read(REG_EXTEND2); for (i = 0; i < ADT7475_TEMP_COUNT; i++) data->temp[INPUT][i] = (adt7475_read(TEMP_REG(i)) << 2) | ((ext >> ((i + 1) * 2)) & 3); for (i = 0; i < ADT7475_TACH_COUNT; i++) data->tach[INPUT][i] = adt7475_read_word(client, TACH_REG(i)); /* Updated by hw when in auto mode */ for (i = 0; i < 
ADT7475_PWM_COUNT; i++) data->pwm[INPUT][i] = adt7475_read(PWM_REG(i)); data->measure_updated = jiffies; } /* Limits and settings, should never change update every 60 seconds */ if (time_after(jiffies, data->limits_updated + HZ * 60) || !data->valid) { data->config5 = adt7475_read(REG_CONFIG5); for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) { /* Adjust values so they match the input precision */ data->voltage[MIN][i] = adt7475_read(VOLTAGE_MIN_REG(i)) << 2; data->voltage[MAX][i] = adt7475_read(VOLTAGE_MAX_REG(i)) << 2; } for (i = 0; i < ADT7475_TEMP_COUNT; i++) { /* Adjust values so they match the input precision */ data->temp[MIN][i] = adt7475_read(TEMP_MIN_REG(i)) << 2; data->temp[MAX][i] = adt7475_read(TEMP_MAX_REG(i)) << 2; data->temp[AUTOMIN][i] = adt7475_read(TEMP_TMIN_REG(i)) << 2; data->temp[THERM][i] = adt7475_read(TEMP_THERM_REG(i)) << 2; data->temp[OFFSET][i] = adt7475_read(TEMP_OFFSET_REG(i)); } adt7475_read_hystersis(client); for (i = 0; i < ADT7475_TACH_COUNT; i++) data->tach[MIN][i] = adt7475_read_word(client, TACH_MIN_REG(i)); for (i = 0; i < ADT7475_PWM_COUNT; i++) { data->pwm[MAX][i] = adt7475_read(PWM_MAX_REG(i)); data->pwm[MIN][i] = adt7475_read(PWM_MIN_REG(i)); /* Set the channel and control information */ adt7475_read_pwm(client, i); } data->range[0] = adt7475_read(TEMP_TRANGE_REG(0)); data->range[1] = adt7475_read(TEMP_TRANGE_REG(1)); data->range[2] = adt7475_read(TEMP_TRANGE_REG(2)); data->limits_updated = jiffies; data->valid = 1; } mutex_unlock(&data->lock); return data; } static int __init sensors_adt7475_init(void) { return i2c_add_driver(&adt7475_driver); } static void __exit sensors_adt7475_exit(void) { i2c_del_driver(&adt7475_driver); } MODULE_AUTHOR("Advanced Micro Devices, Inc"); MODULE_DESCRIPTION("adt7475 driver"); MODULE_LICENSE("GPL"); module_init(sensors_adt7475_init); module_exit(sensors_adt7475_exit);
gpl-2.0
t0mm13b/Zte-Blade-ClockWorkMod-Kernel
drivers/staging/vt6656/main_usb.c
504
63544
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * File: main_usb.c
 *
 * Purpose: driver entry for initial, open, close, tx and rx.
 *
 * Author: Lyndon Chen
 *
 * Date: Dec 8, 2005
 *
 * Functions:
 *
 *   vntwusb_found1 - module initial (insmod) driver entry
 *   device_remove1 - module remove entry
 *   device_open - allocate dma/descriptor resource & initial mac/bbp function
 *   device_xmit - asynchronous data tx function
 *   device_set_multi - set mac filter
 *   device_ioctl - ioctl entry
 *   device_close - shutdown mac/bbp & free dma/descriptor resource
 *   device_alloc_frag_buf - rx fragment pre-allocated function
 *   device_free_tx_bufs - free tx buffer function
 *   device_dma0_tx_80211 - tx 802.11 frame via dma0
 *   device_dma0_xmit - tx PS buffered frame via dma0
 *   device_init_registers - initial MAC & BBP & RF internal registers.
 *   device_init_rings - initial tx/rx ring buffer
 *   device_init_defrag_cb - initial & allocate de-fragment buffer.
* device_tx_srv- tx interrupt service function * * Revision History: */ #undef __NO_VERSION__ #include "device.h" #include "card.h" #include "baseband.h" #include "mac.h" #include "tether.h" #include "wmgr.h" #include "wctl.h" #include "power.h" #include "wcmd.h" #include "iocmd.h" #include "tcrc.h" #include "rxtx.h" #include "bssdb.h" #include "hostap.h" #include "wpactl.h" #include "ioctl.h" #include "iwctl.h" #include "dpc.h" #include "iocmd.h" #include "datarate.h" #include "rf.h" #include "firmware.h" #include "mac.h" #include "rndis.h" #include "control.h" #include "channel.h" #include "int.h" #include "iowpa.h" /*--------------------- Static Definitions -------------------------*/ //static int msglevel =MSG_LEVEL_DEBUG; static int msglevel =MSG_LEVEL_INFO; // // Define module options // // Version Information #define DRIVER_AUTHOR "VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DEVICE_FULL_DRV_NAM); #define DEVICE_PARAM(N,D) \ static int N[MAX_UINTS]=OPTION_DEFAULT;\ module_param_array(N, int, NULL, 0);\ MODULE_PARM_DESC(N, D); #define RX_DESC_MIN0 16 #define RX_DESC_MAX0 128 #define RX_DESC_DEF0 64 DEVICE_PARAM(RxDescriptors0,"Number of receive usb desc buffer"); #define TX_DESC_MIN0 16 #define TX_DESC_MAX0 128 #define TX_DESC_DEF0 64 DEVICE_PARAM(TxDescriptors0,"Number of transmit usb desc buffer"); #define CHANNEL_MIN 1 #define CHANNEL_MAX 14 #define CHANNEL_DEF 6 DEVICE_PARAM(Channel, "Channel number"); /* PreambleType[] is the preamble length used for transmit. 
0: indicate allows long preamble type 1: indicate allows short preamble type */ #define PREAMBLE_TYPE_DEF 1 DEVICE_PARAM(PreambleType, "Preamble Type"); #define RTS_THRESH_MIN 512 #define RTS_THRESH_MAX 2347 #define RTS_THRESH_DEF 2347 DEVICE_PARAM(RTSThreshold, "RTS threshold"); #define FRAG_THRESH_MIN 256 #define FRAG_THRESH_MAX 2346 #define FRAG_THRESH_DEF 2346 DEVICE_PARAM(FragThreshold, "Fragmentation threshold"); #define DATA_RATE_MIN 0 #define DATA_RATE_MAX 13 #define DATA_RATE_DEF 13 /* datarate[] index 0: indicate 1 Mbps 0x02 1: indicate 2 Mbps 0x04 2: indicate 5.5 Mbps 0x0B 3: indicate 11 Mbps 0x16 4: indicate 6 Mbps 0x0c 5: indicate 9 Mbps 0x12 6: indicate 12 Mbps 0x18 7: indicate 18 Mbps 0x24 8: indicate 24 Mbps 0x30 9: indicate 36 Mbps 0x48 10: indicate 48 Mbps 0x60 11: indicate 54 Mbps 0x6c 12: indicate 72 Mbps 0x90 13: indicate auto rate */ DEVICE_PARAM(ConnectionRate, "Connection data rate"); #define OP_MODE_MAX 2 #define OP_MODE_DEF 0 #define OP_MODE_MIN 0 DEVICE_PARAM(OPMode, "Infrastruct, adhoc, AP mode "); /* OpMode[] is used for transmit. 
0: indicate infrastruct mode used 1: indicate adhoc mode used 2: indicate AP mode used */ /* PSMode[] 0: indicate disable power saving mode 1: indicate enable power saving mode */ #define PS_MODE_DEF 0 DEVICE_PARAM(PSMode, "Power saving mode"); #define SHORT_RETRY_MIN 0 #define SHORT_RETRY_MAX 31 #define SHORT_RETRY_DEF 8 DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits"); #define LONG_RETRY_MIN 0 #define LONG_RETRY_MAX 15 #define LONG_RETRY_DEF 4 DEVICE_PARAM(LongRetryLimit, "long frame retry limits"); /* BasebandType[] baseband type selected 0: indicate 802.11a type 1: indicate 802.11b type 2: indicate 802.11g type */ #define BBP_TYPE_MIN 0 #define BBP_TYPE_MAX 2 #define BBP_TYPE_DEF 2 DEVICE_PARAM(BasebandType, "baseband type"); /* 80211hEnable[] 0: indicate disable 802.11h 1: indicate enable 802.11h */ #define X80211h_MODE_DEF 0 DEVICE_PARAM(b80211hEnable, "802.11h mode"); // // Static vars definitions // static struct usb_device_id vntwusb_table[] = { {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)}, {} }; // Frequency list (map channels to frequencies) /* static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484, 4915, 4920, 4925, 4935, 4940, 4945, 4960, 4980, 5035, 5040, 5045, 5055, 5060, 5080, 5170, 5180, 5190, 5200, 5210, 5220, 5230, 5240, 5260, 5280, 5300, 5320, 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680, 5700, 5745, 5765, 5785, 5805, 5825 }; #ifndef IW_ENCODE_NOKEY #define IW_ENCODE_NOKEY 0x0800 #define IW_ENCODE_MODE (IW_ENCODE_DISABLED | IW_ENCODE_RESTRICTED | IW_ENCODE_OPEN) #endif static const struct iw_handler_def iwctl_handler_def; */ /*--------------------- Static Functions --------------------------*/ static int vntwusb_found1(struct usb_interface *intf, const struct usb_device_id *id); static void vntwusb_disconnect(struct usb_interface *intf); #ifdef CONFIG_PM /* Minimal support for suspend and resume */ static int vntwusb_suspend(struct usb_interface 
*intf, pm_message_t message); static int vntwusb_resume(struct usb_interface *intf); #endif static struct net_device_stats *device_get_stats(struct net_device *dev); static int device_open(struct net_device *dev); static int device_xmit(struct sk_buff *skb, struct net_device *dev); static void device_set_multi(struct net_device *dev); static int device_close(struct net_device *dev); static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType); static BOOL device_init_defrag_cb(PSDevice pDevice); static void device_init_diversity_timer(PSDevice pDevice); static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev); static int ethtool_ioctl(struct net_device *dev, void *useraddr); static void device_free_tx_bufs(PSDevice pDevice); static void device_free_rx_bufs(PSDevice pDevice); static void device_free_int_bufs(PSDevice pDevice); static void device_free_frag_bufs(PSDevice pDevice); static BOOL device_alloc_bufs(PSDevice pDevice); static int Read_config_file(PSDevice pDevice); static UCHAR *Config_FileOperation(PSDevice pDevice); static int Config_FileGetParameter(UCHAR *string, UCHAR *dest,UCHAR *source); //2008-0714<Add>by Mike Liu static BOOL device_release_WPADEV(PSDevice pDevice); static void usb_device_reset(PSDevice pDevice); /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ static void device_set_options(PSDevice pDevice) { BYTE abyBroadcastAddr[U_ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; BYTE abySNAP_RFC1042[U_ETHER_ADDR_LEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00}; BYTE abySNAP_Bridgetunnel[U_ETHER_ADDR_LEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8}; memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, U_ETHER_ADDR_LEN); memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, U_ETHER_ADDR_LEN); memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, 
U_ETHER_ADDR_LEN); pDevice->cbTD = TX_DESC_DEF0; pDevice->cbRD = RX_DESC_DEF0; pDevice->uChannel = CHANNEL_DEF; pDevice->wRTSThreshold = RTS_THRESH_DEF; pDevice->wFragmentationThreshold = FRAG_THRESH_DEF; pDevice->byShortRetryLimit = SHORT_RETRY_DEF; pDevice->byLongRetryLimit = LONG_RETRY_DEF; pDevice->wMaxTransmitMSDULifetime = DEFAULT_MSDU_LIFETIME; pDevice->byShortPreamble = PREAMBLE_TYPE_DEF; pDevice->ePSMode = PS_MODE_DEF; pDevice->b11hEnable = X80211h_MODE_DEF; pDevice->eOPMode = OP_MODE_DEF; pDevice->uConnectionRate = DATA_RATE_DEF; if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = TRUE; pDevice->byBBType = BBP_TYPE_DEF; pDevice->byPacketType = pDevice->byBBType; pDevice->byAutoFBCtrl = AUTO_FB_0; pDevice->bUpdateBBVGA = TRUE; pDevice->byFOETuning = 0; pDevice->byAutoPwrTunning = 0; pDevice->wCTSDuration = 0; pDevice->byPreambleType = 0; pDevice->bExistSWNetAddr = FALSE; // pDevice->bDiversityRegCtlON = TRUE; pDevice->bDiversityRegCtlON = FALSE; } static VOID device_init_diversity_timer(PSDevice pDevice) { init_timer(&pDevice->TimerSQ3Tmax1); pDevice->TimerSQ3Tmax1.data = (ULONG)pDevice; pDevice->TimerSQ3Tmax1.function = (TimerFunction)TimerSQ3CallBack; pDevice->TimerSQ3Tmax1.expires = RUN_AT(HZ); init_timer(&pDevice->TimerSQ3Tmax2); pDevice->TimerSQ3Tmax2.data = (ULONG)pDevice; pDevice->TimerSQ3Tmax2.function = (TimerFunction)TimerSQ3CallBack; pDevice->TimerSQ3Tmax2.expires = RUN_AT(HZ); init_timer(&pDevice->TimerSQ3Tmax3); pDevice->TimerSQ3Tmax3.data = (ULONG)pDevice; pDevice->TimerSQ3Tmax3.function = (TimerFunction)TimerSQ3Tmax3CallBack; pDevice->TimerSQ3Tmax3.expires = RUN_AT(HZ); return; } // // Initialiation of MAC & BBP registers // static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType) { BYTE abyBroadcastAddr[U_ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; BYTE abySNAP_RFC1042[U_ETHER_ADDR_LEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00}; BYTE abySNAP_Bridgetunnel[U_ETHER_ADDR_LEN] = {0xAA, 0xAA, 0x03, 0x00, 
0x00, 0xF8}; BYTE byAntenna; UINT ii; CMD_CARD_INIT sInitCmd; NTSTATUS ntStatus = STATUS_SUCCESS; RSP_CARD_INIT sInitRsp; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); BYTE byTmp; BYTE byCalibTXIQ = 0; BYTE byCalibTXDC = 0; BYTE byCalibRXIQ = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---->INIbInitAdapter. [%d][%d]\n", InitType, pDevice->byPacketType); spin_lock_irq(&pDevice->lock); if (InitType == DEVICE_INIT_COLD) { memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, U_ETHER_ADDR_LEN); memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, U_ETHER_ADDR_LEN); memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, U_ETHER_ADDR_LEN); if ( !FIRMWAREbCheckVersion(pDevice) ) { if (FIRMWAREbDownload(pDevice) == TRUE) { if (FIRMWAREbBrach2Sram(pDevice) == FALSE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbBrach2Sram fail \n"); spin_unlock_irq(&pDevice->lock); return FALSE; } } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbDownload fail \n"); spin_unlock_irq(&pDevice->lock); return FALSE; } } if ( !BBbVT3184Init(pDevice) ) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" BBbVT3184Init fail \n"); spin_unlock_irq(&pDevice->lock); return FALSE; } } sInitCmd.byInitClass = (BYTE)InitType; sInitCmd.bExistSWNetAddr = (BYTE) pDevice->bExistSWNetAddr; for(ii=0;ii<6;ii++) sInitCmd.bySWNetAddr[ii] = pDevice->abyCurrentNetAddr[ii]; sInitCmd.byShortRetryLimit = pDevice->byShortRetryLimit; sInitCmd.byLongRetryLimit = pDevice->byLongRetryLimit; //issue Card_init command to device ntStatus = CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_CARDINIT, 0, 0, sizeof(CMD_CARD_INIT), (PBYTE) &(sInitCmd)); if ( ntStatus != STATUS_SUCCESS ) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Issue Card init fail \n"); spin_unlock_irq(&pDevice->lock); return FALSE; } if (InitType == DEVICE_INIT_COLD) { ntStatus = CONTROLnsRequestIn(pDevice,MESSAGE_TYPE_INIT_RSP,0,0,sizeof(RSP_CARD_INIT), (PBYTE) &(sInitRsp)); if (ntStatus != STATUS_SUCCESS) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Cardinit request in status fail!\n"); 
spin_unlock_irq(&pDevice->lock); return FALSE; } //Local ID for AES functions ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, MAC_REG_LOCALID, MESSAGE_REQUEST_MACREG, 1, &pDevice->byLocalID); if ( ntStatus != STATUS_SUCCESS ) { spin_unlock_irq(&pDevice->lock); return FALSE; } // Do MACbSoftwareReset in MACvInitialize // force CCK pDevice->bCCK = TRUE; pDevice->bProtectMode = FALSE; //Only used in 11g type, sync with ERP IE pDevice->bNonERPPresent = FALSE; pDevice->bBarkerPreambleMd = FALSE; if ( pDevice->bFixRate ) { pDevice->wCurrentRate = (WORD) pDevice->uConnectionRate; } else { if ( pDevice->byBBType == BB_TYPE_11B ) pDevice->wCurrentRate = RATE_11M; else pDevice->wCurrentRate = RATE_54M; } CHvInitChannelTable(pDevice); pDevice->byTopOFDMBasicRate = RATE_24M; pDevice->byTopCCKBasicRate = RATE_1M; pDevice->byRevId = 0; //Target to IF pin while programming to RF chip. pDevice->byCurPwr = 0xFF; pDevice->byCCKPwr = pDevice->abyEEPROM[EEP_OFS_PWR_CCK]; pDevice->byOFDMPwrG = pDevice->abyEEPROM[EEP_OFS_PWR_OFDMG]; // Load power Table for (ii=0;ii<14;ii++) { pDevice->abyCCKPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_CCK_PWR_TBL]; if (pDevice->abyCCKPwrTbl[ii] == 0) pDevice->abyCCKPwrTbl[ii] = pDevice->byCCKPwr; pDevice->abyOFDMPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDM_PWR_TBL]; if (pDevice->abyOFDMPwrTbl[ii] == 0) pDevice->abyOFDMPwrTbl[ii] = pDevice->byOFDMPwrG; } //original zonetype is USA,but customize zonetype is europe, // then need recover 12,13 ,14 channel with 11 channel if(((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) || (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe))&& (pDevice->byOriginalZonetype == ZoneType_USA)) { for(ii=11;ii<14;ii++) { pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10]; pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10]; } } //{{ RobertYu: 20041124 pDevice->byOFDMPwrA = 0x34; // same as RFbMA2829SelectChannel // Load OFDM A Power Table for (ii=0;ii<CB_MAX_CHANNEL_5G;ii++) { 
//RobertYu:20041224, bug using CB_MAX_CHANNEL pDevice->abyOFDMAPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDMA_PWR_TBL]; if (pDevice->abyOFDMAPwrTbl[ii] == 0) pDevice->abyOFDMAPwrTbl[ii] = pDevice->byOFDMPwrA; } //}} RobertYu byAntenna = pDevice->abyEEPROM[EEP_OFS_ANTENNA]; if (byAntenna & EEP_ANTINV) pDevice->bTxRxAntInv = TRUE; else pDevice->bTxRxAntInv = FALSE; byAntenna &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN); if (byAntenna == 0) // if not set default is All byAntenna = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN); if (byAntenna == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) { pDevice->byAntennaCount = 2; pDevice->byTxAntennaMode = ANT_B; pDevice->dwTxAntennaSel = 1; pDevice->dwRxAntennaSel = 1; if (pDevice->bTxRxAntInv == TRUE) pDevice->byRxAntennaMode = ANT_A; else pDevice->byRxAntennaMode = ANT_B; if (pDevice->bDiversityRegCtlON) pDevice->bDiversityEnable = TRUE; else pDevice->bDiversityEnable = FALSE; } else { pDevice->bDiversityEnable = FALSE; pDevice->byAntennaCount = 1; pDevice->dwTxAntennaSel = 0; pDevice->dwRxAntennaSel = 0; if (byAntenna & EEP_ANTENNA_AUX) { pDevice->byTxAntennaMode = ANT_A; if (pDevice->bTxRxAntInv == TRUE) pDevice->byRxAntennaMode = ANT_B; else pDevice->byRxAntennaMode = ANT_A; } else { pDevice->byTxAntennaMode = ANT_B; if (pDevice->bTxRxAntInv == TRUE) pDevice->byRxAntennaMode = ANT_A; else pDevice->byRxAntennaMode = ANT_B; } } pDevice->ulDiversityNValue = 100*255; pDevice->ulDiversityMValue = 100*16; pDevice->byTMax = 1; pDevice->byTMax2 = 4; pDevice->ulSQ3TH = 0; pDevice->byTMax3 = 64; // ----------------------------------------------------------------- //Get Auto Fall Back Type pDevice->byAutoFBCtrl = AUTO_FB_0; // Set SCAN Time pDevice->uScanTime = WLAN_SCAN_MINITIME; // default Auto Mode //pDevice->NetworkType = Ndis802_11Automode; pDevice->eConfigPHYMode = PHY_TYPE_AUTO; pDevice->byBBType = BB_TYPE_11G; // initialize BBP registers pDevice->ulTxPower = 25; // Get Channel range pDevice->byMinChannel = 1; pDevice->byMaxChannel = 
CB_MAX_CHANNEL; // Get RFType pDevice->byRFType = sInitRsp.byRFType; if ((pDevice->byRFType & RF_EMU) != 0) { // force change RevID for VT3253 emu pDevice->byRevId = 0x80; } // Load EEPROM calibrated vt3266 parameters if (pDevice->byRFType == RF_VT3226D0) { if((pDevice->abyEEPROM[EEP_OFS_MAJOR_VER] == 0x1) && (pDevice->abyEEPROM[EEP_OFS_MINOR_VER] >= 0x4)) { byCalibTXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_IQ]; byCalibTXDC = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_DC]; byCalibRXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_RX_IQ]; if( (byCalibTXIQ || byCalibTXDC || byCalibRXIQ) ) { ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFF, 0x03); // CR255, Set BB to support TX/RX IQ and DC compensation Mode ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFB, byCalibTXIQ); // CR251, TX I/Q Imbalance Calibration ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFC, byCalibTXDC); // CR252, TX DC-Offset Calibration ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFD, byCalibRXIQ); // CR253, RX I/Q Imbalance Calibration } else { // turn off BB Calibration compensation ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFF, 0x0); // CR255 } } } pMgmt->eScanType = WMAC_SCAN_PASSIVE; pMgmt->uCurrChannel = pDevice->uChannel; pMgmt->uIBSSChannel = pDevice->uChannel; CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel); // get Permanent network address memcpy(pDevice->abyPermanentNetAddr,&(sInitRsp.byNetAddr[0]),6); memcpy(pDevice->abyCurrentNetAddr, pDevice->abyPermanentNetAddr, U_ETHER_ADDR_LEN); // if exist SW network address, use SW network address. DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %02x-%02x-%02x=%02x-%02x-%02x\n", pDevice->abyCurrentNetAddr[0], pDevice->abyCurrentNetAddr[1], pDevice->abyCurrentNetAddr[2], pDevice->abyCurrentNetAddr[3], pDevice->abyCurrentNetAddr[4], pDevice->abyCurrentNetAddr[5]); } // Set BB and packet type at the same time. // Set Short Slot Time, xIFS, and RSPINF. 
if (pDevice->byBBType == BB_TYPE_11A) { CARDbAddBasicRate(pDevice, RATE_6M); pDevice->bShortSlotTime = TRUE; } else { CARDbAddBasicRate(pDevice, RATE_1M); pDevice->bShortSlotTime = FALSE; } BBvSetShortSlotTime(pDevice); CARDvSetBSSMode(pDevice); if (pDevice->bUpdateBBVGA) { pDevice->byBBVGACurrent = pDevice->abyBBVGA[0]; pDevice->byBBVGANew = pDevice->byBBVGACurrent; BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]); } pDevice->byRadioCtl = pDevice->abyEEPROM[EEP_OFS_RADIOCTL]; pDevice->bHWRadioOff = FALSE; if ( (pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) != 0 ) { ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, MAC_REG_GPIOCTL1, MESSAGE_REQUEST_MACREG, 1, &byTmp); if ( ntStatus != STATUS_SUCCESS ) { spin_unlock_irq(&pDevice->lock); return FALSE; } if ( (byTmp & GPIO3_DATA) == 0 ) { pDevice->bHWRadioOff = TRUE; MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD); } else { MACvRegBitsOff(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD); pDevice->bHWRadioOff = FALSE; } } //EEP_RADIOCTL_ENABLE ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_TMLEN,0x38); ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW); MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL0,0x01); if ((pDevice->bHWRadioOff == TRUE) || (pDevice->bRadioControlOff == TRUE)) { CARDbRadioPowerOff(pDevice); } else { CARDbRadioPowerOn(pDevice); } spin_unlock_irq(&pDevice->lock); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----INIbInitAdapter Exit\n"); return TRUE; } static BOOL device_release_WPADEV(PSDevice pDevice) { viawget_wpa_header *wpahdr; int ii=0; // wait_queue_head_t Set_wait; //send device close to wpa_supplicnat layer if (pDevice->bWPADEVUp==TRUE) { wpahdr = (viawget_wpa_header *)pDevice->skb->data; wpahdr->type = VIAWGET_DEVICECLOSE_MSG; wpahdr->resp_ie_len = 0; wpahdr->req_ie_len = 0; skb_put(pDevice->skb, sizeof(viawget_wpa_header)); pDevice->skb->dev = pDevice->wpadev; skb_reset_mac_header(pDevice->skb); pDevice->skb->pkt_type = PACKET_HOST; 
pDevice->skb->protocol = htons(ETH_P_802_2); memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb)); netif_rx(pDevice->skb); pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz); //wait release WPADEV // init_waitqueue_head(&Set_wait); // wait_event_timeout(Set_wait, ((pDevice->wpadev==NULL)&&(pDevice->skb == NULL)),5*HZ); //1s wait while(pDevice->bWPADEVUp==TRUE) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout (HZ/20); //wait 50ms ii++; if(ii>20) break; } }; return TRUE; } #ifdef CONFIG_PM /* Minimal support for suspend and resume */ static int vntwusb_suspend(struct usb_interface *intf, pm_message_t message) { PSDevice pDevice = usb_get_intfdata(intf); struct net_device *dev = pDevice->dev; printk("VNTWUSB Suspend Start======>\n"); if(dev != NULL) { if(pDevice->flags & DEVICE_FLAGS_OPENED) device_close(dev); } usb_put_dev(interface_to_usbdev(intf)); return 0; } static int vntwusb_resume(struct usb_interface *intf) { PSDevice pDevice = usb_get_intfdata(intf); struct net_device *dev = pDevice->dev; printk("VNTWUSB Resume Start======>\n"); if(dev != NULL) { usb_get_dev(interface_to_usbdev(intf)); if(!(pDevice->flags & DEVICE_FLAGS_OPENED)) { if(device_open(dev)!=0) printk("VNTWUSB Resume Start======>open fail\n"); } } return 0; } #endif static const struct net_device_ops device_netdev_ops = { .ndo_open = device_open, .ndo_stop = device_close, .ndo_do_ioctl = device_ioctl, .ndo_get_stats = device_get_stats, .ndo_start_xmit = device_xmit, .ndo_set_multicast_list = device_set_multi, }; static int vntwusb_found1(struct usb_interface *intf, const struct usb_device_id *id) { BYTE fake_mac[U_ETHER_ADDR_LEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x01};//fake MAC address struct usb_device *udev = interface_to_usbdev(intf); int rc = 0; struct net_device *netdev = NULL; PSDevice pDevice = NULL; printk(KERN_NOTICE "%s Ver. 
%s\n",DEVICE_FULL_DRV_NAM, DEVICE_VERSION); printk(KERN_NOTICE "Copyright (c) 2004 VIA Networking Technologies, Inc.\n"); udev = usb_get_dev(udev); netdev = alloc_etherdev(sizeof(DEVICE_INFO)); if (netdev == NULL) { printk(KERN_ERR DEVICE_NAME ": allocate net device failed \n"); kfree(pDevice); goto err_nomem; } pDevice = netdev_priv(netdev); memset(pDevice, 0, sizeof(DEVICE_INFO)); pDevice->dev = netdev; pDevice->usb = udev; // Set initial settings device_set_options(pDevice); spin_lock_init(&pDevice->lock); pDevice->tx_80211 = device_dma0_tx_80211; pDevice->sMgmtObj.pAdapter = (PVOID)pDevice; netdev->netdev_ops = &device_netdev_ops; netdev->wireless_handlers = (struct iw_handler_def *)&iwctl_handler_def; //2008-0623-01<Remark>by MikeLiu //2007-0821-01<Add>by MikeLiu usb_set_intfdata(intf, pDevice); SET_NETDEV_DEV(netdev, &intf->dev); memcpy(pDevice->dev->dev_addr, fake_mac, U_ETHER_ADDR_LEN); //use fake mac address rc = register_netdev(netdev); if (rc != 0) { printk(KERN_ERR DEVICE_NAME " Failed to register netdev\n"); free_netdev(netdev); kfree(pDevice); return -ENODEV; } //2008-0623-02<Remark>by MikeLiu //2007-0821-01<Add>by MikeLiu //#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) //usb_set_intfdata(intf, pDevice); //SET_NETDEV_DEV(netdev, &intf->dev); //#endif //2008-07-21-01<Add>by MikeLiu //register wpadev #if 0 if(wpa_set_wpadev(pDevice, 1)!=0) { printk("Fail to Register WPADEV?\n"); unregister_netdev(pDevice->dev); free_netdev(netdev); kfree(pDevice); } #endif usb_device_reset(pDevice); #ifdef SndEvt_ToAPI { union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.flags = RT_INSMOD_EVENT_FLAG; wrqu.data.length =IFNAMSIZ; wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pDevice->dev->name); } #endif return 0; err_nomem: //2008-0922-01<Add>by MikeLiu, decrease usb counter. 
usb_put_dev(udev); return -ENOMEM; } static VOID device_free_tx_bufs(PSDevice pDevice) { PUSB_SEND_CONTEXT pTxContext; int ii; for (ii = 0; ii < pDevice->cbTD; ii++) { pTxContext = pDevice->apTD[ii]; //de-allocate URBs if (pTxContext->pUrb) { usb_kill_urb(pTxContext->pUrb); usb_free_urb(pTxContext->pUrb); } if (pTxContext) kfree(pTxContext); } return; } static VOID device_free_rx_bufs(PSDevice pDevice) { PRCB pRCB; int ii; for (ii = 0; ii < pDevice->cbRD; ii++) { pRCB = pDevice->apRCB[ii]; //de-allocate URBs if (pRCB->pUrb) { usb_kill_urb(pRCB->pUrb); usb_free_urb(pRCB->pUrb); } //de-allocate skb if (pRCB->skb) dev_kfree_skb(pRCB->skb); } if (pDevice->pRCBMem) kfree(pDevice->pRCBMem); return; } //2007-1107-02<Add>by MikeLiu static void usb_device_reset(PSDevice pDevice) { int status; status = usb_reset_device(pDevice->usb); if (status) printk("usb_device_reset fail status=%d\n",status); return ; } static VOID device_free_int_bufs(PSDevice pDevice) { if (pDevice->intBuf.pDataBuf != NULL) kfree(pDevice->intBuf.pDataBuf); return; } static BOOL device_alloc_bufs(PSDevice pDevice) { PUSB_SEND_CONTEXT pTxContext; PRCB pRCB; int ii; for (ii = 0; ii < pDevice->cbTD; ii++) { pTxContext = kmalloc(sizeof(USB_SEND_CONTEXT), GFP_KERNEL); if (pTxContext == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : allocate tx usb context failed\n", pDevice->dev->name); goto free_tx; } pDevice->apTD[ii] = pTxContext; pTxContext->pDevice = (PVOID) pDevice; //allocate URBs pTxContext->pUrb = usb_alloc_urb(0, GFP_ATOMIC); if (pTxContext->pUrb == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "alloc tx urb failed\n"); goto free_tx; } pTxContext->bBoolInUse = FALSE; } // allocate rcb mem pDevice->pRCBMem = kmalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL); if (pDevice->pRCBMem == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name); goto free_tx; } pDevice->FirstRecvFreeList = NULL; pDevice->LastRecvFreeList = NULL; pDevice->FirstRecvMngList = NULL; 
pDevice->LastRecvMngList = NULL; pDevice->NumRecvFreeList = 0; memset(pDevice->pRCBMem, 0, (sizeof(RCB) * pDevice->cbRD)); pRCB = (PRCB) pDevice->pRCBMem; for (ii = 0; ii < pDevice->cbRD; ii++) { pDevice->apRCB[ii] = pRCB; pRCB->pDevice = (PVOID) pDevice; //allocate URBs pRCB->pUrb = usb_alloc_urb(0, GFP_ATOMIC); if (pRCB->pUrb == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to alloc rx urb\n"); goto free_rx_tx; } pRCB->skb = dev_alloc_skb((int)pDevice->rx_buf_sz); if (pRCB->skb == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to alloc rx skb\n"); goto free_rx_tx; } pRCB->skb->dev = pDevice->dev; pRCB->bBoolInUse = FALSE; EnqueueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList, pRCB); pDevice->NumRecvFreeList++; pRCB++; } pDevice->pControlURB = usb_alloc_urb(0, GFP_ATOMIC); if (pDevice->pControlURB == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc control urb\n"); goto free_rx_tx; } pDevice->pInterruptURB = usb_alloc_urb(0, GFP_ATOMIC); if (pDevice->pInterruptURB == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int urb\n"); usb_kill_urb(pDevice->pControlURB); usb_free_urb(pDevice->pControlURB); goto free_rx_tx; } pDevice->intBuf.pDataBuf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL); if (pDevice->intBuf.pDataBuf == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int buf\n"); usb_kill_urb(pDevice->pControlURB); usb_kill_urb(pDevice->pInterruptURB); usb_free_urb(pDevice->pControlURB); usb_free_urb(pDevice->pInterruptURB); goto free_rx_tx; } return TRUE; free_rx_tx: device_free_rx_bufs(pDevice); free_tx: device_free_tx_bufs(pDevice); return FALSE; } static BOOL device_init_defrag_cb(PSDevice pDevice) { int i; PSDeFragControlBlock pDeF; /* Init the fragment ctl entries */ for (i = 0; i < CB_MAX_RX_FRAG; i++) { pDeF = &(pDevice->sRxDFCB[i]); if (!device_alloc_frag_buf(pDevice, pDeF)) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc frag bufs\n", pDevice->dev->name); goto free_frag; }; } pDevice->cbDFCB = CB_MAX_RX_FRAG; 
pDevice->cbFreeDFCB = pDevice->cbDFCB;
	return TRUE;

free_frag:
	device_free_frag_bufs(pDevice);
	return FALSE;
}

/* Release the skb of every de-fragmentation control block that has one. */
static void device_free_frag_bufs(PSDevice pDevice)
{
	PSDeFragControlBlock pDeF;
	int i;

	for (i = 0; i < CB_MAX_RX_FRAG; i++) {
		pDeF = &(pDevice->sRxDFCB[i]);
		if (pDeF->skb)
			dev_kfree_skb(pDeF->skb);
	}
}

/*
 * device_alloc_frag_buf - attach a freshly allocated rx_buf_sz skb to one
 * de-fragmentation control block. Returns FALSE if the skb allocation fails.
 */
BOOL device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF)
{
	pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
	if (pDeF->skb == NULL)
		return FALSE;
	ASSERT(pDeF->skb);
	pDeF->skb->dev = pDevice->dev;

	return TRUE;
}

/*-----------------------------------------------------------------*/

/*
 * device_open - ndo_open handler: bring the interface up.
 *
 * Ordered sequence: allocate USB buffers, init defragment control blocks,
 * read the config file, cold-init the hardware registers, set the RX
 * filter, init key tables and management state, start tasklets and the
 * second-tick timer, kick the RX/interrupt work items, re-apply any WEP
 * key set before open, and schedule the initial AP-run or BSSID scan.
 * On failure, unwinds via the goto chain. Returns 0 or -ENOMEM.
 */
static int device_open(struct net_device *dev)
{
	PSDevice pDevice=(PSDevice) netdev_priv(dev);

#ifdef WPA_SM_Transtatus
	/* Reset the externally shared WPA state-machine result. */
	extern SWPAResult wpa_Result;
	memset(wpa_Result.ifname,0,sizeof(wpa_Result.ifname));
	wpa_Result.proto = 0;
	wpa_Result.key_mgmt = 0;
	wpa_Result.eap_type = 0;
	wpa_Result.authenticated = FALSE;
	pDevice->fWPA_Authened = FALSE;
#endif

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " device_open...\n");

	pDevice->rx_buf_sz = MAX_TOTAL_SIZE_WITH_ALL_HEADERS;

	if (device_alloc_bufs(pDevice) == FALSE) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " device_alloc_bufs fail... \n");
		return -ENOMEM;
	}

	if (device_init_defrag_cb(pDevice)== FALSE) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Initial defragement cb fail \n");
		goto free_rx_tx;
	}

	/* Mark the adapter as connected and ready for posted I/O. */
	MP_CLEAR_FLAG(pDevice, fMP_DISCONNECTED);
	MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
	MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
	MP_SET_FLAG(pDevice, fMP_POST_READS);
	MP_SET_FLAG(pDevice, fMP_POST_WRITES);

	//read config file
	Read_config_file(pDevice);

	if (device_init_registers(pDevice, DEVICE_INIT_COLD) == FALSE) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " init register fail\n");
		goto free_all;
	}

	device_set_multi(pDevice->dev);

	// Init for Key Management
	KeyvInitTable(pDevice,&pDevice->sKey);

	/* Publish the MAC address obtained during register init to both the
	 * management object and the net_device. */
	memcpy(pDevice->sMgmtObj.abyMACAddr, pDevice->abyCurrentNetAddr, U_ETHER_ADDR_LEN);
	memcpy(pDevice->dev->dev_addr, pDevice->abyCurrentNetAddr, U_ETHER_ADDR_LEN);

	pDevice->bStopTx0Pkt = FALSE;
	pDevice->bStopDataPkt = FALSE;
	pDevice->bRoaming = FALSE;  //DavidWang
	pDevice->bIsRoaming = FALSE;//DavidWang
	pDevice->bEnableRoaming = FALSE;

	if (pDevice->bDiversityRegCtlON) {
		device_init_diversity_timer(pDevice);
	}

	vMgrObjectInit(pDevice);

	/* Deferred-work contexts: RX management, RX bulk, and interrupt event. */
	tasklet_init(&pDevice->RxMngWorkItem, (void *)RXvMngWorkItem, (unsigned long)pDevice);
	tasklet_init(&pDevice->ReadWorkItem, (void *)RXvWorkItem, (unsigned long)pDevice);
	tasklet_init(&pDevice->EventWorkItem, (void *)INTvWorkItem, (unsigned long)pDevice);
	add_timer(&(pDevice->sMgmtObj.sTimerSecondCallback));

	pDevice->int_interval = 100;  //Max 100 microframes.
	pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
	pDevice->bIsRxWorkItemQueued = TRUE;
	pDevice->fKillEventPollingThread = FALSE;
	pDevice->bEventAvailable = FALSE;
	pDevice->bWPADEVUp = FALSE;

#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
	pDevice->bwextstep0 = FALSE;
	pDevice->bwextstep1 = FALSE;
	pDevice->bwextstep2 = FALSE;
	pDevice->bwextstep3 = FALSE;
	pDevice->bWPASuppWextEnabled = FALSE;
#endif
	pDevice->byReAssocCount = 0;

	/* Prime the RX pipeline and interrupt polling. */
	RXvWorkItem(pDevice);
	INTvWorkItem(pDevice);

	// Patch: if WEP key already set by iwconfig but device not yet open
	if ((pDevice->bEncryptionEnable == TRUE) && (pDevice->bTransmitKey == TRUE)) {
		spin_lock_irq(&pDevice->lock);
		/* Bit 31 flags this index as the transmit key. */
		KeybSetDefaultKey( pDevice,
				&(pDevice->sKey),
				pDevice->byKeyIndex | (1 << 31),
				pDevice->uKeyLength,
				NULL,
				pDevice->abyKey,
				KEY_CTL_WEP
				);
		spin_unlock_irq(&pDevice->lock);
		pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
	}

	if (pDevice->sMgmtObj.eConfigMode == WMAC_CONFIG_AP) {
		bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RUN_AP, NULL);
	}
	else {
		//mike:mark@2008-11-10
		bScheduleCommand((HANDLE)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
		//bScheduleCommand((HANDLE)pDevice, WLAN_CMD_SSID, NULL);
	}

	/* Queue stays stopped until a link is established. */
	netif_stop_queue(pDevice->dev);
	pDevice->flags |= DEVICE_FLAGS_OPENED;

#ifdef SndEvt_ToAPI
	{
	/* Notify user space that the device is up. */
	union iwreq_data wrqu;
	memset(&wrqu, 0, sizeof(wrqu));
	wrqu.data.flags = RT_UPDEV_EVENT_FLAG;
	wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, NULL);
	}
#endif

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_open success.. \n");
	return 0;

free_all:
	device_free_frag_bufs(pDevice);
free_rx_tx:
	device_free_rx_bufs(pDevice);
	device_free_tx_bufs(pDevice);
	device_free_int_bufs(pDevice);
	usb_kill_urb(pDevice->pControlURB);
	usb_kill_urb(pDevice->pInterruptURB);
	usb_free_urb(pDevice->pControlURB);
	usb_free_urb(pDevice->pInterruptURB);

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_open fail.. 
\n");
	return -ENOMEM;
}

/*
 * device_close - ndo_stop handler: bring the interface down.
 *
 * Ordered teardown: notify user space, disassociate if linked, release
 * the WPA device, wipe key/SSID state, shut the MAC down (unless the USB
 * device is already unplugged), stop the TX queue, delete all timers,
 * kill the tasklets, and finally free every buffer and URB allocated by
 * device_open().
 */
static int device_close(struct net_device *dev)
{
	PSDevice pDevice=(PSDevice) netdev_priv(dev);
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
	int uu;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close1 \n");
	if (pDevice == NULL)
		return -ENODEV;

#ifdef SndEvt_ToAPI
	{
	/* Notify user space that the device is going down. */
	union iwreq_data wrqu;
	memset(&wrqu, 0, sizeof(wrqu));
	wrqu.data.flags = RT_DOWNDEV_EVENT_FLAG;
	wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, NULL);
	}
#endif

//2007-1121-02<Add>by EinsnLiu
	/* Politely leave the BSS before tearing down; 30ms lets the
	 * disassociation frame get out. */
	if (pDevice->bLinkPass) {
		bScheduleCommand((HANDLE)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
		mdelay(30);
	}
//End Add

//2008-0714-01<Add>by MikeLiu
	device_release_WPADEV(pDevice);

	/* Clear security and SSID state. */
	memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
	pMgmt->bShareKeyAlgorithm = FALSE;
	pDevice->bEncryptionEnable = FALSE;
	pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;

	spin_lock_irq(&pDevice->lock);
	for(uu=0;uu<MAX_KEY_TABLE;uu++)
		MACvDisableKeyEntry(pDevice,uu);
	spin_unlock_irq(&pDevice->lock);

	/* Only talk to the hardware if it is still plugged in. */
	if ((pDevice->flags & DEVICE_FLAGS_UNPLUG) == FALSE) {
		MACbShutdown(pDevice);
	}
	netif_stop_queue(pDevice->dev);

	MP_SET_FLAG(pDevice, fMP_DISCONNECTED);
	MP_CLEAR_FLAG(pDevice, fMP_POST_WRITES);
	MP_CLEAR_FLAG(pDevice, fMP_POST_READS);
	pDevice->fKillEventPollingThread = TRUE;

	/* Timers must die before the tasklets they might (re)schedule. */
	del_timer(&pDevice->sTimerCommand);
	del_timer(&pMgmt->sTimerSecondCallback);

//2007-0115-02<Add>by MikeLiu
#ifdef TxInSleep
	del_timer(&pDevice->sTimerTxData);
#endif

	if (pDevice->bDiversityRegCtlON) {
		del_timer(&pDevice->TimerSQ3Tmax1);
		del_timer(&pDevice->TimerSQ3Tmax2);
		del_timer(&pDevice->TimerSQ3Tmax3);
	}

	tasklet_kill(&pDevice->RxMngWorkItem);
	tasklet_kill(&pDevice->ReadWorkItem);
	tasklet_kill(&pDevice->EventWorkItem);

	pDevice->bRoaming = FALSE;  //DavidWang
	pDevice->bIsRoaming = FALSE;//DavidWang
	pDevice->bEnableRoaming = FALSE;
	pDevice->bCmdRunning = FALSE;
	pDevice->bLinkPass = FALSE;
	memset(pMgmt->abyCurrBSSID, 0, 6);
	pMgmt->eCurrState = WMAC_STATE_IDLE;

	/* Release everything device_open() allocated. */
	device_free_tx_bufs(pDevice);
	device_free_rx_bufs(pDevice);
	device_free_int_bufs(pDevice);
	device_free_frag_bufs(pDevice);

	usb_kill_urb(pDevice->pControlURB);
	usb_kill_urb(pDevice->pInterruptURB);
	usb_free_urb(pDevice->pControlURB);
	usb_free_urb(pDevice->pInterruptURB);

	BSSvClearNodeDBTable(pDevice, 0);
	pDevice->flags &=(~DEVICE_FLAGS_OPENED);

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
	return 0;
}

/*
 * vntwusb_disconnect - USB disconnect callback: release the WPA device,
 * drop our reference on the USB device, flag the unplug so device_close()
 * skips hardware access, and unregister/free the net_device.
 */
static void vntwusb_disconnect(struct usb_interface *intf)
{
	PSDevice pDevice = usb_get_intfdata(intf);

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_disconnect1.. \n");
	if (pDevice == NULL)
		return;

#ifdef SndEvt_ToAPI
	{
	/* Notify user space that the module/device is being removed. */
	union iwreq_data wrqu;
	memset(&wrqu, 0, sizeof(wrqu));
	wrqu.data.flags = RT_RMMOD_EVENT_FLAG;
	wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, NULL);
	}
#endif

//2008-0714-01<Add>by MikeLiu
	device_release_WPADEV(pDevice);

	usb_set_intfdata(intf, NULL);
//2008-0922-01<Add>by MikeLiu, decrease usb counter.
	usb_put_dev(interface_to_usbdev(intf));

	/* From here on, device_close() must not touch the hardware. */
	pDevice->flags |= DEVICE_FLAGS_UNPLUG;
	if (pDevice->dev != NULL) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "unregister_netdev..\n");
		unregister_netdev(pDevice->dev);

//2008-07-21-01<Add>by MikeLiu
//unregister wpadev
		if(wpa_set_wpadev(pDevice, 0)!=0)
			printk("unregister wpadev fail?\n");
		free_netdev(pDevice->dev);
	}

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_disconnect3.. 
\n"); } static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) { PSDevice pDevice=netdev_priv(dev); PBYTE pbMPDU; UINT cbMPDULen = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_tx_80211\n"); spin_lock_irq(&pDevice->lock); if (pDevice->bStopTx0Pkt == TRUE) { dev_kfree_skb_irq(skb); spin_unlock_irq(&pDevice->lock); return 0; }; cbMPDULen = skb->len; pbMPDU = skb->data; vDMA0_tx_80211(pDevice, skb); spin_unlock_irq(&pDevice->lock); return 0; } static int device_xmit(struct sk_buff *skb, struct net_device *dev) { PSDevice pDevice=netdev_priv(dev); struct net_device_stats* pStats = &pDevice->stats; spin_lock_irq(&pDevice->lock); netif_stop_queue(pDevice->dev); if (pDevice->bLinkPass == FALSE) { dev_kfree_skb_irq(skb); spin_unlock_irq(&pDevice->lock); return 0; } if (pDevice->bStopDataPkt == TRUE) { dev_kfree_skb_irq(skb); pStats->tx_dropped++; spin_unlock_irq(&pDevice->lock); return 0; } if(nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) !=0) { //mike add:xmit fail! if (netif_queue_stopped(pDevice->dev)) netif_wake_queue(pDevice->dev); } spin_unlock_irq(&pDevice->lock); return 0; } static unsigned const ethernet_polynomial = 0x04c11db7U; static inline u32 ether_crc(int length, unsigned char *data) { int crc = -1; while(--length >= 0) { unsigned char current_octet = *data++; int bit; for (bit = 0; bit < 8; bit++, current_octet >>= 1) { crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ? 
ethernet_polynomial : 0); } } return crc; } //find out the start position of str2 from str1 static UCHAR *kstrstr(const UCHAR *str1,const UCHAR *str2) { int str1_len=strlen(str1); int str2_len=strlen(str2); while (str1_len >= str2_len) { str1_len--; if(memcmp(str1,str2,str2_len)==0) return (UCHAR *)str1; str1++; } return NULL; } static int Config_FileGetParameter(UCHAR *string, UCHAR *dest,UCHAR *source) { UCHAR buf1[100]; UCHAR buf2[100]; UCHAR *start_p=NULL,*end_p=NULL,*tmp_p=NULL; int ii; memset(buf1,0,100); strcat(buf1, string); strcat(buf1, "="); source+=strlen(buf1); //find target string start point if((start_p = kstrstr(source,buf1))==NULL) return FALSE; //check if current config line is marked by "#" ?? for(ii=1;;ii++) { if(memcmp(start_p-ii,"\n",1)==0) break; if(memcmp(start_p-ii,"#",1)==0) return FALSE; } //find target string end point if((end_p = kstrstr(start_p,"\n"))==NULL) { //cann't find "\n",but don't care end_p=start_p+strlen(start_p); //no include "\n" } memset(buf2,0,100); memcpy(buf2,start_p,end_p-start_p); //get the tartget line buf2[end_p-start_p]='\0'; //find value if((start_p = kstrstr(buf2,"="))==NULL) return FALSE; memset(buf1,0,100); strcpy(buf1,start_p+1); //except space tmp_p = buf1; while(*tmp_p != 0x00) { if(*tmp_p==' ') tmp_p++; else break; } memcpy(dest,tmp_p,strlen(tmp_p)); return TRUE; } //if read fail,return NULL,or return data pointer; static UCHAR *Config_FileOperation(PSDevice pDevice) { UCHAR *config_path=CONFIG_PATH; UCHAR *buffer=NULL; struct file *filp=NULL; mm_segment_t old_fs = get_fs(); //int oldfsuid=0,oldfsgid=0; int result=0; set_fs (KERNEL_DS); /* Can't do this anymore, so we rely on correct filesystem permissions: //Make sure a caller can read or write power as root oldfsuid=current->fsuid; oldfsgid=current->fsgid; current->fsuid = 0; current->fsgid = 0; */ //open file filp = filp_open(config_path, O_RDWR, 0); if (IS_ERR(filp)) { printk("Config_FileOperation file Not exist\n"); result=-1; goto error2; } 
if(!(filp->f_op) || !(filp->f_op->read) ||!(filp->f_op->write)) { printk("file %s cann't readable or writable?\n",config_path); result = -1; goto error1; } buffer = (UCHAR *)kmalloc(1024, GFP_KERNEL); if(buffer==NULL) { printk("alllocate mem for file fail?\n"); result = -1; goto error1; } if(filp->f_op->read(filp, buffer, 1024, &filp->f_pos)<0) { printk("read file error?\n"); result = -1; } error1: if(filp_close(filp,NULL)) printk("Config_FileOperation:close file fail\n"); error2: set_fs (old_fs); /* current->fsuid=oldfsuid; current->fsgid=oldfsgid; */ if(result!=0) { if(buffer) kfree(buffer); buffer=NULL; } return buffer; } //return --->-1:fail; >=0:sucessful static int Read_config_file(PSDevice pDevice) { int result=0; UCHAR tmpbuffer[100]; UCHAR *buffer=NULL; //init config setting pDevice->config_file.ZoneType = -1; pDevice->config_file.eAuthenMode = -1; pDevice->config_file.eEncryptionStatus = -1; if((buffer=Config_FileOperation(pDevice)) ==NULL) { result =-1; return result; } //get zonetype { memset(tmpbuffer,0,sizeof(tmpbuffer)); if(Config_FileGetParameter("ZONETYPE",tmpbuffer,buffer) ==TRUE) { if(memcmp(tmpbuffer,"USA",3)==0) { pDevice->config_file.ZoneType=ZoneType_USA; } else if(memcmp(tmpbuffer,"JAPAN",5)==0) { pDevice->config_file.ZoneType=ZoneType_Japan; } else if(memcmp(tmpbuffer,"EUROPE",6)==0) { pDevice->config_file.ZoneType=ZoneType_Europe; } else { printk("Unknown Zonetype[%s]?\n",tmpbuffer); } } } #if 1 //get other parameter { memset(tmpbuffer,0,sizeof(tmpbuffer)); if(Config_FileGetParameter("AUTHENMODE",tmpbuffer,buffer)==TRUE) { pDevice->config_file.eAuthenMode = (int) simple_strtol(tmpbuffer, NULL, 10); } memset(tmpbuffer,0,sizeof(tmpbuffer)); if(Config_FileGetParameter("ENCRYPTIONMODE",tmpbuffer,buffer)==TRUE) { pDevice->config_file.eEncryptionStatus= (int) simple_strtol(tmpbuffer, NULL, 10); } } #endif kfree(buffer); return result; } static void device_set_multi(struct net_device *dev) { PSDevice pDevice = (PSDevice) netdev_priv(dev); 
PSMgmtObject pMgmt = &(pDevice->sMgmtObj); u32 mc_filter[2]; int ii; struct dev_mc_list *mclist; BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}; BYTE byTmpMode = 0; int rc; spin_lock_irq(&pDevice->lock); rc = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, MAC_REG_RCR, MESSAGE_REQUEST_MACREG, 1, &byTmpMode ); if (rc == 0) pDevice->byRxMode = byTmpMode; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode in= %x\n", pDevice->byRxMode); if (dev->flags & IFF_PROMISC) { // Set promiscuous. DBG_PRT(MSG_LEVEL_ERR,KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); // Unconditionally log net taps. pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST); } else if ((dev->mc_count > pDevice->multicast_limit) || (dev->flags & IFF_ALLMULTI)) { CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, MAC_REG_MAR0, MESSAGE_REQUEST_MACREG, 8, pbyData ); pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST); } else { memset(mc_filter, 0, sizeof(mc_filter)); for (ii = 0, mclist = dev->mc_list; mclist && ii < dev->mc_count; ii++, mclist = mclist->next) { int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31)); } for (ii = 0; ii < 4; ii++) { MACvWriteMultiAddr(pDevice, ii, *((PBYTE)&mc_filter[0] + ii)); MACvWriteMultiAddr(pDevice, ii+ 4, *((PBYTE)&mc_filter[1] + ii)); } pDevice->byRxMode &= ~(RCR_UNICAST); pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST); } if (pMgmt->eConfigMode == WMAC_CONFIG_AP) { // If AP mode, don't enable RCR_UNICAST. Since hw only compare addr1 with local mac. 
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST); pDevice->byRxMode &= ~(RCR_UNICAST); } ControlvWriteByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_RCR, pDevice->byRxMode); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode out= %x\n", pDevice->byRxMode); spin_unlock_irq(&pDevice->lock); } static struct net_device_stats *device_get_stats(struct net_device *dev) { PSDevice pDevice=(PSDevice) netdev_priv(dev); return &pDevice->stats; } static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PSCmdRequest pReq; //BOOL bCommit = FALSE; struct iwreq *wrq = (struct iwreq *) rq; int rc =0; if (pMgmt == NULL) { rc = -EFAULT; return rc; } switch(cmd) { case SIOCGIWNAME: rc = iwctl_giwname(dev, NULL, (char *)&(wrq->u.name), NULL); break; case SIOCSIWNWID: rc = -EOPNOTSUPP; break; case SIOCGIWNWID: //0x8b03 support #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT rc = iwctl_giwnwid(dev, NULL, &(wrq->u.nwid), NULL); #else rc = -EOPNOTSUPP; #endif break; // Set frequency/channel case SIOCSIWFREQ: rc = iwctl_siwfreq(dev, NULL, &(wrq->u.freq), NULL); break; // Get frequency/channel case SIOCGIWFREQ: rc = iwctl_giwfreq(dev, NULL, &(wrq->u.freq), NULL); break; // Set desired network name (ESSID) case SIOCSIWESSID: { char essid[IW_ESSID_MAX_SIZE+1]; if (wrq->u.essid.length > IW_ESSID_MAX_SIZE) { rc = -E2BIG; break; } if (copy_from_user(essid, wrq->u.essid.pointer, wrq->u.essid.length)) { rc = -EFAULT; break; } rc = iwctl_siwessid(dev, NULL, &(wrq->u.essid), essid); } break; // Get current network name (ESSID) case SIOCGIWESSID: { char essid[IW_ESSID_MAX_SIZE+1]; if (wrq->u.essid.pointer) rc = iwctl_giwessid(dev, NULL, &(wrq->u.essid), essid); if (copy_to_user(wrq->u.essid.pointer, essid, wrq->u.essid.length) ) rc = -EFAULT; } break; case SIOCSIWAP: rc = iwctl_siwap(dev, NULL, &(wrq->u.ap_addr), NULL); break; // Get current Access Point (BSSID) case SIOCGIWAP: rc = 
iwctl_giwap(dev, NULL, &(wrq->u.ap_addr), NULL); break; // Set desired station name case SIOCSIWNICKN: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWNICKN \n"); rc = -EOPNOTSUPP; break; // Get current station name case SIOCGIWNICKN: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWNICKN \n"); rc = -EOPNOTSUPP; break; // Set the desired bit-rate case SIOCSIWRATE: rc = iwctl_siwrate(dev, NULL, &(wrq->u.bitrate), NULL); break; // Get the current bit-rate case SIOCGIWRATE: rc = iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL); break; // Set the desired RTS threshold case SIOCSIWRTS: rc = iwctl_siwrts(dev, NULL, &(wrq->u.rts), NULL); break; // Get the current RTS threshold case SIOCGIWRTS: rc = iwctl_giwrts(dev, NULL, &(wrq->u.rts), NULL); break; // Set the desired fragmentation threshold case SIOCSIWFRAG: rc = iwctl_siwfrag(dev, NULL, &(wrq->u.frag), NULL); break; // Get the current fragmentation threshold case SIOCGIWFRAG: rc = iwctl_giwfrag(dev, NULL, &(wrq->u.frag), NULL); break; // Set mode of operation case SIOCSIWMODE: rc = iwctl_siwmode(dev, NULL, &(wrq->u.mode), NULL); break; // Get mode of operation case SIOCGIWMODE: rc = iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL); break; // Set WEP keys and mode case SIOCSIWENCODE: { char abyKey[WLAN_WEP232_KEYLEN]; if (wrq->u.encoding.pointer) { if (wrq->u.encoding.length > WLAN_WEP232_KEYLEN) { rc = -E2BIG; break; } memset(abyKey, 0, WLAN_WEP232_KEYLEN); if (copy_from_user(abyKey, wrq->u.encoding.pointer, wrq->u.encoding.length)) { rc = -EFAULT; break; } } else if (wrq->u.encoding.length != 0) { rc = -EINVAL; break; } rc = iwctl_siwencode(dev, NULL, &(wrq->u.encoding), abyKey); } break; // Get the WEP keys and mode case SIOCGIWENCODE: if (!capable(CAP_NET_ADMIN)) { rc = -EPERM; break; } { char abyKey[WLAN_WEP232_KEYLEN]; rc = iwctl_giwencode(dev, NULL, &(wrq->u.encoding), abyKey); if (rc != 0) break; if (wrq->u.encoding.pointer) { if (copy_to_user(wrq->u.encoding.pointer, abyKey, wrq->u.encoding.length)) rc = -EFAULT; } } 
break; // Get the current Tx-Power case SIOCGIWTXPOW: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWTXPOW \n"); rc = -EOPNOTSUPP; break; case SIOCSIWTXPOW: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWTXPOW \n"); rc = -EOPNOTSUPP; break; case SIOCSIWRETRY: rc = iwctl_siwretry(dev, NULL, &(wrq->u.retry), NULL); break; case SIOCGIWRETRY: rc = iwctl_giwretry(dev, NULL, &(wrq->u.retry), NULL); break; // Get range of parameters case SIOCGIWRANGE: { struct iw_range range; rc = iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range); if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range))) rc = -EFAULT; } break; case SIOCGIWPOWER: rc = iwctl_giwpower(dev, NULL, &(wrq->u.power), NULL); break; case SIOCSIWPOWER: rc = iwctl_siwpower(dev, NULL, &(wrq->u.power), NULL); break; case SIOCGIWSENS: rc = iwctl_giwsens(dev, NULL, &(wrq->u.sens), NULL); break; case SIOCSIWSENS: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSENS \n"); rc = -EOPNOTSUPP; break; case SIOCGIWAPLIST: { char buffer[IW_MAX_AP * (sizeof(struct sockaddr) + sizeof(struct iw_quality))]; if (wrq->u.data.pointer) { rc = iwctl_giwaplist(dev, NULL, &(wrq->u.data), buffer); if (rc == 0) { if (copy_to_user(wrq->u.data.pointer, buffer, (wrq->u.data.length * (sizeof(struct sockaddr) + sizeof(struct iw_quality))) )) rc = -EFAULT; } } } break; #ifdef WIRELESS_SPY // Set the spy list case SIOCSIWSPY: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSPY \n"); rc = -EOPNOTSUPP; break; // Get the spy list case SIOCGIWSPY: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSPY \n"); rc = -EOPNOTSUPP; break; #endif // WIRELESS_SPY case SIOCGIWPRIV: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWPRIV \n"); rc = -EOPNOTSUPP; /* if(wrq->u.data.pointer) { wrq->u.data.length = sizeof(iwctl_private_args) / sizeof( iwctl_private_args[0]); if(copy_to_user(wrq->u.data.pointer, (u_char *) iwctl_private_args, sizeof(iwctl_private_args))) rc = -EFAULT; } */ break; //2008-0409-07, <Add> by Einsn Liu #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT 
case SIOCSIWAUTH: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWAUTH \n"); rc = iwctl_siwauth(dev, NULL, &(wrq->u.param), NULL); break; case SIOCGIWAUTH: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWAUTH \n"); rc = iwctl_giwauth(dev, NULL, &(wrq->u.param), NULL); break; case SIOCSIWGENIE: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWGENIE \n"); rc = iwctl_siwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer); break; case SIOCGIWGENIE: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWGENIE \n"); rc = iwctl_giwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer); break; case SIOCSIWENCODEEXT: { char extra[sizeof(struct iw_encode_ext)+MAX_KEY_LEN+1]; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWENCODEEXT \n"); if(wrq->u.encoding.pointer){ memset(extra, 0, sizeof(struct iw_encode_ext)+MAX_KEY_LEN+1); if(wrq->u.encoding.length > (sizeof(struct iw_encode_ext)+ MAX_KEY_LEN)){ rc = -E2BIG; break; } if(copy_from_user(extra, wrq->u.encoding.pointer,wrq->u.encoding.length)){ rc = -EFAULT; break; } }else if(wrq->u.encoding.length != 0){ rc = -EINVAL; break; } rc = iwctl_siwencodeext(dev, NULL, &(wrq->u.encoding), extra); } break; case SIOCGIWENCODEEXT: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODEEXT \n"); rc = iwctl_giwencodeext(dev, NULL, &(wrq->u.encoding), NULL); break; case SIOCSIWMLME: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWMLME \n"); rc = iwctl_siwmlme(dev, NULL, &(wrq->u.data), wrq->u.data.pointer); break; #endif // #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT //End Add -- //2008-0409-07, <Add> by Einsn Liu case IOCTL_CMD_TEST: if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) { rc = -EFAULT; break; } else { rc = 0; } pReq = (PSCmdRequest)rq; //20080130-01,<Remark> by Mike Liu // if(pDevice->bLinkPass==TRUE) pReq->wResult = MAGIC_CODE; //Linking status:0x3142 //20080130-02,<Remark> by Mike Liu // else // pReq->wResult = MAGIC_CODE+1; //disconnect status:0x3143 break; case IOCTL_CMD_SET: if (!(pDevice->flags & DEVICE_FLAGS_OPENED) && (((PSCmdRequest)rq)->wCmdCode 
!=WLAN_CMD_SET_WPA)) { rc = -EFAULT; break; } else { rc = 0; } if (test_and_set_bit( 0, (void*)&(pMgmt->uCmdBusy))) { return -EBUSY; } rc = private_ioctl(pDevice, rq); clear_bit( 0, (void*)&(pMgmt->uCmdBusy)); break; case IOCTL_CMD_HOSTAPD: if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) { rc = -EFAULT; break; } else { rc = 0; } rc = hostap_ioctl(pDevice, &wrq->u.data); break; case IOCTL_CMD_WPA: if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) { rc = -EFAULT; break; } else { rc = 0; } rc = wpa_ioctl(pDevice, &wrq->u.data); break; case SIOCETHTOOL: return ethtool_ioctl(dev, (void *) rq->ifr_data); // All other calls are currently unsupported default: rc = -EOPNOTSUPP; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Ioctl command not support..%x\n", cmd); } if (pDevice->bCommit) { if (pMgmt->eConfigMode == WMAC_CONFIG_AP) { netif_stop_queue(pDevice->dev); spin_lock_irq(&pDevice->lock); bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RUN_AP, NULL); spin_unlock_irq(&pDevice->lock); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Commit the settings\n"); spin_lock_irq(&pDevice->lock); //2007-1121-01<Modify>by EinsnLiu if (pDevice->bLinkPass&& memcmp(pMgmt->abyCurrSSID,pMgmt->abyDesireSSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN)) { bScheduleCommand((HANDLE)pDevice, WLAN_CMD_DISASSOCIATE, NULL); } else { pDevice->bLinkPass = FALSE; pMgmt->eCurrState = WMAC_STATE_IDLE; memset(pMgmt->abyCurrBSSID, 0, 6); } ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW); //End Modify netif_stop_queue(pDevice->dev); #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT pMgmt->eScanType = WMAC_SCAN_ACTIVE; if(pDevice->bWPASuppWextEnabled !=TRUE) #endif bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID); bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL); spin_unlock_irq(&pDevice->lock); } pDevice->bCommit = FALSE; } return rc; } static int ethtool_ioctl(struct net_device *dev, void *useraddr) { u32 ethcmd; if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) 
return -EFAULT; switch (ethcmd) { case ETHTOOL_GDRVINFO: { struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO}; strncpy(info.driver, DEVICE_NAME, sizeof(info.driver)-1); strncpy(info.version, DEVICE_VERSION, sizeof(info.version)-1); if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; } } return -EOPNOTSUPP; } /*------------------------------------------------------------------*/ MODULE_DEVICE_TABLE(usb, vntwusb_table); static struct usb_driver vntwusb_driver = { .name = DEVICE_NAME, .probe = vntwusb_found1, .disconnect = vntwusb_disconnect, .id_table = vntwusb_table, //2008-0920-01<Add>by MikeLiu //for supporting S3 & S4 function #ifdef CONFIG_PM .suspend = vntwusb_suspend, .resume = vntwusb_resume, #endif }; static int __init vntwusb_init_module(void) { printk(KERN_NOTICE DEVICE_FULL_DRV_NAM " " DEVICE_VERSION); return usb_register(&vntwusb_driver); } static void __exit vntwusb_cleanup_module(void) { usb_deregister(&vntwusb_driver); } module_init(vntwusb_init_module); module_exit(vntwusb_cleanup_module);
gpl-2.0
bwrsandman/android_kernel_zte_roamer
fs/ceph/snap.c
760
25635
#include "ceph_debug.h" #include <linux/sort.h> #include <linux/slab.h> #include "super.h" #include "decode.h" /* * Snapshots in ceph are driven in large part by cooperation from the * client. In contrast to local file systems or file servers that * implement snapshots at a single point in the system, ceph's * distributed access to storage requires clients to help decide * whether a write logically occurs before or after a recently created * snapshot. * * This provides a perfect instantanous client-wide snapshot. Between * clients, however, snapshots may appear to be applied at slightly * different points in time, depending on delays in delivering the * snapshot notification. * * Snapshots are _not_ file system-wide. Instead, each snapshot * applies to the subdirectory nested beneath some directory. This * effectively divides the hierarchy into multiple "realms," where all * of the files contained by each realm share the same set of * snapshots. An individual realm's snap set contains snapshots * explicitly created on that realm, as well as any snaps in its * parent's snap set _after_ the point at which the parent became it's * parent (due to, say, a rename). Similarly, snaps from prior parents * during the time intervals during which they were the parent are included. * * The client is spared most of this detail, fortunately... it must only * maintains a hierarchy of realms reflecting the current parent/child * realm relationship, and for each realm has an explicit list of snaps * inherited from prior parents. * * A snap_realm struct is maintained for realms containing every inode * with an open cap in the system. (The needed snap realm information is * provided by the MDS whenever a cap is issued, i.e., on open.) A 'seq' * version number is used to ensure that as realm parameters change (new * snapshot, new parent, etc.) the client's realm hierarchy is updated. 
* * The realm hierarchy drives the generation of a 'snap context' for each * realm, which simply lists the resulting set of snaps for the realm. This * is attached to any writes sent to OSDs. */ /* * Unfortunately error handling is a bit mixed here. If we get a snap * update, but don't have enough memory to update our realm hierarchy, * it's not clear what we can do about it (besides complaining to the * console). */ /* * increase ref count for the realm * * caller must hold snap_rwsem for write. */ void ceph_get_snap_realm(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm) { dout("get_realm %p %d -> %d\n", realm, atomic_read(&realm->nref), atomic_read(&realm->nref)+1); /* * since we _only_ increment realm refs or empty the empty * list with snap_rwsem held, adjusting the empty list here is * safe. we do need to protect against concurrent empty list * additions, however. */ if (atomic_read(&realm->nref) == 0) { spin_lock(&mdsc->snap_empty_lock); list_del_init(&realm->empty_item); spin_unlock(&mdsc->snap_empty_lock); } atomic_inc(&realm->nref); } static void __insert_snap_realm(struct rb_root *root, struct ceph_snap_realm *new) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct ceph_snap_realm *r = NULL; while (*p) { parent = *p; r = rb_entry(parent, struct ceph_snap_realm, node); if (new->ino < r->ino) p = &(*p)->rb_left; else if (new->ino > r->ino) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->node, parent, p); rb_insert_color(&new->node, root); } /* * create and get the realm rooted at @ino and bump its ref count. * * caller must hold snap_rwsem for write. 
*/ static struct ceph_snap_realm *ceph_create_snap_realm( struct ceph_mds_client *mdsc, u64 ino) { struct ceph_snap_realm *realm; realm = kzalloc(sizeof(*realm), GFP_NOFS); if (!realm) return ERR_PTR(-ENOMEM); atomic_set(&realm->nref, 0); /* tree does not take a ref */ realm->ino = ino; INIT_LIST_HEAD(&realm->children); INIT_LIST_HEAD(&realm->child_item); INIT_LIST_HEAD(&realm->empty_item); INIT_LIST_HEAD(&realm->inodes_with_caps); spin_lock_init(&realm->inodes_with_caps_lock); __insert_snap_realm(&mdsc->snap_realms, realm); dout("create_snap_realm %llx %p\n", realm->ino, realm); return realm; } /* * lookup the realm rooted at @ino. * * caller must hold snap_rwsem for write. */ struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, u64 ino) { struct rb_node *n = mdsc->snap_realms.rb_node; struct ceph_snap_realm *r; while (n) { r = rb_entry(n, struct ceph_snap_realm, node); if (ino < r->ino) n = n->rb_left; else if (ino > r->ino) n = n->rb_right; else { dout("lookup_snap_realm %llx %p\n", r->ino, r); return r; } } return NULL; } static void __put_snap_realm(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm); /* * called with snap_rwsem (write) */ static void __destroy_snap_realm(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm) { dout("__destroy_snap_realm %p %llx\n", realm, realm->ino); rb_erase(&realm->node, &mdsc->snap_realms); if (realm->parent) { list_del_init(&realm->child_item); __put_snap_realm(mdsc, realm->parent); } kfree(realm->prior_parent_snaps); kfree(realm->snaps); ceph_put_snap_context(realm->cached_context); kfree(realm); } /* * caller holds snap_rwsem (write) */ static void __put_snap_realm(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm) { dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm, atomic_read(&realm->nref), atomic_read(&realm->nref)-1); if (atomic_dec_and_test(&realm->nref)) __destroy_snap_realm(mdsc, realm); } /* * caller needn't hold any locks */ void 
ceph_put_snap_realm(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm) { dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm, atomic_read(&realm->nref), atomic_read(&realm->nref)-1); if (!atomic_dec_and_test(&realm->nref)) return; if (down_write_trylock(&mdsc->snap_rwsem)) { __destroy_snap_realm(mdsc, realm); up_write(&mdsc->snap_rwsem); } else { spin_lock(&mdsc->snap_empty_lock); list_add(&mdsc->snap_empty, &realm->empty_item); spin_unlock(&mdsc->snap_empty_lock); } } /* * Clean up any realms whose ref counts have dropped to zero. Note * that this does not include realms who were created but not yet * used. * * Called under snap_rwsem (write) */ static void __cleanup_empty_realms(struct ceph_mds_client *mdsc) { struct ceph_snap_realm *realm; spin_lock(&mdsc->snap_empty_lock); while (!list_empty(&mdsc->snap_empty)) { realm = list_first_entry(&mdsc->snap_empty, struct ceph_snap_realm, empty_item); list_del(&realm->empty_item); spin_unlock(&mdsc->snap_empty_lock); __destroy_snap_realm(mdsc, realm); spin_lock(&mdsc->snap_empty_lock); } spin_unlock(&mdsc->snap_empty_lock); } void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc) { down_write(&mdsc->snap_rwsem); __cleanup_empty_realms(mdsc); up_write(&mdsc->snap_rwsem); } /* * adjust the parent realm of a given @realm. adjust child list, and parent * pointers, and ref counts appropriately. * * return true if parent was changed, 0 if unchanged, <0 on error. * * caller must hold snap_rwsem for write. 
*/ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm, u64 parentino) { struct ceph_snap_realm *parent; if (realm->parent_ino == parentino) return 0; parent = ceph_lookup_snap_realm(mdsc, parentino); if (!parent) { parent = ceph_create_snap_realm(mdsc, parentino); if (IS_ERR(parent)) return PTR_ERR(parent); } dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n", realm->ino, realm, realm->parent_ino, realm->parent, parentino, parent); if (realm->parent) { list_del_init(&realm->child_item); ceph_put_snap_realm(mdsc, realm->parent); } realm->parent_ino = parentino; realm->parent = parent; ceph_get_snap_realm(mdsc, parent); list_add(&realm->child_item, &parent->children); return 1; } static int cmpu64_rev(const void *a, const void *b) { if (*(u64 *)a < *(u64 *)b) return 1; if (*(u64 *)a > *(u64 *)b) return -1; return 0; } /* * build the snap context for a given realm. */ static int build_snap_context(struct ceph_snap_realm *realm) { struct ceph_snap_realm *parent = realm->parent; struct ceph_snap_context *snapc; int err = 0; int i; int num = realm->num_prior_parent_snaps + realm->num_snaps; /* * build parent context, if it hasn't been built. * conservatively estimate that all parent snaps might be * included by us. */ if (parent) { if (!parent->cached_context) { err = build_snap_context(parent); if (err) goto fail; } num += parent->cached_context->num_snaps; } /* do i actually need to update? not if my context seq matches realm seq, and my parents' does to. (this works because we rebuild_snap_realms() works _downward_ in hierarchy after each update.) 
*/ if (realm->cached_context && realm->cached_context->seq == realm->seq && (!parent || realm->cached_context->seq >= parent->cached_context->seq)) { dout("build_snap_context %llx %p: %p seq %lld (%d snaps)" " (unchanged)\n", realm->ino, realm, realm->cached_context, realm->cached_context->seq, realm->cached_context->num_snaps); return 0; } /* alloc new snap context */ err = -ENOMEM; if (num > ULONG_MAX / sizeof(u64) - sizeof(*snapc)) goto fail; snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS); if (!snapc) goto fail; atomic_set(&snapc->nref, 1); /* build (reverse sorted) snap vector */ num = 0; snapc->seq = realm->seq; if (parent) { /* include any of parent's snaps occuring _after_ my parent became my parent */ for (i = 0; i < parent->cached_context->num_snaps; i++) if (parent->cached_context->snaps[i] >= realm->parent_since) snapc->snaps[num++] = parent->cached_context->snaps[i]; if (parent->cached_context->seq > snapc->seq) snapc->seq = parent->cached_context->seq; } memcpy(snapc->snaps + num, realm->snaps, sizeof(u64)*realm->num_snaps); num += realm->num_snaps; memcpy(snapc->snaps + num, realm->prior_parent_snaps, sizeof(u64)*realm->num_prior_parent_snaps); num += realm->num_prior_parent_snaps; sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL); snapc->num_snaps = num; dout("build_snap_context %llx %p: %p seq %lld (%d snaps)\n", realm->ino, realm, snapc, snapc->seq, snapc->num_snaps); if (realm->cached_context) ceph_put_snap_context(realm->cached_context); realm->cached_context = snapc; return 0; fail: /* * if we fail, clear old (incorrect) cached_context... hopefully * we'll have better luck building it later */ if (realm->cached_context) { ceph_put_snap_context(realm->cached_context); realm->cached_context = NULL; } pr_err("build_snap_context %llx %p fail %d\n", realm->ino, realm, err); return err; } /* * rebuild snap context for the given realm and all of its children. 
*/ static void rebuild_snap_realms(struct ceph_snap_realm *realm) { struct ceph_snap_realm *child; dout("rebuild_snap_realms %llx %p\n", realm->ino, realm); build_snap_context(realm); list_for_each_entry(child, &realm->children, child_item) rebuild_snap_realms(child); } /* * helper to allocate and decode an array of snapids. free prior * instance, if any. */ static int dup_array(u64 **dst, __le64 *src, int num) { int i; kfree(*dst); if (num) { *dst = kcalloc(num, sizeof(u64), GFP_NOFS); if (!*dst) return -ENOMEM; for (i = 0; i < num; i++) (*dst)[i] = get_unaligned_le64(src + i); } else { *dst = NULL; } return 0; } /* * When a snapshot is applied, the size/mtime inode metadata is queued * in a ceph_cap_snap (one for each snapshot) until writeback * completes and the metadata can be flushed back to the MDS. * * However, if a (sync) write is currently in-progress when we apply * the snapshot, we have to wait until the write succeeds or fails * (and a final size/mtime is known). In this case the * cap_snap->writing = 1, and is said to be "pending." When the write * finishes, we __ceph_finish_cap_snap(). * * Caller must hold snap_rwsem for read (i.e., the realm topology won't * change). */ void ceph_queue_cap_snap(struct ceph_inode_info *ci) { struct inode *inode = &ci->vfs_inode; struct ceph_cap_snap *capsnap; int used; capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS); if (!capsnap) { pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode); return; } spin_lock(&inode->i_lock); used = __ceph_caps_used(ci); if (__ceph_have_pending_cap_snap(ci)) { /* there is no point in queuing multiple "pending" cap_snaps, as no new writes are allowed to start when pending, so any writes in progress now were started before the previous cap_snap. lucky us. 
*/ dout("queue_cap_snap %p already pending\n", inode); kfree(capsnap); } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) { struct ceph_snap_context *snapc = ci->i_head_snapc; igrab(inode); atomic_set(&capsnap->nref, 1); capsnap->ci = ci; INIT_LIST_HEAD(&capsnap->ci_item); INIT_LIST_HEAD(&capsnap->flushing_item); capsnap->follows = snapc->seq - 1; capsnap->issued = __ceph_caps_issued(ci, NULL); capsnap->dirty = __ceph_caps_dirty(ci); capsnap->mode = inode->i_mode; capsnap->uid = inode->i_uid; capsnap->gid = inode->i_gid; /* fixme? */ capsnap->xattr_blob = NULL; capsnap->xattr_len = 0; /* dirty page count moved from _head to this cap_snap; all subsequent writes page dirties occur _after_ this snapshot. */ capsnap->dirty_pages = ci->i_wrbuffer_ref_head; ci->i_wrbuffer_ref_head = 0; capsnap->context = snapc; ci->i_head_snapc = NULL; list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); if (used & CEPH_CAP_FILE_WR) { dout("queue_cap_snap %p cap_snap %p snapc %p" " seq %llu used WR, now pending\n", inode, capsnap, snapc, snapc->seq); capsnap->writing = 1; } else { /* note mtime, size NOW. */ __ceph_finish_cap_snap(ci, capsnap); } } else { dout("queue_cap_snap %p nothing dirty|writing\n", inode); kfree(capsnap); } spin_unlock(&inode->i_lock); } /* * Finalize the size, mtime for a cap_snap.. that is, settle on final values * to be used for the snapshot, to be flushed back to the mds. * * If capsnap can now be flushed, add to snap_flush list, and return 1. * * Caller must hold i_lock. 
*/ int __ceph_finish_cap_snap(struct ceph_inode_info *ci, struct ceph_cap_snap *capsnap) { struct inode *inode = &ci->vfs_inode; struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; BUG_ON(capsnap->writing); capsnap->size = inode->i_size; capsnap->mtime = inode->i_mtime; capsnap->atime = inode->i_atime; capsnap->ctime = inode->i_ctime; capsnap->time_warp_seq = ci->i_time_warp_seq; if (capsnap->dirty_pages) { dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu " "still has %d dirty pages\n", inode, capsnap, capsnap->context, capsnap->context->seq, ceph_cap_string(capsnap->dirty), capsnap->size, capsnap->dirty_pages); return 0; } dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n", inode, capsnap, capsnap->context, capsnap->context->seq, ceph_cap_string(capsnap->dirty), capsnap->size); spin_lock(&mdsc->snap_flush_lock); list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); spin_unlock(&mdsc->snap_flush_lock); return 1; /* caller may want to ceph_flush_snaps */ } /* * Parse and apply a snapblob "snap trace" from the MDS. This specifies * the snap realm parameters from a given realm and all of its ancestors, * up to the root. * * Caller must hold snap_rwsem for write. 
 */
int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
			   void *p, void *e, bool deletion)
{
	struct ceph_mds_snap_realm *ri;    /* encoded */
	__le64 *snaps;                     /* encoded */
	__le64 *prior_parent_snaps;        /* encoded */
	struct ceph_snap_realm *realm;
	int invalidate = 0;
	int err = -ENOMEM;

	dout("update_snap_trace deletion=%d\n", deletion);
more:
	/* decode one realm record: fixed header, then two snapid arrays */
	ceph_decode_need(&p, e, sizeof(*ri), bad);
	ri = p;
	p += sizeof(*ri);
	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
			    le32_to_cpu(ri->num_prior_parent_snaps)), bad);
	snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
	prior_parent_snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);

	realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino));
	if (!realm) {
		realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino));
		if (IS_ERR(realm)) {
			err = PTR_ERR(realm);
			goto fail;
		}
	}

	if (le64_to_cpu(ri->seq) > realm->seq) {
		dout("update_snap_trace updating %llx %p %lld -> %lld\n",
		     realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
		/*
		 * if the realm seq has changed, queue a cap_snap for every
		 * inode with open caps.  we do this _before_ we update
		 * the realm info so that we prepare for writeback under the
		 * _previous_ snap context.
		 *
		 * ...unless it's a snap deletion!
		 */
		if (!deletion) {
			struct ceph_inode_info *ci;
			struct inode *lastinode = NULL;

			/* walk the realm's inode list; drop the list lock
			 * around ceph_queue_cap_snap() (which takes i_lock),
			 * using igrab/iput to keep each inode alive, and
			 * holding a ref to the previous inode so the list
			 * entry we restart from stays valid */
			spin_lock(&realm->inodes_with_caps_lock);
			list_for_each_entry(ci, &realm->inodes_with_caps,
					    i_snap_realm_item) {
				struct inode *inode = igrab(&ci->vfs_inode);
				if (!inode)
					continue;
				spin_unlock(&realm->inodes_with_caps_lock);
				if (lastinode)
					iput(lastinode);
				lastinode = inode;
				ceph_queue_cap_snap(ci);
				spin_lock(&realm->inodes_with_caps_lock);
			}
			spin_unlock(&realm->inodes_with_caps_lock);
			if (lastinode)
				iput(lastinode);
			dout("update_snap_trace cap_snaps queued\n");
		}

	} else {
		dout("update_snap_trace %llx %p seq %lld unchanged\n",
		     realm->ino, realm, realm->seq);
	}

	/* ensure the parent is correct */
	err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
	if (err < 0)
		goto fail;
	/* adjust_snap_realm_parent returns 1 if the parent changed */
	invalidate += err;

	if (le64_to_cpu(ri->seq) > realm->seq) {
		/* update realm parameters, snap lists */
		realm->seq = le64_to_cpu(ri->seq);
		realm->created = le64_to_cpu(ri->created);
		realm->parent_since = le64_to_cpu(ri->parent_since);

		realm->num_snaps = le32_to_cpu(ri->num_snaps);
		err = dup_array(&realm->snaps, snaps, realm->num_snaps);
		if (err < 0)
			goto fail;

		realm->num_prior_parent_snaps =
			le32_to_cpu(ri->num_prior_parent_snaps);
		err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
				realm->num_prior_parent_snaps);
		if (err < 0)
			goto fail;

		invalidate = 1;
	} else if (!realm->cached_context) {
		/* no cached context yet: force a rebuild below */
		invalidate = 1;
	}

	dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
	     realm, invalidate, p, e);

	/* the trace encodes the realm and all its ancestors; keep going
	 * until we have consumed the whole blob */
	if (p < e)
		goto more;

	/* invalidate when we reach the _end_ (root) of the trace */
	if (invalidate)
		rebuild_snap_realms(realm);

	__cleanup_empty_realms(mdsc);
	return 0;

bad:
	err = -EINVAL;
fail:
	pr_err("update_snap_trace error %d\n", err);
	return err;
}

/*
 * Send any cap_snaps that are queued for flush.  Try to carry
 * s_mutex across multiple snap flushes to avoid locking overhead.
 *
 * Caller holds no locks.
*/ static void flush_snaps(struct ceph_mds_client *mdsc) { struct ceph_inode_info *ci; struct inode *inode; struct ceph_mds_session *session = NULL; dout("flush_snaps\n"); spin_lock(&mdsc->snap_flush_lock); while (!list_empty(&mdsc->snap_flush_list)) { ci = list_first_entry(&mdsc->snap_flush_list, struct ceph_inode_info, i_snap_flush_item); inode = &ci->vfs_inode; igrab(inode); spin_unlock(&mdsc->snap_flush_lock); spin_lock(&inode->i_lock); __ceph_flush_snaps(ci, &session); spin_unlock(&inode->i_lock); iput(inode); spin_lock(&mdsc->snap_flush_lock); } spin_unlock(&mdsc->snap_flush_lock); if (session) { mutex_unlock(&session->s_mutex); ceph_put_mds_session(session); } dout("flush_snaps done\n"); } /* * Handle a snap notification from the MDS. * * This can take two basic forms: the simplest is just a snap creation * or deletion notification on an existing realm. This should update the * realm and its children. * * The more difficult case is realm creation, due to snap creation at a * new point in the file hierarchy, or due to a rename that moves a file or * directory into another realm. 
 */
void ceph_handle_snap(struct ceph_mds_client *mdsc,
		      struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->client->sb;
	int mds = session->s_mds;
	u64 split;
	int op;
	int trace_len;
	struct ceph_snap_realm *realm = NULL;
	void *p = msg->front.iov_base;
	void *e = p + msg->front.iov_len;
	struct ceph_mds_snap_head *h;
	int num_split_inos, num_split_realms;
	__le64 *split_inos = NULL, *split_realms = NULL;
	int i;
	int locked_rwsem = 0;

	/* decode */
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = p;
	op = le32_to_cpu(h->op);
	split = le64_to_cpu(h->split);   /* non-zero if we are splitting an
					  * existing realm */
	num_split_inos = le32_to_cpu(h->num_split_inos);
	num_split_realms = le32_to_cpu(h->num_split_realms);
	trace_len = le32_to_cpu(h->trace_len);
	p += sizeof(*h);

	dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
	     ceph_snap_op_name(op), split, trace_len);

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	mutex_unlock(&session->s_mutex);

	down_write(&mdsc->snap_rwsem);
	locked_rwsem = 1;

	if (op == CEPH_SNAP_OP_SPLIT) {
		struct ceph_mds_snap_realm *ri;

		/*
		 * A "split" breaks part of an existing realm off into
		 * a new realm.  The MDS provides a list of inodes
		 * (with caps) and child realms that belong to the new
		 * child.
		 *
		 * NOTE(review): the two arrays below are consumed without a
		 * preceding ceph_decode_need() bounds check against e;
		 * presumably the MDS message is trusted here — confirm.
		 */
		split_inos = p;
		p += sizeof(u64) * num_split_inos;
		split_realms = p;
		p += sizeof(u64) * num_split_realms;
		ceph_decode_need(&p, e, sizeof(*ri), bad);
		/* we will peek at realm info here, but will _not_
		 * advance p, as the realm update will occur below in
		 * ceph_update_snap_trace. */
		ri = p;

		realm = ceph_lookup_snap_realm(mdsc, split);
		if (!realm) {
			realm = ceph_create_snap_realm(mdsc, split);
			if (IS_ERR(realm))
				goto out;
		}
		ceph_get_snap_realm(mdsc, realm);

		dout("splitting snap_realm %llx %p\n", realm->ino, realm);
		for (i = 0; i < num_split_inos; i++) {
			struct ceph_vino vino = {
				.ino = le64_to_cpu(split_inos[i]),
				.snap = CEPH_NOSNAP,
			};
			struct inode *inode = ceph_find_inode(sb, vino);
			struct ceph_inode_info *ci;

			if (!inode)
				continue;
			ci = ceph_inode(inode);

			spin_lock(&inode->i_lock);
			if (!ci->i_snap_realm)
				goto skip_inode;
			/*
			 * If this inode belongs to a realm that was
			 * created after our new realm, we experienced
			 * a race (due to another split notifications
			 * arriving from a different MDS).  So skip
			 * this inode.
			 */
			if (ci->i_snap_realm->created >
			    le64_to_cpu(ri->created)) {
				dout(" leaving %p in newer realm %llx %p\n",
				     inode, ci->i_snap_realm->ino,
				     ci->i_snap_realm);
				goto skip_inode;
			}
			dout(" will move %p to split realm %llx %p\n",
			     inode, realm->ino, realm);
			/*
			 * Remove the inode from the realm's inode
			 * list, but don't add it to the new realm
			 * yet.  We don't want the cap_snap to be
			 * queued (again) by ceph_update_snap_trace()
			 * below.  Queue it _now_, under the old context.
			 */
			spin_lock(&realm->inodes_with_caps_lock);
			list_del_init(&ci->i_snap_realm_item);
			spin_unlock(&realm->inodes_with_caps_lock);
			spin_unlock(&inode->i_lock);

			ceph_queue_cap_snap(ci);

			iput(inode);
			continue;

skip_inode:
			spin_unlock(&inode->i_lock);
			iput(inode);
		}

		/* we may have taken some of the old realm's children. */
		for (i = 0; i < num_split_realms; i++) {
			struct ceph_snap_realm *child =
				ceph_lookup_snap_realm(mdsc,
					   le64_to_cpu(split_realms[i]));
			if (!child)
				continue;
			adjust_snap_realm_parent(mdsc, child, realm->ino);
		}
	}

	/*
	 * update using the provided snap trace. if we are deleting a
	 * snap, we can avoid queueing cap_snaps.
	 *
	 * NOTE(review): the return value of ceph_update_snap_trace() is
	 * ignored, so a corrupt trace is only logged, not propagated.
	 */
	ceph_update_snap_trace(mdsc, p, e,
			       op == CEPH_SNAP_OP_DESTROY);

	if (op == CEPH_SNAP_OP_SPLIT) {
		/*
		 * ok, _now_ add the inodes into the new realm.
		 */
		for (i = 0; i < num_split_inos; i++) {
			struct ceph_vino vino = {
				.ino = le64_to_cpu(split_inos[i]),
				.snap = CEPH_NOSNAP,
			};
			struct inode *inode = ceph_find_inode(sb, vino);
			struct ceph_inode_info *ci;

			if (!inode)
				continue;
			ci = ceph_inode(inode);
			spin_lock(&inode->i_lock);
			/* list_empty means we unhooked it above and it has
			 * not been re-added elsewhere in the meantime */
			if (list_empty(&ci->i_snap_realm_item)) {
				struct ceph_snap_realm *oldrealm =
					ci->i_snap_realm;

				dout(" moving %p to split realm %llx %p\n",
				     inode, realm->ino, realm);
				spin_lock(&realm->inodes_with_caps_lock);
				list_add(&ci->i_snap_realm_item,
					 &realm->inodes_with_caps);
				ci->i_snap_realm = realm;
				spin_unlock(&realm->inodes_with_caps_lock);
				ceph_get_snap_realm(mdsc, realm);
				ceph_put_snap_realm(mdsc, oldrealm);
			}
			spin_unlock(&inode->i_lock);
			iput(inode);
		}

		/* we took a reference when we created the realm, above */
		ceph_put_snap_realm(mdsc, realm);
	}

	__cleanup_empty_realms(mdsc);

	up_write(&mdsc->snap_rwsem);

	flush_snaps(mdsc);
	return;

bad:
	pr_err("corrupt snap message from mds%d\n", mds);
	ceph_msg_dump(msg);
out:
	if (locked_rwsem)
		up_write(&mdsc->snap_rwsem);
	return;
}
gpl-2.0
sndnvaps/android_kernel_samsung_m110s
drivers/isdn/hisax/sedlbauer_cs.c
760
13643
/*====================================================================== A Sedlbauer PCMCIA client driver This driver is for the Sedlbauer Speed Star and Speed Star II, which are ISDN PCMCIA Cards. The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Modifications from dummy_cs.c are Copyright (C) 1999-2001 Marcus Niemann <maniemann@users.sourceforge.net>. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ioport.h> #include <asm/io.h> #include <asm/system.h> #include <pcmcia/cs_types.h> #include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include "hisax_cfg.h" MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Sedlbauer cards"); MODULE_AUTHOR("Marcus Niemann"); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/ /* Parameters that can be set with 'insmod' */ static int protocol = 2; /* EURO-ISDN Default */ module_param(protocol, int, 0); /*====================================================================*/ /* The event() function is this driver's Card Services event handler. It will be called by Card Services when an appropriate card status event is received. The config() and release() entry points are used to configure or release a socket, in response to card insertion and ejection events. They are invoked from the sedlbauer event handler. */ static int sedlbauer_config(struct pcmcia_device *link) __devinit ; static void sedlbauer_release(struct pcmcia_device *link); /* The attach() and detach() entry points are used to create and destroy "instances" of the driver, where each instance represents everything needed to manage one actual PCMCIA card. */ static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit; typedef struct local_info_t { struct pcmcia_device *p_dev; int stop; int cardnr; } local_info_t; /*====================================================================== sedlbauer_attach() creates an "instance" of the driver, allocating local data structures for one device. The device is registered with Card Services. 
    The dev_link structure is initialized, but we don't actually
    configure the card at this point -- we wait until we receive a
    card insertion event.

======================================================================*/

static int __devinit sedlbauer_probe(struct pcmcia_device *link)
{
	local_info_t *local;

	dev_dbg(&link->dev, "sedlbauer_attach()\n");

	/* Allocate space for private device-specific data */
	local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
	if (!local)
		return -ENOMEM;
	local->cardnr = -1;	/* no hisax card registered yet */

	local->p_dev = link;
	link->priv = local;

	/*
	  General socket configuration defaults can go here.  In this
	  client, we assume very little, and rely on the CIS for almost
	  everything.  In most clients, many details (i.e., number, sizes,
	  and attributes of IO windows) are fixed by the nature of the
	  device, and can be hard-wired here.
	*/

	/* from old sedl_cs */
	/* The io structure describes IO port mapping */
	link->io.NumPorts1 = 8;
	link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
	link->io.IOAddrLines = 3;

	link->conf.Attributes = 0;
	link->conf.IntType = INT_MEMORY_AND_IO;

	return sedlbauer_config(link);
} /* sedlbauer_attach */

/*======================================================================

    This deletes a driver "instance".  The device is de-registered
    with Card Services.  If it has been released, all local data
    structures are freed.  Otherwise, the structures will be freed
    when the device is released.

======================================================================*/

static void __devexit sedlbauer_detach(struct pcmcia_device *link)
{
	dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link);

	/* stop the hisax driver before tearing the card down */
	((local_info_t *)link->priv)->stop = 1;
	sedlbauer_release(link);

	/* This points to the parent local_info_t struct */
	kfree(link->priv);
} /* sedlbauer_detach */

/*======================================================================

    sedlbauer_config() is scheduled to run after a CARD_INSERTION event
    is received, to configure the PCMCIA socket, and to make the
    device available to the system.

======================================================================*/

/* pcmcia_loop_config() callback: validate one CIS configuration table
 * entry (audio, Vcc/Vpp, IO windows, optional memory window) and
 * reserve its resources.  Returns 0 to accept the entry, -ENODEV to
 * try the next one. */
static int sedlbauer_config_check(struct pcmcia_device *p_dev,
				  cistpl_cftable_entry_t *cfg,
				  cistpl_cftable_entry_t *dflt,
				  unsigned int vcc,
				  void *priv_data)
{
	win_req_t *req = priv_data;

	if (cfg->index == 0)
		return -ENODEV;

	/* Does this card need audio output? */
	if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
		p_dev->conf.Attributes |= CONF_ENABLE_SPKR;
		p_dev->conf.Status = CCSR_AUDIO_ENA;
	}

	/* Use power settings for Vcc and Vpp if present */
	/*  Note that the CIS values need to be rescaled */
	if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM)) {
		if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM]/10000)
			return -ENODEV;
	} else if (dflt->vcc.present & (1<<CISTPL_POWER_VNOM)) {
		if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM]/10000)
			return -ENODEV;
	}

	if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
		p_dev->conf.Vpp =
			cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
	else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
		p_dev->conf.Vpp =
			dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;

	p_dev->conf.Attributes |= CONF_ENABLE_IRQ;

	/* IO window settings */
	p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
	if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
		cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
		p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
		if (!(io->flags & CISTPL_IO_8BIT))
			p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
		if (!(io->flags & CISTPL_IO_16BIT))
			p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
		p_dev->io.BasePort1 = io->win[0].base;
		p_dev->io.NumPorts1 = io->win[0].len;
		if (io->nwin > 1) {
			p_dev->io.Attributes2 = p_dev->io.Attributes1;
			p_dev->io.BasePort2 = io->win[1].base;
			p_dev->io.NumPorts2 = io->win[1].len;
		}
		/* This reserves IO space but doesn't actually enable it */
		if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
			return -ENODEV;
	}

	/*
	  Now set up a common memory window, if needed.  There is room
	  in the struct pcmcia_device structure for one memory window handle,
	  but if the base addresses need to be saved, or if multiple
	  windows are needed, the info should go in the private data
	  structure for this device.

	  Note that the memory window base is a physical address, and
	  needs to be mapped to virtual space with ioremap() before it
	  is used.
	*/
	if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
		cistpl_mem_t *mem = (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
		memreq_t map;
		req->Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM;
		req->Attributes |= WIN_ENABLE;
		req->Base = mem->win[0].host_addr;
		req->Size = mem->win[0].len;
		req->AccessSpeed = 0;
		if (pcmcia_request_window(p_dev, req, &p_dev->win) != 0)
			return -ENODEV;
		map.Page = 0;
		map.CardOffset = mem->win[0].card_addr;
		if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
			return -ENODEV;
	}
	return 0;
}

static int __devinit sedlbauer_config(struct pcmcia_device *link)
{
	win_req_t *req;
	int ret;
	IsdnCard_t  icard;

	dev_dbg(&link->dev, "sedlbauer_config(0x%p)\n", link);

	/* NOTE(review): req is never kfree'd on either the success or the
	 * failure path below — looks like a small one-shot leak per config;
	 * confirm against later kernel revisions of this driver. */
	req = kzalloc(sizeof(win_req_t), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/*
	  In this loop, we scan the CIS for configuration table entries,
	  each of which describes a valid card configuration, including
	  voltage, IO window, memory window, and interrupt settings.

	  We make no assumptions about the card to be configured: we use
	  just the information available in the CIS.  In an ideal world,
	  this would work for any PCMCIA card, but it requires a complete
	  and accurate CIS.  In practice, a driver usually "knows" most of
	  these things without consulting the CIS, and most client drivers
	  will only use the CIS to fill in implementation-defined details.
	*/
	ret = pcmcia_loop_config(link, sedlbauer_config_check, req);
	if (ret)
		goto failed;

	/*
	   This actually configures the PCMCIA socket -- setting up
	   the I/O windows and the interrupt mapping, and putting the
	   card and host interface into "Memory and IO" mode.
	*/
	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		goto failed;

	/* Finally, report what we've done */
	dev_info(&link->dev, "index 0x%02x:",
	       link->conf.ConfigIndex);
	if (link->conf.Vpp)
		printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
	if (link->conf.Attributes & CONF_ENABLE_IRQ)
		printk(", irq %d", link->irq);
	if (link->io.NumPorts1)
		printk(", io 0x%04x-0x%04x", link->io.BasePort1,
		       link->io.BasePort1+link->io.NumPorts1-1);
	if (link->io.NumPorts2)
		printk(" & 0x%04x-0x%04x", link->io.BasePort2,
		       link->io.BasePort2+link->io.NumPorts2-1);
	if (link->win)
		printk(", mem 0x%06lx-0x%06lx", req->Base,
		       req->Base+req->Size-1);
	printk("\n");

	/* hand the configured resources over to the hisax core */
	icard.para[0] = link->irq;
	icard.para[1] = link->io.BasePort1;
	icard.protocol = protocol;
	icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA;

	ret = hisax_init_pcmcia(link,
				&(((local_info_t *)link->priv)->stop), &icard);
	if (ret < 0) {
		printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n",
			ret, link->io.BasePort1);
		sedlbauer_release(link);
		return -ENODEV;
	} else
		((local_info_t *)link->priv)->cardnr = ret;

	return 0;

failed:
	sedlbauer_release(link);
	return -ENODEV;
} /* sedlbauer_config */

/*======================================================================

    After a card is removed, sedlbauer_release() will unregister the
    device, and release the PCMCIA configuration.
    If the device is still open, this will be postponed until it is
    closed.

======================================================================*/

static void sedlbauer_release(struct pcmcia_device *link)
{
	local_info_t *local = link->priv;
	dev_dbg(&link->dev, "sedlbauer_release(0x%p)\n", link);

	if (local) {
		if (local->cardnr >= 0) {
			/* no unregister function with hisax */
			HiSax_closecard(local->cardnr);
		}
	}

	pcmcia_disable_device(link);
} /* sedlbauer_release */

/* Suspend: flag the hisax driver to stop touching the hardware. */
static int sedlbauer_suspend(struct pcmcia_device *link)
{
	local_info_t *dev = link->priv;

	dev->stop = 1;

	return 0;
}

/* Resume: clear the stop flag so the hisax driver may run again. */
static int sedlbauer_resume(struct pcmcia_device *link)
{
	local_info_t *dev = link->priv;

	dev->stop = 0;

	return 0;
}

/* CIS product-ID strings/hashes of the supported cards. */
static struct pcmcia_device_id sedlbauer_ids[] = {
	PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "speed star II", "V 3.1",
				 0x81fb79f5, 0xf3612e1d, 0x6b95c78a),
	PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D67",
				 0x81fb79f5, 0xe4e9bc12, 0x397b7e90),
	PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D98",
				 0x81fb79f5, 0xe4e9bc12, 0x2e5c7fce),
	PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", " (C) 93-94 VK",
				 0x81fb79f5, 0xe4e9bc12, 0x8db143fe),
	PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", " (c) 93-95 VK",
				 0x81fb79f5, 0xe4e9bc12, 0xb391ab4c),
	PCMCIA_DEVICE_PROD_ID12("HST High Soft Tech GmbH", "Saphir II B",
				0xd79e0b84, 0x21d083ae),
/*	PCMCIA_DEVICE_PROD_ID1234("SEDLBAUER", 0x81fb79f5), */ /* too generic*/
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, sedlbauer_ids);

static struct pcmcia_driver sedlbauer_driver = {
	.owner		= THIS_MODULE,
	.drv		= {
		.name	= "sedlbauer_cs",
	},
	.probe		= sedlbauer_probe,
	.remove		= __devexit_p(sedlbauer_detach),
	.id_table	= sedlbauer_ids,
	.suspend	= sedlbauer_suspend,
	.resume		= sedlbauer_resume,
};

static int __init init_sedlbauer_cs(void)
{
	return pcmcia_register_driver(&sedlbauer_driver);
}

static void __exit exit_sedlbauer_cs(void)
{
	pcmcia_unregister_driver(&sedlbauer_driver);
}

module_init(init_sedlbauer_cs);
module_exit(exit_sedlbauer_cs);
gpl-2.0
virtuous/kernel-vivow-gingerbread-v2
drivers/net/tulip/tulip_core.c
760
56045
/*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/


#define DRV_NAME	"tulip"
#ifdef CONFIG_TULIP_NAPI
#define DRV_VERSION    "1.1.15-NAPI" /* Keep at least for test */
#else
#define DRV_VERSION	"1.1.15"
#endif
#define DRV_RELDATE	"Feb 27, 2007"


#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "tulip.h"
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

static char version[] __devinitdata =
	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";

/* A few user-configurable values. */

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static unsigned int max_interrupt_work = 25;

#define MAX_UNITS 8
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */

/*  The possible media types that can be set in options[] are: */
const char * const medianame[32] = {
	"10baseT", "10base2", "AUI", "100baseTx",
	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX",
	"MII", "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX",
	"MII 100baseT4", "MII 100baseFx-HDX", "MII 100baseFx-FDX",
	"Home-PNA 1Mbps", "Invalid-19",
	"","","","", "","","","", "","","","Transceiver reset",
};

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure.
 */
/* On these architectures copying small packets is cheap/necessary, so
 * copy everything up to a full ethernet frame; elsewhere copy only
 * tiny packets. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif

/*
  Set the bus performance register.
	Typical: Set 16 longword cache alignment, no burst limit.
	Cache alignment bits 15:14	     Burst length 13:8
		0000	No alignment  0x00000000 unlimited		0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords
	Warning: many older 486 systems are broken and require setting
	0x00A04800	8 longword cache alignment, 8 longword burst.
	ToDo: Non-Intel setting could be better.
*/

#if defined(__alpha__) || defined(__ia64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0x8000;
#elif defined(CONFIG_SPARC) || defined(__hppa__)
/* The UltraSparc PCI controllers will disconnect at every 64-byte
 * crossing anyways so it makes no sense to tell Tulip to burst
 * any more than that.
 */
static int csr0 = 0x01A00000 | 0x9000;
#elif defined(__arm__) || defined(__sh__)
static int csr0 = 0x01A00000 | 0x4800;
#elif defined(__mips__)
static int csr0 = 0x00200000 | 0x4000;
#else
#warning Processor architecture undefined!
static int csr0 = 0x00A00000 | 0x4800;
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung.
 */
#define TX_TIMEOUT  (4*HZ)


MODULE_AUTHOR("The Linux Kernel Team");
MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* NOTE(review): tulip_debug is defined below (and presumably declared
 * in tulip.h) — confirm the extern declaration makes this module_param
 * ordering valid. */
module_param(tulip_debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
module_param(csr0, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);

#define PFX DRV_NAME ": "

#ifdef TULIP_DEBUG
int tulip_debug = TULIP_DEBUG;
#else
int tulip_debug = 1;
#endif

/* Periodic media timer: defer the real work to process context via
 * the device's media_work workqueue item. */
static void tulip_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);

	if (netif_running(dev))
		schedule_work(&tp->media_work);
}

/*
 * This table use during operation for capabilities and media timer.
 *
 * It is indexed via the values in 'enum chips'
 */

struct tulip_chip_table tulip_tbl[] = {
  { }, /* placeholder for array, slot unused currently */
  { }, /* placeholder for array, slot unused currently */

  /* DC21140 */
  { "Digital DS21140 Tulip", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
	tulip_media_task },

  /* DC21142, DC21143 */
  { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },

  /* LC82C168 */
  { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
	HAS_MII | HAS_PNICNWAY, pnic_timer, },

  /* MX98713 */
  { "Macronix 98713 PMAC", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

  /* MX98715 */
  { "Macronix 98715 PMAC", 256, 0x0001ebef,
	HAS_MEDIA_TABLE, mxic_timer, },

  /* MX98725 */
  { "Macronix 98725 PMAC", 256, 0x0001ebef,
	HAS_MEDIA_TABLE, mxic_timer, },

  /* AX88140 */
  { "ASIX AX88140", 128, 0x0001fbff,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
	| IS_ASIX, tulip_timer, tulip_media_task },

  /* PNIC2 */
  { "Lite-On PNIC-II", 256, 0x0801fbff,
	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },

  /* COMET */
  { "ADMtek Comet", 256, 0x0001abef,
	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },

  /* COMPEX9881 */
  { "Compex 9881 PMAC", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

  /* I21145 */
  { "Intel DS21145 Tulip", 128, 0x0801fbff,
	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },

  /* DM910X */
#ifdef CONFIG_TULIP_DM910X
  { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
	tulip_timer, tulip_media_task },
#else
  { NULL },
#endif

  /* RS7112 */
  { "Conexant LANfinity", 256, 0x0001ebef,
	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },

};


/* PCI vendor/device IDs handled by this driver; driver_data is the
 * 'enum chips' index into tulip_tbl above. */
static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
/*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
#ifdef CONFIG_TULIP_DM910X
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
#endif
	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);


/* A full-duplex map for media types.
 */

/* Per-media capability flags, indexed by dev->if_port media code.
 * Bit meanings (MediaIsMII, MediaIs100, MediaIsFD, MediaAlwaysFD, ...)
 * are defined elsewhere in the driver; entries 20..31 default to 0.
 */
const char tulip_media_cap[32] =
{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };

static void tulip_tx_timeout(struct net_device *dev);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
					  struct net_device *dev);
static int tulip_open(struct net_device *dev);
static int tulip_close(struct net_device *dev);
static void tulip_up(struct net_device *dev);
static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif

/*
 * Move the chip between its ACPI power states via the CFDD config-space
 * register.  sleep/snooze select the corresponding CFDD bit; both zero
 * means fully awake.  No-op on chips without the HAS_ACPI flag.
 * The register is only written when the value actually changes.
 */
static void tulip_set_power_state (struct tulip_private *tp,
				   int sleep, int snooze)
{
	if (tp->flags & HAS_ACPI) {
		u32 tmp, newtmp;
		pci_read_config_dword (tp->pdev, CFDD, &tmp);
		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
		if (sleep)
			newtmp |= CFDD_Sleep;
		else if (snooze)
			newtmp |= CFDD_Snooze;
		if (tmp != newtmp)
			pci_write_config_dword (tp->pdev, CFDD, newtmp);
	}

}

/*
 * Bring the interface fully up: wake the chip, reset it, program the
 * descriptor ring base addresses, load the station address (either via
 * dedicated registers on MC_HASH_ONLY parts or via a setup frame queued
 * on the Tx ring), pick a media type per chip family, then start Rx/Tx
 * and unmask interrupts.  Caller must have initialized the rings.
 * NOTE(review): the magic CSR6/CSR12..15 constants below are per-chip
 * values from the respective datasheets / vendor contributions — do not
 * change without the datasheet at hand.
 */
static void
tulip_up(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int next_tick = 3*HZ;
	u32 reg;
	int i;

#ifdef CONFIG_TULIP_NAPI
	napi_enable(&tp->napi);
#endif

	/* Wake the chip from sleep/snooze mode. */
	tulip_set_power_state (tp, 0, 0);

	/* On some chip revs we must set the MII/SYM port before the reset!? */
	if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
		iowrite32(0x00040000, ioaddr + CSR6);

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	iowrite32(0x00000001, ioaddr + CSR0);
	/* The config-space read forces the posted iowrite32 out to the bus. */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
	udelay(100);

	/* Deassert reset.
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list. */
	iowrite32(tp->csr0, ioaddr + CSR0);
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
	udelay(100);

	if (tulip_debug > 1)
		printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
		       dev->name, dev->irq);

	/* Tell the chip where the Rx/Tx descriptor rings live (DMA addrs). */
	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;

	if (tp->flags & MC_HASH_ONLY) {
		/* These work-alike chips take the MAC via registers, not a
		 * setup frame. */
		u32 addr_low = get_unaligned_le32(dev->dev_addr);
		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
		if (tp->chip_id == AX88140) {
			iowrite32(0, ioaddr + CSR13);
			iowrite32(addr_low, ioaddr + CSR14);
			iowrite32(1, ioaddr + CSR13);
			iowrite32(addr_high, ioaddr + CSR14);
		} else if (tp->flags & COMET_MAC_ADDR) {
			iowrite32(addr_low, ioaddr + 0xA4);
			iowrite32(addr_high, ioaddr + 0xA8);
			iowrite32(0, ioaddr + 0xAC);
			iowrite32(0, ioaddr + 0xB0);
		}
	} else {
		/* This is set_rx_mode(), but without starting the transmitter. */
		u16 *eaddrs = (u16 *)dev->dev_addr;
		u16 *setup_frm = &tp->setup_frame[15*6];
		dma_addr_t mapping;

		/* 21140 bug: you must add the broadcast address. */
		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
		/* Fill the final entry of the table with our physical address. */
		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];

		mapping = pci_map_single(tp->pdev, tp->setup_frame,
					 sizeof(tp->setup_frame),
					 PCI_DMA_TODEVICE);
		tp->tx_buffers[tp->cur_tx].skb = NULL;
		tp->tx_buffers[tp->cur_tx].mapping = mapping;

		/* Put the setup frame on the Tx list. */
		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);

		tp->cur_tx++;
	}

	tp->saved_if_port = dev->if_port;
	if (dev->if_port == 0)
		dev->if_port = tp->default_port;

	/* Allow selecting a default media. */
	i = 0;
	if (tp->mtable == NULL)
		goto media_picked;
	if (dev->if_port) {
		/* Media 12 ("MII") maps to leaf 11; 12 itself means leaf 0. */
		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
			(dev->if_port == 12 ? 0 : dev->if_port);
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				dev_info(&dev->dev,
					 "Using user-specified media %s\n",
					 medianame[dev->if_port]);
				goto media_picked;
			}
	}
	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				dev_info(&dev->dev,
					 "Using EEPROM-set media %s\n",
					 medianame[looking_for]);
				goto media_picked;
			}
	}
	/* Start sensing first non-full-duplex media. */
	for (i = tp->mtable->leafcount - 1;
	     (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0;
	     i--)
		;
media_picked:

	tp->csr6 = 0;
	tp->cur_index = i;
	tp->nwayset = 0;

	if (dev->if_port) {
		if (tp->chip_id == DC21143 &&
		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
			/* We must reset the media CSRs when we force-select MII mode. */
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(0x0008, ioaddr + CSR15);
		}
		tulip_select_media(dev, 1);
	} else if (tp->chip_id == DC21142) {
		if (tp->mii_cnt) {
			tulip_select_media(dev, 1);
			if (tulip_debug > 1)
				dev_info(&dev->dev,
					 "Using MII transceiver %d, status %04x\n",
					 tp->phys[0],
					 tulip_mdio_read(dev, tp->phys[0], 1));
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			tp->csr6 = csr6_mask_hdcap;
			dev->if_port = 11;
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
		} else
			t21142_start_nway(dev);
	} else if (tp->chip_id == PNIC2) {
		/* for initial startup advertise 10/100 Full and Half */
		tp->sym_advertise = 0x01E0;
		/* enable autonegotiate end interrupt */
		iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
		iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
		pnic2_start_nway(dev);
	} else if (tp->chip_id == LC82C168 && ! tp->medialock) {
		if (tp->mii_cnt) {
			dev->if_port = 11;
			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
			iowrite32(0x0001, ioaddr + CSR15);
		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
			pnic_do_nway(dev);
		else {
			/* Start with 10mbps to do autonegotiation. */
			iowrite32(0x32, ioaddr + CSR12);
			tp->csr6 = 0x00420000;
			iowrite32(0x0001B078, ioaddr + 0xB8);
			iowrite32(0x0201B078, ioaddr + 0xB8);
			/* Poll again soon while nway is in progress. */
			next_tick = 1*HZ;
		}
	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
		   ! tp->medialock) {
		dev->if_port = 0;
		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
		/* Provided by BOLO, Macronix - 12/10/1998. */
		dev->if_port = 0;
		tp->csr6 = 0x01a80200;
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
		/* Enable automatic Tx underrun recovery. */
		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
		dev->if_port = tp->mii_cnt ? 11 : 0;
		tp->csr6 = 0x00040000;
	} else if (tp->chip_id == AX88140) {
		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
	} else
		tulip_select_media(dev, 1);

	/* Start the chip's Tx to process setup frame. */
	tulip_stop_rxtx(tp);
	barrier();
	udelay(5);
	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);

	/* Enable interrupts by setting the interrupt mask.
	 * (Writing CSR5 first acknowledges any stale status bits.) */
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	tulip_start_rxtx(tp);
	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */

	if (tulip_debug > 2) {
		printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
		       dev->name,
		       ioread32(ioaddr + CSR0),
		       ioread32(ioaddr + CSR5),
		       ioread32(ioaddr + CSR6));
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	tp->timer.expires = RUN_AT(next_tick);
	add_timer(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	init_timer(&tp->oom_timer);
	tp->oom_timer.data = (unsigned long)dev;
	tp->oom_timer.function = oom_timer;
#endif
}

/*
 * net_device ndo_open: allocate/initialize rings, grab the (shared)
 * IRQ, then bring the hardware up and start the Tx queue.
 * On request_irq() failure the ring buffers are released again.
 */
static int
tulip_open(struct net_device *dev)
{
	int retval;

	tulip_init_ring (dev);

	retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED,
			     dev->name, dev);
	if (retval)
		goto free_ring;

	tulip_up (dev);

	netif_start_queue (dev);

	return 0;

free_ring:
	tulip_free_ring (dev);
	return retval;
}

/*
 * ndo_tx_timeout handler.  For MII media the periodic media monitor is
 * trusted to recover; for 21140/21142-class chips recovery is deferred
 * to the media work item; otherwise the chip is reset inline via
 * tulip_tx_timeout_complete().  Runs under tp->lock.
 */
static void tulip_tx_timeout(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	spin_lock_irqsave (&tp->lock, flags);

	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
		/* Do nothing -- the media monitor should handle this. */
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "Transmit timeout using MII device\n");
	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
		   tp->chip_id == DM910X) {
		dev_warn(&dev->dev,
			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
			 ioread32(ioaddr + CSR15));
		tp->timeout_recovery = 1;
		schedule_work(&tp->media_work);
		goto out_unlock;
	} else if (tp->chip_id == PNIC2) {
		dev_warn(&dev->dev,
			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
			 (int)ioread32(ioaddr + CSR5),
			 (int)ioread32(ioaddr + CSR6),
			 (int)ioread32(ioaddr + CSR7),
			 (int)ioread32(ioaddr + CSR12));
	} else {
		dev_warn(&dev->dev,
			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
		dev->if_port = 0;
	}

#if defined(way_too_many_messages)
	/* Verbose ring dump, normally compiled out. */
	if (tulip_debug > 3) {
		int i;
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			int j;
			printk(KERN_DEBUG
			       "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
			       i,
			       (unsigned int)tp->rx_ring[i].status,
			       (unsigned int)tp->rx_ring[i].length,
			       (unsigned int)tp->rx_ring[i].buffer1,
			       (unsigned int)tp->rx_ring[i].buffer2,
			       buf[0], buf[1], buf[2]);
			for (j = 0; buf[j] != 0xee && j < 1600; j++)
				if (j < 100)
					pr_cont(" %02x", buf[j]);
			pr_cont(" j=%d\n", j);
		}
		printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
		printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
		pr_cont("\n");
	}
#endif

	tulip_tx_timeout_complete(tp, ioaddr);

out_unlock:
	spin_unlock_irqrestore (&tp->lock, flags);
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue (dev);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits.
 * Rx descriptors get a freshly allocated, DMA-mapped skb and are handed
 * to the chip (DescOwned); Tx descriptors are cleared and only chained
 * via buffer2.  On skb allocation failure the Rx loop stops early and
 * dirty_rx records the shortfall for tulip_refill_rx() to make up. */
static void tulip_init_ring(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int i;

	tp->susp_rx = 0;
	tp->ttimer = 0;
	tp->nir = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0x00000000;
		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
		/* buffer2 chains each descriptor to the next one. */
		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);

	for (i = 0; i < RX_RING_SIZE; i++) {
		dma_addr_t mapping;

		/* Note the receive buffer must be longword aligned.
		   dev_alloc_skb() provides 16 byte alignment.  But do *not*
		   use skb_reserve() to align the IP header! */
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
		tp->rx_buffers[i].skb = skb;
		if (skb == NULL)
			break;
		mapping = pci_map_single(tp->pdev, skb->data,
					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		tp->rx_buffers[i].mapping = mapping;
		skb->dev = dev;			/* Mark as being used by this device. */
		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
	}
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
		tp->tx_ring[i].status = 0x00000000;
		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
	}
	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
}

/*
 * ndo_start_xmit: map the skb for DMA, fill the next Tx descriptor,
 * hand it to the chip (DescOwned after a wmb() so the chip never sees
 * a half-written descriptor), and kick CSR1 (Tx poll demand).
 * Interrupt-on-completion is requested only every half ring to limit
 * interrupt rate; the queue is stopped when the ring is nearly full,
 * leaving two slots spare for set_rx_mode()'s setup frame.
 */
static netdev_tx_t
tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	u32 flag;
	dma_addr_t mapping;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_buffers[entry].skb = skb;
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);
	tp->tx_buffers[entry].mapping = mapping;
	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);

	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
		flag = 0x60000000; /* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = 0xe0000000; /* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = 0x60000000; /* No Tx-done intr. */
	} else {		/* Leave room for set_rx_mode() to fill entries. */
		flag = 0xe0000000; /* Tx-done intr. */
		netif_stop_queue(dev);
	}
	if (entry == TX_RING_SIZE-1)
		flag = 0xe0000000 | DESC_RING_WRAP;

	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
	/* if we were using Transmit Automatic Polling, we would need a
	 * wmb() here. */
	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
	wmb();

	tp->cur_tx++;

	/* Trigger an immediate transmit demand. */
	iowrite32(0, tp->base_addr + CSR1);

	spin_unlock_irqrestore(&tp->lock, flags);

	return NETDEV_TX_OK;
}

/*
 * Reclaim all still-pending Tx descriptors (used on shutdown): unmap
 * DMA buffers and free skbs.  Entries without an skb are setup frames
 * (or unmapped dummy descriptors) and are unmapped with the setup-frame
 * size instead.  Descriptors still owned by the chip (status < 0, i.e.
 * DescOwned set) count as Tx errors.
 */
static void tulip_clean_tx_ring(struct tulip_private *tp)
{
	unsigned int dirty_tx;

	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
	     dirty_tx++) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(tp->tx_ring[entry].status);

		if (status < 0) {
			tp->stats.tx_errors++;	/* It wasn't Txed */
			tp->tx_ring[entry].status = 0;
		}

		/* Check for Tx filter setup frames. */
		if (tp->tx_buffers[entry].skb == NULL) {
			/* test because dummy frames not mapped */
			if (tp->tx_buffers[entry].mapping)
				pci_unmap_single(tp->pdev,
					tp->tx_buffers[entry].mapping,
					sizeof(tp->setup_frame),
					PCI_DMA_TODEVICE);
			continue;
		}

		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
				tp->tx_buffers[entry].skb->len,
				PCI_DMA_TODEVICE);

		/* Free the original skb. */
		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
		tp->tx_buffers[entry].skb = NULL;
		tp->tx_buffers[entry].mapping = 0;
	}
}

/*
 * Quiesce the hardware: stop NAPI and the media timers, mask all
 * interrupts, halt Rx/Tx, refill/reclaim the rings, harvest the missed
 * frame counter, re-arm the media timer object for the next open, and
 * drop the chip into snooze power state.
 */
static void tulip_down (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	/* Must run outside tp->lock: the work item takes the lock itself. */
	cancel_work_sync(&tp->media_work);

#ifdef CONFIG_TULIP_NAPI
	napi_disable(&tp->napi);
#endif

	del_timer_sync (&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	del_timer_sync (&tp->oom_timer);
#endif
	spin_lock_irqsave (&tp->lock, flags);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32 (0x00000000, ioaddr + CSR7);

	/* Stop the Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	/* prepare receive buffers */
	tulip_refill_rx(dev);

	/* release any unconsumed transmit buffers */
	tulip_clean_tx_ring(tp);

	/* 0xffffffff reads back when the chip has been powered away. */
	if (ioread32 (ioaddr + CSR6) != 0xffffffff)
		tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;

	spin_unlock_irqrestore (&tp->lock, flags);

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long)dev;
	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;

	dev->if_port = tp->saved_if_port;

	/* Leave the driver in snooze, not sleep, mode. */
	tulip_set_power_state (tp, 0, 1);
}

/*
 * Release every skb and DMA mapping held by the Rx and Tx rings and
 * poison the Rx descriptors so the (stopped) chip cannot use them.
 */
static void tulip_free_ring (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = tp->rx_buffers[i].skb;
		dma_addr_t mapping = tp->rx_buffers[i].mapping;

		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;

		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
		tp->rx_ring[i].length = 0;
		/* An invalid address. */
		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
		if (skb) {
			pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb (skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = tp->tx_buffers[i].skb;

		if (skb != NULL) {
			pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb (skb);
		}
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
	}
}

/*
 * ndo_stop: stop the queue, quiesce the hardware (tulip_down), release
 * the IRQ and finally free the descriptor ring resources.
 */
static int tulip_close (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	netif_stop_queue (dev);

	tulip_down (dev);

	if (tulip_debug > 1)
		dev_printk(KERN_DEBUG, &dev->dev,
			   "Shutting down ethercard, status was %02x\n",
			   ioread32 (ioaddr + CSR5));

	free_irq (dev->irq, dev);

	tulip_free_ring (dev);

	return 0;
}

/*
 * ndo_get_stats: fold the chip's missed-frame counter (CSR8, low 16
 * bits, clear-on-read) into the software stats while the device is
 * running, then return the accumulated counters.
 */
static struct net_device_stats *tulip_get_stats(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	if (netif_running(dev)) {
		unsigned long flags;

		spin_lock_irqsave (&tp->lock, flags);

		tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return &tp->stats;
}


/* ethtool -i: report driver name/version and PCI bus location. */
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tulip_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pdev));
}

static const struct ethtool_ops ops = {
	.get_drvinfo = tulip_get_drvinfo
};

/* Provide ioctl() calls
   to examine the MII xcvr state. */
/*
 * ndo_do_ioctl: SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG.  phy_id 32 is a
 * pseudo-PHY for chips with internal NWAY (HAS_NWAY): its "registers"
 * are synthesized from CSR12/CSR14 state rather than read over MDIO.
 * Real PHYs go through tulip_mdio_read()/tulip_mdio_write().
 */
static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	const unsigned int phy_idx = 0;
	int phy = tp->phys[phy_idx] & 0x1f;
	unsigned int regnum = data->reg_num;

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		if (tp->mii_cnt)
			data->phy_id = phy;
		else if (tp->flags & HAS_NWAY)
			data->phy_id = 32;
		else if (tp->chip_id == COMET)
			data->phy_id = 1;
		else
			return -ENODEV;
		/* fall through — also return the selected register's value */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			int csr12 = ioread32 (ioaddr + CSR12);
			int csr14 = ioread32 (ioaddr + CSR14);
			/* Emulate the standard MII register set from SIA state. */
			switch (regnum) {
			case 0:
				if (((csr14<<5) & 0x1000) ||
					(dev->if_port == 5 && tp->nwayset))
					data->val_out = 0x1000;
				else
					data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
						| (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
				break;
			case 1:
				data->val_out =
					0x1848 +
					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
					((csr12&0x06) == 6 ? 0 : 4);
				data->val_out |= 0x6048;
				break;
			case 4:
				/* Advertised value, bogus 10baseTx-FD value from CSR6. */
				data->val_out =
					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
					((csr14 >> 1) & 0x20) + 1;
				data->val_out |= ((csr14 >> 9) & 0x03C0);
				break;
			case 5: data->val_out = tp->lpar; break;
			default: data->val_out = 0; break;
			}
		} else {
			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (regnum & ~0x1f)
			return -EINVAL;
		if (data->phy_id == phy) {
			u16 value = data->val_in;
			/* Mirror writes to the real PHY into driver state. */
			switch (regnum) {
			case 0:	/* Check for autonegotiation  on or reset. */
				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
				if (tp->full_duplex_lock)
					tp->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4:
				tp->advertising[phy_idx] =
				tp->mii_advertise = data->val_in;
				break;
			}
		}
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			u16 value = data->val_in;
			if (regnum == 0) {
				/* 0x1200 = autoneg enable + restart. */
				if ((value & 0x1200) == 0x1200) {
					if (tp->chip_id == PNIC2) {
						pnic2_start_nway (dev);
					} else {
						t21142_start_nway (dev);
					}
				}
			} else if (regnum == 4)
				tp->sym_advertise = value;
		} else {
			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}


/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling tp->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

/* Byte-wise little-endian set_bit for building the 512-bit hash table. */
#undef set_bit_le
#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)

/*
 * Build a hash-filter setup frame: 512-bit multicast hash (indexed by
 * the low 9 bits of the little-endian CRC of each address) plus our
 * station address in the final perfect-filter slot.  Each 16-bit value
 * is stored twice because only the low shortword of each 32-bit setup
 * frame entry is used by the chip.
 */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		set_bit_le(index, hash_table);
	}
	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &tp->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

/*
 * Build a perfect-filter setup frame: one 16-entry exact-match table
 * holding every multicast address, broadcast padding for unused slots,
 * and our station address in the last slot.  Only valid when the
 * multicast list has at most 14 entries (checked by the caller).
 */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &tp->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

/*
 * ndo_set_multicast_list: program the Rx filter.  Promiscuous/allmulti
 * set the corresponding CSR6 bits directly; MC_HASH_ONLY work-alikes
 * get a 64-entry hash written through registers; genuine Tulips get a
 * setup frame queued on the Tx ring (prefixed by a dummy descriptor
 * when not at slot 0, to work around a chip erratum).
 */
static void set_rx_mode(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr6;

	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;

	tp->csr6 &= ~0x00D5;
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
		csr6 |= AcceptAllMulticast | AcceptAllPhys;
	} else if ((netdev_mc_count(dev) > 1000) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AcceptAllMulticast;
		csr6 |= AcceptAllMulticast;
	} else	if (tp->flags & MC_HASH_ONLY) {
		/* Some work-alikes have only a 64-entry hash filter table. */
		/* Should verify correctness on big-endian/__powerpc__ */
		struct netdev_hw_addr *ha;
		if (netdev_mc_count(dev) > 64) {
			/* Arbitrary non-effective limit. */
			tp->csr6 |= AcceptAllMulticast;
			csr6 |= AcceptAllMulticast;
		} else {
			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
			int filterbit;
			netdev_for_each_mc_addr(ha, dev) {
				if (tp->flags & COMET_MAC_ADDR)
					filterbit = ether_crc_le(ETH_ALEN,
								 ha->addr);
				else
					filterbit = ether_crc(ETH_ALEN, ha->addr) >> 26;
				filterbit &= 0x3f;
				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
				if (tulip_debug > 2)
					dev_info(&dev->dev,
						 "Added filter for %pM %08x bit %d\n",
						 ha->addr,
						 ether_crc(ETH_ALEN, ha->addr),
						 filterbit);
			}
			if (mc_filter[0] == tp->mc_filter[0] &&
			    mc_filter[1] == tp->mc_filter[1])
				;				/* No change. */
			else if (tp->flags & IS_ASIX) {
				/* ASIX exposes the filter via indexed CSR13/14. */
				iowrite32(2, ioaddr + CSR13);
				iowrite32(mc_filter[0], ioaddr + CSR14);
				iowrite32(3, ioaddr + CSR13);
				iowrite32(mc_filter[1], ioaddr + CSR14);
			} else if (tp->flags & COMET_MAC_ADDR) {
				iowrite32(mc_filter[0], ioaddr + 0xAC);
				iowrite32(mc_filter[1], ioaddr + 0xB0);
			}
			tp->mc_filter[0] = mc_filter[0];
			tp->mc_filter[1] = mc_filter[1];
		}
	} else {
		unsigned long flags;
		u32 tx_flags = 0x08000000 | 192;

		/* Note that only the low-address shortword of setup_frame is valid!
		   The values are doubled for big-endian architectures. */
		if (netdev_mc_count(dev) > 14) {
			/* Must use a multicast hash table. */
			build_setup_frame_hash(tp->setup_frame, dev);
			tx_flags = 0x08400000 | 192;
		} else {
			build_setup_frame_perfect(tp->setup_frame, dev);
		}

		spin_lock_irqsave(&tp->lock, flags);

		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
			/* Same setup recently queued, we need not add it. */
		} else {
			unsigned int entry;
			int dummy = -1;

			/* Now add this frame to the Tx list. */

			entry = tp->cur_tx++ % TX_RING_SIZE;

			if (entry != 0) {
				/* Avoid a chip errata by prefixing a dummy entry. */
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tp->tx_ring[entry].length =
					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
				tp->tx_ring[entry].buffer1 = 0;
				/* Must set DescOwned later to avoid race with chip */
				dummy = entry;
				entry = tp->cur_tx++ % TX_RING_SIZE;

			}

			tp->tx_buffers[entry].skb = NULL;
			tp->tx_buffers[entry].mapping =
				pci_map_single(tp->pdev, tp->setup_frame,
					       sizeof(tp->setup_frame),
					       PCI_DMA_TODEVICE);
			/* Put the setup frame on the Tx list. */
			if (entry == TX_RING_SIZE-1)
				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
			tp->tx_ring[entry].buffer1 =
				cpu_to_le32(tp->tx_buffers[entry].mapping);
			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
			if (dummy >= 0)
				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
				netif_stop_queue(dev);

			/* Trigger an immediate transmit demand. */
			iowrite32(0, ioaddr + CSR1);
		}

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	iowrite32(csr6, ioaddr + CSR6);
}

#ifdef CONFIG_TULIP_MWI
/*
 * Probe-time CSR0 tuning: enable Memory-Read-Multiple and
 * Memory-Write-and-Invalidate when the PCI config space confirms MWI
 * support and a usable cache line size, and derive cache-alignment and
 * burst-length fields from that size.  Falls back to conservative
 * de4x5-derived defaults otherwise.  Result is stored in tp->csr0.
 */
static void __devinit tulip_mwi_config (struct pci_dev *pdev,
					struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u8 cache;
	u16 pci_command;
	u32 csr0;

	if (tulip_debug > 3)
		printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));

	tp->csr0 = csr0 = 0;

	/* if we have any cache line size at all, we can do MRM and MWI */
	csr0 |= MRM | MWI;

	/* Enable MWI in the standard PCI command bit.
	 * Check for the case where MWI is desired but not available
	 */
	pci_try_set_mwi(pdev);

	/* read result from hardware (in case bit refused to enable) */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
		csr0 &= ~MWI;

	/* if cache line size hardwired to zero, no MWI */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
	if ((csr0 & MWI) && (cache == 0)) {
		csr0 &= ~MWI;
		pci_clear_mwi(pdev);
	}

	/* assign per-cacheline-size cache alignment and
	 * burst length values
	 */
	switch (cache) {
	case 8:
		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
		break;
	case 16:
		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
		break;
	case 32:
		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
		break;
	default:
		cache = 0;
		break;
	}

	/* if we have a good cache line size, we by now have a good
	 * csr0, so save it and exit
	 */
	if (cache)
		goto out;

	/* we don't have a good csr0 or cache line size, disable MWI */
	if (csr0 & MWI) {
		pci_clear_mwi(pdev);
		csr0 &= ~MWI;
	}

	/* sane defaults for burst length and cache alignment
	 * originally from de4x5 driver
	 */
	csr0 |= (8 << BurstLenShift) | (1 << CALShift);

out:
	tp->csr0 = csr0;
	if (tulip_debug > 2)
		printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
		       pci_name(pdev), cache, csr0);
}
#endif

/*
 * Chips that have the MRM/reserved bit quirk and the burst quirk.
That * is the DM910X and the on chip ULi devices */ static int tulip_uli_dm_quirk(struct pci_dev *pdev) { if (pdev->vendor == 0x1282 && pdev->device == 0x9102) return 1; return 0; } static const struct net_device_ops tulip_netdev_ops = { .ndo_open = tulip_open, .ndo_start_xmit = tulip_start_xmit, .ndo_tx_timeout = tulip_tx_timeout, .ndo_stop = tulip_close, .ndo_get_stats = tulip_get_stats, .ndo_do_ioctl = private_ioctl, .ndo_set_multicast_list = set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_tulip, #endif }; static int __devinit tulip_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { struct tulip_private *tp; /* See note below on the multiport cards. */ static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'}; static struct pci_device_id early_486_chipsets[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) }, { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) }, { }, }; static int last_irq; static int multiport_cnt; /* For four-port boards w/one EEPROM */ int i, irq; unsigned short sum; unsigned char *ee_data; struct net_device *dev; void __iomem *ioaddr; static int board_idx = -1; int chip_idx = ent->driver_data; const char *chip_name = tulip_tbl[chip_idx].chip_name; unsigned int eeprom_missing = 0; unsigned int force_csr0 = 0; #ifndef MODULE if (tulip_debug > 0) printk_once(KERN_INFO "%s", version); #endif board_idx++; /* * Lan media wire a tulip chip to a wan interface. Needs a very * different driver (lmc driver) */ if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) { pr_err(PFX "skipping LMC card\n"); return -ENODEV; } /* * DM910x chips should be handled by the dmfe driver, except * on-board chips on SPARC systems. Also, early DM9100s need * software CRC which only the dmfe driver supports. 
*/ #ifdef CONFIG_TULIP_DM910X if (chip_idx == DM910X) { struct device_node *dp; if (pdev->vendor == 0x1282 && pdev->device == 0x9100 && pdev->revision < 0x30) { pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n"); return -ENODEV; } dp = pci_device_to_OF_node(pdev); if (!(dp && of_get_property(dp, "local-mac-address", NULL))) { pr_info(PFX "skipping DM910x expansion card (use dmfe)\n"); return -ENODEV; } } #endif /* * Looks for early PCI chipsets where people report hangs * without the workarounds being on. */ /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache aligned. Aries might need this too. The Saturn errata are not pretty reading but thankfully it's an old 486 chipset. 2. The dreaded SiS496 486 chipset. Same workaround as Intel Saturn. */ if (pci_dev_present(early_486_chipsets)) { csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift); force_csr0 = 1; } /* bugfix: the ASIX must have a burst limit or horrible things happen. */ if (chip_idx == AX88140) { if ((csr0 & 0x3f00) == 0) csr0 |= 0x2000; } /* PNIC doesn't have MWI/MRL/MRM... 
*/ if (chip_idx == LC82C168) csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */ /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ if (tulip_uli_dm_quirk(pdev)) { csr0 &= ~0x01f100ff; #if defined(CONFIG_SPARC) csr0 = (csr0 & ~0xff00) | 0xe000; #endif } /* * And back to business */ i = pci_enable_device(pdev); if (i) { pr_err(PFX "Cannot enable tulip board #%d, aborting\n", board_idx); return i; } irq = pdev->irq; /* alloc_etherdev ensures aligned and zeroed private structures */ dev = alloc_etherdev (sizeof (*tp)); if (!dev) { pr_err(PFX "ether device alloc failed, aborting\n"); return -ENOMEM; } SET_NETDEV_DEV(dev, &pdev->dev); if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n", pci_name(pdev), (unsigned long long)pci_resource_len (pdev, 0), (unsigned long long)pci_resource_start (pdev, 0)); goto err_out_free_netdev; } /* grab all resources from both PIO and MMIO regions, as we * don't want anyone else messing around with our hardware */ if (pci_request_regions (pdev, DRV_NAME)) goto err_out_free_netdev; ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size); if (!ioaddr) goto err_out_free_res; /* * initialize private data structure 'tp' * it is zeroed and aligned in alloc_etherdev */ tp = netdev_priv(dev); tp->dev = dev; tp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct tulip_rx_desc) * RX_RING_SIZE + sizeof(struct tulip_tx_desc) * TX_RING_SIZE, &tp->rx_ring_dma); if (!tp->rx_ring) goto err_out_mtable; tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE); tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE; tp->chip_id = chip_idx; tp->flags = tulip_tbl[chip_idx].flags; tp->pdev = pdev; tp->base_addr = ioaddr; tp->revision = pdev->revision; tp->csr0 = csr0; spin_lock_init(&tp->lock); spin_lock_init(&tp->mii_lock); init_timer(&tp->timer); tp->timer.data = (unsigned long)dev; tp->timer.function = 
tulip_tbl[tp->chip_id].media_timer; INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); dev->base_addr = (unsigned long)ioaddr; #ifdef CONFIG_TULIP_MWI if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) tulip_mwi_config (pdev, dev); #endif /* Stop the chip's Tx and Rx processes. */ tulip_stop_rxtx(tp); pci_set_master(pdev); #ifdef CONFIG_GSC if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) { switch (pdev->subsystem_device) { default: break; case 0x1061: case 0x1062: case 0x1063: case 0x1098: case 0x1099: case 0x10EE: tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE; chip_name = "GSC DS21140 Tulip"; } } #endif /* Clear the missed-packet counter. */ ioread32(ioaddr + CSR8); /* The station address ROM is read byte serially. The register must be polled, waiting for the value to be read bit serially from the EEPROM. */ ee_data = tp->eeprom; memset(ee_data, 0, sizeof(tp->eeprom)); sum = 0; if (chip_idx == LC82C168) { for (i = 0; i < 3; i++) { int value, boguscnt = 100000; iowrite32(0x600 | i, ioaddr + 0x98); do { value = ioread32(ioaddr + CSR9); } while (value < 0 && --boguscnt > 0); put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i); sum += value & 0xffff; } } else if (chip_idx == COMET) { /* No need to read the EEPROM. */ put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr); put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4); for (i = 0; i < 6; i ++) sum += dev->dev_addr[i]; } else { /* A serial EEPROM interface, we read now and sort it out later. */ int sa_offset = 0; int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 
8 : 6; int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16); if (ee_max_addr > sizeof(tp->eeprom)) ee_max_addr = sizeof(tp->eeprom); for (i = 0; i < ee_max_addr ; i += sizeof(u16)) { u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size); ee_data[i] = data & 0xff; ee_data[i + 1] = data >> 8; } /* DEC now has a specification (see Notes) but early board makers just put the address in the first EEPROM locations. */ /* This does memcmp(ee_data, ee_data+16, 8) */ for (i = 0; i < 8; i ++) if (ee_data[i] != ee_data[16+i]) sa_offset = 20; if (chip_idx == CONEXANT) { /* Check that the tuple type and length is correct. */ if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6) sa_offset = 0x19A; } else if (ee_data[0] == 0xff && ee_data[1] == 0xff && ee_data[2] == 0) { sa_offset = 2; /* Grrr, damn Matrox boards. */ multiport_cnt = 4; } #ifdef CONFIG_MIPS_COBALT if ((pdev->bus->number == 0) && ((PCI_SLOT(pdev->devfn) == 7) || (PCI_SLOT(pdev->devfn) == 12))) { /* Cobalt MAC address in first EEPROM locations. */ sa_offset = 0; /* Ensure our media table fixup get's applied */ memcpy(ee_data + 16, ee_data, 8); } #endif #ifdef CONFIG_GSC /* Check to see if we have a broken srom */ if (ee_data[0] == 0x61 && ee_data[1] == 0x10) { /* pci_vendor_id and subsystem_id are swapped */ ee_data[0] = ee_data[2]; ee_data[1] = ee_data[3]; ee_data[2] = 0x61; ee_data[3] = 0x10; /* HSC-PCI boards need to be byte-swaped and shifted * up 1 word. This shift needs to happen at the end * of the MAC first because of the 2 byte overlap. */ for (i = 4; i >= 0; i -= 2) { ee_data[17 + i + 3] = ee_data[17 + i]; ee_data[16 + i + 5] = ee_data[16 + i]; } } #endif for (i = 0; i < 6; i ++) { dev->dev_addr[i] = ee_data[i + sa_offset]; sum += ee_data[i + sa_offset]; } } /* Lite-On boards have the address byte-swapped. 
*/ if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02) && dev->dev_addr[1] == 0x00) for (i = 0; i < 6; i+=2) { char tmp = dev->dev_addr[i]; dev->dev_addr[i] = dev->dev_addr[i+1]; dev->dev_addr[i+1] = tmp; } /* On the Zynx 315 Etherarray and other multiport boards only the first Tulip has an EEPROM. On Sparc systems the mac address is held in the OBP property "local-mac-address". The addresses of the subsequent ports are derived from the first. Many PCI BIOSes also incorrectly report the IRQ line, so we correct that here as well. */ if (sum == 0 || sum == 6*0xff) { #if defined(CONFIG_SPARC) struct device_node *dp = pci_device_to_OF_node(pdev); const unsigned char *addr; int len; #endif eeprom_missing = 1; for (i = 0; i < 5; i++) dev->dev_addr[i] = last_phys_addr[i]; dev->dev_addr[i] = last_phys_addr[i] + 1; #if defined(CONFIG_SPARC) addr = of_get_property(dp, "local-mac-address", &len); if (addr && len == 6) memcpy(dev->dev_addr, addr, 6); #endif #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ if (last_irq) irq = last_irq; #endif } for (i = 0; i < 6; i++) last_phys_addr[i] = dev->dev_addr[i]; last_irq = irq; dev->irq = irq; /* The lower four bits are the media type. 
*/ if (board_idx >= 0 && board_idx < MAX_UNITS) { if (options[board_idx] & MEDIA_MASK) tp->default_port = options[board_idx] & MEDIA_MASK; if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0) tp->full_duplex = 1; if (mtu[board_idx] > 0) dev->mtu = mtu[board_idx]; } if (dev->mem_start & MEDIA_MASK) tp->default_port = dev->mem_start & MEDIA_MASK; if (tp->default_port) { pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n", board_idx, medianame[tp->default_port & MEDIA_MASK]); tp->medialock = 1; if (tulip_media_cap[tp->default_port] & MediaAlwaysFD) tp->full_duplex = 1; } if (tp->full_duplex) tp->full_duplex_lock = 1; if (tulip_media_cap[tp->default_port] & MediaIsMII) { u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 }; tp->mii_advertise = media2advert[tp->default_port - 9]; tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */ } if (tp->flags & HAS_MEDIA_TABLE) { sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */ tulip_parse_eeprom(dev); strcpy(dev->name, "eth%d"); /* un-hack */ } if ((tp->flags & ALWAYS_CHECK_MII) || (tp->mtable && tp->mtable->has_mii) || ( ! tp->mtable && (tp->flags & HAS_MII))) { if (tp->mtable && tp->mtable->has_mii) { for (i = 0; i < tp->mtable->leafcount; i++) if (tp->mtable->mleaf[i].media == 11) { tp->cur_index = i; tp->saved_if_port = dev->if_port; tulip_select_media(dev, 2); dev->if_port = tp->saved_if_port; break; } } /* Find the connected MII xcvrs. Doing this in open() would allow detecting external xcvrs later, but takes much time. */ tulip_find_mii (dev, board_idx); } /* The Tulip-specific entries in the device structure. 
*/ dev->netdev_ops = &tulip_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; #ifdef CONFIG_TULIP_NAPI netif_napi_add(dev, &tp->napi, tulip_poll, 16); #endif SET_ETHTOOL_OPS(dev, &ops); if (register_netdev(dev)) goto err_out_free_ring; pci_set_drvdata(pdev, dev); dev_info(&dev->dev, #ifdef CONFIG_TULIP_MMIO "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n", #else "%s rev %d at Port %#llx,%s %pM, IRQ %d\n", #endif chip_name, pdev->revision, (unsigned long long)pci_resource_start(pdev, TULIP_BAR), eeprom_missing ? " EEPROM not present," : "", dev->dev_addr, irq); if (tp->chip_id == PNIC2) tp->link_change = pnic2_lnk_change; else if (tp->flags & HAS_NWAY) tp->link_change = t21142_lnk_change; else if (tp->flags & HAS_PNICNWAY) tp->link_change = pnic_lnk_change; /* Reset the xcvr interface and turn on heartbeat. */ switch (chip_idx) { case DC21140: case DM910X: default: if (tp->mtable) iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12); break; case DC21142: if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) { iowrite32(csr6_mask_defstate, ioaddr + CSR6); iowrite32(0x0000, ioaddr + CSR13); iowrite32(0x0000, ioaddr + CSR14); iowrite32(csr6_mask_hdcap, ioaddr + CSR6); } else t21142_start_nway(dev); break; case PNIC2: /* just do a reset for sanity sake */ iowrite32(0x0000, ioaddr + CSR13); iowrite32(0x0000, ioaddr + CSR14); break; case LC82C168: if ( ! tp->mii_cnt) { tp->nway = 1; tp->nwayset = 0; iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6); iowrite32(0x30, ioaddr + CSR12); iowrite32(0x0001F078, ioaddr + CSR6); iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */ } break; case MX98713: case COMPEX9881: iowrite32(0x00000000, ioaddr + CSR6); iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */ iowrite32(0x00000001, ioaddr + CSR13); break; case MX98715: case MX98725: iowrite32(0x01a80000, ioaddr + CSR6); iowrite32(0xFFFFFFFF, ioaddr + CSR14); iowrite32(0x00001000, ioaddr + CSR12); break; case COMET: /* No initialization necessary. 
*/ break; } /* put the chip in snooze mode until opened */ tulip_set_power_state (tp, 0, 1); return 0; err_out_free_ring: pci_free_consistent (pdev, sizeof (struct tulip_rx_desc) * RX_RING_SIZE + sizeof (struct tulip_tx_desc) * TX_RING_SIZE, tp->rx_ring, tp->rx_ring_dma); err_out_mtable: kfree (tp->mtable); pci_iounmap(pdev, ioaddr); err_out_free_res: pci_release_regions (pdev); err_out_free_netdev: free_netdev (dev); return -ENODEV; } #ifdef CONFIG_PM static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); if (!dev) return -EINVAL; if (!netif_running(dev)) goto save_state; tulip_down(dev); netif_device_detach(dev); free_irq(dev->irq, dev); save_state: pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int tulip_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); int retval; if (!dev) return -EINVAL; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); if (!netif_running(dev)) return 0; if ((retval = pci_enable_device(pdev))) { pr_err(PFX "pci_enable_device failed in resume\n"); return retval; } if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) { pr_err(PFX "request_irq failed in resume\n"); return retval; } netif_device_attach(dev); if (netif_running(dev)) tulip_up(dev); return 0; } #endif /* CONFIG_PM */ static void __devexit tulip_remove_one (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); struct tulip_private *tp; if (!dev) return; tp = netdev_priv(dev); unregister_netdev(dev); pci_free_consistent (pdev, sizeof (struct tulip_rx_desc) * RX_RING_SIZE + sizeof (struct tulip_tx_desc) * TX_RING_SIZE, tp->rx_ring, tp->rx_ring_dma); kfree (tp->mtable); pci_iounmap(pdev, tp->base_addr); free_netdev (dev); pci_release_regions (pdev); pci_set_drvdata (pdev, NULL); /* pci_power_off (pdev, -1); */ } #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 
'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ static void poll_tulip (struct net_device *dev) { /* disable_irq here is not very nice, but with the lockless interrupt handler we have no other choice. */ disable_irq(dev->irq); tulip_interrupt (dev->irq, dev); enable_irq(dev->irq); } #endif static struct pci_driver tulip_driver = { .name = DRV_NAME, .id_table = tulip_pci_tbl, .probe = tulip_init_one, .remove = __devexit_p(tulip_remove_one), #ifdef CONFIG_PM .suspend = tulip_suspend, .resume = tulip_resume, #endif /* CONFIG_PM */ }; static int __init tulip_init (void) { #ifdef MODULE pr_info("%s", version); #endif /* copy module parms into globals */ tulip_rx_copybreak = rx_copybreak; tulip_max_interrupt_work = max_interrupt_work; /* probe for and init boards */ return pci_register_driver(&tulip_driver); } static void __exit tulip_cleanup (void) { pci_unregister_driver (&tulip_driver); } module_init(tulip_init); module_exit(tulip_cleanup);
gpl-2.0
ThinkingBridge/platform_kernel_lge_hammerhead
arch/arm/mach-msm/qdsp6v2/pcm_in.c
1016
11723
/* * Copyright (C) 2009 Google, Inc. * Copyright (C) 2009 HTC Corporation * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/fs.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/msm_audio.h> #include <linux/pm_qos.h> #include <asm/atomic.h> #include <mach/debug_mm.h> #include <mach/qdsp6v2/audio_dev_ctl.h> #include <sound/q6asm.h> #include <sound/apr_audio.h> #include <linux/wakelock.h> #include <mach/cpuidle.h> #define MAX_BUF 4 #define BUFSZ (480 * 8) #define BUFFER_SIZE_MULTIPLE 4 #define MIN_BUFFER_SIZE 160 #define VOC_REC_NONE 0xFF struct pcm { struct mutex lock; struct mutex read_lock; wait_queue_head_t wait; spinlock_t dsp_lock; struct audio_client *ac; uint32_t sample_rate; uint32_t channel_count; uint32_t buffer_size; uint32_t buffer_count; uint32_t rec_mode; uint32_t in_frame_info[MAX_BUF][2]; atomic_t in_count; atomic_t in_enabled; atomic_t in_opened; atomic_t in_stopped; struct wake_lock wakelock; struct pm_qos_request pm_qos_req; }; static void pcm_in_get_dsp_buffers(struct pcm*, uint32_t token, uint32_t *payload); void pcm_in_cb(uint32_t opcode, uint32_t token, uint32_t *payload, void *priv) { struct pcm *pcm = (struct pcm *) priv; unsigned long flags; spin_lock_irqsave(&pcm->dsp_lock, flags); switch (opcode) { case ASM_DATA_EVENT_READ_DONE: pcm_in_get_dsp_buffers(pcm, token, payload); break; case 
RESET_EVENTS: reset_device(); break; default: break; } spin_unlock_irqrestore(&pcm->dsp_lock, flags); } static void pcm_in_prevent_sleep(struct pcm *audio) { pr_debug("%s:\n", __func__); wake_lock(&audio->wakelock); pm_qos_update_request(&audio->pm_qos_req, msm_cpuidle_get_deep_idle_latency()); } static void pcm_in_allow_sleep(struct pcm *audio) { pr_debug("%s:\n", __func__); pm_qos_update_request(&audio->pm_qos_req, PM_QOS_DEFAULT_VALUE); wake_unlock(&audio->wakelock); } static void pcm_in_get_dsp_buffers(struct pcm *pcm, uint32_t token, uint32_t *payload) { pcm->in_frame_info[token][0] = payload[7]; pcm->in_frame_info[token][1] = payload[3]; if (atomic_read(&pcm->in_count) <= pcm->buffer_count) atomic_inc(&pcm->in_count); wake_up(&pcm->wait); } static int pcm_in_enable(struct pcm *pcm) { if (atomic_read(&pcm->in_enabled)) return 0; return q6asm_run(pcm->ac, 0, 0, 0); } static int pcm_in_disable(struct pcm *pcm) { int rc = 0; if (atomic_read(&pcm->in_opened)) { atomic_set(&pcm->in_enabled, 0); atomic_set(&pcm->in_opened, 0); rc = q6asm_cmd(pcm->ac, CMD_CLOSE); atomic_set(&pcm->in_stopped, 1); memset(pcm->in_frame_info, 0, sizeof(char) * pcm->buffer_count * 2); wake_up(&pcm->wait); } return rc; } static int config(struct pcm *pcm) { int rc = 0; pr_debug("%s: pcm prefill, buffer_size = %d\n", __func__, pcm->buffer_size); rc = q6asm_audio_client_buf_alloc(OUT, pcm->ac, pcm->buffer_size, pcm->buffer_count); if (rc < 0) { pr_err("Audio Start: Buffer Allocation failed \ rc = %d\n", rc); goto fail; } rc = q6asm_enc_cfg_blk_pcm(pcm->ac, pcm->sample_rate, pcm->channel_count); if (rc < 0) { pr_err("%s: cmd media format block failed", __func__); goto fail; } fail: return rc; } static long pcm_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct pcm *pcm = file->private_data; int rc = 0; mutex_lock(&pcm->lock); switch (cmd) { case AUDIO_SET_VOLUME: break; case AUDIO_GET_STATS: { struct msm_audio_stats stats; memset(&stats, 0, sizeof(stats)); if 
(copy_to_user((void *) arg, &stats, sizeof(stats))) rc = -EFAULT; break; } case AUDIO_START: { int cnt = 0; if (atomic_read(&pcm->in_enabled)) { pr_info("%s:AUDIO_START already over\n", __func__); rc = 0; break; } rc = config(pcm); if (rc) { pr_err("%s: IN Configuration failed\n", __func__); rc = -EFAULT; break; } rc = pcm_in_enable(pcm); if (rc) { pr_err("%s: In Enable failed\n", __func__); rc = -EFAULT; break; } pcm_in_prevent_sleep(pcm); atomic_set(&pcm->in_enabled, 1); while (cnt++ < pcm->buffer_count) q6asm_read(pcm->ac); pr_info("%s: AUDIO_START session id[%d]\n", __func__, pcm->ac->session); if (pcm->rec_mode != VOC_REC_NONE) msm_enable_incall_recording(pcm->ac->session, pcm->rec_mode, pcm->sample_rate, pcm->channel_count); break; } case AUDIO_GET_SESSION_ID: { if (copy_to_user((void *) arg, &pcm->ac->session, sizeof(unsigned short))) rc = -EFAULT; break; } case AUDIO_STOP: break; case AUDIO_FLUSH: break; case AUDIO_SET_CONFIG: { struct msm_audio_config config; if (copy_from_user(&config, (void *) arg, sizeof(config))) { rc = -EFAULT; break; } pr_debug("%s: SET_CONFIG: buffer_size:%d channel_count:%d" "sample_rate:%d, buffer_count:%d\n", __func__, config.buffer_size, config.channel_count, config.sample_rate, config.buffer_count); if (!config.channel_count || config.channel_count > 2) { rc = -EINVAL; break; } if (config.sample_rate < 8000 || config.sample_rate > 48000) { rc = -EINVAL; break; } if ((config.buffer_size % (config.channel_count * BUFFER_SIZE_MULTIPLE)) || (config.buffer_size < MIN_BUFFER_SIZE)) { pr_err("%s: Buffer Size should be multiple of " "[4 * no. 
of channels] and greater than 160\n", __func__); rc = -EINVAL; break; } pcm->sample_rate = config.sample_rate; pcm->channel_count = config.channel_count; pcm->buffer_size = config.buffer_size; pcm->buffer_count = config.buffer_count; break; } case AUDIO_GET_CONFIG: { struct msm_audio_config config; config.buffer_size = pcm->buffer_size; config.buffer_count = pcm->buffer_count; config.sample_rate = pcm->sample_rate; config.channel_count = pcm->channel_count; config.unused[0] = 0; config.unused[1] = 0; config.unused[2] = 0; if (copy_to_user((void *) arg, &config, sizeof(config))) rc = -EFAULT; break; } case AUDIO_ENABLE_AUDPRE: { uint16_t enable_mask; if (copy_from_user(&enable_mask, (void *) arg, sizeof(enable_mask))) { rc = -EFAULT; break; } if (enable_mask & FLUENCE_ENABLE) rc = auddev_cfg_tx_copp_topology(pcm->ac->session, VPM_TX_DM_FLUENCE_COPP_TOPOLOGY); else rc = auddev_cfg_tx_copp_topology(pcm->ac->session, DEFAULT_COPP_TOPOLOGY); break; } case AUDIO_SET_INCALL: { if (copy_from_user(&pcm->rec_mode, (void *) arg, sizeof(pcm->rec_mode))) { rc = -EFAULT; pr_err("%s: Error copying in-call mode\n", __func__); break; } if (pcm->rec_mode != VOC_REC_UPLINK && pcm->rec_mode != VOC_REC_DOWNLINK && pcm->rec_mode != VOC_REC_BOTH) { rc = -EINVAL; pcm->rec_mode = VOC_REC_NONE; pr_err("%s: Invalid %d in-call rec_mode\n", __func__, pcm->rec_mode); break; } pr_debug("%s: In-call rec_mode %d\n", __func__, pcm->rec_mode); break; } default: rc = -EINVAL; break; } mutex_unlock(&pcm->lock); return rc; } static int pcm_in_open(struct inode *inode, struct file *file) { struct pcm *pcm; int rc = 0; char name[24]; pcm = kzalloc(sizeof(struct pcm), GFP_KERNEL); if (!pcm) return -ENOMEM; pcm->channel_count = 1; pcm->sample_rate = 8000; pcm->buffer_size = BUFSZ; pcm->buffer_count = MAX_BUF; pcm->ac = q6asm_audio_client_alloc((app_cb)pcm_in_cb, (void *)pcm); if (!pcm->ac) { pr_err("%s: Could not allocate memory\n", __func__); rc = -ENOMEM; goto fail; } mutex_init(&pcm->lock); 
mutex_init(&pcm->read_lock); spin_lock_init(&pcm->dsp_lock); init_waitqueue_head(&pcm->wait); rc = q6asm_open_read(pcm->ac, FORMAT_LINEAR_PCM); if (rc < 0) { pr_err("%s: Cmd Open Failed\n", __func__); goto fail; } atomic_set(&pcm->in_stopped, 0); atomic_set(&pcm->in_enabled, 0); atomic_set(&pcm->in_count, 0); atomic_set(&pcm->in_opened, 1); snprintf(name, sizeof name, "pcm_in_%x", pcm->ac->session); wake_lock_init(&pcm->wakelock, WAKE_LOCK_SUSPEND, name); snprintf(name, sizeof name, "pcm_in_idle_%x", pcm->ac->session); pm_qos_add_request(&pcm->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); pcm->rec_mode = VOC_REC_NONE; file->private_data = pcm; pr_info("%s: pcm in open session id[%d]\n", __func__, pcm->ac->session); return 0; fail: if (pcm->ac) q6asm_audio_client_free(pcm->ac); kfree(pcm); return rc; } static ssize_t pcm_in_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct pcm *pcm = file->private_data; const char __user *start = buf; void *data; uint32_t offset = 0; uint32_t size = 0; uint32_t idx; int rc = 0; int len = 0; if (!atomic_read(&pcm->in_enabled)) return -EFAULT; mutex_lock(&pcm->read_lock); while (count > 0) { rc = wait_event_timeout(pcm->wait, (atomic_read(&pcm->in_count) || atomic_read(&pcm->in_stopped)), 5 * HZ); if (!rc) { pr_err("%s: wait_event_timeout failed\n", __func__); goto fail; } if (atomic_read(&pcm->in_stopped) && !atomic_read(&pcm->in_count)) { mutex_unlock(&pcm->read_lock); return 0; } data = q6asm_is_cpu_buf_avail(OUT, pcm->ac, &size, &idx); if (count >= size) len = size; else { len = count; pr_err("%s: short read data[%p]bytesavail[%d]" "bytesrequest[%d]" "bytesrejected%d]\n",\ __func__, data, size, count, (size - count)); } if ((len) && data) { offset = pcm->in_frame_info[idx][1]; if (copy_to_user(buf, data+offset, len)) { pr_err("%s copy_to_user failed len[%d]\n", __func__, len); rc = -EFAULT; goto fail; } count -= len; buf += len; } atomic_dec(&pcm->in_count); memset(&pcm->in_frame_info[idx], 
0, sizeof(uint32_t) * 2); rc = q6asm_read(pcm->ac); if (rc < 0) { pr_err("%s q6asm_read fail\n", __func__); goto fail; } rmb(); break; } rc = buf-start; fail: mutex_unlock(&pcm->read_lock); return rc; } static int pcm_in_release(struct inode *inode, struct file *file) { int rc = 0; struct pcm *pcm = file->private_data; pr_info("[%s:%s] release session id[%d]\n", __MM_FILE__, __func__, pcm->ac->session); mutex_lock(&pcm->lock); if ((pcm->rec_mode != VOC_REC_NONE) && atomic_read(&pcm->in_enabled)) { msm_disable_incall_recording(pcm->ac->session, pcm->rec_mode); pcm->rec_mode = VOC_REC_NONE; } /* remove this session from topology list */ auddev_cfg_tx_copp_topology(pcm->ac->session, DEFAULT_COPP_TOPOLOGY); mutex_unlock(&pcm->lock); rc = pcm_in_disable(pcm); msm_clear_session_id(pcm->ac->session); q6asm_audio_client_free(pcm->ac); pcm_in_allow_sleep(pcm); wake_lock_destroy(&pcm->wakelock); pm_qos_remove_request(&pcm->pm_qos_req); kfree(pcm); return rc; } static const struct file_operations pcm_in_fops = { .owner = THIS_MODULE, .open = pcm_in_open, .read = pcm_in_read, .release = pcm_in_release, .unlocked_ioctl = pcm_in_ioctl, }; struct miscdevice pcm_in_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_pcm_in", .fops = &pcm_in_fops, }; static int __init pcm_in_init(void) { return misc_register(&pcm_in_misc); } device_initcall(pcm_in_init);
gpl-2.0
KangBangKreations/KangBanged-Kernel
drivers/net/wireless/wl1251/sdio.c
1016
8573
/* * wl12xx SDIO routines * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * Copyright (C) 2005 Texas Instruments Incorporated * Copyright (C) 2008 Google Inc * Copyright (C) 2009 Bob Copeland (me@bobcopeland.com) */ #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/platform_device.h> #include <linux/wl12xx.h> #include <linux/irq.h> #include <linux/pm_runtime.h> #include "wl1251.h" #ifndef SDIO_VENDOR_ID_TI #define SDIO_VENDOR_ID_TI 0x104c #endif #ifndef SDIO_DEVICE_ID_TI_WL1251 #define SDIO_DEVICE_ID_TI_WL1251 0x9066 #endif struct wl1251_sdio { struct sdio_func *func; u32 elp_val; }; static struct sdio_func *wl_to_func(struct wl1251 *wl) { struct wl1251_sdio *wl_sdio = wl->if_priv; return wl_sdio->func; } static void wl1251_sdio_interrupt(struct sdio_func *func) { struct wl1251 *wl = sdio_get_drvdata(func); wl1251_debug(DEBUG_IRQ, "IRQ"); /* FIXME should be synchronous for sdio */ ieee80211_queue_work(wl->hw, &wl->irq_work); } static const struct sdio_device_id wl1251_devices[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1251) }, {} }; MODULE_DEVICE_TABLE(sdio, wl1251_devices); static void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len) { int ret; struct sdio_func *func = wl_to_func(wl); sdio_claim_host(func); ret = sdio_memcpy_fromio(func, buf, addr, len); if 
(ret) wl1251_error("sdio read failed (%d)", ret); sdio_release_host(func); } static void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len) { int ret; struct sdio_func *func = wl_to_func(wl); sdio_claim_host(func); ret = sdio_memcpy_toio(func, addr, buf, len); if (ret) wl1251_error("sdio write failed (%d)", ret); sdio_release_host(func); } static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val) { int ret = 0; struct wl1251_sdio *wl_sdio = wl->if_priv; struct sdio_func *func = wl_sdio->func; /* * The hardware only supports RAW (read after write) access for * reading, regular sdio_readb won't work here (it interprets * the unused bits of CMD52 as write data even if we send read * request). */ sdio_claim_host(func); *val = sdio_writeb_readb(func, wl_sdio->elp_val, addr, &ret); sdio_release_host(func); if (ret) wl1251_error("sdio_readb failed (%d)", ret); } static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val) { int ret = 0; struct wl1251_sdio *wl_sdio = wl->if_priv; struct sdio_func *func = wl_sdio->func; sdio_claim_host(func); sdio_writeb(func, val, addr, &ret); sdio_release_host(func); if (ret) wl1251_error("sdio_writeb failed (%d)", ret); else wl_sdio->elp_val = val; } static void wl1251_sdio_reset(struct wl1251 *wl) { } static void wl1251_sdio_enable_irq(struct wl1251 *wl) { struct sdio_func *func = wl_to_func(wl); sdio_claim_host(func); sdio_claim_irq(func, wl1251_sdio_interrupt); sdio_release_host(func); } static void wl1251_sdio_disable_irq(struct wl1251 *wl) { struct sdio_func *func = wl_to_func(wl); sdio_claim_host(func); sdio_release_irq(func); sdio_release_host(func); } /* Interrupts when using dedicated WLAN_IRQ pin */ static irqreturn_t wl1251_line_irq(int irq, void *cookie) { struct wl1251 *wl = cookie; ieee80211_queue_work(wl->hw, &wl->irq_work); return IRQ_HANDLED; } static void wl1251_enable_line_irq(struct wl1251 *wl) { return enable_irq(wl->irq); } static void wl1251_disable_line_irq(struct wl1251 
*wl) { return disable_irq(wl->irq); } static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable) { struct sdio_func *func = wl_to_func(wl); int ret; if (enable) { /* * Power is controlled by runtime PM, but we still call board * callback in case it wants to do any additional setup, * for example enabling clock buffer for the module. */ if (wl->set_power) wl->set_power(true); ret = pm_runtime_get_sync(&func->dev); if (ret < 0) goto out; sdio_claim_host(func); sdio_enable_func(func); sdio_release_host(func); } else { sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); ret = pm_runtime_put_sync(&func->dev); if (ret < 0) goto out; if (wl->set_power) wl->set_power(false); } out: return ret; } static struct wl1251_if_operations wl1251_sdio_ops = { .read = wl1251_sdio_read, .write = wl1251_sdio_write, .write_elp = wl1251_sdio_write_elp, .read_elp = wl1251_sdio_read_elp, .reset = wl1251_sdio_reset, .power = wl1251_sdio_set_power, }; static int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { int ret; struct wl1251 *wl; struct ieee80211_hw *hw; struct wl1251_sdio *wl_sdio; const struct wl12xx_platform_data *wl12xx_board_data; hw = wl1251_alloc_hw(); if (IS_ERR(hw)) return PTR_ERR(hw); wl = hw->priv; wl_sdio = kzalloc(sizeof(*wl_sdio), GFP_KERNEL); if (wl_sdio == NULL) { ret = -ENOMEM; goto out_free_hw; } sdio_claim_host(func); ret = sdio_enable_func(func); if (ret) goto release; sdio_set_block_size(func, 512); sdio_release_host(func); SET_IEEE80211_DEV(hw, &func->dev); wl_sdio->func = func; wl->if_priv = wl_sdio; wl->if_ops = &wl1251_sdio_ops; wl12xx_board_data = wl12xx_get_platform_data(); if (!IS_ERR(wl12xx_board_data)) { wl->set_power = wl12xx_board_data->set_power; wl->irq = wl12xx_board_data->irq; wl->use_eeprom = wl12xx_board_data->use_eeprom; } if (wl->irq) { irq_set_status_flags(wl->irq, IRQ_NOAUTOEN); ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl); if (ret < 0) { wl1251_error("request_irq() 
failed: %d", ret); goto disable; } irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq; wl1251_info("using dedicated interrupt line"); } else { wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq; wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq; wl1251_info("using SDIO interrupt"); } ret = wl1251_init_ieee80211(wl); if (ret) goto out_free_irq; sdio_set_drvdata(func, wl); /* Tell PM core that we don't need the card to be powered now */ pm_runtime_put_noidle(&func->dev); return ret; out_free_irq: if (wl->irq) free_irq(wl->irq, wl); disable: sdio_claim_host(func); sdio_disable_func(func); release: sdio_release_host(func); kfree(wl_sdio); out_free_hw: wl1251_free_hw(wl); return ret; } static void __devexit wl1251_sdio_remove(struct sdio_func *func) { struct wl1251 *wl = sdio_get_drvdata(func); struct wl1251_sdio *wl_sdio = wl->if_priv; /* Undo decrement done above in wl1251_probe */ pm_runtime_get_noresume(&func->dev); if (wl->irq) free_irq(wl->irq, wl); wl1251_free_hw(wl); kfree(wl_sdio); sdio_claim_host(func); sdio_release_irq(func); sdio_disable_func(func); sdio_release_host(func); } static int wl1251_suspend(struct device *dev) { /* * Tell MMC/SDIO core it's OK to power down the card * (if it isn't already), but not to remove it completely. 
*/ return 0; } static int wl1251_resume(struct device *dev) { return 0; } static const struct dev_pm_ops wl1251_sdio_pm_ops = { .suspend = wl1251_suspend, .resume = wl1251_resume, }; static struct sdio_driver wl1251_sdio_driver = { .name = "wl1251_sdio", .id_table = wl1251_devices, .probe = wl1251_sdio_probe, .remove = __devexit_p(wl1251_sdio_remove), .drv.pm = &wl1251_sdio_pm_ops, }; static int __init wl1251_sdio_init(void) { int err; err = sdio_register_driver(&wl1251_sdio_driver); if (err) wl1251_error("failed to register sdio driver: %d", err); return err; } static void __exit wl1251_sdio_exit(void) { sdio_unregister_driver(&wl1251_sdio_driver); wl1251_notice("unloaded"); } module_init(wl1251_sdio_init); module_exit(wl1251_sdio_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
gpl-2.0
JustBeYou/linux
arch/x86/crypto/des3_ede_glue.c
1272
12759
/* * Glue Code for assembler optimized version of 3DES * * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * CTR part based on code (crypto/ctr.c) by: * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <asm/processor.h> #include <crypto/des.h> #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <crypto/algapi.h> struct des3_ede_x86_ctx { u32 enc_expkey[DES3_EDE_EXPKEY_WORDS]; u32 dec_expkey[DES3_EDE_EXPKEY_WORDS]; }; /* regular block cipher functions */ asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst, const u8 *src); /* 3-way parallel cipher functions */ asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst, const u8 *src); static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *enc_ctx = ctx->enc_expkey; des3_ede_x86_64_crypt_blk(enc_ctx, dst, src); } static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *dec_ctx = ctx->dec_expkey; des3_ede_x86_64_crypt_blk(dec_ctx, dst, src); } static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *enc_ctx = ctx->enc_expkey; des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src); } static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 
*src) { u32 *dec_ctx = ctx->dec_expkey; des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src); } static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src); } static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src); } static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, const u32 *expkey) { unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes; int err; err = blkcipher_walk_virt(desc, walk); while ((nbytes = walk->nbytes)) { u8 *wsrc = walk->src.virt.addr; u8 *wdst = walk->dst.virt.addr; /* Process four block batch */ if (nbytes >= bsize * 3) { do { des3_ede_x86_64_crypt_blk_3way(expkey, wdst, wsrc); wsrc += bsize * 3; wdst += bsize * 3; nbytes -= bsize * 3; } while (nbytes >= bsize * 3); if (nbytes < bsize) goto done; } /* Handle leftovers */ do { des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc); wsrc += bsize; wdst += bsize; nbytes -= bsize; } while (nbytes >= bsize); done: err = blkcipher_walk_done(desc, walk, nbytes); } return err; } static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_crypt(desc, &walk, ctx->enc_expkey); } static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); return ecb_crypt(desc, &walk, ctx->dec_expkey); } static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = 
(u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 *iv = (u64 *)walk->iv; do { *dst = *src ^ *iv; des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst); iv = dst; src += 1; dst += 1; nbytes -= bsize; } while (nbytes >= bsize); *(u64 *)walk->iv = *iv; return nbytes; } static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { nbytes = __cbc_encrypt(desc, &walk); err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = (u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 ivs[3 - 1]; u64 last_iv; /* Start of the last block. 
*/ src += nbytes / bsize - 1; dst += nbytes / bsize - 1; last_iv = *src; /* Process three block batch (3-way assembler implementation) */ if (nbytes >= bsize * 3) { do { nbytes -= bsize * 3 - bsize; src -= 3 - 1; dst -= 3 - 1; ivs[0] = src[0]; ivs[1] = src[1]; des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); dst[1] ^= ivs[0]; dst[2] ^= ivs[1]; nbytes -= bsize; if (nbytes < bsize) goto done; *dst ^= *(src - 1); src -= 1; dst -= 1; } while (nbytes >= bsize * 3); } /* Handle leftovers one block at a time */ for (;;) { des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src); nbytes -= bsize; if (nbytes < bsize) break; *dst ^= *(src - 1); src -= 1; dst -= 1; } done: *dst ^= *(u64 *)walk->iv; *(u64 *)walk->iv = last_iv; return nbytes; } static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { nbytes = __cbc_decrypt(desc, &walk); err = blkcipher_walk_done(desc, &walk, nbytes); } return err; } static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx, struct blkcipher_walk *walk) { u8 *ctrblk = walk->iv; u8 keystream[DES3_EDE_BLOCK_SIZE]; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; des3_ede_enc_blk(ctx, keystream, ctrblk); crypto_xor(keystream, src, nbytes); memcpy(dst, keystream, nbytes); crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE); } static unsigned int __ctr_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; __be64 *src = (__be64 *)walk->src.virt.addr; __be64 *dst = (__be64 *)walk->dst.virt.addr; u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); __be64 ctrblocks[3]; /* Process three block batch (3-way assembler implementation) */ if (nbytes >= bsize * 3) { do { /* create ctrblks for parallel encrypt */ ctrblocks[0] = cpu_to_be64(ctrblk++); 
ctrblocks[1] = cpu_to_be64(ctrblk++); ctrblocks[2] = cpu_to_be64(ctrblk++); des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); dst[0] = src[0] ^ ctrblocks[0]; dst[1] = src[1] ^ ctrblocks[1]; dst[2] = src[2] ^ ctrblocks[2]; src += 3; dst += 3; } while ((nbytes -= bsize * 3) >= bsize * 3); if (nbytes < bsize) goto done; } /* Handle leftovers */ do { ctrblocks[0] = cpu_to_be64(ctrblk++); des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); dst[0] = src[0] ^ ctrblocks[0]; src += 1; dst += 1; } while ((nbytes -= bsize) >= bsize); done: *(__be64 *)walk->iv = cpu_to_be64(ctrblk); return nbytes; } static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE); while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) { nbytes = __ctr_crypt(desc, &walk); err = blkcipher_walk_done(desc, &walk, nbytes); } if (walk.nbytes) { ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); err = blkcipher_walk_done(desc, &walk, 0); } return err; } static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm); u32 i, j, tmp; int err; /* Generate encryption context using generic implementation. */ err = __des3_ede_setkey(ctx->enc_expkey, &tfm->crt_flags, key, keylen); if (err < 0) return err; /* Fix encryption context for this implementation and form decryption * context. 
*/ j = DES3_EDE_EXPKEY_WORDS - 2; for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) { tmp = ror32(ctx->enc_expkey[i + 1], 4); ctx->enc_expkey[i + 1] = tmp; ctx->dec_expkey[j + 0] = ctx->enc_expkey[i + 0]; ctx->dec_expkey[j + 1] = tmp; } return 0; } static struct crypto_alg des3_ede_algs[4] = { { .cra_name = "des3_ede", .cra_driver_name = "des3_ede-asm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = DES3_EDE_KEY_SIZE, .cia_max_keysize = DES3_EDE_KEY_SIZE, .cia_setkey = des3_ede_x86_setkey, .cia_encrypt = des3_ede_x86_encrypt, .cia_decrypt = des3_ede_x86_decrypt, } } }, { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3_ede-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = des3_ede_x86_setkey, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, }, }, { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-des3_ede-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = des3_ede_x86_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }, }, { .cra_name = "ctr(des3_ede)", .cra_driver_name = "ctr-des3_ede-asm", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .cra_alignmask = 0, .cra_type = 
&crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = des3_ede_x86_setkey, .encrypt = ctr_crypt, .decrypt = ctr_crypt, }, }, } }; static bool is_blacklisted_cpu(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return false; if (boot_cpu_data.x86 == 0x0f) { /* * On Pentium 4, des3_ede-x86_64 is slower than generic C * implementation because use of 64bit rotates (which are really * slow on P4). Therefore blacklist P4s. */ return true; } return false; } static int force; module_param(force, int, 0); MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist"); static int __init des3_ede_x86_init(void) { if (!force && is_blacklisted_cpu()) { pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n"); return -ENODEV; } return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs)); } static void __exit des3_ede_x86_fini(void) { crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs)); } module_init(des3_ede_x86_init); module_exit(des3_ede_x86_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); MODULE_ALIAS_CRYPTO("des3_ede"); MODULE_ALIAS_CRYPTO("des3_ede-asm"); MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
gpl-2.0
MoKee/android_kernel_samsung_piranha
arch/arm/plat-s3c24xx/cpu.c
2040
6338
/* linux/arch/arm/plat-s3c24xx/cpu.c * * Copyright (c) 2004-2005 Simtec Electronics * http://www.simtec.co.uk/products/SWLINUX/ * Ben Dooks <ben@simtec.co.uk> * * S3C24XX CPU Support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/cacheflush.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/system-reset.h> #include <mach/regs-gpio.h> #include <plat/regs-serial.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/clock.h> #include <plat/s3c2400.h> #include <plat/s3c2410.h> #include <plat/s3c2412.h> #include <plat/s3c2416.h> #include <plat/s3c244x.h> #include <plat/s3c2443.h> /* table of supported CPUs */ static const char name_s3c2400[] = "S3C2400"; static const char name_s3c2410[] = "S3C2410"; static const char name_s3c2412[] = "S3C2412"; static const char name_s3c2416[] = "S3C2416/S3C2450"; static const char name_s3c2440[] = "S3C2440"; static const char name_s3c2442[] = "S3C2442"; static const char name_s3c2442b[] = "S3C2442B"; static const char name_s3c2443[] = "S3C2443"; static const char name_s3c2410a[] = 
"S3C2410A"; static const char name_s3c2440a[] = "S3C2440A"; static struct cpu_table cpu_ids[] __initdata = { { .idcode = 0x32410000, .idmask = 0xffffffff, .map_io = s3c2410_map_io, .init_clocks = s3c2410_init_clocks, .init_uarts = s3c2410_init_uarts, .init = s3c2410_init, .name = name_s3c2410 }, { .idcode = 0x32410002, .idmask = 0xffffffff, .map_io = s3c2410_map_io, .init_clocks = s3c2410_init_clocks, .init_uarts = s3c2410_init_uarts, .init = s3c2410a_init, .name = name_s3c2410a }, { .idcode = 0x32440000, .idmask = 0xffffffff, .map_io = s3c2440_map_io, .init_clocks = s3c244x_init_clocks, .init_uarts = s3c244x_init_uarts, .init = s3c2440_init, .name = name_s3c2440 }, { .idcode = 0x32440001, .idmask = 0xffffffff, .map_io = s3c2440_map_io, .init_clocks = s3c244x_init_clocks, .init_uarts = s3c244x_init_uarts, .init = s3c2440_init, .name = name_s3c2440a }, { .idcode = 0x32440aaa, .idmask = 0xffffffff, .map_io = s3c2442_map_io, .init_clocks = s3c244x_init_clocks, .init_uarts = s3c244x_init_uarts, .init = s3c2442_init, .name = name_s3c2442 }, { .idcode = 0x32440aab, .idmask = 0xffffffff, .map_io = s3c2442_map_io, .init_clocks = s3c244x_init_clocks, .init_uarts = s3c244x_init_uarts, .init = s3c2442_init, .name = name_s3c2442b }, { .idcode = 0x32412001, .idmask = 0xffffffff, .map_io = s3c2412_map_io, .init_clocks = s3c2412_init_clocks, .init_uarts = s3c2412_init_uarts, .init = s3c2412_init, .name = name_s3c2412, }, { /* a newer version of the s3c2412 */ .idcode = 0x32412003, .idmask = 0xffffffff, .map_io = s3c2412_map_io, .init_clocks = s3c2412_init_clocks, .init_uarts = s3c2412_init_uarts, .init = s3c2412_init, .name = name_s3c2412, }, { /* a strange version of the s3c2416 */ .idcode = 0x32450003, .idmask = 0xffffffff, .map_io = s3c2416_map_io, .init_clocks = s3c2416_init_clocks, .init_uarts = s3c2416_init_uarts, .init = s3c2416_init, .name = name_s3c2416, }, { .idcode = 0x32443001, .idmask = 0xffffffff, .map_io = s3c2443_map_io, .init_clocks = s3c2443_init_clocks, 
.init_uarts = s3c2443_init_uarts, .init = s3c2443_init, .name = name_s3c2443, }, { .idcode = 0x0, /* S3C2400 doesn't have an idcode */ .idmask = 0xffffffff, .map_io = s3c2400_map_io, .init_clocks = s3c2400_init_clocks, .init_uarts = s3c2400_init_uarts, .init = s3c2400_init, .name = name_s3c2400 }, }; /* minimal IO mapping */ static struct map_desc s3c_iodesc[] __initdata = { IODESC_ENT(GPIO), IODESC_ENT(IRQ), IODESC_ENT(MEMCTRL), IODESC_ENT(UART) }; /* read cpu identification code */ static unsigned long s3c24xx_read_idcode_v5(void) { #if defined(CONFIG_CPU_S3C2416) /* s3c2416 is v5, with S3C24XX_GSTATUS1 instead of S3C2412_GSTATUS1 */ u32 gs = __raw_readl(S3C24XX_GSTATUS1); /* test for s3c2416 or similar device */ if ((gs >> 16) == 0x3245) return gs; #endif #if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) return __raw_readl(S3C2412_GSTATUS1); #else return 1UL; /* doesn't look like a 2400 */ #endif } static unsigned long s3c24xx_read_idcode_v4(void) { #ifndef CONFIG_CPU_S3C2400 return __raw_readl(S3C2410_GSTATUS1); #else return 0UL; #endif } /* Hook for arm_pm_restart to ensure we execute the reset code * with the caches enabled. It seems at least the S3C2440 has a problem * resetting if there is bus activity interrupted by the reset. 
*/ static void s3c24xx_pm_restart(char mode, const char *cmd) { if (mode != 's') { unsigned long flags; local_irq_save(flags); /* flush caches with IRQs off: interrupted bus activity during reset is problematic on at least the S3C2440 (see comment above) */ __cpuc_flush_kern_all(); __cpuc_flush_user_all(); arch_reset(mode, cmd); local_irq_restore(flags); } /* fallback, or unhandled ('s' == soft restart) */ arm_machine_restart(mode, cmd); } /* map the minimal IO regions, read the chip ID and dispatch to the matching per-SoC init via s3c_init_cpu() */ void __init s3c24xx_init_io(struct map_desc *mach_desc, int size) { unsigned long idcode = 0x0; /* initialise the io descriptors we need for initialisation */ iotable_init(mach_desc, size); iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc)); /* the ID register location differs between ARMv4 and ARMv5+ parts */ if (cpu_architecture() >= CPU_ARCH_ARMv5) { idcode = s3c24xx_read_idcode_v5(); } else { idcode = s3c24xx_read_idcode_v4(); } arm_pm_restart = s3c24xx_pm_restart; s3c_init_cpu(idcode, cpu_ids, ARRAY_SIZE(cpu_ids)); }
gpl-2.0
assusdan/cyanogenmod_kernel_hs_zerasrs
arch/microblaze/kernel/setup.c
2040
6385
/* * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/cpu.h> #include <linux/initrd.h> #include <linux/console.h> #include <linux/debugfs.h> #include <asm/setup.h> #include <asm/sections.h> #include <asm/page.h> #include <linux/io.h> #include <linux/bug.h> #include <linux/param.h> #include <linux/pci.h> #include <linux/cache.h> #include <linux/of_platform.h> #include <linux/dma-mapping.h> #include <asm/cacheflush.h> #include <asm/entry.h> #include <asm/cpuinfo.h> #include <asm/prom.h> #include <asm/pgtable.h> DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */ DEFINE_PER_CPU(unsigned int, KM); /* Kernel/user mode */ DEFINE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */ DEFINE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */ DEFINE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ unsigned int boot_cpuid; /* * Placed cmd_line to .data section because can be initialized from * ASM code. Default position is BSS section which is cleared * in machine_early_init(). 
*/ char cmd_line[COMMAND_LINE_SIZE] __attribute__ ((section(".data"))); void __init setup_arch(char **cmdline_p) { *cmdline_p = cmd_line; console_verbose(); unflatten_device_tree(); setup_cpuinfo(); microblaze_cache_init(); setup_memory(); #ifdef CONFIG_EARLY_PRINTK /* remap early console to virtual address */ remap_early_printk(); #endif xilinx_pci_init(); #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) pr_notice("Self modified code enable\n"); #endif #ifdef CONFIG_VT #if defined(CONFIG_XILINX_CONSOLE) conswitchp = &xil_con; #elif defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; #endif #endif } #ifdef CONFIG_MTD_UCLINUX /* Handle both romfs and cramfs types, without generating unnecessary code (ie no point checking for CRAMFS if it's not even enabled) */ inline unsigned get_romfs_len(unsigned *addr) { #ifdef CONFIG_ROMFS_FS if (memcmp(&addr[0], "-rom1fs-", 8) == 0) /* romfs */ return be32_to_cpu(addr[2]); #endif #ifdef CONFIG_CRAMFS if (addr[0] == le32_to_cpu(0x28cd3d45)) /* cramfs */ return le32_to_cpu(addr[1]); #endif return 0; } #endif /* CONFIG_MTD_UCLINUX_EBSS */ unsigned long kernel_tlb; void __init machine_early_init(const char *cmdline, unsigned int ram, unsigned int fdt, unsigned int msr, unsigned int tlb0, unsigned int tlb1) { unsigned long *src, *dst; unsigned int offset = 0; /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the * end of kernel. There are two position which we want to check. * The first is __init_end and the second __bss_start. */ #ifdef CONFIG_MTD_UCLINUX int romfs_size; unsigned int romfs_base; char *old_klimit = klimit; romfs_base = (ram ? 
ram : (unsigned int)&__init_end); romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); if (!romfs_size) { romfs_base = (unsigned int)&__bss_start; romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); } /* Move ROMFS out of BSS before clearing it */ if (romfs_size > 0) { memmove(&__bss_stop, (int *)romfs_base, romfs_size); klimit += romfs_size; } #endif /* clearing bss section */ memset(__bss_start, 0, __bss_stop-__bss_start); memset(_ssbss, 0, _esbss-_ssbss); lockdep_init(); /* initialize device tree for usage in early_printk */ early_init_devtree((void *)_fdt_start); #ifdef CONFIG_EARLY_PRINTK setup_early_printk(NULL); #endif /* setup kernel_tlb after BSS cleaning * Maybe worth to move to asm code */ kernel_tlb = tlb0 + tlb1; /* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0, tlb1, kernel_tlb); */ pr_info("Ramdisk addr 0x%08x, ", ram); if (fdt) pr_info("FDT at 0x%08x\n", fdt); else pr_info("Compiled-in FDT at 0x%08x\n", (unsigned int)_fdt_start); #ifdef CONFIG_MTD_UCLINUX pr_info("Found romfs @ 0x%08x (0x%08x)\n", romfs_base, romfs_size); pr_info("#### klimit %p ####\n", old_klimit); BUG_ON(romfs_size < 0); /* What else can we do? */ pr_info("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", romfs_size, romfs_base, (unsigned)&__bss_stop); pr_info("New klimit: 0x%08x\n", (unsigned)klimit); #endif #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR if (msr) { pr_info("!!!Your kernel has setup MSR instruction but "); pr_cont("CPU don't have it %x\n", msr); } #else if (!msr) { pr_info("!!!Your kernel not setup MSR instruction but "); /* fixed: opening parenthesis was missing after pr_cont, which would not compile when this #else branch is selected */ pr_cont("CPU have it %x\n", msr); } #endif /* Do not copy reset vectors. offset = 0x2 means skip the first * two instructions. dst is pointer to MB vectors which are placed * in block ram. 
If you want to copy reset vector setup offset to 0x0 */ #if !CONFIG_MANUAL_RESET_VECTOR offset = 0x2; #endif dst = (unsigned long *) (offset * sizeof(u32)); for (src = __ivt_start + offset; src < __ivt_end; src++, dst++) *dst = *src; /* Initialize global data */ per_cpu(KM, 0) = 0x1; /* We start in kernel mode */ per_cpu(CURRENT_SAVE, 0) = (unsigned long)current; } #ifdef CONFIG_DEBUG_FS struct dentry *of_debugfs_root; static int microblaze_debugfs_init(void) { of_debugfs_root = debugfs_create_dir("microblaze", NULL); return of_debugfs_root == NULL; } arch_initcall(microblaze_debugfs_init); # ifdef CONFIG_MMU static int __init debugfs_tlb(void) { struct dentry *d; if (!of_debugfs_root) return -ENODEV; d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip); if (!d) return -ENOMEM; return 0; } device_initcall(debugfs_tlb); # endif #endif static int dflt_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; /* We are only intereted in device addition */ if (action != BUS_NOTIFY_ADD_DEVICE) return 0; set_dma_ops(dev, &dma_direct_ops); return NOTIFY_DONE; } static struct notifier_block dflt_plat_bus_notifier = { .notifier_call = dflt_bus_notify, .priority = INT_MAX, }; static int __init setup_bus_notifier(void) { bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier); return 0; } arch_initcall(setup_bus_notifier);
gpl-2.0
halaszk/universal7420
arch/powerpc/kvm/e500_mmu_host.c
2040
17731
/* * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. * * Author: Yu Liu, yu.liu@freescale.com * Scott Wood, scottwood@freescale.com * Ashish Kalra, ashish.kalra@freescale.com * Varun Sethi, varun.sethi@freescale.com * Alexander Graf, agraf@suse.de * * Description: * This file is based on arch/powerpc/kvm/44x_tlb.c, * by Hollis Blanchard <hollisb@us.ibm.com>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <linux/log2.h> #include <linux/uaccess.h> #include <linux/sched.h> #include <linux/rwsem.h> #include <linux/vmalloc.h> #include <linux/hugetlb.h> #include <asm/kvm_ppc.h> #include "e500.h" #include "trace.h" #include "timing.h" #include "e500_mmu_host.h" #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; static inline unsigned int tlb1_max_shadow_size(void) { /* reserve one entry for magic page */ return host_tlb_params[1].entries - tlbcam_index - 1; } static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) { /* Mask off reserved bits. */ mas3 &= MAS3_ATTRIB_MASK; #ifndef CONFIG_KVM_BOOKE_HV if (!usermode) { /* Guest is in supervisor mode, * so we need to translate guest * supervisor permissions into user permissions. 
*/ mas3 &= ~E500_TLB_USER_PERM_MASK; mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; } mas3 |= E500_TLB_SUPER_PERM_MASK; #endif return mas3; } static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) { #ifdef CONFIG_SMP return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M; #else return mas2 & MAS2_ATTRIB_MASK; #endif } /* * writing shadow tlb entry to host TLB */ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, uint32_t mas0) { unsigned long flags; local_irq_save(flags); mtspr(SPRN_MAS0, mas0); mtspr(SPRN_MAS1, stlbe->mas1); mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); #ifdef CONFIG_KVM_BOOKE_HV mtspr(SPRN_MAS8, stlbe->mas8); #endif asm volatile("isync; tlbwe" : : : "memory"); #ifdef CONFIG_KVM_BOOKE_HV /* Must clear mas8 for other host tlbwe's */ mtspr(SPRN_MAS8, 0); isync(); #endif local_irq_restore(flags); trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, stlbe->mas2, stlbe->mas7_3); } /* * Acquire a mas0 with victim hint, as if we just took a TLB miss. * * We don't care about the address we're searching for, other than that it's * in the right set and is not present in the TLB. Using a zero PID and a * userspace address means we don't have to set and then restore MAS5, or * calculate a proper MAS6 value. 
*/ static u32 get_host_mas0(unsigned long eaddr) { unsigned long flags; u32 mas0; local_irq_save(flags); mtspr(SPRN_MAS6, 0); asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); mas0 = mfspr(SPRN_MAS0); local_irq_restore(flags); return mas0; } /* sesel is for tlb1 only */ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe) { u32 mas0; if (tlbsel == 0) { mas0 = get_host_mas0(stlbe->mas2); __write_host_tlbe(stlbe, mas0); } else { __write_host_tlbe(stlbe, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(sesel))); } } /* sesel is for tlb1 only */ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int stlbsel, int sesel) { int stid; preempt_disable(); stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); stlbe->mas1 |= MAS1_TID(stid); write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); preempt_enable(); } #ifdef CONFIG_KVM_E500V2 /* XXX should be a hook in the gva2hpa translation */ void kvmppc_map_magic(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct kvm_book3e_206_tlb_entry magic; ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; unsigned int stid; pfn_t pfn; pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT; get_page(pfn_to_page(pfn)); preempt_disable(); stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0); magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) | MAS1_TSIZE(BOOK3E_PAGESZ_4K); magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; magic.mas8 = 0; __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); preempt_enable(); } #endif void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel) { struct kvm_book3e_206_tlb_entry *gtlbe = get_entry(vcpu_e500, tlbsel, esel); struct tlbe_ref *ref = 
&vcpu_e500->gtlb_priv[tlbsel][esel].ref; /* Don't bother with unmapped entries */ if (!(ref->flags & E500_TLB_VALID)) { WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), "%s: flags %x\n", __func__, ref->flags); WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); } if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; int hw_tlb_indx; unsigned long flags; local_irq_save(flags); while (tmp) { hw_tlb_indx = __ilog2_u64(tmp & -tmp); mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(hw_tlb_indx))); mtspr(SPRN_MAS1, 0); asm volatile("tlbwe"); vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; tmp &= tmp - 1; } mb(); vcpu_e500->g2h_tlb1_map[esel] = 0; ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); local_irq_restore(flags); } if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { /* * TLB1 entry is backed by 4k pages. This should happen * rarely and is not worth optimizing. Invalidate everything. */ kvmppc_e500_tlbil_all(vcpu_e500); ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); } /* Already invalidated in between */ if (!(ref->flags & E500_TLB_VALID)) return; /* Guest tlbe is backed by at most one host tlbe per shadow pid. 
*/ kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); /* Mark the TLB as not backed by the host anymore */ ref->flags &= ~E500_TLB_VALID; } static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) { return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); } static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, struct kvm_book3e_206_tlb_entry *gtlbe, pfn_t pfn) { ref->pfn = pfn; ref->flags |= E500_TLB_VALID; if (tlbe_is_writable(gtlbe)) kvm_set_pfn_dirty(pfn); } static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) { if (ref->flags & E500_TLB_VALID) { /* FIXME: don't log bogus pfn for TLB1 */ trace_kvm_booke206_ref_release(ref->pfn, ref->flags); ref->flags = 0; } } static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) { if (vcpu_e500->g2h_tlb1_map) memset(vcpu_e500->g2h_tlb1_map, 0, sizeof(u64) * vcpu_e500->gtlb_params[1].entries); if (vcpu_e500->h2g_tlb1_rmap) memset(vcpu_e500->h2g_tlb1_rmap, 0, sizeof(unsigned int) * host_tlb_params[1].entries); } static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) { int tlbsel; int i; for (tlbsel = 0; tlbsel <= 1; tlbsel++) { for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][i].ref; kvmppc_e500_ref_release(ref); } } } void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); kvmppc_e500_tlbil_all(vcpu_e500); clear_tlb_privs(vcpu_e500); clear_tlb1_bitmap(vcpu_e500); } /* TID must be supplied by the caller */ static void kvmppc_e500_setup_stlbe( struct kvm_vcpu *vcpu, struct kvm_book3e_206_tlb_entry *gtlbe, int tsize, struct tlbe_ref *ref, u64 gvaddr, struct kvm_book3e_206_tlb_entry *stlbe) { pfn_t pfn = ref->pfn; u32 pr = vcpu->arch.shared->msr & MSR_PR; BUG_ON(!(ref->flags & E500_TLB_VALID)); /* Force IPROT=0 for all guest mappings. 
*/ stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; stlbe->mas2 = (gvaddr & MAS2_EPN) | e500_shadow_mas2_attrib(gtlbe->mas2, pr); stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); #ifdef CONFIG_KVM_BOOKE_HV stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid; #endif } static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, struct tlbe_ref *ref) { struct kvm_memory_slot *slot; unsigned long pfn = 0; /* silence GCC warning */ unsigned long hva; int pfnmap = 0; int tsize = BOOK3E_PAGESZ_4K; /* * Translate guest physical to true physical, acquiring * a page reference if it is normal, non-reserved memory. * * gfn_to_memslot() must succeed because otherwise we wouldn't * have gotten this far. Eventually we should just pass the slot * pointer through from the first lookup. */ slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); hva = gfn_to_hva_memslot(slot, gfn); if (tlbsel == 1) { struct vm_area_struct *vma; down_read(&current->mm->mmap_sem); vma = find_vma(current->mm, hva); if (vma && hva >= vma->vm_start && (vma->vm_flags & VM_PFNMAP)) { /* * This VMA is a physically contiguous region (e.g. * /dev/mem) that bypasses normal Linux page * management. Find the overlap between the * vma and the memslot. */ unsigned long start, end; unsigned long slot_start, slot_end; pfnmap = 1; start = vma->vm_pgoff; end = start + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); slot_start = pfn - (gfn - slot->base_gfn); slot_end = slot_start + slot->npages; if (start < slot_start) start = slot_start; if (end > slot_end) end = slot_end; tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; /* * e500 doesn't implement the lowest tsize bit, * or 1K pages. 
*/ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); /* * Now find the largest tsize (up to what the guest * requested) that will cover gfn, stay within the * range, and for which gfn and pfn are mutually * aligned. */ for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { unsigned long gfn_start, gfn_end, tsize_pages; tsize_pages = 1 << (tsize - 2); gfn_start = gfn & ~(tsize_pages - 1); gfn_end = gfn_start + tsize_pages; if (gfn_start + pfn - gfn < start) continue; if (gfn_end + pfn - gfn > end) continue; if ((gfn & (tsize_pages - 1)) != (pfn & (tsize_pages - 1))) continue; gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); pfn &= ~(tsize_pages - 1); break; } } else if (vma && hva >= vma->vm_start && (vma->vm_flags & VM_HUGETLB)) { unsigned long psize = vma_kernel_pagesize(vma); tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; /* * Take the largest page size that satisfies both host * and guest mapping */ tsize = min(__ilog2(psize) - 10, tsize); /* * e500 doesn't implement the lowest tsize bit, * or 1K pages. 
*/ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); } up_read(&current->mm->mmap_sem); } if (likely(!pfnmap)) { unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); pfn = gfn_to_pfn_memslot(slot, gfn); if (is_error_noslot_pfn(pfn)) { printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", (long)gfn); return -EINVAL; } /* Align guest and physical address to page map boundaries */ pfn &= ~(tsize_pages - 1); gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); } kvmppc_e500_ref_setup(ref, gtlbe, pfn); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe); /* Clear i-cache for new pages */ kvmppc_mmu_flush_icache(pfn); /* Drop refcount on page, so that mmu notifiers can clear it */ kvm_release_pfn_clean(pfn); return 0; } /* XXX only map the one-one case, for now use TLB0 */ static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel, struct kvm_book3e_206_tlb_entry *stlbe) { struct kvm_book3e_206_tlb_entry *gtlbe; struct tlbe_ref *ref; int stlbsel = 0; int sesel = 0; int r; gtlbe = get_entry(vcpu_e500, 0, esel); ref = &vcpu_e500->gtlb_priv[0][esel].ref; r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), get_tlb_raddr(gtlbe) >> PAGE_SHIFT, gtlbe, 0, stlbe, ref); if (r) return r; write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); return 0; } static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, struct tlbe_ref *ref, int esel) { unsigned int sesel = vcpu_e500->host_tlb1_nv++; if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) vcpu_e500->host_tlb1_nv = 0; if (vcpu_e500->h2g_tlb1_rmap[sesel]) { unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); } vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; WARN_ON(!(ref->flags & E500_TLB_VALID)); return sesel; } /* Caller must ensure that the specified guest TLB entry is safe to insert into * the 
shadow TLB. */ /* For both one-one and one-to-many */ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int esel) { struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; int sesel; int r; r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); if (r) return r; /* Use TLB0 when we can only map a page with 4k */ if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) { vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0; write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0); return 0; } /* Otherwise map into TLB1 */ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); return 0; } void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, unsigned int index) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct tlbe_priv *priv; struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; int tlbsel = tlbsel_of(index); int esel = esel_of(index); gtlbe = get_entry(vcpu_e500, tlbsel, esel); switch (tlbsel) { case 0: priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; /* Triggers after clear_tlb_privs or on initial mapping */ if (!(priv->ref.flags & E500_TLB_VALID)) { kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); } else { kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, &priv->ref, eaddr, &stlbe); write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0); } break; case 1: { gfn_t gfn = gpaddr >> PAGE_SHIFT; kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe, esel); break; } default: BUG(); break; } } /************* MMU Notifiers *************/ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) { trace_kvm_unmap_hva(hva); /* * Flush all shadow tlb entries everywhere. 
This is slow, but * we are 100% sure that we catch the to be unmapped page */ kvm_flush_remote_tlbs(kvm); return 0; } int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) { /* kvm_unmap_hva flushes everything anyways */ kvm_unmap_hva(kvm, start); return 0; } int kvm_age_hva(struct kvm *kvm, unsigned long hva) { /* XXX could be more clever ;) */ return 0; } int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) { /* XXX could be more clever ;) */ return 0; } void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { /* The page will get remapped properly on its next fault */ kvm_unmap_hva(kvm, hva); } /*****************************************/ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) { host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; /* * This should never happen on real e500 hardware, but is * architecturally possible -- e.g. in some weird nested * virtualization case. 
*/ if (host_tlb_params[0].entries == 0 || host_tlb_params[1].entries == 0) { pr_err("%s: need to know host tlb size\n", __func__); return -ENODEV; } host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >> TLBnCFG_ASSOC_SHIFT; host_tlb_params[1].ways = host_tlb_params[1].entries; if (!is_power_of_2(host_tlb_params[0].entries) || !is_power_of_2(host_tlb_params[0].ways) || host_tlb_params[0].entries < host_tlb_params[0].ways || host_tlb_params[0].ways == 0) { pr_err("%s: bad tlb0 host config: %u entries %u ways\n", __func__, host_tlb_params[0].entries, host_tlb_params[0].ways); return -ENODEV; } host_tlb_params[0].sets = host_tlb_params[0].entries / host_tlb_params[0].ways; host_tlb_params[1].sets = 1; vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * host_tlb_params[1].entries, GFP_KERNEL); if (!vcpu_e500->h2g_tlb1_rmap) return -EINVAL; return 0; } void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) { kfree(vcpu_e500->h2g_tlb1_rmap); }
gpl-2.0
boa19861105/android_kernel_htc_b3uhl-JP
drivers/irqchip/irq-metag.c
3064
9589
/* * Meta internal (HWSTATMETA) interrupt code. * * Copyright (C) 2011-2012 Imagination Technologies Ltd. * * This code is based on the code in SoC/common/irq.c and SoC/comet/irq.c * The code base could be generalised/merged as a lot of the functionality is * similar. Until this is done, we try to keep the code simple here. */ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irqdomain.h> #include <asm/irq.h> #include <asm/hwthread.h> #define PERF0VECINT 0x04820580 #define PERF1VECINT 0x04820588 #define PERF0TRIG_OFFSET 16 #define PERF1TRIG_OFFSET 17 /** * struct metag_internal_irq_priv - private meta internal interrupt data * @domain: IRQ domain for all internal Meta IRQs (HWSTATMETA) * @unmasked: Record of unmasked IRQs */ struct metag_internal_irq_priv { struct irq_domain *domain; unsigned long unmasked; }; /* Private data for the one and only internal interrupt controller */ static struct metag_internal_irq_priv metag_internal_irq_priv; static unsigned int metag_internal_irq_startup(struct irq_data *data); static void metag_internal_irq_shutdown(struct irq_data *data); static void metag_internal_irq_ack(struct irq_data *data); static void metag_internal_irq_mask(struct irq_data *data); static void metag_internal_irq_unmask(struct irq_data *data); #ifdef CONFIG_SMP static int metag_internal_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, bool force); #endif static struct irq_chip internal_irq_edge_chip = { .name = "HWSTATMETA-IRQ", .irq_startup = metag_internal_irq_startup, .irq_shutdown = metag_internal_irq_shutdown, .irq_ack = metag_internal_irq_ack, .irq_mask = metag_internal_irq_mask, .irq_unmask = metag_internal_irq_unmask, #ifdef CONFIG_SMP .irq_set_affinity = metag_internal_irq_set_affinity, #endif }; /* * metag_hwvec_addr - get the address of *VECINT regs of irq * * This function is a table of supported triggers on HWSTATMETA * Could do with a structure, but better keep it simple. 
Changes * in this code should be rare. */ static inline void __iomem *metag_hwvec_addr(irq_hw_number_t hw) { void __iomem *addr; switch (hw) { case PERF0TRIG_OFFSET: addr = (void __iomem *)PERF0VECINT; break; case PERF1TRIG_OFFSET: addr = (void __iomem *)PERF1VECINT; break; default: addr = NULL; break; } return addr; } /* * metag_internal_startup - setup an internal irq * @irq: the irq to startup * * Multiplex interrupts for @irq onto TR1. Clear any pending * interrupts. */ static unsigned int metag_internal_irq_startup(struct irq_data *data) { /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */ metag_internal_irq_ack(data); /* Enable the interrupt by unmasking it */ metag_internal_irq_unmask(data); return 0; } /* * metag_internal_irq_shutdown - turn off the irq * @irq: the irq number to turn off * * Mask @irq and clear any pending interrupts. * Stop muxing @irq onto TR1. */ static void metag_internal_irq_shutdown(struct irq_data *data) { /* Disable the IRQ at the core by masking it. */ metag_internal_irq_mask(data); /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */ metag_internal_irq_ack(data); } /* * metag_internal_irq_ack - acknowledge irq * @irq: the irq to ack */ static void metag_internal_irq_ack(struct irq_data *data) { irq_hw_number_t hw = data->hwirq; unsigned int bit = 1 << hw; if (metag_in32(HWSTATMETA) & bit) metag_out32(bit, HWSTATMETA); } /** * metag_internal_irq_mask() - mask an internal irq by unvectoring * @data: data for the internal irq to mask * * HWSTATMETA has no mask register. Instead the IRQ is unvectored from the core * and retriggered if necessary later. 
*/ static void metag_internal_irq_mask(struct irq_data *data) { struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; irq_hw_number_t hw = data->hwirq; void __iomem *vec_addr = metag_hwvec_addr(hw); clear_bit(hw, &priv->unmasked); /* there is no interrupt mask, so unvector the interrupt */ metag_out32(0, vec_addr); } /** * meta_intc_unmask_edge_irq_nomask() - unmask an edge irq by revectoring * @data: data for the internal irq to unmask * * HWSTATMETA has no mask register. Instead the IRQ is revectored back to the * core and retriggered if necessary. */ static void metag_internal_irq_unmask(struct irq_data *data) { struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; irq_hw_number_t hw = data->hwirq; unsigned int bit = 1 << hw; void __iomem *vec_addr = metag_hwvec_addr(hw); unsigned int thread = hard_processor_id(); set_bit(hw, &priv->unmasked); /* there is no interrupt mask, so revector the interrupt */ metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), vec_addr); /* * Re-trigger interrupt * * Writing a 1 toggles, and a 0->1 transition triggers. We only * retrigger if the status bit is already set, which means we * need to clear it first. Retriggering is fundamentally racy * because if the interrupt fires again after we clear it we * could end up clearing it again and the interrupt handler * thinking it hasn't fired. Therefore we need to keep trying to * retrigger until the bit is set. */ if (metag_in32(HWSTATMETA) & bit) { metag_out32(bit, HWSTATMETA); while (!(metag_in32(HWSTATMETA) & bit)) metag_out32(bit, HWSTATMETA); } } #ifdef CONFIG_SMP /* * metag_internal_irq_set_affinity - set the affinity for an interrupt */ static int metag_internal_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, bool force) { unsigned int cpu, thread; irq_hw_number_t hw = data->hwirq; /* * Wire up this interrupt from *VECINT to the Meta core. 
* * Note that we can't wire up *VECINT to interrupt more than * one cpu (the interrupt code doesn't support it), so we just * pick the first cpu we find in 'cpumask'. */ cpu = cpumask_any_and(cpumask, cpu_online_mask); thread = cpu_2_hwthread_id[cpu]; metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), metag_hwvec_addr(hw)); return 0; } #endif /* * metag_internal_irq_demux - irq de-multiplexer * @irq: the interrupt number * @desc: the interrupt description structure for this irq * * The cpu receives an interrupt on TR1 when an interrupt has * occurred. It is this function's job to demux this irq and * figure out exactly which trigger needs servicing. */ static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc) { struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc); irq_hw_number_t hw; unsigned int irq_no; u32 status; recalculate: status = metag_in32(HWSTATMETA) & priv->unmasked; for (hw = 0; status != 0; status >>= 1, ++hw) { if (status & 0x1) { /* * Map the hardware IRQ number to a virtual Linux IRQ * number. */ irq_no = irq_linear_revmap(priv->domain, hw); /* * Only fire off interrupts that are * registered to be handled by the kernel. * Other interrupts are probably being * handled by other Meta hardware threads. */ generic_handle_irq(irq_no); /* * The handler may have re-enabled interrupts * which could have caused a nested invocation * of this code and make the copy of the * status register we are using invalid. */ goto recalculate; } } } /** * internal_irq_map() - Map an internal meta IRQ to a virtual IRQ number. * @hw: Number of the internal IRQ. Must be in range. * * Returns: The virtual IRQ number of the Meta internal IRQ specified by * @hw. 
*/ int internal_irq_map(unsigned int hw) { struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; if (!priv->domain) return -ENODEV; return irq_create_mapping(priv->domain, hw); } /** * metag_internal_irq_init_cpu - regsister with the Meta cpu * @cpu: the CPU to register on * * Configure @cpu's TR1 irq so that we can demux irqs. */ static void metag_internal_irq_init_cpu(struct metag_internal_irq_priv *priv, int cpu) { unsigned int thread = cpu_2_hwthread_id[cpu]; unsigned int signum = TBID_SIGNUM_TR1(thread); int irq = tbisig_map(signum); /* Register the multiplexed IRQ handler */ irq_set_handler_data(irq, priv); irq_set_chained_handler(irq, metag_internal_irq_demux); irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); } /** * metag_internal_intc_map() - map an internal irq * @d: irq domain of internal trigger block * @irq: virtual irq number * @hw: hardware irq number within internal trigger block * * This sets up a virtual irq for a specified hardware interrupt. The irq chip * and handler is configured. */ static int metag_internal_intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { /* only register interrupt if it is mapped */ if (!metag_hwvec_addr(hw)) return -EINVAL; irq_set_chip_and_handler(irq, &internal_irq_edge_chip, handle_edge_irq); return 0; } static const struct irq_domain_ops metag_internal_intc_domain_ops = { .map = metag_internal_intc_map, }; /** * metag_internal_irq_register - register internal IRQs * * Register the irq chip and handler function for all internal IRQs */ int __init init_internal_IRQ(void) { struct metag_internal_irq_priv *priv = &metag_internal_irq_priv; unsigned int cpu; /* Set up an IRQ domain */ priv->domain = irq_domain_add_linear(NULL, 32, &metag_internal_intc_domain_ops, priv); if (unlikely(!priv->domain)) { pr_err("meta-internal-intc: cannot add IRQ domain\n"); return -ENOMEM; } /* Setup TR1 for all cpus. */ for_each_possible_cpu(cpu) metag_internal_irq_init_cpu(priv, cpu); return 0; };
gpl-2.0
TeamExodus/kernel_google_msm
drivers/media/radio/wl128x/fmdrv_v4l2.c
4856
14631
/* * FM Driver for Connectivity chip of Texas Instruments. * This file provides interfaces to V4L2 subsystem. * * This module registers with V4L2 subsystem as Radio * data system interface (/dev/radio). During the registration, * it will expose two set of function pointers. * * 1) File operation related API (open, close, read, write, poll...etc). * 2) Set of V4L2 IOCTL complaint API. * * Copyright (C) 2011 Texas Instruments * Author: Raja Mani <raja_mani@ti.com> * Author: Manjunatha Halli <manjunatha_halli@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/export.h> #include "fmdrv.h" #include "fmdrv_v4l2.h" #include "fmdrv_common.h" #include "fmdrv_rx.h" #include "fmdrv_tx.h" static struct video_device *gradio_dev; static u8 radio_disconnected; /* -- V4L2 RADIO (/dev/radioX) device file operation interfaces --- */ /* Read RX RDS data */ static ssize_t fm_v4l2_fops_read(struct file *file, char __user * buf, size_t count, loff_t *ppos) { u8 rds_mode; int ret; struct fmdev *fmdev; fmdev = video_drvdata(file); if (!radio_disconnected) { fmerr("FM device is already disconnected\n"); return -EIO; } /* Turn on RDS mode , if it is disabled */ ret = fm_rx_get_rds_mode(fmdev, &rds_mode); if (ret < 0) { fmerr("Unable to read current rds mode\n"); return ret; } if (rds_mode == FM_RDS_DISABLE) { ret = fmc_set_rds_mode(fmdev, FM_RDS_ENABLE); if (ret < 0) { fmerr("Failed to 
enable rds mode\n"); return ret; } } /* Copy RDS data from internal buffer to user buffer */ return fmc_transfer_rds_from_internal_buff(fmdev, file, buf, count); } /* Write TX RDS data */ static ssize_t fm_v4l2_fops_write(struct file *file, const char __user * buf, size_t count, loff_t *ppos) { struct tx_rds rds; int ret; struct fmdev *fmdev; ret = copy_from_user(&rds, buf, sizeof(rds)); rds.text[sizeof(rds.text) - 1] = '\0'; fmdbg("(%d)type: %d, text %s, af %d\n", ret, rds.text_type, rds.text, rds.af_freq); if (ret) return -EFAULT; fmdev = video_drvdata(file); fm_tx_set_radio_text(fmdev, rds.text, rds.text_type); fm_tx_set_af(fmdev, rds.af_freq); return sizeof(rds); } static u32 fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts) { int ret; struct fmdev *fmdev; fmdev = video_drvdata(file); ret = fmc_is_rds_data_available(fmdev, file, pts); if (ret < 0) return POLLIN | POLLRDNORM; return 0; } /* * Handle open request for "/dev/radioX" device. * Start with FM RX mode as default. 
*/ static int fm_v4l2_fops_open(struct file *file) { int ret; struct fmdev *fmdev = NULL; /* Don't allow multiple open */ if (radio_disconnected) { fmerr("FM device is already opened\n"); return -EBUSY; } fmdev = video_drvdata(file); ret = fmc_prepare(fmdev); if (ret < 0) { fmerr("Unable to prepare FM CORE\n"); return ret; } fmdbg("Load FM RX firmware..\n"); ret = fmc_set_mode(fmdev, FM_MODE_RX); if (ret < 0) { fmerr("Unable to load FM RX firmware\n"); return ret; } radio_disconnected = 1; return ret; } static int fm_v4l2_fops_release(struct file *file) { int ret; struct fmdev *fmdev; fmdev = video_drvdata(file); if (!radio_disconnected) { fmdbg("FM device is already closed\n"); return 0; } ret = fmc_set_mode(fmdev, FM_MODE_OFF); if (ret < 0) { fmerr("Unable to turn off the chip\n"); return ret; } ret = fmc_release(fmdev); if (ret < 0) { fmerr("FM CORE release failed\n"); return ret; } radio_disconnected = 0; return ret; } /* V4L2 RADIO (/dev/radioX) device IOCTL interfaces */ static int fm_v4l2_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *capability) { strlcpy(capability->driver, FM_DRV_NAME, sizeof(capability->driver)); strlcpy(capability->card, FM_DRV_CARD_SHORT_NAME, sizeof(capability->card)); sprintf(capability->bus_info, "UART"); capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_MODULATOR | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE; return 0; } static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct fmdev *fmdev = container_of(ctrl->handler, struct fmdev, ctrl_handler); switch (ctrl->id) { case V4L2_CID_TUNE_ANTENNA_CAPACITOR: ctrl->val = fm_tx_get_tune_cap_val(fmdev); break; default: fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id); break; } return 0; } static int fm_v4l2_s_ctrl(struct v4l2_ctrl *ctrl) { struct fmdev *fmdev = container_of(ctrl->handler, struct fmdev, ctrl_handler); switch (ctrl->id) { case V4L2_CID_AUDIO_VOLUME: /* set volume */ return 
fm_rx_set_volume(fmdev, (u16)ctrl->val); case V4L2_CID_AUDIO_MUTE: /* set mute */ return fmc_set_mute_mode(fmdev, (u8)ctrl->val); case V4L2_CID_TUNE_POWER_LEVEL: /* set TX power level - ext control */ return fm_tx_set_pwr_lvl(fmdev, (u8)ctrl->val); case V4L2_CID_TUNE_PREEMPHASIS: return fm_tx_set_preemph_filter(fmdev, (u8) ctrl->val); default: return -EINVAL; } } static int fm_v4l2_vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *audio) { memset(audio, 0, sizeof(*audio)); strcpy(audio->name, "Radio"); audio->capability = V4L2_AUDCAP_STEREO; return 0; } static int fm_v4l2_vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *audio) { if (audio->index != 0) return -EINVAL; return 0; } /* Get tuner attributes. If current mode is NOT RX, return error */ static int fm_v4l2_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *tuner) { struct fmdev *fmdev = video_drvdata(file); u32 bottom_freq; u32 top_freq; u16 stereo_mono_mode; u16 rssilvl; int ret; if (tuner->index != 0) return -EINVAL; if (fmdev->curr_fmmode != FM_MODE_RX) return -EPERM; ret = fm_rx_get_band_freq_range(fmdev, &bottom_freq, &top_freq); if (ret != 0) return ret; ret = fm_rx_get_stereo_mono(fmdev, &stereo_mono_mode); if (ret != 0) return ret; ret = fm_rx_get_rssi_level(fmdev, &rssilvl); if (ret != 0) return ret; strcpy(tuner->name, "FM"); tuner->type = V4L2_TUNER_RADIO; /* Store rangelow and rangehigh freq in unit of 62.5 Hz */ tuner->rangelow = bottom_freq * 16; tuner->rangehigh = top_freq * 16; tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO | ((fmdev->rx.rds.flag == FM_RDS_ENABLE) ? V4L2_TUNER_SUB_RDS : 0); tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_LOW; tuner->audmode = (stereo_mono_mode ? V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO); /* * Actual rssi value lies in between -128 to +127. 
* Convert this range from 0 to 255 by adding +128 */ rssilvl += 128; /* * Return signal strength value should be within 0 to 65535. * Find out correct signal radio by multiplying (65535/255) = 257 */ tuner->signal = rssilvl * 257; tuner->afc = 0; return ret; } /* * Set tuner attributes. If current mode is NOT RX, set to RX. * Currently, we set only audio mode (mono/stereo) and RDS state (on/off). * Should we set other tuner attributes, too? */ static int fm_v4l2_vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *tuner) { struct fmdev *fmdev = video_drvdata(file); u16 aud_mode; u8 rds_mode; int ret; if (tuner->index != 0) return -EINVAL; aud_mode = (tuner->audmode == V4L2_TUNER_MODE_STEREO) ? FM_STEREO_MODE : FM_MONO_MODE; rds_mode = (tuner->rxsubchans & V4L2_TUNER_SUB_RDS) ? FM_RDS_ENABLE : FM_RDS_DISABLE; if (fmdev->curr_fmmode != FM_MODE_RX) { ret = fmc_set_mode(fmdev, FM_MODE_RX); if (ret < 0) { fmerr("Failed to set RX mode\n"); return ret; } } ret = fmc_set_stereo_mono(fmdev, aud_mode); if (ret < 0) { fmerr("Failed to set RX stereo/mono mode\n"); return ret; } ret = fmc_set_rds_mode(fmdev, rds_mode); if (ret < 0) fmerr("Failed to set RX RDS mode\n"); return ret; } /* Get tuner or modulator radio frequency */ static int fm_v4l2_vidioc_g_freq(struct file *file, void *priv, struct v4l2_frequency *freq) { struct fmdev *fmdev = video_drvdata(file); int ret; ret = fmc_get_freq(fmdev, &freq->frequency); if (ret < 0) { fmerr("Failed to get frequency\n"); return ret; } /* Frequency unit of 62.5 Hz*/ freq->frequency = (u32) freq->frequency * 16; return 0; } /* Set tuner or modulator radio frequency */ static int fm_v4l2_vidioc_s_freq(struct file *file, void *priv, struct v4l2_frequency *freq) { struct fmdev *fmdev = video_drvdata(file); /* * As V4L2_TUNER_CAP_LOW is set 1 user sends the frequency * in units of 62.5 Hz. */ freq->frequency = (u32)(freq->frequency / 16); return fmc_set_freq(fmdev, freq->frequency); } /* Set hardware frequency seek. 
If current mode is NOT RX, set it RX. */ static int fm_v4l2_vidioc_s_hw_freq_seek(struct file *file, void *priv, struct v4l2_hw_freq_seek *seek) { struct fmdev *fmdev = video_drvdata(file); int ret; if (fmdev->curr_fmmode != FM_MODE_RX) { ret = fmc_set_mode(fmdev, FM_MODE_RX); if (ret != 0) { fmerr("Failed to set RX mode\n"); return ret; } } ret = fm_rx_seek(fmdev, seek->seek_upward, seek->wrap_around, seek->spacing); if (ret < 0) fmerr("RX seek failed - %d\n", ret); return ret; } /* Get modulator attributes. If mode is not TX, return no attributes. */ static int fm_v4l2_vidioc_g_modulator(struct file *file, void *priv, struct v4l2_modulator *mod) { struct fmdev *fmdev = video_drvdata(file); if (mod->index != 0) return -EINVAL; if (fmdev->curr_fmmode != FM_MODE_TX) return -EPERM; mod->txsubchans = ((fmdev->tx_data.aud_mode == FM_STEREO_MODE) ? V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO) | ((fmdev->tx_data.rds.flag == FM_RDS_ENABLE) ? V4L2_TUNER_SUB_RDS : 0); mod->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_LOW; return 0; } /* Set modulator attributes. If mode is not TX, set to TX. */ static int fm_v4l2_vidioc_s_modulator(struct file *file, void *priv, struct v4l2_modulator *mod) { struct fmdev *fmdev = video_drvdata(file); u8 rds_mode; u16 aud_mode; int ret; if (mod->index != 0) return -EINVAL; if (fmdev->curr_fmmode != FM_MODE_TX) { ret = fmc_set_mode(fmdev, FM_MODE_TX); if (ret != 0) { fmerr("Failed to set TX mode\n"); return ret; } } aud_mode = (mod->txsubchans & V4L2_TUNER_SUB_STEREO) ? FM_STEREO_MODE : FM_MONO_MODE; rds_mode = (mod->txsubchans & V4L2_TUNER_SUB_RDS) ? 
FM_RDS_ENABLE : FM_RDS_DISABLE; ret = fm_tx_set_stereo_mono(fmdev, aud_mode); if (ret < 0) { fmerr("Failed to set mono/stereo mode for TX\n"); return ret; } ret = fm_tx_set_rds_mode(fmdev, rds_mode); if (ret < 0) fmerr("Failed to set rds mode for TX\n"); return ret; } static const struct v4l2_file_operations fm_drv_fops = { .owner = THIS_MODULE, .read = fm_v4l2_fops_read, .write = fm_v4l2_fops_write, .poll = fm_v4l2_fops_poll, .unlocked_ioctl = video_ioctl2, .open = fm_v4l2_fops_open, .release = fm_v4l2_fops_release, }; static const struct v4l2_ctrl_ops fm_ctrl_ops = { .s_ctrl = fm_v4l2_s_ctrl, .g_volatile_ctrl = fm_g_volatile_ctrl, }; static const struct v4l2_ioctl_ops fm_drv_ioctl_ops = { .vidioc_querycap = fm_v4l2_vidioc_querycap, .vidioc_g_audio = fm_v4l2_vidioc_g_audio, .vidioc_s_audio = fm_v4l2_vidioc_s_audio, .vidioc_g_tuner = fm_v4l2_vidioc_g_tuner, .vidioc_s_tuner = fm_v4l2_vidioc_s_tuner, .vidioc_g_frequency = fm_v4l2_vidioc_g_freq, .vidioc_s_frequency = fm_v4l2_vidioc_s_freq, .vidioc_s_hw_freq_seek = fm_v4l2_vidioc_s_hw_freq_seek, .vidioc_g_modulator = fm_v4l2_vidioc_g_modulator, .vidioc_s_modulator = fm_v4l2_vidioc_s_modulator }; /* V4L2 RADIO device parent structure */ static struct video_device fm_viddev_template = { .fops = &fm_drv_fops, .ioctl_ops = &fm_drv_ioctl_ops, .name = FM_DRV_NAME, .release = video_device_release, }; int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr) { struct v4l2_ctrl *ctrl; int ret; /* Init mutex for core locking */ mutex_init(&fmdev->mutex); /* Allocate new video device */ gradio_dev = video_device_alloc(); if (NULL == gradio_dev) { fmerr("Can't allocate video device\n"); return -ENOMEM; } /* Setup FM driver's V4L2 properties */ memcpy(gradio_dev, &fm_viddev_template, sizeof(fm_viddev_template)); video_set_drvdata(gradio_dev, fmdev); gradio_dev->lock = &fmdev->mutex; /* Register with V4L2 subsystem as RADIO device */ if (video_register_device(gradio_dev, VFL_TYPE_RADIO, radio_nr)) { 
video_device_release(gradio_dev); fmerr("Could not register video device\n"); return -ENOMEM; } fmdev->radio_dev = gradio_dev; /* Register to v4l2 ctrl handler framework */ fmdev->radio_dev->ctrl_handler = &fmdev->ctrl_handler; ret = v4l2_ctrl_handler_init(&fmdev->ctrl_handler, 5); if (ret < 0) { fmerr("(fmdev): Can't init ctrl handler\n"); v4l2_ctrl_handler_free(&fmdev->ctrl_handler); return -EBUSY; } /* * Following controls are handled by V4L2 control framework. * Added in ascending ID order. */ v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops, V4L2_CID_AUDIO_VOLUME, FM_RX_VOLUME_MIN, FM_RX_VOLUME_MAX, 1, FM_RX_VOLUME_MAX); v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1); v4l2_ctrl_new_std_menu(&fmdev->ctrl_handler, &fm_ctrl_ops, V4L2_CID_TUNE_PREEMPHASIS, V4L2_PREEMPHASIS_75_uS, 0, V4L2_PREEMPHASIS_75_uS); v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops, V4L2_CID_TUNE_POWER_LEVEL, FM_PWR_LVL_LOW, FM_PWR_LVL_HIGH, 1, FM_PWR_LVL_HIGH); ctrl = v4l2_ctrl_new_std(&fmdev->ctrl_handler, &fm_ctrl_ops, V4L2_CID_TUNE_ANTENNA_CAPACITOR, 0, 255, 1, 255); if (ctrl) ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; return 0; } void *fm_v4l2_deinit_video_device(void) { struct fmdev *fmdev; fmdev = video_get_drvdata(gradio_dev); /* Unregister to v4l2 ctrl handler framework*/ v4l2_ctrl_handler_free(&fmdev->ctrl_handler); /* Unregister RADIO device from V4L2 subsystem */ video_unregister_device(gradio_dev); return fmdev; }
gpl-2.0
AK-Kernel/AK-Mako
drivers/leds/leds-cobalt-qube.c
5112
2021
/* * Copyright 2006 - Florian Fainelli <florian@openwrt.org> * * Control the Cobalt Qube/RaQ front LED */ #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/types.h> #define LED_FRONT_LEFT 0x01 #define LED_FRONT_RIGHT 0x02 static void __iomem *led_port; static u8 led_value; static void qube_front_led_set(struct led_classdev *led_cdev, enum led_brightness brightness) { if (brightness) led_value = LED_FRONT_LEFT | LED_FRONT_RIGHT; else led_value = ~(LED_FRONT_LEFT | LED_FRONT_RIGHT); writeb(led_value, led_port); } static struct led_classdev qube_front_led = { .name = "qube::front", .brightness = LED_FULL, .brightness_set = qube_front_led_set, .default_trigger = "default-on", }; static int __devinit cobalt_qube_led_probe(struct platform_device *pdev) { struct resource *res; int retval; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EBUSY; led_port = ioremap(res->start, resource_size(res)); if (!led_port) return -ENOMEM; led_value = LED_FRONT_LEFT | LED_FRONT_RIGHT; writeb(led_value, led_port); retval = led_classdev_register(&pdev->dev, &qube_front_led); if (retval) goto err_iounmap; return 0; err_iounmap: iounmap(led_port); led_port = NULL; return retval; } static int __devexit cobalt_qube_led_remove(struct platform_device *pdev) { led_classdev_unregister(&qube_front_led); if (led_port) { iounmap(led_port); led_port = NULL; } return 0; } static struct platform_driver cobalt_qube_led_driver = { .probe = cobalt_qube_led_probe, .remove = __devexit_p(cobalt_qube_led_remove), .driver = { .name = "cobalt-qube-leds", .owner = THIS_MODULE, }, }; module_platform_driver(cobalt_qube_led_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Front LED support for Cobalt Server"); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_ALIAS("platform:cobalt-qube-leds");
gpl-2.0
varund7726/AK-OnePone-Reborn
arch/arm/mach-ux500/board-mop500-uib.c
5368
2590
/* * Copyright (C) ST-Ericsson SA 2010 * * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson * License terms: GNU General Public License (GPL), version 2 */ #define pr_fmt(fmt) "mop500-uib: " fmt #include <linux/kernel.h> #include <linux/init.h> #include <linux/i2c.h> #include <mach/hardware.h> #include "board-mop500.h" enum mop500_uib { STUIB, U8500UIB, }; struct uib { const char *name; const char *option; void (*init)(void); }; static struct uib __initdata mop500_uibs[] = { [STUIB] = { .name = "ST-UIB", .option = "stuib", .init = mop500_stuib_init, }, [U8500UIB] = { .name = "U8500-UIB", .option = "u8500uib", .init = mop500_u8500uib_init, }, }; static struct uib *mop500_uib; static int __init mop500_uib_setup(char *str) { int i; for (i = 0; i < ARRAY_SIZE(mop500_uibs); i++) { struct uib *uib = &mop500_uibs[i]; if (!strcmp(str, uib->option)) { mop500_uib = uib; break; } } if (i == ARRAY_SIZE(mop500_uibs)) pr_err("invalid uib= option (%s)\n", str); return 1; } __setup("uib=", mop500_uib_setup); /* * The UIBs are detected after the I2C host controllers are registered, so * i2c_register_board_info() can't be used. */ void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, unsigned n) { struct i2c_adapter *adap; struct i2c_client *client; int i; adap = i2c_get_adapter(busnum); if (!adap) { pr_err("failed to get adapter i2c%d\n", busnum); return; } for (i = 0; i < n; i++) { client = i2c_new_device(adap, &info[i]); if (!client) pr_err("failed to register %s to i2c%d\n", info[i].type, busnum); } i2c_put_adapter(adap); } static void __init __mop500_uib_init(struct uib *uib, const char *why) { pr_info("%s (%s)\n", uib->name, why); uib->init(); } /* * Detect the UIB attached based on the presence or absence of i2c devices. 
*/ static int __init mop500_uib_init(void) { struct uib *uib = mop500_uib; struct i2c_adapter *i2c0; int ret; if (!cpu_is_u8500()) return -ENODEV; if (uib) { __mop500_uib_init(uib, "from uib= boot argument"); return 0; } i2c0 = i2c_get_adapter(0); if (!i2c0) { __mop500_uib_init(&mop500_uibs[STUIB], "fallback, could not get i2c0"); return -ENODEV; } /* U8500-UIB has the TC35893 at 0x44 on I2C0, the ST-UIB doesn't. */ ret = i2c_smbus_xfer(i2c0, 0x44, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_QUICK, NULL); i2c_put_adapter(i2c0); if (ret == 0) uib = &mop500_uibs[U8500UIB]; else uib = &mop500_uibs[STUIB]; __mop500_uib_init(uib, "detected"); return 0; } module_init(mop500_uib_init);
gpl-2.0
wangxingchao/oriole
drivers/uwb/ie-rcv.c
12792
1632
/* * Ultra Wide Band * IE Received notification handling. * * Copyright (C) 2008 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/errno.h> #include <linux/module.h> #include <linux/device.h> #include <linux/bitmap.h> #include "uwb-internal.h" /* * Process an incoming IE Received notification. */ int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *evt) { int result = -EINVAL; struct device *dev = &evt->rc->uwb_dev.dev; struct uwb_rc_evt_ie_rcv *iercv; size_t iesize; /* Is there enough data to decode it? */ if (evt->notif.size < sizeof(*iercv)) { dev_err(dev, "IE Received notification: Not enough data to " "decode (%zu vs %zu bytes needed)\n", evt->notif.size, sizeof(*iercv)); goto error; } iercv = container_of(evt->notif.rceb, struct uwb_rc_evt_ie_rcv, rceb); iesize = le16_to_cpu(iercv->wIELength); dev_dbg(dev, "IE received, element ID=%d\n", iercv->IEData[0]); if (iercv->IEData[0] == UWB_RELINQUISH_REQUEST_IE) { dev_warn(dev, "unhandled Relinquish Request IE\n"); } return 0; error: return result; }
gpl-2.0
thicklizard/p4wifi
drivers/misc/sgi-xp/xpc_partition.c
13304
14014
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) partition support.
 *
 *	This is the part of XPC that detects the presence/absence of
 *	other partitions. It provides a heartbeat and monitors the
 *	heartbeats of other partitions.
 *
 */

#include <linux/device.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include "xpc.h"
#include <asm/uv/uv_hub.h>

/* XPC is exiting flag */
int xpc_exiting;

/* this partition's reserved page pointers */
struct xpc_rsvd_page *xpc_rsvd_page;
static unsigned long *xpc_part_nasids;
unsigned long *xpc_mach_nasids;

static int xpc_nasid_mask_nbytes;	/* #of bytes in nasid mask */
int xpc_nasid_mask_nlongs;	/* #of longs in nasid mask */

struct xpc_partition *xpc_partitions;

/*
 * Guarantee that the kmalloc'd memory is cacheline aligned.
 */
void *
xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kmalloc will give us cachline aligned memory by default */
	*base = kmalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kmalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}

/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 */
static unsigned long
xpc_get_rsvd_page_pa(int nasid)
{
	enum xp_retval ret;
	u64 cookie = 0;
	unsigned long rp_pa = nasid;	/* seed with nasid */
	size_t len = 0;
	size_t buf_len = 0;
	/*
	 * Was "void *buf = buf;" - a self-initialization that reads an
	 * indeterminate value (undefined behavior).  NULL is a safe,
	 * well-defined value for the first pass: the arch hook only
	 * consumes buf after it has told us how much to allocate.
	 */
	void *buf = NULL;
	void *buf_base = NULL;
	enum xp_retval (*get_partition_rsvd_page_pa)
		(void *, u64 *, unsigned long *, size_t *) =
		xpc_arch_ops.get_partition_rsvd_page_pa;

	while (1) {

		/* !!! rp_pa will need to be _gpa on UV.
		 * ??? So do we save it into the architecture specific parts
		 * ??? of the xpc_partition structure? Do we rename this
		 * ??? function or have two versions? Rename rp_pa for UV to
		 * ??? rp_gpa?
		 */
		ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);

		dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
			"address=0x%016lx, len=0x%016lx\n", ret,
			(unsigned long)cookie, rp_pa, len);

		if (ret != xpNeedMoreInfo)
			break;

		/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
		if (is_shub())
			len = L1_CACHE_ALIGN(len);

		if (len > buf_len) {
			kfree(buf_base);	/* kfree(NULL) is a no-op */
			buf_len = L1_CACHE_ALIGN(len);
			buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
							    &buf_base);
			if (buf_base == NULL) {
				dev_err(xpc_part, "unable to kmalloc "
					"len=0x%016lx\n", buf_len);
				ret = xpNoMemory;
				break;
			}
		}

		ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
		if (ret != xpSuccess) {
			dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
			break;
		}
	}

	kfree(buf_base);

	if (ret != xpSuccess)
		rp_pa = 0;

	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
	return rp_pa;
}

/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
/*
 * Locate and initialise this partition's reserved page so that remote
 * partitions can discover us.  Returns 0 on success or a negative errno.
 */
int
xpc_setup_rsvd_page(void)
{
	int ret;
	struct xpc_rsvd_page *rp;
	unsigned long rp_pa;
	unsigned long new_ts_jiffies;

	/* get the local reserved page's address */

	/* preemption disabled so smp_processor_id() stays valid */
	preempt_disable();
	rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id()));
	preempt_enable();
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return -ESRCH;
	}
	rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));

	if (rp->SAL_version < 3) {
		/* SAL_versions < 3 had a SAL_partid defined as a u8 */
		rp->SAL_partid &= 0xff;
	}
	BUG_ON(rp->SAL_partid != xp_partition_id);

	/*
	 * NOTE(review): after the BUG_ON above this range check looks
	 * unreachable unless xp_partition_id itself is out of range --
	 * kept as defensive validation.
	 */
	if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) {
		dev_err(xpc_part, "the reserved page's partid of %d is outside "
			"supported range (< 0 || >= %d)\n", rp->SAL_partid,
			xp_max_npartitions);
		return -EINVAL;
	}

	rp->version = XPC_RP_VERSION;
	rp->max_npartitions = xp_max_npartitions;

	/* establish the actual sizes of the nasid masks */
	if (rp->SAL_version == 1) {
		/* SAL_version 1 didn't set the nasids_size field */
		rp->SAL_nasids_size = 128;
	}
	xpc_nasid_mask_nbytes = rp->SAL_nasids_size;
	xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size *
					      BITS_PER_BYTE);

	/* setup the pointers to the various items in the reserved page */
	xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
	xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);

	ret = xpc_arch_ops.setup_rsvd_page(rp);
	if (ret != 0)
		return ret;

	/*
	 * Set timestamp of when reserved page was setup by XPC.
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	/* 0 means "not initialized", so never publish 0 or a repeat value */
	new_ts_jiffies = jiffies;
	if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies)
		new_ts_jiffies++;
	rp->ts_jiffies = new_ts_jiffies;

	xpc_rsvd_page = rp;
	return 0;
}

void
xpc_teardown_rsvd_page(void)
{
	/* a zero timestamp indicates our rsvd page is not initialized */
	xpc_rsvd_page->ts_jiffies = 0;
}

/*
 * Get a copy of a portion of the remote partition's rsvd page.
 *
 * remote_rp points to a buffer that is cacheline aligned for BTE copies and
 * is large enough to contain a copy of their reserved page header and
 * part_nasids mask.
 */
enum xp_retval
xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
		  struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa)
{
	int l;
	enum xp_retval ret;

	/* get the reserved page's physical address */

	*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
	if (*remote_rp_pa == 0)
		return xpNoRsvdPageAddr;

	/* pull over the reserved page header and part_nasids mask */
	ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa,
			       XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes);
	if (ret != xpSuccess)
		return ret;

	if (discovered_nasids != NULL) {
		unsigned long *remote_part_nasids =
		    XPC_RP_PART_NASIDS(remote_rp);

		/* accumulate every nasid the remote partition owns */
		for (l = 0; l < xpc_nasid_mask_nlongs; l++)
			discovered_nasids[l] |= remote_part_nasids[l];
	}

	/* zero timestamp indicates the reserved page has not been setup */
	if (remote_rp->ts_jiffies == 0)
		return xpRsvdPageNotSet;

	if (XPC_VERSION_MAJOR(remote_rp->version) !=
	    XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
		return xpBadVersion;
	}

	/* check that both remote and local partids are valid for each side */
	if (remote_rp->SAL_partid < 0 ||
	    remote_rp->SAL_partid >= xp_max_npartitions ||
	    remote_rp->max_npartitions <= xp_partition_id) {
		return xpInvalidPartid;
	}

	if (remote_rp->SAL_partid == xp_partition_id)
		return xpLocalPartid;

	return xpSuccess;
}

/*
 * See if the other side has responded to a partition deactivate request
 * from us. Though we requested the remote partition to deactivate with regard
 * to us, we really only need to wait for the other side to disengage from us.
 */
/*
 * Returns non-zero once the remote partition has disengaged from us
 * (or has been assumed dead after the disengage timeout expired).
 */
int
xpc_partition_disengaged(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	int disengaged;

	disengaged = !xpc_arch_ops.partition_engaged(partid);
	if (part->disengage_timeout) {
		if (!disengaged) {
			if (time_is_after_jiffies(part->disengage_timeout)) {
				/* timelimit hasn't been reached yet */
				return 0;
			}

			/*
			 * Other side hasn't responded to our deactivate
			 * request in a timely fashion, so assume it's dead.
			 */

			dev_info(xpc_part, "deactivate request to remote "
				 "partition %d timed out\n", partid);
			xpc_disengage_timedout = 1;
			xpc_arch_ops.assume_partition_disengaged(partid);
			disengaged = 1;
		}
		part->disengage_timeout = 0;

		/* cancel the timer function, provided it's not us */
		if (!in_interrupt())
			del_singleshot_timer_sync(&part->disengage_timer);

		DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
			part->act_state != XPC_P_AS_INACTIVE);
		if (part->act_state != XPC_P_AS_INACTIVE)
			xpc_wakeup_channel_mgr(part);

		xpc_arch_ops.cancel_partition_deactivation_request(part);
	}
	return disengaged;
}

/*
 * Mark specified partition as active.
 */
enum xp_retval
xpc_mark_partition_active(struct xpc_partition *part)
{
	unsigned long irq_flags;
	enum xp_retval ret;

	dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));

	spin_lock_irqsave(&part->act_lock, irq_flags);
	if (part->act_state == XPC_P_AS_ACTIVATING) {
		part->act_state = XPC_P_AS_ACTIVE;
		ret = xpSuccess;
	} else {
		/* a deactivation raced us; report why it was requested */
		DBUG_ON(part->reason == xpSuccess);
		ret = part->reason;
	}
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	return ret;
}

/*
 * Start the process of deactivating the specified partition.
 */
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
			 enum xp_retval reason)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_AS_INACTIVE) {
		XPC_SET_REASON(part, reason, line);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		if (reason == xpReactivating) {
			/* we interrupt ourselves to reactivate partition */
			xpc_arch_ops.request_partition_reactivation(part);
		}
		return;
	}
	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* already deactivating; only upgrade the recorded reason */
		if ((part->reason == xpUnloading && reason != xpUnloading) ||
		    reason == xpReactivating) {
			XPC_SET_REASON(part, reason, line);
		}
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		return;
	}

	part->act_state = XPC_P_AS_DEACTIVATING;
	XPC_SET_REASON(part, reason, line);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	/* ask remote partition to deactivate with regard to us */
	xpc_arch_ops.request_partition_deactivation(part);

	/* set a timelimit on the disengage phase of the deactivation request */
	part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
	part->disengage_timer.expires = part->disengage_timeout;
	add_timer(&part->disengage_timer);

	dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
		XPC_PARTID(part), reason);

	xpc_partition_going_down(part, reason);
}

/*
 * Mark specified partition as inactive.
 */
void
xpc_mark_partition_inactive(struct xpc_partition *part)
{
	unsigned long irq_flags;

	dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
		XPC_PARTID(part));

	spin_lock_irqsave(&part->act_lock, irq_flags);
	part->act_state = XPC_P_AS_INACTIVE;
	spin_unlock_irqrestore(&part->act_lock, irq_flags);
	part->remote_rp_pa = 0;
}

/*
 * SAL has provided a partition and machine mask.  The partition mask
 * contains a bit for each even nasid in our partition.  The machine
 * mask contains a bit for each even nasid in the entire machine.
 *
 * Using those two bit arrays, we can determine which nasids are
 * known in the machine.
Each should also have a reserved page
 * initialized if they are available for partitioning.
 */
void
xpc_discovery(void)
{
	void *remote_rp_base;
	struct xpc_rsvd_page *remote_rp;
	unsigned long remote_rp_pa;
	int region;
	int region_size;
	int max_regions;
	int nasid;
	struct xpc_rsvd_page *rp;
	unsigned long *discovered_nasids;
	enum xp_retval ret;

	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
						  xpc_nasid_mask_nbytes,
						  GFP_KERNEL, &remote_rp_base);
	if (remote_rp == NULL)
		return;

	discovered_nasids = kzalloc(sizeof(long) * xpc_nasid_mask_nlongs,
				    GFP_KERNEL);
	if (discovered_nasids == NULL) {
		kfree(remote_rp_base);
		return;
	}

	rp = (struct xpc_rsvd_page *)xpc_rsvd_page;

	/*
	 * The term 'region' in this context refers to the minimum number of
	 * nodes that can comprise an access protection grouping. The access
	 * protection is in regards to memory, IOI and IPI.
	 */
	region_size = xp_region_size;

	if (is_uv())
		max_regions = 256;
	else {
		max_regions = 64;

		/* each smaller region size doubles the region count */
		switch (region_size) {
		case 128:
			max_regions *= 2;
			/* fallthrough */
		case 64:
			max_regions *= 2;
			/* fallthrough */
		case 32:
			max_regions *= 2;
			region_size = 16;
			DBUG_ON(!is_shub2());
		}
	}

	for (region = 0; region < max_regions; region++) {

		if (xpc_exiting)
			break;

		dev_dbg(xpc_part, "searching region %d\n", region);

		/* nasids are even-numbered, hence the stride of 2 */
		for (nasid = (region * region_size * 2);
		     nasid < ((region + 1) * region_size * 2); nasid += 2) {

			if (xpc_exiting)
				break;

			dev_dbg(xpc_part, "checking nasid %d\n", nasid);

			if (test_bit(nasid / 2, xpc_part_nasids)) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
					"part of the local partition; skipping "
					"region\n", nasid);
				break;
			}

			if (!(test_bit(nasid / 2, xpc_mach_nasids))) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d was "
					"not on Numa-Link network at reset\n",
					nasid);
				continue;
			}

			if (test_bit(nasid / 2, discovered_nasids)) {
				dev_dbg(xpc_part, "Nasid %d is part of a "
					"partition which was previously "
					"discovered\n", nasid);
				continue;
			}

			/* pull over the rsvd page header & part_nasids mask */

			ret = xpc_get_remote_rp(nasid, discovered_nasids,
						remote_rp, &remote_rp_pa);
			if (ret != xpSuccess) {
				dev_dbg(xpc_part, "unable to get reserved page "
					"from nasid %d, reason=%d\n", nasid,
					ret);

				if (ret == xpLocalPartid)
					break;

				continue;
			}

			xpc_arch_ops.request_partition_activation(remote_rp,
							 remote_rp_pa, nasid);
		}
	}

	kfree(discovered_nasids);
	kfree(remote_rp_base);
}

/*
 * Given a partid, get the nasids owned by that partition from the
 * remote partition's reserved page.
 */
enum xp_retval
xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
{
	struct xpc_partition *part;
	unsigned long part_nasid_pa;

	part = &xpc_partitions[partid];
	if (part->remote_rp_pa == 0)
		return xpPartitionDown;

	memset(nasid_mask, 0, xpc_nasid_mask_nbytes);

	part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa);

	return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa,
				xpc_nasid_mask_nbytes);
}
gpl-2.0
motley-git/Kernel-GT-P73xx-v2
drivers/char/n_tty.c
249
52908
/* * n_tty.c --- implements the N_TTY line discipline. * * This code used to be in tty_io.c, but things are getting hairy * enough that it made sense to split things off. (The N_TTY * processing has changed so much that it's hardly recognizable, * anyway...) * * Note that the open routine for N_TTY is guaranteed never to return * an error. This is because Linux will fall back to setting a line * to N_TTY if it can not switch to any other line discipline. * * Written by Theodore Ts'o, Copyright 1994. * * This file also contains code originally written by Linus Torvalds, * Copyright 1991, 1992, 1993, and by Julian Cowley, Copyright 1994. * * This file may be redistributed under the terms of the GNU General Public * License. * * Reduced memory usage for older ARM systems - Russell King. * * 2000/01/20 Fixed SMP locking on put_tty_queue using bits of * the patch by Andrew J. Kroll <ag784@freenet.buffalo.edu> * who actually finally proved there really was a race. * * 2002/03/18 Implemented n_tty_wakeup to send SIGIO POLL_OUTs to * waiting writing processes-Sapan Bhatia <sapan@corewars.org>. * Also fixed a bug in BLOCKING mode where n_tty_write returns * EAGAIN */ #include <linux/types.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/fcntl.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/timer.h> #include <linux/ctype.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/bitops.h> #include <linux/audit.h> #include <linux/file.h> #include <linux/uaccess.h> #include <linux/module.h> #include <asm/system.h> /* number of characters left in xmit buffer before select has we have room */ #define WAKEUP_CHARS 256 /* * This defines the low- and high-watermarks for throttling and * unthrottling the TTY driver. These watermarks are used for * controlling the space in the read buffer. 
 */
#define TTY_THRESHOLD_THROTTLE		128 /* now based on remaining room */
#define TTY_THRESHOLD_UNTHROTTLE	128

/*
 * Special byte codes used in the echo buffer to represent operations
 * or special handling of characters.  Bytes in the echo buffer that
 * are not part of such special blocks are treated as normal character
 * codes.
 */
#define ECHO_OP_START 0xff
#define ECHO_OP_MOVE_BACK_COL 0x80
#define ECHO_OP_SET_CANON_COL 0x81
#define ECHO_OP_ERASE_TAB 0x82

/* Copy one byte to userspace, recording it for audit first. */
static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
			       unsigned char __user *ptr)
{
	tty_audit_add_data(tty, &x, 1);
	return put_user(x, ptr);
}

/**
 *	n_tty_set_room	-	receive space
 *	@tty: terminal
 *
 *	Called by the driver to find out how much data it is
 *	permitted to feed to the line discipline without any being lost
 *	and thus to manage flow control. Not serialized. Answers for the
 *	"instant".
 */

static void n_tty_set_room(struct tty_struct *tty)
{
	/* tty->read_cnt is not read locked ? */
	int left = N_TTY_BUF_SIZE - tty->read_cnt - 1;

	/*
	 * If we are doing input canonicalization, and there are no
	 * pending newlines, let characters through without limit, so
	 * that erase characters will be handled.  Other excess
	 * characters will be beeped.
	 */
	if (left <= 0)
		left = tty->icanon && !tty->canon_data;
	tty->receive_room = left;
}

/* Append one byte to the read queue; caller must hold read_lock. */
static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
{
	if (tty->read_cnt < N_TTY_BUF_SIZE) {
		tty->read_buf[tty->read_head] = c;
		tty->read_head = (tty->read_head + 1) & (N_TTY_BUF_SIZE-1);
		tty->read_cnt++;
	}
}

/**
 *	put_tty_queue		-	add character to tty
 *	@c: character
 *	@tty: tty device
 *
 *	Add a character to the tty read_buf queue. This is done under the
 *	read_lock to serialize character addition and also to protect us
 *	against parallel reads or flushes
 */

static void put_tty_queue(unsigned char c, struct tty_struct *tty)
{
	unsigned long flags;
	/*
	 *	The problem of stomping on the buffers ends here.
	 *	Why didn't anyone see this one coming? --AJK
	 */
	spin_lock_irqsave(&tty->read_lock, flags);
	put_tty_queue_nolock(c, tty);
	spin_unlock_irqrestore(&tty->read_lock, flags);
}

/**
 *	check_unthrottle	-	allow new receive data
 *	@tty: tty device
 *
 *	Check whether to call the driver unthrottle functions
 *
 *	Can sleep, may be called under the atomic_read_lock mutex but
 *	this is not guaranteed.
 */
static void check_unthrottle(struct tty_struct *tty)
{
	if (tty->count)
		tty_unthrottle(tty);
}

/**
 *	reset_buffer_flags	-	reset buffer state
 *	@tty: terminal to reset
 *
 *	Reset the read buffer counters, clear the flags,
 *	and make sure the driver is unthrottled. Called
 *	from n_tty_open() and n_tty_flush_buffer().
 *
 *	Locking: tty_read_lock for read fields.
 */

static void reset_buffer_flags(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->read_lock, flags);
	tty->read_head = tty->read_tail = tty->read_cnt = 0;
	spin_unlock_irqrestore(&tty->read_lock, flags);

	mutex_lock(&tty->echo_lock);
	tty->echo_pos = tty->echo_cnt = tty->echo_overrun = 0;
	mutex_unlock(&tty->echo_lock);

	tty->canon_head = tty->canon_data = tty->erasing = 0;
	memset(&tty->read_flags, 0, sizeof tty->read_flags);
	n_tty_set_room(tty);
	check_unthrottle(tty);
}

/**
 *	n_tty_flush_buffer	-	clean input queue
 *	@tty:	terminal device
 *
 *	Flush the input buffer. Called when the line discipline is
 *	being closed, when the tty layer wants the buffer flushed (eg
 *	at hangup) or when the N_TTY line discipline internally has to
 *	clean the pending queue (for example some signals).
 *
 *	Locking: ctrl_lock, read_lock.
 */

static void n_tty_flush_buffer(struct tty_struct *tty)
{
	unsigned long flags;
	/* clear everything and unthrottle the driver */
	reset_buffer_flags(tty);

	if (!tty->link)
		return;

	/* tell a packet-mode pty master that its slave's input was flushed */
	spin_lock_irqsave(&tty->ctrl_lock, flags);
	if (tty->link->packet) {
		tty->ctrl_status |= TIOCPKT_FLUSHREAD;
		wake_up_interruptible(&tty->link->read_wait);
	}
	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
}

/**
 *	n_tty_chars_in_buffer	-	report available bytes
 *	@tty: tty device
 *
 *	Report the number of characters buffered to be delivered to user
 *	at this instant in time.
 *
 *	Locking: read_lock
 */

static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
{
	unsigned long flags;
	ssize_t n = 0;

	spin_lock_irqsave(&tty->read_lock, flags);
	if (!tty->icanon) {
		n = tty->read_cnt;
	} else if (tty->canon_data) {
		/*
		 * In canonical mode only complete lines count; handle the
		 * case where the line wraps around the circular buffer.
		 */
		n = (tty->canon_head > tty->read_tail) ?
			tty->canon_head - tty->read_tail :
			tty->canon_head + (N_TTY_BUF_SIZE - tty->read_tail);
	}
	spin_unlock_irqrestore(&tty->read_lock, flags);
	return n;
}

/**
 *	is_utf8_continuation	-	utf8 multibyte check
 *	@c: byte to check
 *
 *	Returns true if the utf8 character 'c' is a multibyte continuation
 *	character. We use this to correctly compute the on screen size
 *	of the character when printing
 */

static inline int is_utf8_continuation(unsigned char c)
{
	return (c & 0xc0) == 0x80;
}

/**
 *	is_continuation		-	multibyte check
 *	@c: byte to check
 *	@tty: terminal device
 *
 *	Returns true if the utf8 character 'c' is a multibyte continuation
 *	character and the terminal is in unicode mode.
 */

static inline int is_continuation(unsigned char c, struct tty_struct *tty)
{
	return I_IUTF8(tty) && is_utf8_continuation(c);
}

/**
 *	do_output_char			-	output one character
 *	@c: character (or partial unicode symbol)
 *	@tty: terminal device
 *	@space: space available in tty driver write buffer
 *
 *	This is a helper function that handles one output character
 *	(including special characters like TAB, CR, LF, etc.),
 *	doing OPOST processing and putting the results in the
 *	tty driver's write buffer.
* * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY * and NLDLY. They simply aren't relevant in the world today. * If you ever need them, add them here. * * Returns the number of bytes of buffer space used or -1 if * no space left. * * Locking: should be called under the output_lock to protect * the column state and space left in the buffer */ static int do_output_char(unsigned char c, struct tty_struct *tty, int space) { int spaces; if (!space) return -1; switch (c) { case '\n': if (O_ONLRET(tty)) tty->column = 0; if (O_ONLCR(tty)) { if (space < 2) return -1; tty->canon_column = tty->column = 0; tty->ops->write(tty, "\r\n", 2); return 2; } tty->canon_column = tty->column; break; case '\r': if (O_ONOCR(tty) && tty->column == 0) return 0; if (O_OCRNL(tty)) { c = '\n'; if (O_ONLRET(tty)) tty->canon_column = tty->column = 0; break; } tty->canon_column = tty->column = 0; break; case '\t': spaces = 8 - (tty->column & 7); if (O_TABDLY(tty) == XTABS) { if (space < spaces) return -1; tty->column += spaces; tty->ops->write(tty, " ", spaces); return spaces; } tty->column += spaces; break; case '\b': if (tty->column > 0) tty->column--; break; default: if (!iscntrl(c)) { if (O_OLCUC(tty)) c = toupper(c); if (!is_continuation(c, tty)) tty->column++; } break; } tty_put_char(tty, c); return 1; } /** * process_output - output post processor * @c: character (or partial unicode symbol) * @tty: terminal device * * Output one character with OPOST processing. * Returns -1 when the output device is full and the character * must be retried. 
 *
 *	Locking: output_lock to protect column state and space left
 *		 (also, this is called from n_tty_write under the
 *		  tty layer write lock)
 */

static int process_output(unsigned char c, struct tty_struct *tty)
{
	int	space, retval;

	mutex_lock(&tty->output_lock);

	space = tty_write_room(tty);
	retval = do_output_char(c, tty, space);

	mutex_unlock(&tty->output_lock);

	if (retval < 0)
		return -1;
	else
		return 0;
}

/**
 *	process_output_block		-	block post processor
 *	@tty: terminal device
 *	@buf: character buffer
 *	@nr: number of bytes to output
 *
 *	Output a block of characters with OPOST processing.
 *	Returns the number of characters output.
 *
 *	This path is used to speed up block console writes, among other
 *	things when processing blocks of output data. It handles only
 *	the simple cases normally found and helps to generate blocks of
 *	symbols for the console driver and thus improve performance.
 *
 *	Locking: output_lock to protect column state and space left
 *		 (also, this is called from n_tty_write under the
 *		  tty layer write lock)
 */

static ssize_t process_output_block(struct tty_struct *tty,
				    const unsigned char *buf, unsigned int nr)
{
	int	space;
	int	i;
	const unsigned char *cp;

	mutex_lock(&tty->output_lock);

	space = tty_write_room(tty);
	if (!space) {
		mutex_unlock(&tty->output_lock);
		return 0;
	}
	if (nr > space)
		nr = space;

	/*
	 * Scan forward while only column bookkeeping is needed; any
	 * character that needs real OPOST translation ends the block
	 * and is left for the caller to push through do_output_char().
	 */
	for (i = 0, cp = buf; i < nr; i++, cp++) {
		unsigned char c = *cp;

		switch (c) {
		case '\n':
			if (O_ONLRET(tty))
				tty->column = 0;
			if (O_ONLCR(tty))
				goto break_out;
			tty->canon_column = tty->column;
			break;
		case '\r':
			if (O_ONOCR(tty) && tty->column == 0)
				goto break_out;
			if (O_OCRNL(tty))
				goto break_out;
			tty->canon_column = tty->column = 0;
			break;
		case '\t':
			goto break_out;
		case '\b':
			if (tty->column > 0)
				tty->column--;
			break;
		default:
			if (!iscntrl(c)) {
				if (O_OLCUC(tty))
					goto break_out;
				if (!is_continuation(c, tty))
					tty->column++;
			}
			break;
		}
	}
break_out:
	i = tty->ops->write(tty, buf, i);

	mutex_unlock(&tty->output_lock);

	return i;
}

/**
 *	process_echoes	-	write
 pending echo characters
 *	@tty: terminal device
 *
 *	Write previously buffered echo (and other ldisc-generated)
 *	characters to the tty.
 *
 *	Characters generated by the ldisc (including echoes) need to
 *	be buffered because the driver's write buffer can fill during
 *	heavy program output.  Echoing straight to the driver will
 *	often fail under these conditions, causing lost characters and
 *	resulting mismatches of ldisc state information.
 *
 *	Since the ldisc state must represent the characters actually sent
 *	to the driver at the time of the write, operations like certain
 *	changes in column state are also saved in the buffer and executed
 *	here.
 *
 *	A circular fifo buffer is used so that the most recent characters
 *	are prioritized.  Also, when control characters are echoed with a
 *	prefixed "^", the pair is treated atomically and thus not separated.
 *
 *	Locking: output_lock to protect column state and space left,
 *		 echo_lock to protect the echo buffer
 */

static void process_echoes(struct tty_struct *tty)
{
	int	space, nr;
	unsigned char c;
	unsigned char *cp, *buf_end;

	if (!tty->echo_cnt)
		return;

	mutex_lock(&tty->output_lock);
	mutex_lock(&tty->echo_lock);

	space = tty_write_room(tty);

	buf_end = tty->echo_buf + N_TTY_BUF_SIZE;
	cp = tty->echo_buf + tty->echo_pos;
	nr = tty->echo_cnt;
	while (nr > 0) {
		c = *cp;
		if (c == ECHO_OP_START) {
			unsigned char op;
			unsigned char *opp;
			int no_space_left = 0;

			/*
			 * If the buffer byte is the start of a multi-byte
			 * operation, get the next byte, which is either the
			 * op code or a control character value.
			 */
			opp = cp + 1;
			if (opp == buf_end)
				opp -= N_TTY_BUF_SIZE;
			op = *opp;

			switch (op) {
				unsigned int num_chars, num_bs;

			case ECHO_OP_ERASE_TAB:
				/* 3-byte op: START, ERASE_TAB, column count */
				if (++opp == buf_end)
					opp -= N_TTY_BUF_SIZE;
				num_chars = *opp;

				/*
				 * Determine how many columns to go back
				 * in order to erase the tab.
				 * This depends on the number of columns
				 * used by other characters within the tab
				 * area.  If this (modulo 8) count is from
				 * the start of input rather than from a
				 * previous tab, we offset by canon column.
				 * Otherwise, tab spacing is normal.
				 */
				if (!(num_chars & 0x80))
					num_chars += tty->canon_column;
				num_bs = 8 - (num_chars & 7);

				if (num_bs > space) {
					no_space_left = 1;
					break;
				}
				space -= num_bs;
				while (num_bs--) {
					tty_put_char(tty, '\b');
					if (tty->column > 0)
						tty->column--;
				}
				cp += 3;
				nr -= 3;
				break;

			case ECHO_OP_SET_CANON_COL:
				tty->canon_column = tty->column;
				cp += 2;
				nr -= 2;
				break;

			case ECHO_OP_MOVE_BACK_COL:
				if (tty->column > 0)
					tty->column--;
				cp += 2;
				nr -= 2;
				break;

			case ECHO_OP_START:
				/* This is an escaped echo op start code */
				if (!space) {
					no_space_left = 1;
					break;
				}
				tty_put_char(tty, ECHO_OP_START);
				tty->column++;
				space--;
				cp += 2;
				nr -= 2;
				break;

			default:
				/*
				 * If the op is not a special byte code,
				 * it is a ctrl char tagged to be echoed
				 * as "^X" (where X is the letter
				 * representing the control char).
				 * Note that we must ensure there is
				 * enough space for the whole ctrl pair.
				 *
				 */
				if (space < 2) {
					no_space_left = 1;
					break;
				}
				tty_put_char(tty, '^');
				tty_put_char(tty, op ^ 0100);
				tty->column += 2;
				space -= 2;
				cp += 2;
				nr -= 2;
			}

			if (no_space_left)
				break;
		} else {
			if (O_OPOST(tty) &&
			    !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
				int retval = do_output_char(c, tty, space);
				if (retval < 0)
					break;
				space -= retval;
			} else {
				if (!space)
					break;
				tty_put_char(tty, c);
				space -= 1;
			}
			cp += 1;
			nr -= 1;
		}

		/* When end of circular buffer reached, wrap around */
		if (cp >= buf_end)
			cp -= N_TTY_BUF_SIZE;
	}

	if (nr == 0) {
		/* everything was flushed; reset the fifo */
		tty->echo_pos = 0;
		tty->echo_cnt = 0;
		tty->echo_overrun = 0;
	} else {
		/* driver filled up; keep the unwritten tail for next time */
		int num_processed = tty->echo_cnt - nr;
		tty->echo_pos += num_processed;
		tty->echo_pos &= N_TTY_BUF_SIZE - 1;
		tty->echo_cnt = nr;
		if (num_processed > 0)
			tty->echo_overrun = 0;
	}

	mutex_unlock(&tty->echo_lock);
	mutex_unlock(&tty->output_lock);

	if (tty->ops->flush_chars)
		tty->ops->flush_chars(tty);
}

/**
 *	add_echo_byte	-	add a byte to the echo buffer
 *	@c: unicode byte to echo
 *	@tty: terminal device
 *
 *	Add a character or operation byte to the echo buffer.
 *
 *	Should be called under the echo lock to protect the echo buffer.
 */

static void add_echo_byte(unsigned char c, struct tty_struct *tty)
{
	int	new_byte_pos;

	if (tty->echo_cnt == N_TTY_BUF_SIZE) {
		/* Circular buffer is already at capacity */
		new_byte_pos = tty->echo_pos;

		/*
		 * Since the buffer start position needs to be advanced,
		 * be sure to step by a whole operation byte group.
		 */
		if (tty->echo_buf[tty->echo_pos] == ECHO_OP_START) {
			if (tty->echo_buf[(tty->echo_pos + 1) &
					  (N_TTY_BUF_SIZE - 1)] ==
			    ECHO_OP_ERASE_TAB) {
				tty->echo_pos += 3;
				tty->echo_cnt -= 2;
			} else {
				tty->echo_pos += 2;
				tty->echo_cnt -= 1;
			}
		} else {
			tty->echo_pos++;
		}
		tty->echo_pos &= N_TTY_BUF_SIZE - 1;

		/* record that the oldest echoes were dropped */
		tty->echo_overrun = 1;
	} else {
		new_byte_pos = tty->echo_pos + tty->echo_cnt;
		new_byte_pos &= N_TTY_BUF_SIZE - 1;
		tty->echo_cnt++;
	}

	tty->echo_buf[new_byte_pos] = c;
}

/**
 *	echo_move_back_col	-	add operation to move back a column
 *	@tty: terminal device
 *
 *	Add an operation to the echo buffer to move back one column.
 *
 *	Locking: echo_lock to protect the echo buffer
 */

static void echo_move_back_col(struct tty_struct *tty)
{
	mutex_lock(&tty->echo_lock);
	add_echo_byte(ECHO_OP_START, tty);
	add_echo_byte(ECHO_OP_MOVE_BACK_COL, tty);
	mutex_unlock(&tty->echo_lock);
}

/**
 *	echo_set_canon_col	-	add operation to set the canon column
 *	@tty: terminal device
 *
 *	Add an operation to the echo buffer to set the canon column
 *	to the current column.
 *
 *	Locking: echo_lock to protect the echo buffer
 */

static void echo_set_canon_col(struct tty_struct *tty)
{
	mutex_lock(&tty->echo_lock);
	add_echo_byte(ECHO_OP_START, tty);
	add_echo_byte(ECHO_OP_SET_CANON_COL, tty);
	mutex_unlock(&tty->echo_lock);
}

/**
 *	echo_erase_tab	-	add operation to erase a tab
 *	@num_chars: number of character columns already used
 *	@after_tab: true if num_chars starts after a previous tab
 *	@tty: terminal device
 *
 *	Add an operation to the echo buffer to erase a tab.
 *
 *	Called by the eraser function, which knows how many character
 *	columns have been used since either a previous tab or the start
 *	of input.  This information will be used later, along with
 *	canon column (if applicable), to go back the correct number
 *	of columns.
 *
 *	Locking: echo_lock to protect the echo buffer
 */

static void echo_erase_tab(unsigned int num_chars, int after_tab,
			   struct tty_struct *tty)
{
	mutex_lock(&tty->echo_lock);

	add_echo_byte(ECHO_OP_START, tty);
	add_echo_byte(ECHO_OP_ERASE_TAB, tty);

	/* We only need to know this modulo 8 (tab spacing) */
	num_chars &= 7;

	/* Set the high bit as a flag if num_chars is after a previous tab */
	if (after_tab)
		num_chars |= 0x80;

	add_echo_byte(num_chars, tty);

	mutex_unlock(&tty->echo_lock);
}

/**
 *	echo_char_raw	-	echo a character raw
 *	@c: unicode byte to echo
 *	@tty: terminal device
 *
 *	Echo user input back onto the screen. This must be called only when
 *	L_ECHO(tty) is true. Called from the driver receive_buf path.
 *
 *	This variant does not treat control characters specially.
 *
 *	Locking: echo_lock to protect the echo buffer
 */

static void echo_char_raw(unsigned char c, struct tty_struct *tty)
{
	mutex_lock(&tty->echo_lock);

	/* a literal ECHO_OP_START byte must be escaped by doubling it */
	if (c == ECHO_OP_START) {
		add_echo_byte(ECHO_OP_START, tty);
		add_echo_byte(ECHO_OP_START, tty);
	} else {
		add_echo_byte(c, tty);
	}

	mutex_unlock(&tty->echo_lock);
}

/**
 *	echo_char	-	echo a character
 *	@c: unicode byte to echo
 *	@tty: terminal device
 *
 *	Echo user input back onto the screen. This must be called only when
 *	L_ECHO(tty) is true. Called from the driver receive_buf path.
 *
 *	This variant tags control characters to be echoed as "^X"
 *	(where X is the letter representing the control char).
 *
 * Locking: echo_lock to protect the echo buffer
 */

static void echo_char(unsigned char c, struct tty_struct *tty)
{
	mutex_lock(&tty->echo_lock);

	if (c == ECHO_OP_START) {
		/* Escape a literal ECHO_OP_START byte by doubling it */
		add_echo_byte(ECHO_OP_START, tty);
		add_echo_byte(ECHO_OP_START, tty);
	} else {
		/* Control chars (except TAB) are echoed as an op sequence */
		if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t')
			add_echo_byte(ECHO_OP_START, tty);
		add_echo_byte(c, tty);
	}

	mutex_unlock(&tty->echo_lock);
}

/**
 * finish_erasing - complete erase
 * @tty: tty doing the erase
 *
 * Terminates an ECHOPRT erase sequence: emits the closing '/' and
 * clears the erasing flag set by eraser().
 */

static inline void finish_erasing(struct tty_struct *tty)
{
	if (tty->erasing) {
		echo_char_raw('/', tty);
		tty->erasing = 0;
	}
}

/**
 * eraser - handle erase function
 * @c: character input
 * @tty: terminal device
 *
 * Perform erase and necessary output when an erase character is
 * present in the stream from the driver layer. Handles the complexities
 * of UTF-8 multibyte symbols.
 *
 * Locking: read_lock for tty buffers
 */

static void eraser(unsigned char c, struct tty_struct *tty)
{
	enum { ERASE, WERASE, KILL } kill_type;
	int head, seen_alnums, cnt;
	unsigned long flags;

	/* FIXME: locking needed ? */
	if (tty->read_head == tty->canon_head) {
		/* process_output('\a', tty); */ /* what do you think? */
		return;
	}
	if (c == ERASE_CHAR(tty))
		kill_type = ERASE;
	else if (c == WERASE_CHAR(tty))
		kill_type = WERASE;
	else {
		if (!L_ECHO(tty)) {
			/* Line kill without echo: just drop the whole
			   uncommitted line from the read buffer. */
			spin_lock_irqsave(&tty->read_lock, flags);
			tty->read_cnt -= ((tty->read_head - tty->canon_head) &
					  (N_TTY_BUF_SIZE - 1));
			tty->read_head = tty->canon_head;
			spin_unlock_irqrestore(&tty->read_lock, flags);
			return;
		}
		if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) {
			spin_lock_irqsave(&tty->read_lock, flags);
			tty->read_cnt -= ((tty->read_head - tty->canon_head) &
					  (N_TTY_BUF_SIZE - 1));
			tty->read_head = tty->canon_head;
			spin_unlock_irqrestore(&tty->read_lock, flags);
			finish_erasing(tty);
			echo_char(KILL_CHAR(tty), tty);
			/* Add a newline if ECHOK is on and ECHOKE is off. */
			if (L_ECHOK(tty))
				echo_char_raw('\n', tty);
			return;
		}
		kill_type = KILL;
	}

	seen_alnums = 0;
	/* FIXME: Locking ?? */
	while (tty->read_head != tty->canon_head) {
		head = tty->read_head;

		/* erase a single possibly multibyte character */
		do {
			head = (head - 1) & (N_TTY_BUF_SIZE-1);
			c = tty->read_buf[head];
		} while (is_continuation(c, tty) && head != tty->canon_head);

		/* do not partially erase */
		if (is_continuation(c, tty))
			break;

		if (kill_type == WERASE) {
			/* Equivalent to BSD's ALTWERASE. */
			if (isalnum(c) || c == '_')
				seen_alnums++;
			else if (seen_alnums)
				break;
		}
		cnt = (tty->read_head - head) & (N_TTY_BUF_SIZE-1);
		spin_lock_irqsave(&tty->read_lock, flags);
		tty->read_head = head;
		tty->read_cnt -= cnt;
		spin_unlock_irqrestore(&tty->read_lock, flags);
		if (L_ECHO(tty)) {
			if (L_ECHOPRT(tty)) {
				if (!tty->erasing) {
					echo_char_raw('\\', tty);
					tty->erasing = 1;
				}
				/* if cnt > 1, output a multi-byte character */
				echo_char(c, tty);
				while (--cnt > 0) {
					head = (head+1) & (N_TTY_BUF_SIZE-1);
					echo_char_raw(tty->read_buf[head],
						      tty);
					echo_move_back_col(tty);
				}
			} else if (kill_type == ERASE && !L_ECHOE(tty)) {
				echo_char(ERASE_CHAR(tty), tty);
			} else if (c == '\t') {
				unsigned int num_chars = 0;
				int after_tab = 0;
				unsigned long tail = tty->read_head;

				/*
				 * Count the columns used for characters
				 * since the start of input or after a
				 * previous tab.
				 * This info is used to go back the correct
				 * number of columns.
				 */
				while (tail != tty->canon_head) {
					tail = (tail-1) & (N_TTY_BUF_SIZE-1);
					c = tty->read_buf[tail];
					if (c == '\t') {
						after_tab = 1;
						break;
					} else if (iscntrl(c)) {
						if (L_ECHOCTL(tty))
							num_chars += 2;
					} else if (!is_continuation(c, tty)) {
						num_chars++;
					}
				}
				echo_erase_tab(num_chars, after_tab, tty);
			} else {
				if (iscntrl(c) && L_ECHOCTL(tty)) {
					echo_char_raw('\b', tty);
					echo_char_raw(' ', tty);
					echo_char_raw('\b', tty);
				}
				if (!iscntrl(c) || L_ECHOCTL(tty)) {
					echo_char_raw('\b', tty);
					echo_char_raw(' ', tty);
					echo_char_raw('\b', tty);
				}
			}
		}
		if (kill_type == ERASE)
			break;
	}
	if (tty->read_head == tty->canon_head && L_ECHO(tty))
		finish_erasing(tty);
}

/**
 * isig - handle the ISIG option
 * @sig: signal
 * @tty: terminal
 * @flush: force flush
 *
 * Called when a signal is being sent due to terminal input. This
 * may cause terminal flushing to take place according to the termios
 * settings and character used. Called from the driver receive_buf
 * path so serialized.
 *
 * Locking: ctrl_lock, read_lock (both via flush buffer)
 */

static inline void isig(int sig, struct tty_struct *tty, int flush)
{
	if (tty->pgrp)
		kill_pgrp(tty->pgrp, sig, 1);
	if (flush || !L_NOFLSH(tty)) {
		n_tty_flush_buffer(tty);
		tty_driver_flush_buffer(tty);
	}
}

/**
 * n_tty_receive_break - handle break
 * @tty: terminal
 *
 * An RS232 break event has been hit in the incoming bitstream. This
 * can cause a variety of events depending upon the termios settings.
 *
 * Called from the receive_buf path so single threaded.
 */

static inline void n_tty_receive_break(struct tty_struct *tty)
{
	if (I_IGNBRK(tty))
		return;
	if (I_BRKINT(tty)) {
		isig(SIGINT, tty, 1);
		return;
	}
	if (I_PARMRK(tty)) {
		/* PARMRK marks the break as the sequence \377 \0 \0 */
		put_tty_queue('\377', tty);
		put_tty_queue('\0', tty);
	}
	put_tty_queue('\0', tty);
	wake_up_interruptible(&tty->read_wait);
}

/**
 * n_tty_receive_overrun - handle overrun reporting
 * @tty: terminal
 *
 * Data arrived faster than we could process it. While the tty
 * driver has flagged this the bits that were missed are gone
 * forever.
 *
 * Called from the receive_buf path so single threaded. Does not
 * need locking as num_overrun and overrun_time are function
 * private.
 */

static inline void n_tty_receive_overrun(struct tty_struct *tty)
{
	char buf[64];

	tty->num_overrun++;
	/* Rate-limit the warning to at most one per second of jiffies;
	   the time_after() check also copes with jiffies wrap. */
	if (time_before(tty->overrun_time, jiffies - HZ) ||
	    time_after(tty->overrun_time, jiffies)) {
		printk(KERN_WARNING "%s: %d input overrun(s)\n",
		       tty_name(tty, buf),
		       tty->num_overrun);
		tty->overrun_time = jiffies;
		tty->num_overrun = 0;
	}
}

/**
 * n_tty_receive_parity_error - error notifier
 * @tty: terminal device
 * @c: character
 *
 * Process a parity error and queue the right data to indicate
 * the error case if necessary. Locking as per n_tty_receive_buf.
 */
static inline void n_tty_receive_parity_error(struct tty_struct *tty,
					      unsigned char c)
{
	if (I_IGNPAR(tty))
		return;
	if (I_PARMRK(tty)) {
		/* Mark the bad character with the \377 \0 prefix */
		put_tty_queue('\377', tty);
		put_tty_queue('\0', tty);
		put_tty_queue(c, tty);
	} else	if (I_INPCK(tty))
		put_tty_queue('\0', tty);
	else
		put_tty_queue(c, tty);
	wake_up_interruptible(&tty->read_wait);
}

/**
 * n_tty_receive_char - perform processing
 * @tty: terminal device
 * @c: character
 *
 * Process an individual character of input received from the driver.
 * This is serialized with respect to itself by the rules for the
 * driver above.
 */

static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
{
	unsigned long flags;
	int parmrk;

	if (tty->raw) {
		put_tty_queue(c, tty);
		return;
	}

	if (I_ISTRIP(tty))
		c &= 0x7f;
	if (I_IUCLC(tty) && L_IEXTEN(tty))
		c = tolower(c);

	if (L_EXTPROC(tty)) {
		put_tty_queue(c, tty);
		return;
	}

	/* IXANY: any character (other than the flow/signal chars
	   themselves) restarts stopped output. */
	if (tty->stopped && !tty->flow_stopped && I_IXON(tty) &&
	    I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty) &&
	    c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
	    c != SUSP_CHAR(tty)) {
		start_tty(tty);
		process_echoes(tty);
	}

	if (tty->closing) {
		if (I_IXON(tty)) {
			if (c == START_CHAR(tty)) {
				start_tty(tty);
				process_echoes(tty);
			} else if (c == STOP_CHAR(tty))
				stop_tty(tty);
		}
		return;
	}

	/*
	 * If the previous character was LNEXT, or we know that this
	 * character is not one of the characters that we'll have to
	 * handle specially, do shortcut processing to speed things
	 * up.
	 */
	if (!test_bit(c, tty->process_char_map) || tty->lnext) {
		tty->lnext = 0;
		parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
		if (tty->read_cnt >= (N_TTY_BUF_SIZE - parmrk - 1)) {
			/* beep if no space */
			if (L_ECHO(tty))
				process_output('\a', tty);
			return;
		}
		if (L_ECHO(tty)) {
			finish_erasing(tty);
			/* Record the column of first canon char. */
			if (tty->canon_head == tty->read_head)
				echo_set_canon_col(tty);
			echo_char(c, tty);
			process_echoes(tty);
		}
		/* PARMRK doubles a literal \377 so it is not mistaken
		   for an error mark. */
		if (parmrk)
			put_tty_queue(c, tty);
		put_tty_queue(c, tty);
		return;
	}

	if (I_IXON(tty)) {
		if (c == START_CHAR(tty)) {
			start_tty(tty);
			process_echoes(tty);
			return;
		}
		if (c == STOP_CHAR(tty)) {
			stop_tty(tty);
			return;
		}
	}

	if (L_ISIG(tty)) {
		int signal;
		signal = SIGINT;
		if (c == INTR_CHAR(tty))
			goto send_signal;
		signal = SIGQUIT;
		if (c == QUIT_CHAR(tty))
			goto send_signal;
		signal = SIGTSTP;
		if (c == SUSP_CHAR(tty)) {
send_signal:
			/*
			 * Note that we do not use isig() here because we want
			 * the order to be:
			 * 1) flush, 2) echo, 3) signal
			 */
			if (!L_NOFLSH(tty)) {
				n_tty_flush_buffer(tty);
				tty_driver_flush_buffer(tty);
			}
			if (I_IXON(tty))
				start_tty(tty);
			if (L_ECHO(tty)) {
				echo_char(c, tty);
				process_echoes(tty);
			}
			if (tty->pgrp)
				kill_pgrp(tty->pgrp, signal, 1);
			return;
		}
	}

	if (c == '\r') {
		if (I_IGNCR(tty))
			return;
		if (I_ICRNL(tty))
			c = '\n';
	} else if (c == '\n' && I_INLCR(tty))
		c = '\r';

	if (tty->icanon) {
		if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) ||
		    (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) {
			eraser(c, tty);
			process_echoes(tty);
			return;
		}
		if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) {
			tty->lnext = 1;
			if (L_ECHO(tty)) {
				finish_erasing(tty);
				if (L_ECHOCTL(tty)) {
					/* Show "^" then back up so the
					   quoted char overwrites it */
					echo_char_raw('^', tty);
					echo_char_raw('\b', tty);
					process_echoes(tty);
				}
			}
			return;
		}
		if (c == REPRINT_CHAR(tty) && L_ECHO(tty) &&
		    L_IEXTEN(tty)) {
			unsigned long tail = tty->canon_head;

			finish_erasing(tty);
			echo_char(c, tty);
			echo_char_raw('\n', tty);
			/* Re-echo the whole uncommitted line */
			while (tail != tty->read_head) {
				echo_char(tty->read_buf[tail], tty);
				tail = (tail+1) & (N_TTY_BUF_SIZE-1);
			}
			process_echoes(tty);
			return;
		}
		if (c == '\n') {
			if (tty->read_cnt >= N_TTY_BUF_SIZE) {
				if (L_ECHO(tty))
					process_output('\a', tty);
				return;
			}
			if (L_ECHO(tty) || L_ECHONL(tty)) {
				echo_char_raw('\n', tty);
				process_echoes(tty);
			}
			goto handle_newline;
		}
		if (c == EOF_CHAR(tty)) {
			if (tty->read_cnt >= N_TTY_BUF_SIZE)
				return;
			if (tty->canon_head != tty->read_head)
				set_bit(TTY_PUSH, &tty->flags);
			c = __DISABLED_CHAR;
			goto handle_newline;
		}
		if ((c == EOL_CHAR(tty)) ||
		    (c == EOL2_CHAR(tty) && L_IEXTEN(tty))) {
			parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty))
				 ? 1 : 0;
			if (tty->read_cnt >= (N_TTY_BUF_SIZE - parmrk)) {
				if (L_ECHO(tty))
					process_output('\a', tty);
				return;
			}
			/*
			 * XXX are EOL_CHAR and EOL2_CHAR echoed?!?
			 */
			if (L_ECHO(tty)) {
				/* Record the column of first canon char. */
				if (tty->canon_head == tty->read_head)
					echo_set_canon_col(tty);
				echo_char(c, tty);
				process_echoes(tty);
			}
			/*
			 * XXX does PARMRK doubling happen for
			 * EOL_CHAR and EOL2_CHAR?
			 */
			if (parmrk)
				put_tty_queue(c, tty);

handle_newline:
			/* Commit the line: flag the terminator position and
			   advance canon_head under the read lock. */
			spin_lock_irqsave(&tty->read_lock, flags);
			set_bit(tty->read_head, tty->read_flags);
			put_tty_queue_nolock(c, tty);
			tty->canon_head = tty->read_head;
			tty->canon_data++;
			spin_unlock_irqrestore(&tty->read_lock, flags);
			kill_fasync(&tty->fasync, SIGIO, POLL_IN);
			if (waitqueue_active(&tty->read_wait))
				wake_up_interruptible(&tty->read_wait);
			return;
		}
	}

	parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
	if (tty->read_cnt >= (N_TTY_BUF_SIZE - parmrk - 1)) {
		/* beep if no space */
		if (L_ECHO(tty))
			process_output('\a', tty);
		return;
	}
	if (L_ECHO(tty)) {
		finish_erasing(tty);
		if (c == '\n')
			echo_char_raw('\n', tty);
		else {
			/* Record the column of first canon char. */
			if (tty->canon_head == tty->read_head)
				echo_set_canon_col(tty);
			echo_char(c, tty);
		}
		process_echoes(tty);
	}

	if (parmrk)
		put_tty_queue(c, tty);

	put_tty_queue(c, tty);
}

/**
 * n_tty_write_wakeup - asynchronous I/O notifier
 * @tty: tty device
 *
 * Required for the ptys, serial driver etc.
 * since processes
 * that attach themselves to the master and rely on ASYNC
 * IO must be woken up
 */

static void n_tty_write_wakeup(struct tty_struct *tty)
{
	if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags))
		kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
}

/**
 * n_tty_receive_buf - data receive
 * @tty: terminal device
 * @cp: buffer
 * @fp: flag buffer
 * @count: characters
 *
 * Called by the terminal driver when a block of characters has
 * been received. This function must be called from soft contexts
 * not from interrupt context. The driver is responsible for making
 * calls one at a time and in order (or using flush_to_ldisc)
 */

static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			      char *fp, int count)
{
	const unsigned char *p;
	char *f, flags = TTY_NORMAL;
	int	i;
	char	buf[64];
	unsigned long cpuflags;

	if (!tty->read_buf)
		return;

	if (tty->real_raw) {
		spin_lock_irqsave(&tty->read_lock, cpuflags);
		/* Two bounded memcpy passes: the first fills up to the
		   physical end of the ring buffer, the second wraps to
		   the start for whatever remains. */
		i = min(N_TTY_BUF_SIZE - tty->read_cnt,
			N_TTY_BUF_SIZE - tty->read_head);
		i = min(count, i);
		memcpy(tty->read_buf + tty->read_head, cp, i);
		tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
		tty->read_cnt += i;
		cp += i;
		count -= i;

		i = min(N_TTY_BUF_SIZE - tty->read_cnt,
			N_TTY_BUF_SIZE - tty->read_head);
		i = min(count, i);
		memcpy(tty->read_buf + tty->read_head, cp, i);
		tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
		tty->read_cnt += i;
		spin_unlock_irqrestore(&tty->read_lock, cpuflags);
	} else {
		for (i = count, p = cp, f = fp; i; i--, p++) {
			if (f)
				flags = *f++;
			switch (flags) {
			case TTY_NORMAL:
				n_tty_receive_char(tty, *p);
				break;
			case TTY_BREAK:
				n_tty_receive_break(tty);
				break;
			case TTY_PARITY:
			case TTY_FRAME:
				n_tty_receive_parity_error(tty, *p);
				break;
			case TTY_OVERRUN:
				n_tty_receive_overrun(tty);
				break;
			default:
				printk(KERN_ERR "%s: unknown flag %d\n",
				       tty_name(tty, buf), flags);
				break;
			}
		}
		if (tty->ops->flush_chars)
			tty->ops->flush_chars(tty);
	}

	n_tty_set_room(tty);

	if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
	    L_EXTPROC(tty)) {
		kill_fasync(&tty->fasync, SIGIO, POLL_IN);
		if (waitqueue_active(&tty->read_wait))
			wake_up_interruptible(&tty->read_wait);
	}

	/*
	 * Check the remaining room for the input canonicalization
	 * mode. We don't want to throttle the driver if we're in
	 * canonical mode and don't have a newline yet!
	 */
	if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
		tty_throttle(tty);
}

/* Returns non-zero if @sig is currently blocked or ignored by the
 * calling task. Used by job control to decide whether SIGTTIN would
 * actually be delivered. */
int is_ignored(int sig)
{
	return (sigismember(&current->blocked, sig) ||
		current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
}

/**
 * n_tty_set_termios - termios data changed
 * @tty: terminal
 * @old: previous data
 *
 * Called by the tty layer when the user changes termios flags so
 * that the line discipline can plan ahead. This function cannot sleep
 * and is protected from re-entry by the tty layer. The user is
 * guaranteed that this function will not be re-entered or in progress
 * when the ldisc is closed.
 *
 * Locking: Caller holds tty->termios_mutex
 */

static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
	int canon_change = 1;
	BUG_ON(!tty);

	if (old)
		canon_change = (old->c_lflag ^ tty->termios->c_lflag) & ICANON;
	if (canon_change) {
		memset(&tty->read_flags, 0, sizeof tty->read_flags);
		tty->canon_head = tty->read_tail;
		tty->canon_data = 0;
		tty->erasing = 0;
	}

	/* Leaving canonical mode releases any reader blocked on a line */
	if (canon_change && !L_ICANON(tty) && tty->read_cnt)
		wake_up_interruptible(&tty->read_wait);

	tty->icanon = (L_ICANON(tty) != 0);
	if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
		tty->raw = 1;
		tty->real_raw = 1;
		n_tty_set_room(tty);
		return;
	}
	if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
	    I_ICRNL(tty) || I_INLCR(tty) || L_ICANON(tty) ||
	    I_IXON(tty) || L_ISIG(tty) || L_ECHO(tty) ||
	    I_PARMRK(tty)) {
		/* Build the bitmap of characters that need the slow path
		   in n_tty_receive_char(). */
		memset(tty->process_char_map, 0, 256/8);

		if (I_IGNCR(tty) || I_ICRNL(tty))
			set_bit('\r', tty->process_char_map);
		if (I_INLCR(tty))
			set_bit('\n', tty->process_char_map);

		if (L_ICANON(tty)) {
			set_bit(ERASE_CHAR(tty), tty->process_char_map);
			set_bit(KILL_CHAR(tty), tty->process_char_map);
			set_bit(EOF_CHAR(tty), tty->process_char_map);
			set_bit('\n', tty->process_char_map);
			set_bit(EOL_CHAR(tty), tty->process_char_map);
			if (L_IEXTEN(tty)) {
				set_bit(WERASE_CHAR(tty),
					tty->process_char_map);
				set_bit(LNEXT_CHAR(tty),
					tty->process_char_map);
				set_bit(EOL2_CHAR(tty),
					tty->process_char_map);
				if (L_ECHO(tty))
					set_bit(REPRINT_CHAR(tty),
						tty->process_char_map);
			}
		}
		if (I_IXON(tty)) {
			set_bit(START_CHAR(tty), tty->process_char_map);
			set_bit(STOP_CHAR(tty), tty->process_char_map);
		}
		if (L_ISIG(tty)) {
			set_bit(INTR_CHAR(tty), tty->process_char_map);
			set_bit(QUIT_CHAR(tty), tty->process_char_map);
			set_bit(SUSP_CHAR(tty), tty->process_char_map);
		}
		clear_bit(__DISABLED_CHAR, tty->process_char_map);
		tty->raw = 0;
		tty->real_raw = 0;
	} else {
		tty->raw = 1;
		if ((I_IGNBRK(tty) || (!I_BRKINT(tty) && !I_PARMRK(tty))) &&
		    (I_IGNPAR(tty) || !I_INPCK(tty)) &&
		    (tty->driver->flags & TTY_DRIVER_REAL_RAW))
			tty->real_raw = 1;
		else
			tty->real_raw = 0;
	}
	n_tty_set_room(tty);
	/* The termios change makes the tty ready for I/O */
	wake_up_interruptible(&tty->write_wait);
	wake_up_interruptible(&tty->read_wait);
}

/**
 * n_tty_close - close the ldisc for this tty
 * @tty: device
 *
 * Called from the terminal layer when this line discipline is
 * being shut down, either because of a close or because of a
 * discipline change. The function will not be called while other
 * ldisc methods are in progress.
 */

static void n_tty_close(struct tty_struct *tty)
{
	n_tty_flush_buffer(tty);
	if (tty->read_buf) {
		kfree(tty->read_buf);
		tty->read_buf = NULL;
	}
	if (tty->echo_buf) {
		kfree(tty->echo_buf);
		tty->echo_buf = NULL;
	}
}

/**
 * n_tty_open - open an ldisc
 * @tty: terminal to open
 *
 * Called when this line discipline is being attached to the
 * terminal device. Can sleep. Called serialized so that no
 * other events will occur in parallel. No further open will occur
 * until a close.
 */

static int n_tty_open(struct tty_struct *tty)
{
	if (!tty)
		return -EINVAL;

	/* These are ugly. Currently a malloc failure here can panic */
	if (!tty->read_buf) {
		tty->read_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
		if (!tty->read_buf)
			return -ENOMEM;
	}
	if (!tty->echo_buf) {
		tty->echo_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
		if (!tty->echo_buf)
			return -ENOMEM;
	}
	reset_buffer_flags(tty);
	tty->column = 0;
	n_tty_set_termios(tty, NULL);
	tty->minimum_to_wake = 1;
	tty->closing = 0;

	return 0;
}

/* Returns non-zero when a read of @amt characters (or a complete line
   in canonical mode) can be satisfied without blocking. */
static inline int input_available_p(struct tty_struct *tty, int amt)
{
	tty_flush_to_ldisc(tty);
	if (tty->icanon && !L_EXTPROC(tty)) {
		if (tty->canon_data)
			return 1;
	} else if (tty->read_cnt >= (amt ? amt : 1))
		return 1;

	return 0;
}

/**
 * copy_from_read_buf - copy read data directly
 * @tty: terminal device
 * @b: user data
 * @nr: size of data
 *
 * Helper function to speed up n_tty_read. It is only called when
 * ICANON is off; it copies characters straight from the tty queue to
 * user space directly. It can be profitably called twice; once to
 * drain the space from the tail pointer to the (physical) end of the
 * buffer, and once to drain the space from the (physical) beginning of
 * the buffer to head pointer.
 *
 * Called under the tty->atomic_read_lock sem
 *
 */

static int copy_from_read_buf(struct tty_struct *tty,
				      unsigned char __user **b,
				      size_t *nr)

{
	int retval;
	size_t n;
	unsigned long flags;

	retval = 0;
	spin_lock_irqsave(&tty->read_lock, flags);
	/* Bound by both the available data and the contiguous run up to
	   the physical end of the ring buffer. */
	n = min(tty->read_cnt, N_TTY_BUF_SIZE - tty->read_tail);
	n = min(*nr, n);
	spin_unlock_irqrestore(&tty->read_lock, flags);
	if (n) {
		retval = copy_to_user(*b, &tty->read_buf[tty->read_tail], n);
		n -= retval;
		tty_audit_add_data(tty, &tty->read_buf[tty->read_tail], n);
		spin_lock_irqsave(&tty->read_lock, flags);
		tty->read_tail = (tty->read_tail + n) & (N_TTY_BUF_SIZE-1);
		tty->read_cnt -= n;
		/* Turn single EOF into zero-length read */
		if (L_EXTPROC(tty) && tty->icanon && n == 1) {
			if (!tty->read_cnt && (*b)[n-1] == EOF_CHAR(tty))
				n--;
		}
		spin_unlock_irqrestore(&tty->read_lock, flags);
		*b += n;
		*nr -= n;
	}
	return retval;
}

extern ssize_t redirected_tty_write(struct file *, const char __user *,
							size_t, loff_t *);

/**
 * job_control - check job control
 * @tty: tty
 * @file: file handle
 *
 * Perform job control management checks on this file/tty descriptor
 * and if appropriate send any needed signals and return a negative
 * error code if action should be taken.
 *
 * FIXME:
 * Locking: None - redirected write test is safe, testing
 * current->signal should possibly lock current->sighand
 * pgrp locking ?
 */

static int job_control(struct tty_struct *tty, struct file *file)
{
	/* Job control check -- must be done at start and after
	   every sleep (POSIX.1 7.1.1.4). */
	/* NOTE: not yet done after every sleep pending a thorough
	   check of the logic of this change. -- jlc */
	/* don't stop on /dev/console */
	if (file->f_op->write != redirected_tty_write &&
	    current->signal->tty == tty) {
		if (!tty->pgrp)
			printk(KERN_ERR "n_tty_read: no tty->pgrp!\n");
		else if (task_pgrp(current) != tty->pgrp) {
			if (is_ignored(SIGTTIN) ||
			    is_current_pgrp_orphaned())
				return -EIO;
			kill_pgrp(task_pgrp(current), SIGTTIN, 1);
			set_thread_flag(TIF_SIGPENDING);
			return -ERESTARTSYS;
		}
	}
	return 0;
}

/**
 * n_tty_read - read function for tty
 * @tty: tty device
 * @file: file object
 * @buf: userspace buffer pointer
 * @nr: size of I/O
 *
 * Perform reads for the line discipline. We are guaranteed that the
 * line discipline will not be closed under us but we may get multiple
 * parallel readers and must handle this ourselves. We may also get
 * a hangup. Always called in user context, may sleep.
 *
 * This code must be sure never to sleep through a hangup.
 */

static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
			 unsigned char __user *buf, size_t nr)
{
	unsigned char __user *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	int minimum, time;
	ssize_t retval = 0;
	ssize_t size;
	long timeout;
	unsigned long flags;
	int packet;

do_it_again:

	BUG_ON(!tty->read_buf);

	c = job_control(tty, file);
	if (c < 0)
		return c;

	/* Translate VMIN/VTIME into a wakeup threshold and timeout */
	minimum = time = 0;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (!tty->icanon) {
		time = (HZ / 10) * TIME_CHAR(tty);
		minimum = MIN_CHAR(tty);
		if (minimum) {
			if (time)
				tty->minimum_to_wake = 1;
			else if (!waitqueue_active(&tty->read_wait) ||
				 (tty->minimum_to_wake > minimum))
				tty->minimum_to_wake = minimum;
		} else {
			timeout = 0;
			if (time) {
				timeout = time;
				time = 0;
			}
			tty->minimum_to_wake = minimum = 1;
		}
	}

	/*
	 *	Internal serialization of reads.
	 */
	if (file->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&tty->atomic_read_lock))
			return -EAGAIN;
	} else {
		if (mutex_lock_interruptible(&tty->atomic_read_lock))
			return -ERESTARTSYS;
	}
	packet = tty->packet;

	add_wait_queue(&tty->read_wait, &wait);
	while (nr) {
		/* First test for status change. */
		if (packet && tty->link->ctrl_status) {
			unsigned char cs;
			if (b != buf)
				break;
			spin_lock_irqsave(&tty->link->ctrl_lock, flags);
			cs = tty->link->ctrl_status;
			tty->link->ctrl_status = 0;
			spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
			if (tty_put_user(tty, cs, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
			break;
		}
		/* This statement must be first before checking for input
		   so that any interrupt will set the state back to
		   TASK_RUNNING. */
		set_current_state(TASK_INTERRUPTIBLE);

		if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
		    ((minimum - (b - buf)) >= 1))
			tty->minimum_to_wake = (minimum - (b - buf));

		if (!input_available_p(tty, 0)) {
			if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
				retval = -EIO;
				break;
			}
			if (tty_hung_up_p(file))
				break;
			if (!timeout)
				break;
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}
			/* FIXME: does n_tty_set_room need locking ? */
			n_tty_set_room(tty);
			timeout = schedule_timeout(timeout);
			continue;
		}
		__set_current_state(TASK_RUNNING);

		/* Deal with packet mode. */
		if (packet && b == buf) {
			if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
		}

		if (tty->icanon && !L_EXTPROC(tty)) {
			/* N.B. avoid overrun if nr == 0 */
			while (nr && tty->read_cnt) {
				int eol;

				eol = test_and_clear_bit(tty->read_tail,
						tty->read_flags);
				c = tty->read_buf[tty->read_tail];
				spin_lock_irqsave(&tty->read_lock, flags);
				tty->read_tail = ((tty->read_tail+1) &
						  (N_TTY_BUF_SIZE-1));
				tty->read_cnt--;
				if (eol) {
					/* this test should be redundant:
					 * we shouldn't be reading data if
					 * canon_data is 0
					 */
					if (--tty->canon_data < 0)
						tty->canon_data = 0;
				}
				spin_unlock_irqrestore(&tty->read_lock, flags);

				if (!eol || (c != __DISABLED_CHAR)) {
					if (tty_put_user(tty, c, b++)) {
						retval = -EFAULT;
						b--;
						break;
					}
					nr--;
				}
				if (eol) {
					tty_audit_push(tty);
					break;
				}
			}
			if (retval)
				break;
		} else {
			int uncopied;
			/* The copy function takes the read lock and handles
			   locking internally for this case */
			uncopied = copy_from_read_buf(tty, &b, &nr);
			uncopied += copy_from_read_buf(tty, &b, &nr);
			if (uncopied) {
				retval = -EFAULT;
				break;
			}
		}

		/* If there is enough space in the read buffer now, let the
		 * low-level driver know. We use n_tty_chars_in_buffer() to
		 * check the buffer, as it now knows about canonical mode.
		 * Otherwise, if the driver is throttled and the line is
		 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
		 * we won't get any more characters.
		 */
		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
			n_tty_set_room(tty);
			check_unthrottle(tty);
		}

		if (b - buf >= minimum)
			break;
		if (time)
			timeout = time;
	}
	mutex_unlock(&tty->atomic_read_lock);
	remove_wait_queue(&tty->read_wait, &wait);

	if (!waitqueue_active(&tty->read_wait))
		tty->minimum_to_wake = minimum;

	__set_current_state(TASK_RUNNING);
	size = b - buf;
	if (size) {
		retval = size;
		if (nr)
			clear_bit(TTY_PUSH, &tty->flags);
	} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
		/* TTY_PUSH means an EOF flushed a partial line: retry so
		   the pending data is delivered. */
		goto do_it_again;

	n_tty_set_room(tty);
	return retval;
}

/**
 * n_tty_write - write function for tty
 * @tty: tty device
 * @file: file object
 * @buf: userspace buffer pointer
 * @nr: size of I/O
 *
 * Write function of the terminal device.
 * This is serialized with
 * respect to other write callers but not to termios changes, reads
 * and other such events. Since the receive code will echo characters,
 * thus calling driver write methods, the output_lock is used in
 * the output processing functions called here as well as in the
 * echo processing function to protect the column state and space
 * left in the buffer.
 *
 * This code must be sure never to sleep through a hangup.
 *
 * Locking: output_lock to protect column state and space left
 *	 (note that the process_output*() functions take this
 *	  lock themselves)
 */

static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
			   const unsigned char *buf, size_t nr)
{
	const unsigned char *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	ssize_t retval = 0;

	/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
	if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
		retval = tty_check_change(tty);
		if (retval)
			return retval;
	}

	/* Write out any echoed characters that are still pending */
	process_echoes(tty);

	add_wait_queue(&tty->write_wait, &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
			retval = -EIO;
			break;
		}
		if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
			/* Output post-processing path: bulk-process runs of
			   simple characters, fall back to single-character
			   processing when process_output_block() stops. */
			while (nr > 0) {
				ssize_t num = process_output_block(tty, b, nr);
				if (num < 0) {
					if (num == -EAGAIN)
						break;
					retval = num;
					goto break_out;
				}
				b += num;
				nr -= num;
				if (nr == 0)
					break;
				c = *b;
				if (process_output(c, tty) < 0)
					break;
				b++; nr--;
			}
			if (tty->ops->flush_chars)
				tty->ops->flush_chars(tty);
		} else {
			while (nr > 0) {
				c = tty->ops->write(tty, b, nr);
				if (c < 0) {
					retval = c;
					goto break_out;
				}
				if (!c)
					break;
				b += c;
				nr -= c;
			}
		}
		if (!nr)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}
		schedule();
	}
break_out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);
	if (b - buf != nr && tty->fasync)
		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	return (b - buf) ? b - buf : retval;
}

/**
 * n_tty_poll - poll method for N_TTY
 * @tty: terminal device
 * @file: file accessing it
 * @wait: poll table
 *
 * Called when the line discipline is asked to poll() for data or
 * for special events. This code is not serialized with respect to
 * other events save open/close.
 *
 * This code must be sure never to sleep through a hangup.
 * Called without the kernel lock held - fine
 */

static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
							poll_table *wait)
{
	unsigned int mask = 0;

	poll_wait(file, &tty->read_wait, wait);
	poll_wait(file, &tty->write_wait, wait);
	if (input_available_p(tty, TIME_CHAR(tty) ? 0 : MIN_CHAR(tty)))
		mask |= POLLIN | POLLRDNORM;
	if (tty->packet && tty->link->ctrl_status)
		mask |= POLLPRI | POLLIN | POLLRDNORM;
	if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
		mask |= POLLHUP;
	if (tty_hung_up_p(file))
		mask |= POLLHUP;
	if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
		/* Nothing readable yet: arm the VMIN wakeup threshold */
		if (MIN_CHAR(tty) && !TIME_CHAR(tty))
			tty->minimum_to_wake = MIN_CHAR(tty);
		else
			tty->minimum_to_wake = 1;
	}
	if (tty->ops->write && !tty_is_writelocked(tty) &&
			tty_chars_in_buffer(tty) < WAKEUP_CHARS &&
			tty_write_room(tty) > 0)
		mask |= POLLOUT | POLLWRNORM;
	return mask;
}

/* Count the characters of the pending canonical lines, excluding any
   __DISABLED_CHAR (EOF) markers flagged in read_flags. */
static unsigned long inq_canon(struct tty_struct *tty)
{
	int nr, head, tail;

	if (!tty->canon_data)
		return 0;
	head = tty->canon_head;
	tail = tty->read_tail;
	nr = (head - tail) & (N_TTY_BUF_SIZE-1);
	/* Skip EOF-chars.. */
	while (head != tail) {
		if (test_bit(tail, tty->read_flags) &&
		    tty->read_buf[tail] == __DISABLED_CHAR)
			nr--;
		tail = (tail+1) & (N_TTY_BUF_SIZE-1);
	}
	return nr;
}

/* ldisc ioctl handler: TIOCOUTQ/TIOCINQ here, everything else is
   passed to the generic n_tty_ioctl_helper(). */
static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
	int retval;

	switch (cmd) {
	case TIOCOUTQ:
		return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
	case TIOCINQ:
		/* FIXME: Locking */
		retval = tty->read_cnt;
		if (L_ICANON(tty))
			retval = inq_canon(tty);
		return put_user(retval, (unsigned int __user *) arg);
	default:
		return n_tty_ioctl_helper(tty, file, cmd, arg);
	}
}

/* The N_TTY line discipline method table registered with the tty core */
struct tty_ldisc_ops tty_ldisc_N_TTY = {
	.magic           = TTY_LDISC_MAGIC,
	.name            = "n_tty",
	.open            = n_tty_open,
	.close           = n_tty_close,
	.flush_buffer    = n_tty_flush_buffer,
	.chars_in_buffer = n_tty_chars_in_buffer,
	.read            = n_tty_read,
	.write           = n_tty_write,
	.ioctl           = n_tty_ioctl,
	.set_termios     = n_tty_set_termios,
	.poll            = n_tty_poll,
	.receive_buf     = n_tty_receive_buf,
	.write_wakeup    = n_tty_write_wakeup
};

/**
 *	n_tty_inherit_ops	-	inherit N_TTY methods
 *	@ops: struct tty_ldisc_ops where to save N_TTY methods
 *
 *	Used by a generic struct tty_ldisc_ops to easily inherit N_TTY
 *	methods.
 */

void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
	*ops = tty_ldisc_N_TTY;
	/* Clear fields the inheriting module must own itself */
	ops->owner = NULL;
	ops->refcount = ops->flags = 0;
}
EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
gpl-2.0
akuster/linux-yocto-3.14
drivers/staging/wlags49_h2/wl_main.c
249
133186
/******************************************************************************* * Agere Systems Inc. * Wireless device driver for Linux (wlags49). * * Copyright (c) 1998-2003 Agere Systems Inc. * All rights reserved. * http://www.agere.com * * Initially developed by TriplePoint, Inc. * http://www.triplepoint.com * *------------------------------------------------------------------------------ * * This file contains the main driver entry points and other adapter * specific routines. * *------------------------------------------------------------------------------ * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * Copyright © 2003 Agere Systems Inc. * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Disclaimer * * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * ******************************************************************************/ /******************************************************************************* * constant definitions ******************************************************************************/ /* Allow support for calling system fcns to access F/W image file */ #define __KERNEL_SYSCALLS__ /******************************************************************************* * include files ******************************************************************************/ #include <wl_version.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/types.h> #include <linux/kernel.h> // #include <linux/sched.h> // #include <linux/ptrace.h> // #include <linux/slab.h> // #include <linux/ctype.h> // #include <linux/string.h> // #include <linux/timer.h> //#include <linux/interrupt.h> // #include <linux/tqueue.h> // #include <linux/in.h> // #include <linux/delay.h> // #include <asm/io.h> // // #include <asm/bitops.h> #include <linux/unistd.h> #include <asm/uaccess.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> // #include <linux/skbuff.h> // #include <linux/if_arp.h> // #include <linux/ioport.h> #define BIN_DL 0 #if BIN_DL #include <linux/vmalloc.h> #endif // BIN_DL #include <debug.h> #include <hcf.h> #include <dhf.h> //in 
order to get around:: wl_main.c:2229: `HREG_EV_RDMAD' undeclared (first use in this function)
#include <hcfdef.h>

#include <wl_if.h>
#include <wl_internal.h>
#include <wl_util.h>
#include <wl_main.h>
#include <wl_netdev.h>
#include <wl_wext.h>

#ifdef USE_PROFILE
#include <wl_profile.h>
#endif /* USE_PROFILE */

#ifdef BUS_PCMCIA
#include <wl_cs.h>
#endif /* BUS_PCMCIA */

#ifdef BUS_PCI
#include <wl_pci.h>
#endif /* BUS_PCI */

/*******************************************************************************
 * macro definitions
 ******************************************************************************/
/*
 * Validate one module parameter: if the condition C is false, log the
 * offending expression text (via the # stringize operator) and bail out.
 *
 * NOTE: the macro expands to a `goto failed;`, so it may only be used
 * inside a function that provides a `failed:` label (see wl_insert()).
 * It is deliberately a bare block, not do{...}while(0), so keep it on a
 * line of its own at expansion sites.
 */
#define VALID_PARAM(C) \
	{ \
		if (!(C)) \
		{ \
			printk(KERN_INFO "Wireless, parameter error: \"%s\"\n", #C); \
			goto failed; \
		} \
	}

/*******************************************************************************
 * local functions
 ******************************************************************************/
/* Tasklet body that performs deferred interrupt work; defined later in this
 * file and registered in wl_insert() via tasklet_init(). */
void wl_isr_handler( unsigned long p );

#if 0 //SCULL_USE_PROC /* don't waste space if unused */
static int scull_read_procmem(struct seq_file *m, void *v);
static int write_int(struct file *file, const char *buffer, unsigned long count, void *data);

/*
 * seq_file wrappers for procfile show routines.
 */
/* open() hook for the /proc entry: binds scull_read_procmem as the single
 * show routine, passing the net_device stashed via proc_create_data(). */
static int scull_read_procmem_open(struct inode *inode, struct file *file)
{
	return single_open(file, scull_read_procmem, PDE_DATA(inode));
}

static const struct file_operations scull_read_procmem_fops = {
	.open = scull_read_procmem_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif /* SCULL_USE_PROC */

/*******************************************************************************
 * module parameter definitions - set with 'insmod'
 ******************************************************************************/
/* p_u8/p_u16/p_s8/p_char are driver-local parameter typedefs (see headers);
 * PARM_DEFAULT_* values come from wl_version.h/wl_internal.h.  The values
 * below are consumed (and validated) by wl_insert(). */
static p_u16    irq_mask                = 0xdeb8; // IRQ3,4,5,7,9,10,11,12,14,15
static p_s8     irq_list[4]             = { -1 };

/* MODULE_PARM() is the obsolete pre-2.6 parameter API, hence disabled. */
#if 0
MODULE_PARM(irq_mask,               "h");
MODULE_PARM_DESC(irq_mask,               "IRQ mask [0xdeb8]");
MODULE_PARM(irq_list,               "1-4b");
MODULE_PARM_DESC(irq_list,               "IRQ list [<irq_mask>]");
#endif

static p_u8     PARM_AUTHENTICATION        	= PARM_DEFAULT_AUTHENTICATION;
static p_u16    PARM_AUTH_KEY_MGMT_SUITE   	= PARM_DEFAULT_AUTH_KEY_MGMT_SUITE;
static p_u16    PARM_BRSC_2GHZ             	= PARM_DEFAULT_BRSC_2GHZ;
static p_u16    PARM_BRSC_5GHZ             	= PARM_DEFAULT_BRSC_5GHZ;
static p_u16    PARM_COEXISTENCE           	= PARM_DEFAULT_COEXISTENCE;
static p_u16    PARM_CONNECTION_CONTROL    	= PARM_DEFAULT_CONNECTION_CONTROL;  //;?rename and move
static p_char  *PARM_CREATE_IBSS           	= PARM_DEFAULT_CREATE_IBSS_STR;
static p_char  *PARM_DESIRED_SSID          	= PARM_DEFAULT_SSID;
static p_char  *PARM_DOWNLOAD_FIRMWARE      = "";
static p_u16    PARM_ENABLE_ENCRYPTION     	= PARM_DEFAULT_ENABLE_ENCRYPTION;
static p_char  *PARM_EXCLUDE_UNENCRYPTED   	= PARM_DEFAULT_EXCLUDE_UNENCRYPTED_STR;
static p_char  *PARM_INTRA_BSS_RELAY       	= PARM_DEFAULT_INTRA_BSS_RELAY_STR;
static p_char  *PARM_KEY1                  	= "";
static p_char  *PARM_KEY2                  	= "";
static p_char  *PARM_KEY3                  	= "";
static p_char  *PARM_KEY4                  	= "";
static p_char  *PARM_LOAD_BALANCING        	= PARM_DEFAULT_LOAD_BALANCING_STR;
static p_u16    PARM_MAX_SLEEP             	= PARM_DEFAULT_MAX_PM_SLEEP;
static p_char  *PARM_MEDIUM_DISTRIBUTION   	= PARM_DEFAULT_MEDIUM_DISTRIBUTION_STR;
static p_char  *PARM_MICROWAVE_ROBUSTNESS  	= PARM_DEFAULT_MICROWAVE_ROBUSTNESS_STR;
static p_char  *PARM_MULTICAST_PM_BUFFERING	= PARM_DEFAULT_MULTICAST_PM_BUFFERING_STR;
static p_u16    PARM_MULTICAST_RATE        	= PARM_DEFAULT_MULTICAST_RATE_2GHZ;
static p_char  *PARM_MULTICAST_RX          	= PARM_DEFAULT_MULTICAST_RX_STR;
static p_u8     PARM_NETWORK_ADDR[ETH_ALEN]	= PARM_DEFAULT_NETWORK_ADDR;
static p_u16    PARM_OWN_ATIM_WINDOW       	= PARM_DEFAULT_OWN_ATIM_WINDOW;
static p_u16    PARM_OWN_BEACON_INTERVAL   	= PARM_DEFAULT_OWN_BEACON_INTERVAL;
static p_u8     PARM_OWN_CHANNEL           	= PARM_DEFAULT_OWN_CHANNEL;
static p_u8     PARM_OWN_DTIM_PERIOD       	= PARM_DEFAULT_OWN_DTIM_PERIOD;
static p_char  *PARM_OWN_NAME              	= PARM_DEFAULT_OWN_NAME;
static p_char  *PARM_OWN_SSID              	= PARM_DEFAULT_SSID;
static p_u16    PARM_PM_ENABLED            	= WVLAN_PM_STATE_DISABLED;
static p_u16    PARM_PM_HOLDOVER_DURATION  	= PARM_DEFAULT_PM_HOLDOVER_DURATION;
static p_u8     PARM_PORT_TYPE             	= PARM_DEFAULT_PORT_TYPE;
static p_char  *PARM_PROMISCUOUS_MODE      	= PARM_DEFAULT_PROMISCUOUS_MODE_STR;
static p_char  *PARM_REJECT_ANY            	= PARM_DEFAULT_REJECT_ANY_STR;
#ifdef USE_WDS
/* Per-WDS-port overrides; indices 1..6 map to wds_port[0..5] in wl_insert(). */
static p_u16    PARM_RTS_THRESHOLD1        	= PARM_DEFAULT_RTS_THRESHOLD;
static p_u16    PARM_RTS_THRESHOLD2        	= PARM_DEFAULT_RTS_THRESHOLD;
static p_u16    PARM_RTS_THRESHOLD3        	= PARM_DEFAULT_RTS_THRESHOLD;
static p_u16    PARM_RTS_THRESHOLD4        	= PARM_DEFAULT_RTS_THRESHOLD;
static p_u16    PARM_RTS_THRESHOLD5        	= PARM_DEFAULT_RTS_THRESHOLD;
static p_u16    PARM_RTS_THRESHOLD6        	= PARM_DEFAULT_RTS_THRESHOLD;
#endif // USE_WDS
static p_u16    PARM_RTS_THRESHOLD         	= PARM_DEFAULT_RTS_THRESHOLD;
static p_u16    PARM_SRSC_2GHZ             	= PARM_DEFAULT_SRSC_2GHZ;
static p_u16    PARM_SRSC_5GHZ             	= PARM_DEFAULT_SRSC_5GHZ;
static p_u8     PARM_SYSTEM_SCALE          	= PARM_DEFAULT_SYSTEM_SCALE;
static p_u8     PARM_TX_KEY                	= PARM_DEFAULT_TX_KEY;
static p_u16    PARM_TX_POW_LEVEL          	= PARM_DEFAULT_TX_POW_LEVEL;
#ifdef USE_WDS
static p_u16    PARM_TX_RATE1              	= PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16    PARM_TX_RATE2              	= PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16    PARM_TX_RATE3              	= PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16    PARM_TX_RATE4              	= PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16    PARM_TX_RATE5              	= PARM_DEFAULT_TX_RATE_2GHZ;
static p_u16    PARM_TX_RATE6              	= PARM_DEFAULT_TX_RATE_2GHZ;
#endif // USE_WDS
static p_u16    PARM_TX_RATE               	= PARM_DEFAULT_TX_RATE_2GHZ;
#ifdef USE_WDS
static p_u8     PARM_WDS_ADDRESS1[ETH_ALEN]	= PARM_DEFAULT_NETWORK_ADDR;
static p_u8     PARM_WDS_ADDRESS2[ETH_ALEN]	= PARM_DEFAULT_NETWORK_ADDR;
static p_u8     PARM_WDS_ADDRESS3[ETH_ALEN]	= PARM_DEFAULT_NETWORK_ADDR;
static p_u8     PARM_WDS_ADDRESS4[ETH_ALEN]	= PARM_DEFAULT_NETWORK_ADDR;
static p_u8     PARM_WDS_ADDRESS5[ETH_ALEN]	= PARM_DEFAULT_NETWORK_ADDR;
static p_u8     PARM_WDS_ADDRESS6[ETH_ALEN]	= PARM_DEFAULT_NETWORK_ADDR;
#endif // USE_WDS

/* Legacy MODULE_PARM registrations kept for reference only (dead code). */
#if 0
MODULE_PARM(PARM_DESIRED_SSID, "s");
MODULE_PARM_DESC(PARM_DESIRED_SSID, "Network Name (<string>) [ANY]");
MODULE_PARM(PARM_OWN_SSID, "s");
MODULE_PARM_DESC(PARM_OWN_SSID, "Network Name (<string>) [ANY]");
MODULE_PARM(PARM_OWN_CHANNEL, "b");
MODULE_PARM_DESC(PARM_OWN_CHANNEL, "Channel (0 - 14) [0]");
MODULE_PARM(PARM_SYSTEM_SCALE, "b");
MODULE_PARM_DESC(PARM_SYSTEM_SCALE, "Distance Between APs (1 - 3) [1]");
MODULE_PARM(PARM_TX_RATE, "b");
MODULE_PARM_DESC(PARM_TX_RATE, "Transmit Rate Control");
MODULE_PARM(PARM_RTS_THRESHOLD, "h");
MODULE_PARM_DESC(PARM_RTS_THRESHOLD, "Medium Reservation (RTS/CTS Fragment Length) (256 - 2347) [2347]");
MODULE_PARM(PARM_MICROWAVE_ROBUSTNESS, "s");
MODULE_PARM_DESC(PARM_MICROWAVE_ROBUSTNESS, "Microwave Oven Robustness Enabled (<string> N or Y) [N]");
MODULE_PARM(PARM_OWN_NAME, "s");
MODULE_PARM_DESC(PARM_OWN_NAME, "Station Name (<string>) [Linux]");
MODULE_PARM(PARM_ENABLE_ENCRYPTION, "b");
MODULE_PARM_DESC(PARM_ENABLE_ENCRYPTION, "Encryption Mode (0 - 7) [0]");
MODULE_PARM(PARM_KEY1, "s");
MODULE_PARM_DESC(PARM_KEY1, "Data Encryption Key 1 (<string>) []");
MODULE_PARM(PARM_KEY2, "s");
MODULE_PARM_DESC(PARM_KEY2, "Data Encryption Key 2 (<string>) []");
MODULE_PARM(PARM_KEY3, "s");
MODULE_PARM_DESC(PARM_KEY3, "Data Encryption Key 3 (<string>) []");
MODULE_PARM(PARM_KEY4, "s");
MODULE_PARM_DESC(PARM_KEY4, "Data Encryption Key 4 (<string>) []");
MODULE_PARM(PARM_TX_KEY, "b");
MODULE_PARM_DESC(PARM_TX_KEY, "Transmit Key ID (1 - 4) [1]");
MODULE_PARM(PARM_MULTICAST_RATE, "b");
MODULE_PARM_DESC(PARM_MULTICAST_RATE, "Multicast Rate");
MODULE_PARM(PARM_DOWNLOAD_FIRMWARE, "s");
MODULE_PARM_DESC(PARM_DOWNLOAD_FIRMWARE, "filename of firmware image");
MODULE_PARM(PARM_AUTH_KEY_MGMT_SUITE, "b");
MODULE_PARM_DESC(PARM_AUTH_KEY_MGMT_SUITE, "Authentication Key Management suite (0-4) [0]");
MODULE_PARM(PARM_LOAD_BALANCING, "s");
MODULE_PARM_DESC(PARM_LOAD_BALANCING, "Load Balancing Enabled (<string> N or Y) [Y]");
MODULE_PARM(PARM_MEDIUM_DISTRIBUTION, "s");
MODULE_PARM_DESC(PARM_MEDIUM_DISTRIBUTION, "Medium Distribution Enabled (<string> N or Y) [Y]");
MODULE_PARM(PARM_TX_POW_LEVEL, "b");
MODULE_PARM_DESC(PARM_TX_POW_LEVEL, "Transmit Power (0 - 6) [3]");
MODULE_PARM(PARM_SRSC_2GHZ, "b");
MODULE_PARM_DESC(PARM_SRSC_2GHZ, "Supported Rate Set Control 2.4 GHz");
MODULE_PARM(PARM_SRSC_5GHZ, "b");
MODULE_PARM_DESC(PARM_SRSC_5GHZ, "Supported Rate Set Control 5.0 GHz");
MODULE_PARM(PARM_BRSC_2GHZ, "b");
MODULE_PARM_DESC(PARM_BRSC_2GHZ, "Basic Rate Set Control 2.4 GHz");
MODULE_PARM(PARM_BRSC_5GHZ, "b");
MODULE_PARM_DESC(PARM_BRSC_5GHZ, "Basic Rate Set Control 5.0 GHz");
#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA
//;?seems reasonable that even an AP-only driver could afford this small additional footprint
MODULE_PARM(PARM_PM_ENABLED, "h");
MODULE_PARM_DESC(PARM_PM_ENABLED, "Power Management State (0 - 2, 8001 - 8002) [0]");
MODULE_PARM(PARM_PORT_TYPE, "b");
MODULE_PARM_DESC(PARM_PORT_TYPE, "Port Type (1 - 3) [1]");
//;?MODULE_PARM(PARM_CREATE_IBSS, "s");
//;?MODULE_PARM_DESC(PARM_CREATE_IBSS, "Create IBSS (<string> N or Y) [N]");
//;?MODULE_PARM(PARM_MULTICAST_RX, "s");
//;?MODULE_PARM_DESC(PARM_MULTICAST_RX, "Multicast Receive Enable (<string> N or Y) [Y]");
//;?MODULE_PARM(PARM_MAX_SLEEP, "h");
//;?MODULE_PARM_DESC(PARM_MAX_SLEEP, "Maximum Power Management Sleep Duration (0 - 65535) [100]");
//;?MODULE_PARM(PARM_NETWORK_ADDR, "6b");
//;?MODULE_PARM_DESC(PARM_NETWORK_ADDR, "Hardware Ethernet Address ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [<factory value>]");
//;?MODULE_PARM(PARM_AUTHENTICATION, "b");
//
//tracker 12448
//;?MODULE_PARM_DESC(PARM_AUTHENTICATION, "Authentication Type (0-2) [0] 0=Open 1=SharedKey 2=LEAP");
//;?MODULE_PARM_DESC(authentication, "Authentication Type (1-2) [1] 1=Open 2=SharedKey"); //tracker 12448
//
//;?MODULE_PARM(PARM_OWN_ATIM_WINDOW, "b");
//;?MODULE_PARM_DESC(PARM_OWN_ATIM_WINDOW, "ATIM Window time in TU for IBSS creation (0-100) [0]");
//;?MODULE_PARM(PARM_PM_HOLDOVER_DURATION, "b");
//;?MODULE_PARM_DESC(PARM_PM_HOLDOVER_DURATION, "Time station remains awake after MAC frame transfer when PM is on (0-65535) [100]");
//;?MODULE_PARM(PARM_PROMISCUOUS_MODE, "s");
//;?MODULE_PARM_DESC(PARM_PROMISCUOUS_MODE, "Promiscuous Mode Enable (<string> Y or N ) [N]" );
//;?
MODULE_PARM(PARM_CONNECTION_CONTROL, "b");
MODULE_PARM_DESC(PARM_CONNECTION_CONTROL, "Connection Control (0 - 3) [2]");
#endif /* HCF_STA */
#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
//;?should we restore this to allow smaller memory footprint
MODULE_PARM(PARM_OWN_DTIM_PERIOD, "b");
MODULE_PARM_DESC(PARM_OWN_DTIM_PERIOD, "DTIM Period (0 - 255) [1]");
MODULE_PARM(PARM_REJECT_ANY, "s");
MODULE_PARM_DESC(PARM_REJECT_ANY, "Closed System (<string> N or Y) [N]");
MODULE_PARM(PARM_EXCLUDE_UNENCRYPTED, "s");
MODULE_PARM_DESC(PARM_EXCLUDE_UNENCRYPTED, "Deny non-encrypted (<string> N or Y) [Y]");
MODULE_PARM(PARM_MULTICAST_PM_BUFFERING,"s");
MODULE_PARM_DESC(PARM_MULTICAST_PM_BUFFERING, "Buffer MAC frames for Tx after DTIM (<string> Y or N) [Y]");
MODULE_PARM(PARM_INTRA_BSS_RELAY, "s");
MODULE_PARM_DESC(PARM_INTRA_BSS_RELAY, "IntraBSS Relay (<string> N or Y) [Y]");
MODULE_PARM(PARM_RTS_THRESHOLD1, "h");
MODULE_PARM_DESC(PARM_RTS_THRESHOLD1, "RTS Threshold, WDS Port 1 (256 - 2347) [2347]");
MODULE_PARM(PARM_RTS_THRESHOLD2, "h");
MODULE_PARM_DESC(PARM_RTS_THRESHOLD2, "RTS Threshold, WDS Port 2 (256 - 2347) [2347]");
MODULE_PARM(PARM_RTS_THRESHOLD3, "h");
MODULE_PARM_DESC(PARM_RTS_THRESHOLD3, "RTS Threshold, WDS Port 3 (256 - 2347) [2347]");
MODULE_PARM(PARM_RTS_THRESHOLD4, "h");
MODULE_PARM_DESC(PARM_RTS_THRESHOLD4, "RTS Threshold, WDS Port 4 (256 - 2347) [2347]");
MODULE_PARM(PARM_RTS_THRESHOLD5, "h");
MODULE_PARM_DESC(PARM_RTS_THRESHOLD5, "RTS Threshold, WDS Port 5 (256 - 2347) [2347]");
MODULE_PARM(PARM_RTS_THRESHOLD6, "h");
MODULE_PARM_DESC(PARM_RTS_THRESHOLD6, "RTS Threshold, WDS Port 6 (256 - 2347) [2347]");
MODULE_PARM(PARM_TX_RATE1, "b");
MODULE_PARM_DESC(PARM_TX_RATE1, "Transmit Rate Control, WDS Port 1 (1 - 7) [3]");
MODULE_PARM(PARM_TX_RATE2, "b");
MODULE_PARM_DESC(PARM_TX_RATE2, "Transmit Rate Control, WDS Port 2 (1 - 7) [3]");
MODULE_PARM(PARM_TX_RATE3, "b");
MODULE_PARM_DESC(PARM_TX_RATE3, "Transmit Rate Control, WDS Port 3 (1 - 7) [3]");
MODULE_PARM(PARM_TX_RATE4, "b");
MODULE_PARM_DESC(PARM_TX_RATE4, "Transmit Rate Control, WDS Port 4 (1 - 7) [3]");
MODULE_PARM(PARM_TX_RATE5, "b");
MODULE_PARM_DESC(PARM_TX_RATE5, "Transmit Rate Control, WDS Port 5 (1 - 7) [3]");
MODULE_PARM(PARM_TX_RATE6, "b");
MODULE_PARM_DESC(PARM_TX_RATE6, "Transmit Rate Control, WDS Port 6 (1 - 7) [3]");
MODULE_PARM(PARM_WDS_ADDRESS1, "6b");
MODULE_PARM_DESC(PARM_WDS_ADDRESS1, "MAC Address, WDS Port 1 ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [{0}]");
MODULE_PARM(PARM_WDS_ADDRESS2, "6b");
MODULE_PARM_DESC(PARM_WDS_ADDRESS2, "MAC Address, WDS Port 2 ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [{0}]");
MODULE_PARM(PARM_WDS_ADDRESS3, "6b");
MODULE_PARM_DESC(PARM_WDS_ADDRESS3, "MAC Address, WDS Port 3 ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [{0}]");
MODULE_PARM(PARM_WDS_ADDRESS4, "6b");
MODULE_PARM_DESC(PARM_WDS_ADDRESS4, "MAC Address, WDS Port 4 ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [{0}]");
MODULE_PARM(PARM_WDS_ADDRESS5, "6b");
MODULE_PARM_DESC(PARM_WDS_ADDRESS5, "MAC Address, WDS Port 5 ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [{0}]");
MODULE_PARM(PARM_WDS_ADDRESS6, "6b");
MODULE_PARM_DESC(PARM_WDS_ADDRESS6, "MAC Address, WDS Port 6 ([0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff],[0x00-0xff]) [{0}]");
MODULE_PARM(PARM_OWN_BEACON_INTERVAL, "b");
MODULE_PARM_DESC(PARM_OWN_BEACON_INTERVAL, "Own Beacon Interval (20 - 200) [100]");
MODULE_PARM(PARM_COEXISTENCE, "b");
MODULE_PARM_DESC(PARM_COEXISTENCE, "Coexistence (0-7) [0]");
#endif /* HCF_AP */
#endif /* END NEW PARAMETERS */

/*******************************************************************************
 * debugging specifics
 ******************************************************************************/
#if DBG
static p_u32    pc_debug = DBG_LVL;
//MODULE_PARM(pc_debug, "i");
/*static ;?conflicts with my understanding of CL parameters and breaks now I moved
 * the corresponding logic to wl_profile */
p_u32   DebugFlag = ~0; //recognizable "undefined value" rather than DBG_DEFAULTS;
//MODULE_PARM(DebugFlag, "l"); static struct dbg_info wl_info = { KBUILD_MODNAME, 0, 0 }; struct dbg_info *DbgInfo = &wl_info; #endif /* DBG */ #ifdef USE_RTS static p_char *useRTS = "N"; MODULE_PARM( useRTS, "s" ); MODULE_PARM_DESC( useRTS, "Use RTS test interface (<string> N or Y) [N]" ); #endif /* USE_RTS */ /******************************************************************************* * firmware download specifics ******************************************************************************/ extern struct CFG_RANGE2_STRCT BASED cfg_drv_act_ranges_pri; // describes primary-actor range of HCF #if 0 //;? (HCF_TYPE) & HCF_TYPE_AP extern memimage ap; // AP firmware image to be downloaded #endif /* HCF_AP */ #if 1 //;? (HCF_TYPE) & HCF_TYPE_STA //extern memimage station; // STA firmware image to be downloaded extern memimage fw_image; // firmware image to be downloaded #endif /* HCF_STA */ int wl_insert( struct net_device *dev ) { int result = 0; int hcf_status = HCF_SUCCESS; int i; unsigned long flags = 0; struct wl_private *lp = wl_priv(dev); /* Initialize the adapter hardware. */ memset( &( lp->hcfCtx ), 0, sizeof( IFB_STRCT )); /* Initialize the adapter parameters. 
*/ spin_lock_init( &( lp->slock )); /* Initialize states */ //lp->lockcount = 0; //PE1DNN lp->is_handling_int = WL_NOT_HANDLING_INT; lp->firmware_present = WL_FRIMWARE_NOT_PRESENT; lp->dev = dev; DBG_PARAM( DbgInfo, "irq_mask", "0x%04x", irq_mask & 0x0FFFF ); DBG_PARAM( DbgInfo, "irq_list", "0x%02x 0x%02x 0x%02x 0x%02x", irq_list[0] & 0x0FF, irq_list[1] & 0x0FF, irq_list[2] & 0x0FF, irq_list[3] & 0x0FF ); DBG_PARAM( DbgInfo, PARM_NAME_DESIRED_SSID, "\"%s\"", PARM_DESIRED_SSID ); DBG_PARAM( DbgInfo, PARM_NAME_OWN_SSID, "\"%s\"", PARM_OWN_SSID ); DBG_PARAM( DbgInfo, PARM_NAME_OWN_CHANNEL, "%d", PARM_OWN_CHANNEL); DBG_PARAM( DbgInfo, PARM_NAME_SYSTEM_SCALE, "%d", PARM_SYSTEM_SCALE ); DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE, "%d", PARM_TX_RATE ); DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD, "%d", PARM_RTS_THRESHOLD ); DBG_PARAM( DbgInfo, PARM_NAME_MICROWAVE_ROBUSTNESS, "\"%s\"", PARM_MICROWAVE_ROBUSTNESS ); DBG_PARAM( DbgInfo, PARM_NAME_OWN_NAME, "\"%s\"", PARM_OWN_NAME ); //;? DBG_PARAM( DbgInfo, PARM_NAME_ENABLE_ENCRYPTION, "\"%s\"", PARM_ENABLE_ENCRYPTION ); DBG_PARAM( DbgInfo, PARM_NAME_KEY1, "\"%s\"", PARM_KEY1 ); DBG_PARAM( DbgInfo, PARM_NAME_KEY2, "\"%s\"", PARM_KEY2 ); DBG_PARAM( DbgInfo, PARM_NAME_KEY3, "\"%s\"", PARM_KEY3 ); DBG_PARAM( DbgInfo, PARM_NAME_KEY4, "\"%s\"", PARM_KEY4 ); DBG_PARAM( DbgInfo, PARM_NAME_TX_KEY, "%d", PARM_TX_KEY ); DBG_PARAM( DbgInfo, PARM_NAME_MULTICAST_RATE, "%d", PARM_MULTICAST_RATE ); DBG_PARAM( DbgInfo, PARM_NAME_DOWNLOAD_FIRMWARE, "\"%s\"", PARM_DOWNLOAD_FIRMWARE ); DBG_PARAM( DbgInfo, PARM_NAME_AUTH_KEY_MGMT_SUITE, "%d", PARM_AUTH_KEY_MGMT_SUITE ); //;?#if (HCF_TYPE) & HCF_TYPE_STA //;?should we make this code conditional depending on in STA mode //;? DBG_PARAM( DbgInfo, PARM_NAME_PORT_TYPE, "%d", PARM_PORT_TYPE ); DBG_PARAM( DbgInfo, PARM_NAME_PM_ENABLED, "%04x", PARM_PM_ENABLED ); //;? DBG_PARAM( DbgInfo, PARM_NAME_CREATE_IBSS, "\"%s\"", PARM_CREATE_IBSS ); //;? 
DBG_PARAM( DbgInfo, PARM_NAME_MULTICAST_RX, "\"%s\"", PARM_MULTICAST_RX ); //;? DBG_PARAM( DbgInfo, PARM_NAME_MAX_SLEEP, "%d", PARM_MAX_SLEEP ); /* DBG_PARAM(DbgInfo, PARM_NAME_NETWORK_ADDR, "\"%pM\"", PARM_NETWORK_ADDR); */ //;? DBG_PARAM( DbgInfo, PARM_NAME_AUTHENTICATION, "%d", PARM_AUTHENTICATION ); //;? DBG_PARAM( DbgInfo, PARM_NAME_OWN_ATIM_WINDOW, "%d", PARM_OWN_ATIM_WINDOW ); //;? DBG_PARAM( DbgInfo, PARM_NAME_PM_HOLDOVER_DURATION, "%d", PARM_PM_HOLDOVER_DURATION ); //;? DBG_PARAM( DbgInfo, PARM_NAME_PROMISCUOUS_MODE, "\"%s\"", PARM_PROMISCUOUS_MODE ); //;?#endif /* HCF_STA */ #if 1 //;? (HCF_TYPE) & HCF_TYPE_AP //;?should we restore this to allow smaller memory footprint //;?I guess: no, since this is Debug mode only DBG_PARAM( DbgInfo, PARM_NAME_OWN_DTIM_PERIOD, "%d", PARM_OWN_DTIM_PERIOD ); DBG_PARAM( DbgInfo, PARM_NAME_REJECT_ANY, "\"%s\"", PARM_REJECT_ANY ); DBG_PARAM( DbgInfo, PARM_NAME_EXCLUDE_UNENCRYPTED, "\"%s\"", PARM_EXCLUDE_UNENCRYPTED ); DBG_PARAM( DbgInfo, PARM_NAME_MULTICAST_PM_BUFFERING, "\"%s\"", PARM_MULTICAST_PM_BUFFERING ); DBG_PARAM( DbgInfo, PARM_NAME_INTRA_BSS_RELAY, "\"%s\"", PARM_INTRA_BSS_RELAY ); #ifdef USE_WDS DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD1, "%d", PARM_RTS_THRESHOLD1 ); DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD2, "%d", PARM_RTS_THRESHOLD2 ); DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD3, "%d", PARM_RTS_THRESHOLD3 ); DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD4, "%d", PARM_RTS_THRESHOLD4 ); DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD5, "%d", PARM_RTS_THRESHOLD5 ); DBG_PARAM( DbgInfo, PARM_NAME_RTS_THRESHOLD6, "%d", PARM_RTS_THRESHOLD6 ); DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE1, "%d", PARM_TX_RATE1 ); DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE2, "%d", PARM_TX_RATE2 ); DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE3, "%d", PARM_TX_RATE3 ); DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE4, "%d", PARM_TX_RATE4 ); DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE5, "%d", PARM_TX_RATE5 ); DBG_PARAM( DbgInfo, PARM_NAME_TX_RATE6, "%d", PARM_TX_RATE6 ); 
DBG_PARAM(DbgInfo, PARM_NAME_WDS_ADDRESS1, "\"%pM\"", PARM_WDS_ADDRESS1); DBG_PARAM(DbgInfo, PARM_NAME_WDS_ADDRESS2, "\"%pM\"", PARM_WDS_ADDRESS2); DBG_PARAM(DbgInfo, PARM_NAME_WDS_ADDRESS3, "\"%pM\"", PARM_WDS_ADDRESS3); DBG_PARAM(DbgInfo, PARM_NAME_WDS_ADDRESS4, "\"%pM\"", PARM_WDS_ADDRESS4); DBG_PARAM(DbgInfo, PARM_NAME_WDS_ADDRESS5, "\"%pM\"", PARM_WDS_ADDRESS5); DBG_PARAM(DbgInfo, PARM_NAME_WDS_ADDRESS6, "\"%pM\"", PARM_WDS_ADDRESS6); #endif /* USE_WDS */ #endif /* HCF_AP */ VALID_PARAM( !PARM_DESIRED_SSID || ( strlen( PARM_DESIRED_SSID ) <= PARM_MAX_NAME_LEN )); VALID_PARAM( !PARM_OWN_SSID || ( strlen( PARM_OWN_SSID ) <= PARM_MAX_NAME_LEN )); VALID_PARAM(( PARM_OWN_CHANNEL <= PARM_MAX_OWN_CHANNEL )); VALID_PARAM(( PARM_SYSTEM_SCALE >= PARM_MIN_SYSTEM_SCALE ) && ( PARM_SYSTEM_SCALE <= PARM_MAX_SYSTEM_SCALE )); VALID_PARAM(( PARM_TX_RATE >= PARM_MIN_TX_RATE ) && ( PARM_TX_RATE <= PARM_MAX_TX_RATE )); VALID_PARAM(( PARM_RTS_THRESHOLD <= PARM_MAX_RTS_THRESHOLD )); VALID_PARAM( !PARM_MICROWAVE_ROBUSTNESS || strchr( "NnYy", PARM_MICROWAVE_ROBUSTNESS[0] ) != NULL ); VALID_PARAM( !PARM_OWN_NAME || ( strlen( PARM_NAME_OWN_NAME ) <= PARM_MAX_NAME_LEN )); VALID_PARAM(( PARM_ENABLE_ENCRYPTION <= PARM_MAX_ENABLE_ENCRYPTION )); VALID_PARAM( is_valid_key_string( PARM_KEY1 )); VALID_PARAM( is_valid_key_string( PARM_KEY2 )); VALID_PARAM( is_valid_key_string( PARM_KEY3 )); VALID_PARAM( is_valid_key_string( PARM_KEY4 )); VALID_PARAM(( PARM_TX_KEY >= PARM_MIN_TX_KEY ) && ( PARM_TX_KEY <= PARM_MAX_TX_KEY )); VALID_PARAM(( PARM_MULTICAST_RATE >= PARM_MIN_MULTICAST_RATE ) && ( PARM_MULTICAST_RATE <= PARM_MAX_MULTICAST_RATE )); VALID_PARAM( !PARM_DOWNLOAD_FIRMWARE || ( strlen( PARM_DOWNLOAD_FIRMWARE ) <= 255 /*;?*/ )); VALID_PARAM(( PARM_AUTH_KEY_MGMT_SUITE < PARM_MAX_AUTH_KEY_MGMT_SUITE )); VALID_PARAM( !PARM_LOAD_BALANCING || strchr( "NnYy", PARM_LOAD_BALANCING[0] ) != NULL ); VALID_PARAM( !PARM_MEDIUM_DISTRIBUTION || strchr( "NnYy", PARM_MEDIUM_DISTRIBUTION[0] ) != NULL ); 
VALID_PARAM(( PARM_TX_POW_LEVEL <= PARM_MAX_TX_POW_LEVEL )); VALID_PARAM(( PARM_PORT_TYPE >= PARM_MIN_PORT_TYPE ) && ( PARM_PORT_TYPE <= PARM_MAX_PORT_TYPE )); VALID_PARAM( PARM_PM_ENABLED <= WVLAN_PM_STATE_STANDARD || ( PARM_PM_ENABLED & 0x7FFF ) <= WVLAN_PM_STATE_STANDARD ); VALID_PARAM( !PARM_CREATE_IBSS || strchr( "NnYy", PARM_CREATE_IBSS[0] ) != NULL ); VALID_PARAM( !PARM_MULTICAST_RX || strchr( "NnYy", PARM_MULTICAST_RX[0] ) != NULL ); VALID_PARAM(( PARM_MAX_SLEEP <= PARM_MAX_MAX_PM_SLEEP )); VALID_PARAM(( PARM_AUTHENTICATION <= PARM_MAX_AUTHENTICATION )); VALID_PARAM(( PARM_OWN_ATIM_WINDOW <= PARM_MAX_OWN_ATIM_WINDOW )); VALID_PARAM(( PARM_PM_HOLDOVER_DURATION <= PARM_MAX_PM_HOLDOVER_DURATION )); VALID_PARAM( !PARM_PROMISCUOUS_MODE || strchr( "NnYy", PARM_PROMISCUOUS_MODE[0] ) != NULL ); VALID_PARAM(( PARM_CONNECTION_CONTROL <= PARM_MAX_CONNECTION_CONTROL )); VALID_PARAM(( PARM_OWN_DTIM_PERIOD >= PARM_MIN_OWN_DTIM_PERIOD )); VALID_PARAM( !PARM_REJECT_ANY || strchr( "NnYy", PARM_REJECT_ANY[0] ) != NULL ); VALID_PARAM( !PARM_EXCLUDE_UNENCRYPTED || strchr( "NnYy", PARM_EXCLUDE_UNENCRYPTED[0] ) != NULL ); VALID_PARAM( !PARM_MULTICAST_PM_BUFFERING || strchr( "NnYy", PARM_MULTICAST_PM_BUFFERING[0] ) != NULL ); VALID_PARAM( !PARM_INTRA_BSS_RELAY || strchr( "NnYy", PARM_INTRA_BSS_RELAY[0] ) != NULL ); #ifdef USE_WDS VALID_PARAM(( PARM_RTS_THRESHOLD1 <= PARM_MAX_RTS_THRESHOLD )); VALID_PARAM(( PARM_RTS_THRESHOLD2 <= PARM_MAX_RTS_THRESHOLD )); VALID_PARAM(( PARM_RTS_THRESHOLD3 <= PARM_MAX_RTS_THRESHOLD )); VALID_PARAM(( PARM_RTS_THRESHOLD4 <= PARM_MAX_RTS_THRESHOLD )); VALID_PARAM(( PARM_RTS_THRESHOLD5 <= PARM_MAX_RTS_THRESHOLD )); VALID_PARAM(( PARM_RTS_THRESHOLD6 <= PARM_MAX_RTS_THRESHOLD )); VALID_PARAM(( PARM_TX_RATE1 >= PARM_MIN_TX_RATE ) && (PARM_TX_RATE1 <= PARM_MAX_TX_RATE )); VALID_PARAM(( PARM_TX_RATE2 >= PARM_MIN_TX_RATE ) && (PARM_TX_RATE2 <= PARM_MAX_TX_RATE )); VALID_PARAM(( PARM_TX_RATE3 >= PARM_MIN_TX_RATE ) && (PARM_TX_RATE3 <= PARM_MAX_TX_RATE )); 
VALID_PARAM(( PARM_TX_RATE4 >= PARM_MIN_TX_RATE ) && (PARM_TX_RATE4 <= PARM_MAX_TX_RATE )); VALID_PARAM(( PARM_TX_RATE5 >= PARM_MIN_TX_RATE ) && (PARM_TX_RATE5 <= PARM_MAX_TX_RATE )); VALID_PARAM(( PARM_TX_RATE6 >= PARM_MIN_TX_RATE ) && (PARM_TX_RATE6 <= PARM_MAX_TX_RATE )); #endif /* USE_WDS */ VALID_PARAM(( PARM_OWN_BEACON_INTERVAL >= PARM_MIN_OWN_BEACON_INTERVAL ) && ( PARM_OWN_BEACON_INTERVAL <= PARM_MAX_OWN_BEACON_INTERVAL )); VALID_PARAM(( PARM_COEXISTENCE <= PARM_COEXISTENCE )); /* Set the driver parameters from the passed in parameters. */ /* THESE MODULE PARAMETERS ARE TO BE DEPRECATED IN FAVOR OF A NAMING CONVENTION WHICH IS INLINE WITH THE FORTHCOMING WAVELAN API */ /* START NEW PARAMETERS */ lp->Channel = PARM_OWN_CHANNEL; lp->DistanceBetweenAPs = PARM_SYSTEM_SCALE; /* Need to determine how to handle the new bands for 5GHz */ lp->TxRateControl[0] = PARM_DEFAULT_TX_RATE_2GHZ; lp->TxRateControl[1] = PARM_DEFAULT_TX_RATE_5GHZ; lp->RTSThreshold = PARM_RTS_THRESHOLD; /* Need to determine how to handle the new bands for 5GHz */ lp->MulticastRate[0] = PARM_DEFAULT_MULTICAST_RATE_2GHZ; lp->MulticastRate[1] = PARM_DEFAULT_MULTICAST_RATE_5GHZ; if ( strchr( "Yy", PARM_MICROWAVE_ROBUSTNESS[0] ) != NULL ) { lp->MicrowaveRobustness = 1; } else { lp->MicrowaveRobustness = 0; } if ( PARM_DESIRED_SSID && ( strlen( PARM_DESIRED_SSID ) <= HCF_MAX_NAME_LEN )) { strcpy( lp->NetworkName, PARM_DESIRED_SSID ); } if ( PARM_OWN_SSID && ( strlen( PARM_OWN_SSID ) <= HCF_MAX_NAME_LEN )) { strcpy( lp->NetworkName, PARM_OWN_SSID ); } if ( PARM_OWN_NAME && ( strlen( PARM_OWN_NAME ) <= HCF_MAX_NAME_LEN )) { strcpy( lp->StationName, PARM_OWN_NAME ); } lp->EnableEncryption = PARM_ENABLE_ENCRYPTION; if ( PARM_KEY1 && ( strlen( PARM_KEY1 ) <= MAX_KEY_LEN )) { strcpy( lp->Key1, PARM_KEY1 ); } if ( PARM_KEY2 && ( strlen( PARM_KEY2 ) <= MAX_KEY_LEN )) { strcpy( lp->Key2, PARM_KEY2 ); } if ( PARM_KEY3 && ( strlen( PARM_KEY3 ) <= MAX_KEY_LEN )) { strcpy( lp->Key3, PARM_KEY3 ); } if ( PARM_KEY4 
&& ( strlen( PARM_KEY4 ) <= MAX_KEY_LEN )) { strcpy( lp->Key4, PARM_KEY4 ); } lp->TransmitKeyID = PARM_TX_KEY; key_string2key( lp->Key1, &(lp->DefaultKeys.key[0] )); key_string2key( lp->Key2, &(lp->DefaultKeys.key[1] )); key_string2key( lp->Key3, &(lp->DefaultKeys.key[2] )); key_string2key( lp->Key4, &(lp->DefaultKeys.key[3] )); lp->DownloadFirmware = 1 ; //;?to be upgraded PARM_DOWNLOAD_FIRMWARE; lp->AuthKeyMgmtSuite = PARM_AUTH_KEY_MGMT_SUITE; if ( strchr( "Yy", PARM_LOAD_BALANCING[0] ) != NULL ) { lp->loadBalancing = 1; } else { lp->loadBalancing = 0; } if ( strchr( "Yy", PARM_MEDIUM_DISTRIBUTION[0] ) != NULL ) { lp->mediumDistribution = 1; } else { lp->mediumDistribution = 0; } lp->txPowLevel = PARM_TX_POW_LEVEL; lp->srsc[0] = PARM_SRSC_2GHZ; lp->srsc[1] = PARM_SRSC_5GHZ; lp->brsc[0] = PARM_BRSC_2GHZ; lp->brsc[1] = PARM_BRSC_5GHZ; #if 1 //;? (HCF_TYPE) & HCF_TYPE_STA //;?seems reasonable that even an AP-only driver could afford this small additional footprint lp->PortType = PARM_PORT_TYPE; lp->MaxSleepDuration = PARM_MAX_SLEEP; lp->authentication = PARM_AUTHENTICATION; lp->atimWindow = PARM_OWN_ATIM_WINDOW; lp->holdoverDuration = PARM_PM_HOLDOVER_DURATION; lp->PMEnabled = PARM_PM_ENABLED; //;? if ( strchr( "Yy", PARM_CREATE_IBSS[0] ) != NULL ) { lp->CreateIBSS = 1; } else { lp->CreateIBSS = 0; } if ( strchr( "Nn", PARM_MULTICAST_RX[0] ) != NULL ) { lp->MulticastReceive = 0; } else { lp->MulticastReceive = 1; } if ( strchr( "Yy", PARM_PROMISCUOUS_MODE[0] ) != NULL ) { lp->promiscuousMode = 1; } else { lp->promiscuousMode = 0; } for( i = 0; i < ETH_ALEN; i++ ) { lp->MACAddress[i] = PARM_NETWORK_ADDR[i]; } lp->connectionControl = PARM_CONNECTION_CONTROL; #endif /* HCF_STA */ #if 1 //;? 
(HCF_TYPE) & HCF_TYPE_AP //;?should we restore this to allow smaller memory footprint lp->DTIMPeriod = PARM_OWN_DTIM_PERIOD; if ( strchr( "Yy", PARM_REJECT_ANY[0] ) != NULL ) { lp->RejectAny = 1; } else { lp->RejectAny = 0; } if ( strchr( "Nn", PARM_EXCLUDE_UNENCRYPTED[0] ) != NULL ) { lp->ExcludeUnencrypted = 0; } else { lp->ExcludeUnencrypted = 1; } if ( strchr( "Yy", PARM_MULTICAST_PM_BUFFERING[0] ) != NULL ) { lp->multicastPMBuffering = 1; } else { lp->multicastPMBuffering = 0; } if ( strchr( "Yy", PARM_INTRA_BSS_RELAY[0] ) != NULL ) { lp->intraBSSRelay = 1; } else { lp->intraBSSRelay = 0; } lp->ownBeaconInterval = PARM_OWN_BEACON_INTERVAL; lp->coexistence = PARM_COEXISTENCE; #ifdef USE_WDS lp->wds_port[0].rtsThreshold = PARM_RTS_THRESHOLD1; lp->wds_port[1].rtsThreshold = PARM_RTS_THRESHOLD2; lp->wds_port[2].rtsThreshold = PARM_RTS_THRESHOLD3; lp->wds_port[3].rtsThreshold = PARM_RTS_THRESHOLD4; lp->wds_port[4].rtsThreshold = PARM_RTS_THRESHOLD5; lp->wds_port[5].rtsThreshold = PARM_RTS_THRESHOLD6; lp->wds_port[0].txRateCntl = PARM_TX_RATE1; lp->wds_port[1].txRateCntl = PARM_TX_RATE2; lp->wds_port[2].txRateCntl = PARM_TX_RATE3; lp->wds_port[3].txRateCntl = PARM_TX_RATE4; lp->wds_port[4].txRateCntl = PARM_TX_RATE5; lp->wds_port[5].txRateCntl = PARM_TX_RATE6; for( i = 0; i < ETH_ALEN; i++ ) { lp->wds_port[0].wdsAddress[i] = PARM_WDS_ADDRESS1[i]; } for( i = 0; i < ETH_ALEN; i++ ) { lp->wds_port[1].wdsAddress[i] = PARM_WDS_ADDRESS2[i]; } for( i = 0; i < ETH_ALEN; i++ ) { lp->wds_port[2].wdsAddress[i] = PARM_WDS_ADDRESS3[i]; } for( i = 0; i < ETH_ALEN; i++ ) { lp->wds_port[3].wdsAddress[i] = PARM_WDS_ADDRESS4[i]; } for( i = 0; i < ETH_ALEN; i++ ) { lp->wds_port[4].wdsAddress[i] = PARM_WDS_ADDRESS5[i]; } for( i = 0; i < ETH_ALEN; i++ ) { lp->wds_port[5].wdsAddress[i] = PARM_WDS_ADDRESS6[i]; } #endif /* USE_WDS */ #endif /* HCF_AP */ #ifdef USE_RTS if ( strchr( "Yy", useRTS[0] ) != NULL ) { lp->useRTS = 1; } else { lp->useRTS = 0; } #endif /* USE_RTS */ /* END NEW 
PARAMETERS */ wl_lock( lp, &flags ); /* Initialize the portState variable */ lp->portState = WVLAN_PORT_STATE_DISABLED; /* Initialize the ScanResult struct */ memset( &( lp->scan_results ), 0, sizeof( lp->scan_results )); lp->scan_results.scan_complete = FALSE; /* Initialize the ProbeResult struct */ memset( &( lp->probe_results ), 0, sizeof( lp->probe_results )); lp->probe_results.scan_complete = FALSE; lp->probe_num_aps = 0; /* Initialize Tx queue stuff */ memset( lp->txList, 0, sizeof( lp->txList )); INIT_LIST_HEAD( &( lp->txFree )); lp->txF.skb = NULL; lp->txF.port = 0; for( i = 0; i < DEFAULT_NUM_TX_FRAMES; i++ ) { list_add_tail( &( lp->txList[i].node ), &( lp->txFree )); } for( i = 0; i < WVLAN_MAX_TX_QUEUES; i++ ) { INIT_LIST_HEAD( &( lp->txQ[i] )); } lp->netif_queue_on = TRUE; lp->txQ_count = 0; /* Initialize the use_dma element in the adapter structure. Not sure if this should be a compile-time or run-time configurable. So for now, implement as run-time and just define here */ #ifdef WARP #ifdef ENABLE_DMA DBG_TRACE( DbgInfo, "HERMES 2.5 BUSMASTER DMA MODE\n" ); lp->use_dma = 1; #else DBG_TRACE( DbgInfo, "HERMES 2.5 PORT I/O MODE\n" ); lp->use_dma = 0; #endif // ENABLE_DMA #endif // WARP /* Register the ISR handler information here, so that it's not done repeatedly in the ISR */ tasklet_init(&lp->task, wl_isr_handler, (unsigned long)lp); /* Connect to the adapter */ DBG_TRACE( DbgInfo, "Calling hcf_connect()...\n" ); hcf_status = hcf_connect( &lp->hcfCtx, dev->base_addr ); //HCF_ERR_INCOMP_FW is acceptable, because download must still take place //HCF_ERR_INCOMP_PRI is not acceptable if ( hcf_status != HCF_SUCCESS && hcf_status != HCF_ERR_INCOMP_FW ) { DBG_ERROR( DbgInfo, "hcf_connect() failed, status: 0x%x\n", hcf_status ); wl_unlock( lp, &flags ); goto hcf_failed; } //;?should set HCF_version and how about driver_stat lp->driverInfo.IO_address = dev->base_addr; lp->driverInfo.IO_range = HCF_NUM_IO_PORTS; //;?conditionally 0x40 or 0x80 seems better 
lp->driverInfo.IRQ_number = dev->irq; lp->driverInfo.card_stat = lp->hcfCtx.IFB_CardStat; //;? what happened to frame_type /* Fill in the driver identity structure */ lp->driverIdentity.len = ( sizeof( lp->driverIdentity ) / sizeof( hcf_16 )) - 1; lp->driverIdentity.typ = CFG_DRV_IDENTITY; lp->driverIdentity.comp_id = DRV_IDENTITY; lp->driverIdentity.variant = DRV_VARIANT; lp->driverIdentity.version_major = DRV_MAJOR_VERSION; lp->driverIdentity.version_minor = DRV_MINOR_VERSION; /* Start the card here - This needs to be done in order to get the MAC address for the network layer */ DBG_TRACE( DbgInfo, "Calling wvlan_go() to perform a card reset...\n" ); hcf_status = wl_go( lp ); if ( hcf_status != HCF_SUCCESS ) { DBG_ERROR( DbgInfo, "wl_go() failed\n" ); wl_unlock( lp, &flags ); goto hcf_failed; } /* Certain RIDs must be set before enabling the ports */ wl_put_ltv_init( lp ); #if 0 //;?why was this already commented out in wl_lkm_720 /* Enable the ports */ if ( wl_adapter_is_open( lp->dev )) { /* Enable the ports */ DBG_TRACE( DbgInfo, "Enabling Port 0\n" ); hcf_status = wl_enable( lp ); if ( hcf_status != HCF_SUCCESS ) { DBG_TRACE( DbgInfo, "Enable port 0 failed: 0x%x\n", hcf_status ); } #if (HCF_TYPE) & HCF_TYPE_AP DBG_TRACE( DbgInfo, "Enabling WDS Ports\n" ); //wl_enable_wds_ports( lp ); #endif /* (HCF_TYPE) & HCF_TYPE_AP */ } #endif /* Fill out the MAC address information in the net_device struct */ memcpy( lp->dev->dev_addr, lp->MACAddress, ETH_ALEN ); dev->addr_len = ETH_ALEN; lp->is_registered = TRUE; #ifdef USE_PROFILE /* Parse the config file for the sake of creating WDS ports if WDS is configured there but not in the module options */ parse_config( dev ); #endif /* USE_PROFILE */ /* If we're going into AP Mode, register the "virtual" ethernet devices needed for WDS */ WL_WDS_NETDEV_REGISTER( lp ); /* Reset the DownloadFirmware variable in the private struct. 
If the config file is not used, this will not matter; if it is used, it will be reparsed in wl_open(). This is done because logic in wl_open used to check if a firmware download is needed is broken by parsing the file here; however, this parsing is needed to register WDS ports in AP mode, if they are configured */ lp->DownloadFirmware = WVLAN_DRV_MODE_STA; //;?download_firmware; #ifdef USE_RTS if ( lp->useRTS == 1 ) { DBG_TRACE( DbgInfo, "ENTERING RTS MODE...\n" ); wl_act_int_off( lp ); lp->is_handling_int = WL_NOT_HANDLING_INT; // Not handling interrupts anymore wl_disable( lp ); hcf_connect( &lp->hcfCtx, HCF_DISCONNECT); } #endif /* USE_RTS */ wl_unlock( lp, &flags ); DBG_TRACE( DbgInfo, "%s: Wireless, io_addr %#03lx, irq %d, ""mac_address ", dev->name, dev->base_addr, dev->irq ); for( i = 0; i < ETH_ALEN; i++ ) { printk( "%02X%c", dev->dev_addr[i], (( i < ( ETH_ALEN-1 )) ? ':' : '\n' )); } #if 0 //SCULL_USE_PROC /* don't waste space if unused */ proc_create_data( "wlags", 0, NULL, &scull_read_procmem_fops, dev ); proc_mkdir("driver/wlags49", 0); #endif /* SCULL_USE_PROC */ return result; hcf_failed: wl_hcf_error( dev, hcf_status ); failed: DBG_ERROR( DbgInfo, "wl_insert() FAILED\n" ); if ( lp->is_registered == TRUE ) { lp->is_registered = FALSE; } WL_WDS_NETDEV_DEREGISTER( lp ); result = -EFAULT; return result; } // wl_insert /*============================================================================*/ /******************************************************************************* * wl_reset() ******************************************************************************* * * DESCRIPTION: * * Reset the adapter. 
*
 *  PARAMETERS:
 *
 *      dev - a pointer to the net_device struct of the wireless device
 *
 *  RETURNS:
 *
 *      an HCF status code
 *
 ******************************************************************************/
/* Full adapter reset: disconnect, reconnect, re-download/restart via wl_go(),
 * then re-apply the init-time RIDs. Caller must already hold the driver lock
 * with interrupts disabled (see comment below). */
int wl_reset(struct net_device *dev)
{
	struct wl_private  *lp = wl_priv(dev);
	int                 hcf_status = HCF_SUCCESS;

	DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
	DBG_PARAM( DbgInfo, "dev->base_addr", "(%#03lx)", dev->base_addr );

	/*
	 * The caller should already have a lock and
	 * disable the interrupts, we do not lock here,
	 * nor do we enable/disable interrupts!
	 */

	DBG_TRACE( DbgInfo, "Device Base Address: %#03lx\n", dev->base_addr );
	if ( dev->base_addr ) {
		/* Shutdown the adapter. */
		hcf_connect( &lp->hcfCtx, HCF_DISCONNECT );

		/* Reset the driver information. */
		lp->txBytes = 0;

		/* Connect to the adapter. HCF_ERR_INCOMP_FW is tolerated here
		 * because the firmware download still has to take place. */
		hcf_status = hcf_connect( &lp->hcfCtx, dev->base_addr );
		if ( hcf_status != HCF_SUCCESS && hcf_status != HCF_ERR_INCOMP_FW ) {
			DBG_ERROR( DbgInfo, "hcf_connect() failed, status: 0x%x\n", hcf_status );
			goto out;
		}

		/* Check if firmware is present, if not change state */
		if ( hcf_status == HCF_ERR_INCOMP_FW ) {
			lp->firmware_present = WL_FRIMWARE_NOT_PRESENT;
		}

		/* Initialize the portState variable */
		lp->portState = WVLAN_PORT_STATE_DISABLED;

		/* Restart the adapter (downloads firmware if needed). */
		hcf_status = wl_go( lp );
		if ( hcf_status != HCF_SUCCESS ) {
			DBG_ERROR( DbgInfo, "wl_go() failed, status: 0x%x\n", hcf_status );
			goto out;
		}

		/* Certain RIDs must be set before enabling the ports */
		wl_put_ltv_init( lp );
	} else {
		DBG_ERROR( DbgInfo, "Device Base Address INVALID!!!\n" );
	}

out:
	return hcf_status;
} // wl_reset
/*============================================================================*/




/*******************************************************************************
 *	wl_go()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Reset the adapter.
*
 *  PARAMETERS:
 *
 *      lp - a pointer to the wireless adapter's private structure
 *
 *  RETURNS:
 *
 *      an HCF status code
 *
 ******************************************************************************/
/* Bring the (already hcf_connect-ed) card up: disable port 0, download
 * firmware when none/unknown firmware is present, fetch the MAC address,
 * push the configuration RIDs and re-enable. */
int wl_go( struct wl_private *lp )
{
	int  	hcf_status = HCF_SUCCESS;
	char	*cp = NULL;			//fw_image
	int	retries = 0;

	/* Disabling the port can fail transiently; retry up to 10 times. */
	hcf_status = wl_disable( lp );
	if ( hcf_status != HCF_SUCCESS ) {
		DBG_TRACE( DbgInfo, "Disable port 0 failed: 0x%x\n", hcf_status );
		while (( hcf_status != HCF_SUCCESS ) && (retries < 10)) {
			retries++;
			hcf_status = wl_disable( lp );
		}
		if ( hcf_status == HCF_SUCCESS ) {
			DBG_TRACE( DbgInfo, "Disable port 0 succes : %d retries\n", retries );
		} else {
			DBG_TRACE( DbgInfo, "Disable port 0 failed after: %d retries\n", retries );
		}
	}
#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
	//DBG_TRACE( DbgInfo, "Disabling WDS Ports\n" );
	//wl_disable_wds_ports( lp );
#endif /* (HCF_TYPE) & HCF_TYPE_AP */

//;?what was the purpose of this
// 	/* load the appropriate firmware image, depending on driver mode */
// 	lp->ltvRecord.len = ( sizeof( CFG_RANGE20_STRCT ) / sizeof( hcf_16 )) - 1;
// 	lp->ltvRecord.typ = CFG_DRV_ACT_RANGES_PRI;
// 	hcf_get_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

#if BIN_DL
	/* Optional: read a firmware image from a file named in the module
	 * configuration and download it instead of the built-in image. */
	if ( strlen( lp->fw_image_filename ) ) {
		mm_segment_t	fs;
		int		file_desc;
		int 		rc;

		DBG_TRACE( DbgInfo, "F/W image:%s:\n", lp->fw_image_filename );
		/* Obtain a user-space process context, storing the original context */
		fs = get_fs( );
		set_fs( get_ds( ));
		file_desc = open( lp->fw_image_filename, O_RDONLY, 0 );
		if ( file_desc == -1 ) {
			DBG_ERROR( DbgInfo, "No image file found\n" );
		} else {
			DBG_TRACE( DbgInfo, "F/W image file found\n" );
#define DHF_ALLOC_SIZE 96000			//just below 96K, let's hope it suffices for now and for the future
			cp = (char*)vmalloc( DHF_ALLOC_SIZE );
			if ( cp == NULL ) {
				DBG_ERROR( DbgInfo, "error in vmalloc\n" );
			} else {
				rc = read( file_desc, cp, DHF_ALLOC_SIZE );
				if ( rc == DHF_ALLOC_SIZE ) {
					/* Buffer filled completely: image may be truncated. */
					DBG_ERROR( DbgInfo, "buffer too small, %d\n", DHF_ALLOC_SIZE );
				} else if ( rc > 0 ) {
					DBG_TRACE( DbgInfo, "read O.K.: %d bytes %.12s\n", rc, cp );
					/* Probe one extra byte to confirm we hit EOF. */
					rc = read( file_desc, &cp[rc], 1 );
					if ( rc == 0 ) { //;/change to an until-loop at rc<=0
						DBG_TRACE( DbgInfo, "no more to read\n" );
					}
				}
				if ( rc != 0 ) {
					DBG_ERROR( DbgInfo, "file not read in one swoop or other error"\
								", give up, too complicated, rc = %0X\n", rc );
					DBG_ERROR( DbgInfo, "still have to change code to get a real download now !!!!!!!!\n" );
				} else {
					DBG_TRACE( DbgInfo, "before dhf_download_binary\n" );
					hcf_status = dhf_download_binary( (memimage *)cp );
					DBG_TRACE( DbgInfo, "after dhf_download_binary, before dhf_download_fw\n" );
					//;?improve error flow/handling
					hcf_status = dhf_download_fw( &lp->hcfCtx, (memimage *)cp );
					DBG_TRACE( DbgInfo, "after dhf_download_fw\n" );
				}
				vfree( cp );
			}
			close( file_desc );
		}
		set_fs( fs );			/* Return to the original context */
	}
#endif // BIN_DL

	/* If firmware is present but the type is unknown then download anyway */
	if ( (lp->firmware_present == WL_FRIMWARE_PRESENT) &&
	     ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) != COMP_ID_FW_STA ) &&
	     ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) != COMP_ID_FW_AP ) ) {
		/* Unknown type, download needed. */
		lp->firmware_present = WL_FRIMWARE_NOT_PRESENT;
	}

	if(lp->firmware_present == WL_FRIMWARE_NOT_PRESENT) {
		/* cp != NULL means the BIN_DL path above already downloaded. */
		if ( cp == NULL ) {
			DBG_TRACE( DbgInfo, "Downloading STA firmware...\n" );
//			hcf_status = dhf_download_fw( &lp->hcfCtx, &station );
			hcf_status = dhf_download_fw( &lp->hcfCtx, &fw_image );
		}
		if ( hcf_status != HCF_SUCCESS ) {
			DBG_ERROR( DbgInfo, "Firmware Download failed\n" );
			return hcf_status;
		}
	}

	/* Report the FW versions */
	//;?obsolete, use the available IFB info:: wl_get_pri_records( lp );
	if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_STA ) {
		DBG_TRACE( DbgInfo, "downloaded station F/W\n" );
	} else if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) {
		DBG_TRACE( DbgInfo, "downloaded AP F/W\n" );
	} else {
		DBG_ERROR( DbgInfo, "unknown F/W type\n" );
	}

	/*
	 * Downloaded, no need to repeat this next time, assume the
	 * contents stays in the card until it is powered off. Note we
	 * do not switch firmware on the fly, the firmware is fixed in
	 * the driver for now.
	 */
	lp->firmware_present = WL_FRIMWARE_PRESENT;

	DBG_TRACE( DbgInfo, "ComponentID:%04x variant:%04x major:%04x minor:%04x\n",
			   CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ),
			   CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.variant ),
			   CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.version_major ),
			   CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.version_minor ));

	/* now we will get the MAC address of the card; the RID differs
	   between AP and STA firmware */
	lp->ltvRecord.len = 4;
	if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) {
		lp->ltvRecord.typ = CFG_NIC_MAC_ADDR;
	} else {
		lp->ltvRecord.typ = CFG_CNF_OWN_MAC_ADDR;
	}
	hcf_status = hcf_get_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
	if ( hcf_status != HCF_SUCCESS ) {
		DBG_ERROR( DbgInfo, "Could not retrieve MAC address\n" );
		return hcf_status;
	}
	memcpy( lp->MACAddress, &lp->ltvRecord.u.u8[0], ETH_ALEN );
	DBG_TRACE(DbgInfo, "Card MAC Address: %pM\n", lp->MACAddress);

	/* Write out configuration to the device, enable, and reconnect. However,
	   only reconnect if in AP mode. For STA mode, need to wait for passive scan
	   completion before a connect can be issued */
	wl_put_ltv( lp );

	/* Enable the ports */
	hcf_status = wl_enable( lp );

	if ( lp->DownloadFirmware == WVLAN_DRV_MODE_AP ) {
#ifdef USE_WDS
		wl_enable_wds_ports( lp );
#endif // USE_WDS
		hcf_status = wl_connect( lp );
	}
	return hcf_status;
} // wl_go
/*============================================================================*/




/*******************************************************************************
 *	wl_set_wep_keys()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Write TxKeyID and WEP keys to the adapter. This is separated from
 *      wl_apply() to allow dynamic WEP key updates through the wireless
 *      extensions.
*
 *  PARAMETERS:
 *
 *      lp - a pointer to the wireless adapter's private structure
 *
 *  RETURNS:
 *
 *      N/A
 *
 ******************************************************************************/
/* Push the transmit key index and the four default WEP keys to the card.
 * No-op unless encryption is enabled. */
void wl_set_wep_keys( struct wl_private *lp )
{
	int count = 0;

	DBG_PARAM( DbgInfo, "lp", "%s (0x%p)", lp->dev->name, lp );
	if ( lp->EnableEncryption ) {
		/* NOTE: CFG_CNF_ENCRYPTION is set in wl_put_ltv() as it's a static
		   RID */

		/* set TxKeyID (the card uses 0-based indices, driver stores 1-based) */
		lp->ltvRecord.len = 2;
		lp->ltvRecord.typ       = CFG_TX_KEY_ID;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE(lp->TransmitKeyID - 1);

		hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		DBG_TRACE( DbgInfo, "Key 1 len: %d\n", lp->DefaultKeys.key[0].len );
		DBG_TRACE( DbgInfo, "Key 2 len: %d\n", lp->DefaultKeys.key[1].len );
		DBG_TRACE( DbgInfo, "Key 3 len: %d\n", lp->DefaultKeys.key[2].len );
		DBG_TRACE( DbgInfo, "Key 4 len: %d\n", lp->DefaultKeys.key[3].len );

		/* write keys */
		lp->DefaultKeys.len = sizeof( lp->DefaultKeys ) / sizeof( hcf_16 ) - 1;
		lp->DefaultKeys.typ = CFG_DEFAULT_KEYS;

		/* endian translate the appropriate key information */
		for( count = 0; count < MAX_KEYS; count++ ) {
			lp->DefaultKeys.key[count].len = CNV_INT_TO_LITTLE( lp->DefaultKeys.key[count].len );
		}

		hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->DefaultKeys ));

		/* Reverse the above endian translation, since these keys are accessed
		   elsewhere (CNV_INT_TO_LITTLE applied twice restores the value) */
		for( count = 0; count < MAX_KEYS; count++ ) {
			lp->DefaultKeys.key[count].len = CNV_INT_TO_LITTLE( lp->DefaultKeys.key[count].len );
		}

		DBG_NOTICE( DbgInfo, "encrypt: %d, ID: %d\n", lp->EnableEncryption, lp->TransmitKeyID );
		DBG_NOTICE( DbgInfo, "set key: %s(%d) [%d]\n", lp->DefaultKeys.key[lp->TransmitKeyID-1].key, lp->DefaultKeys.key[lp->TransmitKeyID-1].len, lp->TransmitKeyID-1 );
	}
} // wl_set_wep_keys
/*============================================================================*/




/*******************************************************************************
 *	wl_apply()
******************************************************************************* * * DESCRIPTION: * * Write the parameters to the adapter. (re-)enables the card if device is * open. Returns hcf_status of hcf_enable(). * * PARAMETERS: * * lp - a pointer to the wireless adapter's private structure * * RETURNS: * * an HCF status code * ******************************************************************************/ int wl_apply(struct wl_private *lp) { int hcf_status = HCF_SUCCESS; DBG_ASSERT( lp != NULL); DBG_PARAM( DbgInfo, "lp", "%s (0x%p)", lp->dev->name, lp ); if ( !( lp->flags & WVLAN2_UIL_BUSY )) { /* The adapter parameters have changed: disable card reload parameters enable card */ if ( wl_adapter_is_open( lp->dev )) { /* Disconnect and disable if necessary */ hcf_status = wl_disconnect( lp ); if ( hcf_status != HCF_SUCCESS ) { DBG_ERROR( DbgInfo, "Disconnect failed\n" ); return -1; } hcf_status = wl_disable( lp ); if ( hcf_status != HCF_SUCCESS ) { DBG_ERROR( DbgInfo, "Disable failed\n" ); return -1; } else { /* Write out configuration to the device, enable, and reconnect. However, only reconnect if in AP mode. For STA mode, need to wait for passive scan completion before a connect can be issued */ hcf_status = wl_put_ltv( lp ); if ( hcf_status == HCF_SUCCESS ) { hcf_status = wl_enable( lp ); if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) { hcf_status = wl_connect( lp ); } } else { DBG_WARNING( DbgInfo, "wl_put_ltv() failed\n" ); } } } } return hcf_status; } // wl_apply /*============================================================================*/ /******************************************************************************* * wl_put_ltv_init() ******************************************************************************* * * DESCRIPTION: * * Used to set basic parameters for card initialization. 
*
 *  PARAMETERS:
 *
 *      lp - a pointer to the wireless adapter's private structure
 *
 *  RETURNS:
 *
 *      an HCF status code (or -1 when lp is NULL)
 *
 ******************************************************************************/
/* Program the init-time RIDs: the I/O-vs-DMA control option and the list of
 * RIDs for which the firmware must raise asynchronous notifications. */
int wl_put_ltv_init( struct wl_private *lp )
{
	int i;
	int hcf_status;
	CFG_RID_LOG_STRCT *RidLog;

	if ( lp == NULL ) {
		DBG_ERROR( DbgInfo, "lp pointer is NULL\n" );
		return -1;
	}
	/* DMA/IO */
	lp->ltvRecord.len = 2;
	lp->ltvRecord.typ = CFG_CNTL_OPT;
	/* The Card Services build must ALWAYS be configured for 16-bit I/O. PCI or
	   CardBus can be set to either 16/32 bit I/O, or Bus Master DMA, but only
	   for Hermes-2.5 */
#ifdef BUS_PCMCIA
	lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( USE_16BIT );
#else
	if ( lp->use_dma ) {
		lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( USE_DMA );
	} else {
		lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
	}
#endif
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
	DBG_TRACE( DbgInfo, "CFG_CNTL_OPT                    : 0x%04x\n",
			   lp->ltvRecord.u.u16[0] );
	DBG_TRACE( DbgInfo, "CFG_CNTL_OPT result             : 0x%04x\n",
			   hcf_status );

	/* Register the list of RIDs on which asynchronous notification is
	   required. Note that this mechanism replaces the mailbox, so the mailbox
	   can be queried by the host (if desired) without contention from us */
	i=0;

	lp->RidList[i].len     = sizeof( lp->ProbeResp );
	lp->RidList[i].typ     = CFG_ACS_SCAN;
	lp->RidList[i].bufp    = (wci_recordp)&lp->ProbeResp;
	//lp->ProbeResp.infoType = 0xFFFF;
	i++;

	lp->RidList[i].len     = sizeof( lp->assoc_stat );
	lp->RidList[i].typ     = CFG_ASSOC_STAT;
	lp->RidList[i].bufp    = (wci_recordp)&lp->assoc_stat;
	lp->assoc_stat.len     = 0xFFFF;
	i++;

	lp->RidList[i].len     = 4;
	lp->RidList[i].typ     = CFG_UPDATED_INFO_RECORD;
	lp->RidList[i].bufp    = (wci_recordp)&lp->updatedRecord;
	lp->updatedRecord.len  = 0xFFFF;
	i++;

	lp->RidList[i].len     = sizeof( lp->sec_stat );
	lp->RidList[i].typ     = CFG_SECURITY_STAT;
	lp->RidList[i].bufp    = (wci_recordp)&lp->sec_stat;
	lp->sec_stat.len       = 0xFFFF;
	i++;

	lp->RidList[i].typ     = 0;    // Terminate List

	RidLog = (CFG_RID_LOG_STRCT *)&lp->ltvRecord;
	RidLog->len     = 3;
	RidLog->typ     = CFG_REG_INFO_LOG;
	RidLog->recordp = (RID_LOGP)&lp->RidList[0];

	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
	DBG_TRACE( DbgInfo, "CFG_REG_INFO_LOG\n" );
	DBG_TRACE( DbgInfo, "CFG_REG_INFO_LOG result         : 0x%04x\n",
			   hcf_status );
	return hcf_status;
} // wl_put_ltv_init
/*============================================================================*/




/*******************************************************************************
 *	wl_put_ltv()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Used by wvlan_apply() and wvlan_go to set the card's configuration.
*
 *  PARAMETERS:
 *
 *      lp - a pointer to the wireless adapter's private structure
 *
 *  RETURNS:
 *
 *      an HCF status code (status of the LAST hcf_put_info() performed;
 *      earlier failures are overwritten — pre-existing behavior)
 *
 ******************************************************************************/
/* Push the whole driver configuration to the card as a sequence of RID
 * writes. lp->ltvRecord is reused for every record, so the order of the
 * assignments and hcf_put_info() calls below is significant. */
int wl_put_ltv( struct wl_private *lp )
{
	int len;
	int hcf_status;

	if ( lp == NULL ) {
		DBG_ERROR( DbgInfo, "lp pointer is NULL\n" );
		return -1;
	}
	if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) {
		lp->maxPort = 6;			//;?why set this here and not as part of download process
	} else {
		lp->maxPort = 0;
	}

	/* Send our configuration to the card. Perform any endian translation
	   necessary */
	/* Register the Mailbox; VxWorks does this elsewhere; why;? */
	lp->ltvRecord.len       = 4;
	lp->ltvRecord.typ       = CFG_REG_MB;
	lp->ltvRecord.u.u32[0]  = (u_long)&( lp->mailbox );
	lp->ltvRecord.u.u16[2]  = ( MB_SIZE / sizeof( hcf_16 ));
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Max Data Length */
	lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = CFG_CNF_MAX_DATA_LEN;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( HCF_MAX_PACKET_SIZE );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* System Scale / Distance between APs */
	lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = CFG_CNF_SYSTEM_SCALE;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->DistanceBetweenAPs );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Channel: IBSS creation with no channel selected defaults to 10 */
	if ( lp->CreateIBSS && ( lp->Channel == 0 )) {
		DBG_TRACE( DbgInfo, "Create IBSS" );
		lp->Channel = 10;
	}
	lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = CFG_CNF_OWN_CHANNEL;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->Channel );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Microwave Robustness */
	lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = CFG_CNF_MICRO_WAVE;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->MicrowaveRobustness );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Load Balancing */
	lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = CFG_CNF_LOAD_BALANCING;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->loadBalancing );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Medium Distribution */
	lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = CFG_CNF_MEDIUM_DISTRIBUTION;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->mediumDistribution );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Country Code */

#ifdef WARP
	/* Tx Power Level (for supported cards) */
	lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = CFG_CNF_TX_POW_LVL;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->txPowLevel );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Short Retry Limit */
	/*lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = 0xFC32;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->shortRetryLimit );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
	*/

	/* Long Retry Limit */
	/*lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = 0xFC33;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->longRetryLimit );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
	*/

	/* Supported Rate Set Control */
	lp->ltvRecord.len       = 3;
	lp->ltvRecord.typ       = CFG_SUPPORTED_RATE_SET_CNTL; //0xFC88;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->srsc[0] );
	lp->ltvRecord.u.u16[1]  = CNV_INT_TO_LITTLE( lp->srsc[1] );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Basic Rate Set Control */
	lp->ltvRecord.len       = 3;
	lp->ltvRecord.typ       = CFG_BASIC_RATE_SET_CNTL; //0xFC89;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->brsc[0] );
	lp->ltvRecord.u.u16[1]  = CNV_INT_TO_LITTLE( lp->brsc[1] );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Frame Burst Limit */
	/* Defined, but not currently available in Firmware */

#endif // WARP

#ifdef WARP
	/* Multicast Rate */
	lp->ltvRecord.len       = 3;
	lp->ltvRecord.typ       = CFG_CNF_MCAST_RATE;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->MulticastRate[0] );
	lp->ltvRecord.u.u16[1]  = CNV_INT_TO_LITTLE( lp->MulticastRate[1] );
#else
	lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = CFG_CNF_MCAST_RATE;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->MulticastRate[0] );
#endif // WARP
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Own Name (Station Nickname); len is rounded up to an even byte count */
	if (( len = ( strlen( lp->StationName ) + 1 ) & ~0x01 ) != 0 ) {
		//DBG_TRACE( DbgInfo, "CFG_CNF_OWN_NAME                : %s\n",
		//           lp->StationName );
		lp->ltvRecord.len       = 2 + ( len / sizeof( hcf_16 ));
		lp->ltvRecord.typ       = CFG_CNF_OWN_NAME;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( strlen( lp->StationName ));
		memcpy( &( lp->ltvRecord.u.u8[2] ), lp->StationName, len );
	} else {
		//DBG_TRACE( DbgInfo, "CFG_CNF_OWN_NAME                : EMPTY\n" );
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_OWN_NAME;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( 0 );
	}
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
	//DBG_TRACE( DbgInfo, "CFG_CNF_OWN_NAME result         : 0x%04x\n",
	//           hcf_status );

	/* The following are set in STA mode only */
	if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_STA ) {

		/* RTS Threshold */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_RTS_THRH;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->RTSThreshold );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Port Type */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_PORT_TYPE;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->PortType );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Tx Rate Control */
#ifdef WARP
		lp->ltvRecord.len       = 3;
		lp->ltvRecord.typ       = CFG_TX_RATE_CNTL;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->TxRateControl[0] );
		lp->ltvRecord.u.u16[1]  = CNV_INT_TO_LITTLE( lp->TxRateControl[1] );
#else
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_TX_RATE_CNTL;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->TxRateControl[0] );
#endif // WARP

//;?skip temporarily to see whether the RID or something else is the problem
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		DBG_TRACE( DbgInfo, "CFG_TX_RATE_CNTL 2.4GHz         : 0x%04x\n", lp->TxRateControl[0] );
		DBG_TRACE( DbgInfo, "CFG_TX_RATE_CNTL 5.0GHz         : 0x%04x\n", lp->TxRateControl[1] );
		DBG_TRACE( DbgInfo, "CFG_TX_RATE_CNTL result         : 0x%04x\n", hcf_status );
		/* Power Management */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_PM_ENABLED;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->PMEnabled );
//		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( 0x8001 );
		DBG_TRACE( DbgInfo, "CFG_CNF_PM_ENABLED              : 0x%04x\n", lp->PMEnabled );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Multicast Receive */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_MCAST_RX;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->MulticastReceive );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Max Sleep Duration */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_MAX_SLEEP_DURATION;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->MaxSleepDuration );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Create IBSS */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CREATE_IBSS;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->CreateIBSS );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Desired SSID ("ANY"/"any" means: do not restrict) */
		if ((( len = ( strlen( lp->NetworkName ) + 1 ) & ~0x01 ) != 0 ) &&
			 ( strcmp( lp->NetworkName, "ANY" ) != 0 ) &&
			 ( strcmp( lp->NetworkName, "any" ) != 0 )) {
			//DBG_TRACE( DbgInfo, "CFG_DESIRED_SSID                : %s\n",
			//           lp->NetworkName );
			lp->ltvRecord.len       = 2 + (len / sizeof(hcf_16));
			lp->ltvRecord.typ       = CFG_DESIRED_SSID;
			lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( strlen( lp->NetworkName ));
			memcpy( &( lp->ltvRecord.u.u8[2] ), lp->NetworkName, len );
		} else {
			//DBG_TRACE( DbgInfo, "CFG_DESIRED_SSID                : ANY\n" );
			lp->ltvRecord.len       = 2;
			lp->ltvRecord.typ       = CFG_DESIRED_SSID;
			lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( 0 );
		}
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
		//DBG_TRACE( DbgInfo, "CFG_DESIRED_SSID result         : 0x%04x\n",
		//           hcf_status );

		/* Own ATIM window */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_OWN_ATIM_WINDOW;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->atimWindow );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Holdover Duration */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_HOLDOVER_DURATION;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->holdoverDuration );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Promiscuous Mode */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_PROMISCUOUS_MODE;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->promiscuousMode );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Authentication */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_AUTHENTICATION;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->authentication );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

#ifdef WARP
		/* Connection Control */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_CONNECTION_CNTL;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->connectionControl );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Probe data rate */
		/*lp->ltvRecord.len       = 3;
		lp->ltvRecord.typ       = CFG_PROBE_DATA_RATE;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->probeDataRates[0] );
		lp->ltvRecord.u.u16[1]  = CNV_INT_TO_LITTLE( lp->probeDataRates[1] );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		DBG_TRACE( DbgInfo, "CFG_PROBE_DATA_RATE 2.4GHz      : 0x%04x\n", lp->probeDataRates[0] );
		DBG_TRACE( DbgInfo, "CFG_PROBE_DATA_RATE 5.0GHz      : 0x%04x\n", lp->probeDataRates[1] );
		DBG_TRACE( DbgInfo, "CFG_PROBE_DATA_RATE result      : 0x%04x\n", hcf_status );*/
#endif // WARP
	} else {
		/* The following are set in AP mode only */
#if 0 //;? (HCF_TYPE) & HCF_TYPE_AP
		//;?should we restore this to allow smaller memory footprint

		/* DTIM Period */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_OWN_DTIM_PERIOD;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->DTIMPeriod );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Multicast PM Buffering */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_MCAST_PM_BUF;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->multicastPMBuffering );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Reject ANY - Closed System */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_REJECT_ANY;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->RejectAny );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Exclude Unencrypted */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_EXCL_UNENCRYPTED;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->ExcludeUnencrypted );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* IntraBSS Relay */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_INTRA_BSS_RELAY;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->intraBSSRelay );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* RTS Threshold 0 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_RTS_THRH0;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->RTSThreshold );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Tx Rate Control 0 */
#ifdef WARP
		lp->ltvRecord.len       = 3;
		lp->ltvRecord.typ       = CFG_TX_RATE_CNTL0;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->TxRateControl[0] );
		lp->ltvRecord.u.u16[1]  = CNV_INT_TO_LITTLE( lp->TxRateControl[1] );
#else
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_TX_RATE_CNTL0;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->TxRateControl[0] );
#endif // WARP
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Own Beacon Interval */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = 0xFC31;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->ownBeaconInterval );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* Co-Existence Behavior */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = 0xFCC7;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->coexistence );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

#ifdef USE_WDS
		/* RTS Threshold 1 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_RTS_THRH1;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[0].rtsThreshold );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* RTS Threshold 2 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_RTS_THRH2;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[1].rtsThreshold );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* RTS Threshold 3 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_RTS_THRH3;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[2].rtsThreshold );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* RTS Threshold 4 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_RTS_THRH4;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[3].rtsThreshold );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* RTS Threshold 5 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_RTS_THRH5;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[4].rtsThreshold );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* RTS Threshold 6 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_RTS_THRH6;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[5].rtsThreshold );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
#if 0
		/* TX Rate Control 1 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_TX_RATE_CNTL1;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[0].txRateCntl );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* TX Rate Control 2 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_TX_RATE_CNTL2;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[1].txRateCntl );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* TX Rate Control 3 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_TX_RATE_CNTL3;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[2].txRateCntl );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* TX Rate Control 4 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_TX_RATE_CNTL4;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[3].txRateCntl );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* TX Rate Control 5 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_TX_RATE_CNTL5;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[4].txRateCntl );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* TX Rate Control 6 */
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_TX_RATE_CNTL6;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->wds_port[5].txRateCntl );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
#endif
		/* WDS addresses. It's okay to blindly send these parameters, because
		   the port needs to be enabled, before anything is done with it. */

		/* WDS Address 1 */
		lp->ltvRecord.len       = 4;
		lp->ltvRecord.typ       = CFG_CNF_WDS_ADDR1;
		memcpy( &lp->ltvRecord.u.u8[0], lp->wds_port[0].wdsAddress, ETH_ALEN );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* WDS Address 2 */
		lp->ltvRecord.len       = 4;
		lp->ltvRecord.typ       = CFG_CNF_WDS_ADDR2;
		memcpy( &lp->ltvRecord.u.u8[0], lp->wds_port[1].wdsAddress, ETH_ALEN );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* WDS Address 3 */
		lp->ltvRecord.len       = 4;
		lp->ltvRecord.typ       = CFG_CNF_WDS_ADDR3;
		memcpy( &lp->ltvRecord.u.u8[0], lp->wds_port[2].wdsAddress, ETH_ALEN );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* WDS Address 4 */
		lp->ltvRecord.len       = 4;
		lp->ltvRecord.typ       = CFG_CNF_WDS_ADDR4;
		memcpy( &lp->ltvRecord.u.u8[0], lp->wds_port[3].wdsAddress, ETH_ALEN );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* WDS Address 5 */
		lp->ltvRecord.len       = 4;
		lp->ltvRecord.typ       = CFG_CNF_WDS_ADDR5;
		memcpy( &lp->ltvRecord.u.u8[0], lp->wds_port[4].wdsAddress, ETH_ALEN );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

		/* WDS Address 6 */
		lp->ltvRecord.len       = 4;
		lp->ltvRecord.typ       = CFG_CNF_WDS_ADDR6;
		memcpy( &lp->ltvRecord.u.u8[0], lp->wds_port[5].wdsAddress, ETH_ALEN );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
#endif /* USE_WDS */
#endif /* (HCF_TYPE) & HCF_TYPE_AP */
	}

	/* Own MAC Address */
/*
	DBG_TRACE(DbgInfo, "MAC Address                     : %pM\n",
			lp->MACAddress);
*/
	if ( WVLAN_VALID_MAC_ADDRESS( lp->MACAddress )) {
		/* Make the MAC address valid by:
		      Clearing the multicast bit
		      Setting the local MAC address bit */
		//lp->MACAddress[0] &= ~0x03;  //;?why is this commented out already in 720
		//lp->MACAddress[0] |= 0x02;

		lp->ltvRecord.len = 1 + ( ETH_ALEN / sizeof( hcf_16 ));
		if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) {
			//DBG_TRACE( DbgInfo, "CFG_NIC_MAC_ADDR\n" );
			lp->ltvRecord.typ = CFG_NIC_MAC_ADDR;
		} else {
			//DBG_TRACE( DbgInfo, "CFG_CNF_OWN_MAC_ADDR\n" );
			lp->ltvRecord.typ = CFG_CNF_OWN_MAC_ADDR;
		}
		/* MAC address is byte aligned, no endian conversion needed */
		memcpy( &( lp->ltvRecord.u.u8[0] ), lp->MACAddress, ETH_ALEN );
		hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
		//DBG_TRACE( DbgInfo, "CFG_XXX_MAC_ADDR result         : 0x%04x\n",
		//           hcf_status );

		/* Update the MAC address in the netdevice struct */
		memcpy( lp->dev->dev_addr, lp->MACAddress, ETH_ALEN ); //;?what is the purpose of this seemingly complex logic
	}
	/* Own SSID */
	if ((( len = ( strlen( lp->NetworkName ) + 1 ) & ~0x01 ) != 0 ) &&
				 ( strcmp( lp->NetworkName, "ANY" ) != 0 ) &&
				 ( strcmp( lp->NetworkName, "any" ) != 0 )) {
		//DBG_TRACE( DbgInfo, "CFG_CNF_OWN_SSID                : %s\n",
		//           lp->NetworkName );
		lp->ltvRecord.len       = 2 + (len / sizeof(hcf_16));
		lp->ltvRecord.typ       = CFG_CNF_OWN_SSID;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( strlen( lp->NetworkName ));
		memcpy( &( lp->ltvRecord.u.u8[2] ), lp->NetworkName, len );
	} else {
		//DBG_TRACE( DbgInfo, "CFG_CNF_OWN_SSID                : ANY\n" );
		lp->ltvRecord.len       = 2;
		lp->ltvRecord.typ       = CFG_CNF_OWN_SSID;
		lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( 0 );
	}
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
	//DBG_TRACE( DbgInfo, "CFG_CNF_OWN_SSID result         : 0x%04x\n",
	//           hcf_status );

	/* enable/disable encryption */
	lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = CFG_CNF_ENCRYPTION;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->EnableEncryption );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* Set the Authentication Key Management Suite */
	lp->ltvRecord.len       = 2;
	lp->ltvRecord.typ       = CFG_SET_WPA_AUTH_KEY_MGMT_SUITE;
	lp->ltvRecord.u.u16[0]  = CNV_INT_TO_LITTLE( lp->AuthKeyMgmtSuite );
	hcf_status = hcf_put_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));

	/* If WEP (or no) keys are being used, write (or clear) them */
	if (lp->wext_enc != IW_ENCODE_ALG_TKIP)
		wl_set_wep_keys(lp);

	/* Country Code */
	/* countryInfo, ltvCountryInfo, CFG_CNF_COUNTRY_INFO */

	return hcf_status;
}
// wl_put_ltv /*============================================================================*/ /******************************************************************************* * init_module() ******************************************************************************* * * DESCRIPTION: * * Load the kernel module. * * PARAMETERS: * * N/A * * RETURNS: * * 0 on success * an errno value otherwise * ******************************************************************************/ static int __init wl_module_init( void ) { int result; /*------------------------------------------------------------------------*/ #if DBG /* Convert "standard" PCMCIA parameter pc_debug to a reasonable DebugFlag value. * NOTE: The values all fall through to the lower values. */ DbgInfo->DebugFlag = 0; DbgInfo->DebugFlag = DBG_TRACE_ON; //;?get this mess resolved one day if ( pc_debug ) switch( pc_debug ) { case 8: DbgInfo->DebugFlag |= DBG_DS_ON; case 7: DbgInfo->DebugFlag |= DBG_RX_ON | DBG_TX_ON; case 6: DbgInfo->DebugFlag |= DBG_PARAM_ON; case 5: DbgInfo->DebugFlag |= DBG_TRACE_ON; case 4: DbgInfo->DebugFlag |= DBG_VERBOSE_ON; case 1: DbgInfo->DebugFlag |= DBG_DEFAULTS; default: break; } #endif /* DBG */ printk(KERN_INFO "%s\n", VERSION_INFO); printk(KERN_INFO "*** Modified for kernel 2.6 by Henk de Groot <pe1dnn@amsat.org>\n"); printk(KERN_INFO "*** Based on 7.18 version by Andrey Borzenkov <arvidjaar@mail.ru> $Revision: 39 $\n"); // ;?#if (HCF_TYPE) & HCF_TYPE_AP // DBG_PRINT( "Access Point Mode (AP) Support: YES\n" ); // #else // DBG_PRINT( "Access Point Mode (AP) Support: NO\n" ); // #endif /* (HCF_TYPE) & HCF_TYPE_AP */ result = wl_adapter_init_module( ); return result; } // init_module /*============================================================================*/ /******************************************************************************* * cleanup_module() ******************************************************************************* * * DESCRIPTION: * * Unload the kernel 
module. * * PARAMETERS: * * N/A * * RETURNS: * * N/A * ******************************************************************************/ static void __exit wl_module_exit( void ) { wl_adapter_cleanup_module( ); #if 0 //SCULL_USE_PROC /* don't waste space if unused */ remove_proc_entry( "wlags", NULL ); //;?why so a-symmetric compared to location of proc_create_data #endif } // cleanup_module /*============================================================================*/ module_init(wl_module_init); module_exit(wl_module_exit); /******************************************************************************* * wl_isr() ******************************************************************************* * * DESCRIPTION: * * The Interrupt Service Routine for the driver. * * PARAMETERS: * * irq - the irq the interrupt came in on * dev_id - a buffer containing information about the request * regs - * * RETURNS: * * N/A * ******************************************************************************/ irqreturn_t wl_isr( int irq, void *dev_id, struct pt_regs *regs ) { int events; struct net_device *dev = (struct net_device *) dev_id; struct wl_private *lp = NULL; /*------------------------------------------------------------------------*/ if (( dev == NULL ) || ( !netif_device_present( dev ))) { return IRQ_NONE; } /* Set the wl_private pointer (lp), now that we know that dev is non-null */ lp = wl_priv(dev); #ifdef USE_RTS if ( lp->useRTS == 1 ) { DBG_PRINT( "EXITING ISR, IN RTS MODE...\n" ); return; } #endif /* USE_RTS */ /* If we have interrupts pending, then put them on a system task queue. 
Otherwise turn interrupts back on */ events = hcf_action( &lp->hcfCtx, HCF_ACT_INT_OFF ); if ( events == HCF_INT_PENDING ) { /* Schedule the ISR handler as a bottom-half task in the tq_immediate queue */ tasklet_schedule(&lp->task); } else { //DBG_PRINT( "NOT OUR INTERRUPT\n" ); hcf_action( &lp->hcfCtx, HCF_ACT_INT_ON ); } return IRQ_RETVAL(events == HCF_INT_PENDING); } // wl_isr /*============================================================================*/ /******************************************************************************* * wl_isr_handler() ******************************************************************************* * * DESCRIPTION: * * The ISR handler, scheduled to run in a deferred context by the ISR. This * is where the ISR's work actually gets done. * * PARAMETERS: * * lp - a pointer to the device's private adapter structure * * RETURNS: * * N/A * ******************************************************************************/ #define WVLAN_MAX_INT_SERVICES 50 void wl_isr_handler( unsigned long p ) { struct net_device *dev; unsigned long flags; bool_t stop = TRUE; int count; int result; struct wl_private *lp = (struct wl_private *)p; /*------------------------------------------------------------------------*/ if ( lp == NULL ) { DBG_PRINT( "wl_isr_handler lp adapter pointer is NULL!!!\n" ); } else { wl_lock( lp, &flags ); dev = (struct net_device *)lp->dev; if ( dev != NULL && netif_device_present( dev ) ) stop = FALSE; for( count = 0; stop == FALSE && count < WVLAN_MAX_INT_SERVICES; count++ ) { stop = TRUE; result = hcf_service_nic( &lp->hcfCtx, (wci_bufp)lp->lookAheadBuf, sizeof( lp->lookAheadBuf )); if ( result == HCF_ERR_MIC ) { wl_wext_event_mic_failed( dev ); /* Send an event that MIC failed */ //;?this seems wrong if HCF_ERR_MIC coincides with another event, stop gets FALSE //so why not do it always ;? 
} #ifndef USE_MBOX_SYNC if ( lp->hcfCtx.IFB_MBInfoLen != 0 ) { /* anything in the mailbox */ wl_mbx( lp ); stop = FALSE; } #endif /* Check for a Link status event */ if ( ( lp->hcfCtx.IFB_LinkStat & CFG_LINK_STAT_FW ) != 0 ) { wl_process_link_status( lp ); stop = FALSE; } /* Check for probe response events */ if ( lp->ProbeResp.infoType != 0 && lp->ProbeResp.infoType != 0xFFFF ) { wl_process_probe_response( lp ); memset( &lp->ProbeResp, 0, sizeof( lp->ProbeResp )); lp->ProbeResp.infoType = 0xFFFF; stop = FALSE; } /* Check for updated record events */ if ( lp->updatedRecord.len != 0xFFFF ) { wl_process_updated_record( lp ); lp->updatedRecord.len = 0xFFFF; stop = FALSE; } /* Check for association status events */ if ( lp->assoc_stat.len != 0xFFFF ) { wl_process_assoc_status( lp ); lp->assoc_stat.len = 0xFFFF; stop = FALSE; } /* Check for security status events */ if ( lp->sec_stat.len != 0xFFFF ) { wl_process_security_status( lp ); lp->sec_stat.len = 0xFFFF; stop = FALSE; } #ifdef ENABLE_DMA if ( lp->use_dma ) { /* Check for DMA Rx packets */ if ( lp->hcfCtx.IFB_DmaPackets & HREG_EV_RDMAD ) { wl_rx_dma( dev ); stop = FALSE; } /* Return Tx DMA descriptors to host */ if ( lp->hcfCtx.IFB_DmaPackets & HREG_EV_TDMAD ) { wl_pci_dma_hcf_reclaim_tx( lp ); stop = FALSE; } } else #endif // ENABLE_DMA { /* Check for Rx packets */ if ( lp->hcfCtx.IFB_RxLen != 0 ) { wl_rx( dev ); stop = FALSE; } /* Make sure that queued frames get sent */ if ( wl_send( lp )) { stop = FALSE; } } } /* We're done, so turn interrupts which were turned off in wl_isr, back on */ hcf_action( &lp->hcfCtx, HCF_ACT_INT_ON ); wl_unlock( lp, &flags ); } return; } // wl_isr_handler /*============================================================================*/ /******************************************************************************* * wl_remove() ******************************************************************************* * * DESCRIPTION: * * Notify the adapter that it has been removed. 
Since the adapter is gone,
 *      we should no longer try to talk to it.
 *
 *  PARAMETERS:
 *
 *      dev - a pointer to the device's net_device structure
 *
 *  RETURNS:
 *
 *      N/A
 *
 ******************************************************************************/
void wl_remove( struct net_device *dev )
{
	struct wl_private   *lp = wl_priv(dev);
	unsigned long   flags;

	DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );

	wl_lock( lp, &flags );

	/* stop handling interrupts */
	wl_act_int_off( lp );
	lp->is_handling_int = WL_NOT_HANDLING_INT;

	/*
	 * Disable the ports: just change state: since the
	 * card is gone it is useless to talk to it and at
	 * disconnect all state information is lost anyway.
	 */
	/* Reset portState */
	lp->portState = WVLAN_PORT_STATE_DISABLED;

#if 0 //;? (HCF_TYPE) & HCF_TYPE_AP
#ifdef USE_WDS
	//wl_disable_wds_ports( lp );
#endif // USE_WDS
#endif /* (HCF_TYPE) & HCF_TYPE_AP */

	/* Mark the device as unregistered */
	lp->is_registered = FALSE;

	/* Deregister the WDS ports as well */
	WL_WDS_NETDEV_DEREGISTER( lp );

#ifdef USE_RTS
	/* In RTS mode there is no HCF state to tear down; just release the
	   lock and leave */
	if ( lp->useRTS == 1 ) {
		wl_unlock( lp, &flags );
		return;
	}
#endif  /* USE_RTS */

	/* Inform the HCF that the card has been removed */
	hcf_connect( &lp->hcfCtx, HCF_DISCONNECT );

	wl_unlock( lp, &flags );
} // wl_remove
/*============================================================================*/

/*******************************************************************************
 *	wl_suspend()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Power-down and halt the adapter.
 *
 *  PARAMETERS:
 *
 *      dev - a pointer to the device's net_device structure
 *
 *  RETURNS:
 *
 *      N/A
 *
 ******************************************************************************/
void wl_suspend( struct net_device *dev )
{
	struct wl_private *lp = wl_priv(dev);
	unsigned long flags;

	DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );

	/* The adapter is suspended:
	   - Stop the adapter
	   - Power down */
	wl_lock( lp, &flags );

	/* Disable interrupt handling */
	wl_act_int_off( lp );

	/* Disconnect */
	wl_disconnect( lp );

	/* Disable */
	wl_disable( lp );

	/* Disconnect from the adapter */
	hcf_connect( &lp->hcfCtx, HCF_DISCONNECT );

	/* Reset portState to be sure (should have been done by wl_disable */
	lp->portState = WVLAN_PORT_STATE_DISABLED;

	wl_unlock( lp, &flags );
} // wl_suspend
/*============================================================================*/

/*******************************************************************************
 *	wl_resume()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Resume a previously suspended adapter.
 *
 *  PARAMETERS:
 *
 *      dev - a pointer to the device's net_device structure
 *
 *  RETURNS:
 *
 *      N/A
 *
 ******************************************************************************/
void wl_resume(struct net_device *dev)
{
	struct wl_private *lp = wl_priv(dev);
	unsigned long flags;

	DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );

	wl_lock( lp, &flags );

	/* Connect to the adapter */
	hcf_connect( &lp->hcfCtx, dev->base_addr );

	/* Reset portState */
	lp->portState = WVLAN_PORT_STATE_DISABLED;

	/* Power might have been off, assume the card lost the firmware*/
	lp->firmware_present = WL_FRIMWARE_NOT_PRESENT;

	/* Reload the firmware and restart */
	wl_reset( dev );

	/* Resume interrupt handling */
	wl_act_int_on( lp );

	wl_unlock( lp, &flags );
} // wl_resume
/*============================================================================*/

/*******************************************************************************
 *	wl_release()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      This function performs a check on the device and calls wl_remove() if
 *      necessary. This function can be used for all bus types, but exists
 *      mostly for the benefit of the Card Services driver, as there are times
 *      when wl_remove() does not get called.
 *
 *  PARAMETERS:
 *
 *      dev - a pointer to the device's net_device structure
 *
 *  RETURNS:
 *
 *      N/A
 *
 ******************************************************************************/
void wl_release( struct net_device *dev )
{
	struct wl_private *lp = wl_priv(dev);

	DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );

	/* If wl_remove() hasn't been called (i.e. when Card Services is shut
	   down with the card in the slot), then call it */
	if ( lp->is_registered == TRUE ) {
		DBG_TRACE( DbgInfo, "Calling unregister_netdev(), as it wasn't called yet\n" );
		wl_remove( dev );

		lp->is_registered = FALSE;
	}
} // wl_release
/*============================================================================*/

/*******************************************************************************
 *	wl_get_irq_mask()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Accessor function to retrieve the irq_mask module parameter
 *
 *  PARAMETERS:
 *
 *      N/A
 *
 *  RETURNS:
 *
 *      The irq_mask module parameter
 *
 ******************************************************************************/
p_u16 wl_get_irq_mask( void )
{
	return irq_mask;
} // wl_get_irq_mask
/*============================================================================*/

/*******************************************************************************
 *	wl_get_irq_list()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Accessor function to retrieve the irq_list module parameter
 *
 *  PARAMETERS:
 *
 *      N/A
 *
 *  RETURNS:
 *
 *      The irq_list module parameter
 *
 ******************************************************************************/
p_s8 * wl_get_irq_list( void )
{
	return irq_list;
} // wl_get_irq_list
/*============================================================================*/

/*******************************************************************************
 *	wl_enable()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Used to enable MAC ports
 *
 *  PARAMETERS:
 *
 *      lp - pointer to the device's private adapter structure
 *
 *  RETURNS:
 *
 *      an HCF status code (HCF_SUCCESS on success)
 *
 ******************************************************************************/
int wl_enable( struct wl_private *lp )
{
	int hcf_status = HCF_SUCCESS;

	if ( lp->portState == WVLAN_PORT_STATE_ENABLED ) {
		DBG_TRACE( DbgInfo, "No action: Card already enabled\n" );
	} else if ( lp->portState == WVLAN_PORT_STATE_CONNECTED ) {
		//;?suspicuous logic, how can you be connected without being enabled so this is probably dead code
		DBG_TRACE( DbgInfo, "No action: Card already connected\n" );
	} else {
		hcf_status = hcf_cntl( &lp->hcfCtx, HCF_CNTL_ENABLE );
		if ( hcf_status == HCF_SUCCESS ) {
			/* Set the status of the NIC to enabled */
			lp->portState = WVLAN_PORT_STATE_ENABLED;	//;?bad mnemonic, NIC iso PORT
#ifdef ENABLE_DMA
			if ( lp->use_dma ) {
				wl_pci_dma_hcf_supply( lp );	//;?always successful?
			}
#endif
		}
	}
	if ( hcf_status != HCF_SUCCESS ) {	//;?make this an assert
		DBG_TRACE( DbgInfo, "failed: 0x%x\n", hcf_status );
	}
	return hcf_status;
} // wl_enable
/*============================================================================*/

#ifdef USE_WDS
/*******************************************************************************
 *	wl_enable_wds_ports()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Used to enable the WDS MAC ports 1-6
 *
 *  PARAMETERS:
 *
 *      lp - pointer to the device's private adapter structure
 *
 *  RETURNS:
 *
 *      N/A
 *
 ******************************************************************************/
void wl_enable_wds_ports( struct wl_private * lp )
{
	/* NOTE(review): body only flags AP firmware as unexpected; the actual
	   per-port enabling appears to have been removed — confirm intent */
	if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ){
		DBG_ERROR( DbgInfo, "!!!!;? someone misunderstood something !!!!!\n" );
	}
} // wl_enable_wds_ports
#endif  /* USE_WDS */
/*============================================================================*/

/*******************************************************************************
 *	wl_connect()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Used to connect a MAC port
 *
 *  PARAMETERS:
 *
 *      lp - pointer to the device's private adapter structure
 *
 *  RETURNS:
 *
 *      an HCF status code (HCF_SUCCESS on success or no-op)
 *
 ******************************************************************************/
int wl_connect( struct wl_private *lp )
{
	int hcf_status;

	/* Connecting is only meaningful from the enabled state */
	if ( lp->portState != WVLAN_PORT_STATE_ENABLED ) {
		DBG_TRACE( DbgInfo, "No action: Not in enabled state\n" );
		return HCF_SUCCESS;
	}
	hcf_status = hcf_cntl( &lp->hcfCtx, HCF_CNTL_CONNECT );
	if ( hcf_status == HCF_SUCCESS ) {
		lp->portState = WVLAN_PORT_STATE_CONNECTED;
	}
	return hcf_status;
} // wl_connect
/*============================================================================*/

/*******************************************************************************
 *	wl_disconnect()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Used to disconnect a MAC port
 *
 *  PARAMETERS:
 *
 *      lp - pointer to the device's private adapter structure
 *
 *  RETURNS:
 *
 *      an HCF status code (HCF_SUCCESS on success or no-op)
 *
 ******************************************************************************/
int wl_disconnect( struct wl_private *lp )
{
	int hcf_status;

	/* Disconnecting is only meaningful from the connected state */
	if ( lp->portState != WVLAN_PORT_STATE_CONNECTED ) {
		DBG_TRACE( DbgInfo, "No action: Not in connected state\n" );
		return HCF_SUCCESS;
	}
	hcf_status = hcf_cntl( &lp->hcfCtx, HCF_CNTL_DISCONNECT );
	if ( hcf_status == HCF_SUCCESS ) {
		lp->portState = WVLAN_PORT_STATE_ENABLED;
	}
	return hcf_status;
} // wl_disconnect
/*============================================================================*/

/*******************************************************************************
 *	wl_disable()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Used to disable MAC ports
 *
 *  PARAMETERS:
 *
 *      lp   - pointer to the device's private adapter structure
 *      port - the MAC port to disable
 *
 *  RETURNS:
 *
 *      an HCF status code (HCF_SUCCESS on success or no-op)
 *
 ******************************************************************************/
int wl_disable( struct wl_private *lp )
{
	int hcf_status = HCF_SUCCESS;

	if ( lp->portState == WVLAN_PORT_STATE_DISABLED ) {
		DBG_TRACE( DbgInfo, "No action: Port state is disabled\n" );
	} else {
		hcf_status = hcf_cntl( &lp->hcfCtx, HCF_CNTL_DISABLE );
		if ( hcf_status == HCF_SUCCESS ) {
			/* Set the status of the port to disabled */
			//;?bad mnemonic use NIC iso PORT
			lp->portState = WVLAN_PORT_STATE_DISABLED;

#ifdef ENABLE_DMA
			if ( lp->use_dma ) {
				wl_pci_dma_hcf_reclaim( lp );
			}
#endif
		}
	}
	if ( hcf_status != HCF_SUCCESS ) {
		DBG_TRACE( DbgInfo, "failed: 0x%x\n", hcf_status );
	}
	return hcf_status;
} // wl_disable
/*============================================================================*/

#ifdef USE_WDS
/*******************************************************************************
 *	wl_disable_wds_ports()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Used to disable the WDS MAC ports 1-6
 *
 *  PARAMETERS:
 *
 *      lp - pointer to the device's private adapter structure
 *
 *  RETURNS:
 *
 *      N/A
 *
 ******************************************************************************/
void wl_disable_wds_ports( struct wl_private * lp )
{
	/* NOTE(review): like wl_enable_wds_ports, the per-port calls below are
	   commented out; only the AP-firmware sanity check remains */
	if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ){
		DBG_ERROR( DbgInfo, "!!!!;? someone misunderstood something !!!!!\n" );
	}
//	if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) {
//		wl_disable( lp, HCF_PORT_1 );
//		wl_disable( lp, HCF_PORT_2 );
//		wl_disable( lp, HCF_PORT_3 );
//		wl_disable( lp, HCF_PORT_4 );
//		wl_disable( lp, HCF_PORT_5 );
//		wl_disable( lp, HCF_PORT_6 );
//	}
	return;
} // wl_disable_wds_ports
#endif // USE_WDS
/*============================================================================*/

#ifndef USE_MBOX_SYNC
/*******************************************************************************
 *	wl_mbx()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *      This function is used to read and process a mailbox message.
 *
 *
 *  PARAMETERS:
 *
 *      lp - pointer to the device's private adapter structure
 *
 *  RETURNS:
 *
 *      an HCF status code
 *
 ******************************************************************************/
int wl_mbx( struct wl_private *lp )
{
	int hcf_status = HCF_SUCCESS;

	DBG_TRACE( DbgInfo, "Mailbox Info: IFB_MBInfoLen: %d\n",
		   lp->hcfCtx.IFB_MBInfoLen );

	memset( &( lp->ltvRecord ), 0, sizeof( ltv_t ));

	lp->ltvRecord.len = MB_SIZE;
	lp->ltvRecord.typ = CFG_MB_INFO;
	hcf_status = hcf_get_info( &lp->hcfCtx, (LTVP)&( lp->ltvRecord ));
	if ( hcf_status != HCF_SUCCESS ) {
		DBG_ERROR( DbgInfo, "hcf_get_info returned 0x%x\n", hcf_status );
		return hcf_status;
	}

	/* NOTE(review): typ still equal to CFG_MB_INFO presumably means the
	   mailbox was empty (hcf_get_info did not overwrite it) — confirm
	   against the HCF documentation */
	if ( lp->ltvRecord.typ == CFG_MB_INFO )
		return hcf_status;
	/* Endian translate the mailbox data, then process the message */
	wl_endian_translate_mailbox( &( lp->ltvRecord ));
	wl_process_mailbox( lp );
	return hcf_status;
} // wl_mbx
/*============================================================================*/

/*******************************************************************************
 *	wl_endian_translate_mailbox()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      This function will perform the tedious task of endian translating all
 *      fields
within a mailbox message which need translating.
 *
 *  PARAMETERS:
 *
 *      ltv - pointer to the LTV to endian translate
 *
 *  RETURNS:
 *
 *      none
 *
 ******************************************************************************/
void wl_endian_translate_mailbox( ltv_t *ltv )
{
	/* Dispatch on the record type; each case converts only the fields the
	   firmware delivers little-endian */
	switch( ltv->typ ) {
	case CFG_TALLIES:
		break;

	case CFG_SCAN:
		{
			int num_aps;
			SCAN_RS_STRCT *aps = (SCAN_RS_STRCT *)&ltv->u.u8[0];

			/* len is in 16-bit words and includes the type word */
			num_aps = (hcf_16)(( (size_t)(ltv->len - 1 ) * 2 ) /
					   ( sizeof( SCAN_RS_STRCT )));

			while( num_aps >= 1 ) {
				num_aps--;

				aps[num_aps].channel_id =
					CNV_LITTLE_TO_INT( aps[num_aps].channel_id );
				aps[num_aps].noise_level =
					CNV_LITTLE_TO_INT( aps[num_aps].noise_level );
				aps[num_aps].signal_level =
					CNV_LITTLE_TO_INT( aps[num_aps].signal_level );
				aps[num_aps].beacon_interval_time =
					CNV_LITTLE_TO_INT( aps[num_aps].beacon_interval_time );
				aps[num_aps].capability =
					CNV_LITTLE_TO_INT( aps[num_aps].capability );
				aps[num_aps].ssid_len =
					CNV_LITTLE_TO_INT( aps[num_aps].ssid_len );
				/* NUL-terminate the SSID in place */
				aps[num_aps].ssid_val[aps[num_aps].ssid_len] = 0;
			}
		}
		break;

	case CFG_ACS_SCAN:
		{
			PROBE_RESP *probe_resp = (PROBE_RESP *)ltv;

			probe_resp->frameControl =
				CNV_LITTLE_TO_INT( probe_resp->frameControl );
			probe_resp->durID =
				CNV_LITTLE_TO_INT( probe_resp->durID );
			probe_resp->sequence =
				CNV_LITTLE_TO_INT( probe_resp->sequence );
			probe_resp->dataLength =
				CNV_LITTLE_TO_INT( probe_resp->dataLength );
#ifndef WARP
			probe_resp->lenType =
				CNV_LITTLE_TO_INT( probe_resp->lenType );
#endif // WARP
			probe_resp->beaconInterval =
				CNV_LITTLE_TO_INT( probe_resp->beaconInterval );
			probe_resp->capability =
				CNV_LITTLE_TO_INT( probe_resp->capability );
			probe_resp->flags =
				CNV_LITTLE_TO_INT( probe_resp->flags );
		}
		break;

	case CFG_LINK_STAT:
#define ls ((LINK_STATUS_STRCT *)ltv)
		ls->linkStatus = CNV_LITTLE_TO_INT( ls->linkStatus );
		break;
#undef ls

	case CFG_ASSOC_STAT:
		{
			ASSOC_STATUS_STRCT *as = (ASSOC_STATUS_STRCT *)ltv;

			as->assocStatus = CNV_LITTLE_TO_INT( as->assocStatus );
		}
		break;

	case CFG_SECURITY_STAT:
		{
			SECURITY_STATUS_STRCT *ss = (SECURITY_STATUS_STRCT *)ltv;

			ss->securityStatus = CNV_LITTLE_TO_INT( ss->securityStatus );
			ss->reason = CNV_LITTLE_TO_INT( ss->reason );
		}
		break;

	case CFG_WMP:
		break;

	case CFG_NULL:
		break;

	default:
		break;
	}
} // wl_endian_translate_mailbox
/*============================================================================*/

/*******************************************************************************
 *	wl_process_mailbox()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      This function processes the mailbox data.
 *
 *  PARAMETERS:
 *
 *      ltv - pointer to the LTV to be processed.
 *
 *  RETURNS:
 *
 *      none
 *
 ******************************************************************************/
void wl_process_mailbox( struct wl_private *lp )
{
	ltv_t   *ltv;
	hcf_16  ltv_val = 0xFFFF;

	ltv = &( lp->ltvRecord );

	switch( ltv->typ ) {
	case CFG_TALLIES:
		DBG_TRACE( DbgInfo, "CFG_TALLIES\n" );
		break;

	case CFG_SCAN:
		DBG_TRACE( DbgInfo, "CFG_SCAN\n" );
		{
			int num_aps;
			SCAN_RS_STRCT *aps = (SCAN_RS_STRCT *)&ltv->u.u8[0];

			num_aps = (hcf_16)(( (size_t)(ltv->len - 1 ) * 2 ) /
					   ( sizeof( SCAN_RS_STRCT )));

			lp->scan_results.num_aps = num_aps;

			DBG_TRACE( DbgInfo, "Number of APs: %d\n", num_aps );

			while( num_aps >= 1 ) {
				num_aps--;

				DBG_TRACE( DbgInfo, "AP : %d\n", num_aps );
				DBG_TRACE( DbgInfo, "=========================\n" );
				DBG_TRACE( DbgInfo, "Channel ID : 0x%04x\n",
					   aps[num_aps].channel_id );
				DBG_TRACE( DbgInfo, "Noise Level : 0x%04x\n",
					   aps[num_aps].noise_level );
				DBG_TRACE( DbgInfo, "Signal Level : 0x%04x\n",
					   aps[num_aps].signal_level );
				DBG_TRACE( DbgInfo, "Beacon Interval : 0x%04x\n",
					   aps[num_aps].beacon_interval_time );
				DBG_TRACE( DbgInfo, "Capability : 0x%04x\n",
					   aps[num_aps].capability );
				DBG_TRACE( DbgInfo, "SSID Length : 0x%04x\n",
					   aps[num_aps].ssid_len );
				DBG_TRACE(DbgInfo, "BSSID : %pM\n",
					  aps[num_aps].bssid);

				if ( aps[num_aps].ssid_len != 0 ) {
					DBG_TRACE( DbgInfo, "SSID : %s.\n",
						   aps[num_aps].ssid_val );
				} else {
					DBG_TRACE( DbgInfo, "SSID : %s.\n", "ANY" );
				}

				DBG_TRACE( DbgInfo, "\n" );

				/* Copy the info to the ScanResult structure in the private
				   adapter struct */
				memcpy( &( lp->scan_results.APTable[num_aps]),
					&( aps[num_aps] ), sizeof( SCAN_RS_STRCT ));
			}

			/* Set scan result to true so that any scan requests will
			   complete */
			lp->scan_results.scan_complete = TRUE;
		}
		break;

	case CFG_ACS_SCAN:
		DBG_TRACE( DbgInfo, "CFG_ACS_SCAN\n" );
		{
			PROBE_RESP *probe_rsp = (PROBE_RESP *)ltv;
			hcf_8 *wpa_ie = NULL;
			hcf_16 wpa_ie_len = 0;

			DBG_TRACE( DbgInfo, "(%s) =========================\n",
				   lp->dev->name );
			DBG_TRACE( DbgInfo, "(%s) length : 0x%04x.\n",
				   lp->dev->name, probe_rsp->length );

			if ( probe_rsp->length > 1 ) {
				DBG_TRACE( DbgInfo, "(%s) infoType : 0x%04x.\n",
					   lp->dev->name, probe_rsp->infoType );
				DBG_TRACE( DbgInfo, "(%s) signal : 0x%02x.\n",
					   lp->dev->name, probe_rsp->signal );
				DBG_TRACE( DbgInfo, "(%s) silence : 0x%02x.\n",
					   lp->dev->name, probe_rsp->silence );
				DBG_TRACE( DbgInfo, "(%s) rxFlow : 0x%02x.\n",
					   lp->dev->name, probe_rsp->rxFlow );
				DBG_TRACE( DbgInfo, "(%s) rate : 0x%02x.\n",
					   lp->dev->name, probe_rsp->rate );
				DBG_TRACE( DbgInfo, "(%s) frame cntl : 0x%04x.\n",
					   lp->dev->name, probe_rsp->frameControl );
				DBG_TRACE( DbgInfo, "(%s) durID : 0x%04x.\n",
					   lp->dev->name, probe_rsp->durID );
				DBG_TRACE(DbgInfo, "(%s) address1 : %pM\n",
					  lp->dev->name, probe_rsp->address1);
				DBG_TRACE(DbgInfo, "(%s) address2 : %pM\n",
					  lp->dev->name, probe_rsp->address2);
				DBG_TRACE(DbgInfo, "(%s) BSSID : %pM\n",
					  lp->dev->name, probe_rsp->BSSID);
				DBG_TRACE( DbgInfo, "(%s) sequence : 0x%04x.\n",
					   lp->dev->name, probe_rsp->sequence );
				DBG_TRACE(DbgInfo, "(%s) address4 : %pM\n",
					  lp->dev->name, probe_rsp->address4);
				DBG_TRACE( DbgInfo, "(%s) datalength : 0x%04x.\n",
					   lp->dev->name, probe_rsp->dataLength );
				DBG_TRACE(DbgInfo, "(%s) DA : %pM\n",
					  lp->dev->name, probe_rsp->DA);
				DBG_TRACE(DbgInfo, "(%s) SA : %pM\n",
					  lp->dev->name, probe_rsp->SA);

				//DBG_TRACE( DbgInfo, "(%s) lenType : 0x%04x.\n",
				//           lp->dev->name, probe_rsp->lenType );

				DBG_TRACE(DbgInfo, "(%s) timeStamp : "
					  "%d.%d.%d.%d.%d.%d.%d.%d\n",
					  lp->dev->name,
					  probe_rsp->timeStamp[0],
					  probe_rsp->timeStamp[1],
					  probe_rsp->timeStamp[2],
					  probe_rsp->timeStamp[3],
					  probe_rsp->timeStamp[4],
					  probe_rsp->timeStamp[5],
					  probe_rsp->timeStamp[6],
					  probe_rsp->timeStamp[7]);
				DBG_TRACE( DbgInfo, "(%s) beaconInt : 0x%04x.\n",
					   lp->dev->name, probe_rsp->beaconInterval );
				DBG_TRACE( DbgInfo, "(%s) capability : 0x%04x.\n",
					   lp->dev->name, probe_rsp->capability );
				DBG_TRACE( DbgInfo, "(%s) SSID len : 0x%04x.\n",
					   lp->dev->name, probe_rsp->rawData[1] );

				if ( probe_rsp->rawData[1] > 0 ) {
					char ssid[HCF_MAX_NAME_LEN];

					memset( ssid, 0, sizeof( ssid ));
					strncpy( ssid, &probe_rsp->rawData[2],
						 min_t(u8, probe_rsp->rawData[1],
						       HCF_MAX_NAME_LEN - 1));

					DBG_TRACE( DbgInfo, "(%s) SSID : %s\n",
						   lp->dev->name, ssid );
				}

				/* Parse out the WPA-IE, if one exists */
				wpa_ie = wl_parse_wpa_ie( probe_rsp, &wpa_ie_len );
				if ( wpa_ie != NULL ) {
					DBG_TRACE( DbgInfo, "(%s) WPA-IE : %s\n",
						   lp->dev->name,
						   wl_print_wpa_ie( wpa_ie, wpa_ie_len ));
				}

				DBG_TRACE( DbgInfo, "(%s) flags : 0x%04x.\n",
					   lp->dev->name, probe_rsp->flags );
			}

			DBG_TRACE( DbgInfo, "\n\n" );

			/* If probe response length is 1, then the scan is complete */
			if ( probe_rsp->length == 1 ) {
				DBG_TRACE( DbgInfo, "SCAN COMPLETE\n" );
				lp->probe_results.num_aps = lp->probe_num_aps;
				lp->probe_results.scan_complete = TRUE;

				/* Reset the counter for the next scan request */
				lp->probe_num_aps = 0;

				/* Send a wireless extensions event that the scan
				   completed */
				wl_wext_event_scan_complete( lp->dev );
			} else {
				/* Only copy to the table if the entry is unique; APs
				   sometimes respond more than once to a probe */
				if ( lp->probe_num_aps == 0 ) {
					/* Copy the info to the ScanResult structure in
					   the private adapter struct */
					memcpy( &( lp->probe_results.ProbeTable[lp->probe_num_aps] ),
						probe_rsp, sizeof( PROBE_RESP ));

					/* Increment the number of APs detected */
					lp->probe_num_aps++;
				} else {
					int count;
					int unique = 1;

					for( count = 0; count < lp->probe_num_aps; count++ ) {
						if ( memcmp( &( probe_rsp->BSSID ),
							     lp->probe_results.ProbeTable[count].BSSID,
							     ETH_ALEN ) == 0 ) {
							unique = 0;
						}
					}

					if ( unique ) {
						/* Copy the info to the ScanResult structure in
						   the private adapter struct. Only copy if
						   there's room in the table */
						if ( lp->probe_num_aps < MAX_NAPS ) {
							memcpy( &( lp->probe_results.ProbeTable[lp->probe_num_aps] ),
								probe_rsp, sizeof( PROBE_RESP ));
						} else {
							DBG_WARNING( DbgInfo, "Num of scan results exceeds storage, truncating\n" );
						}

						/* Increment the number of APs detected. Note I
						   do this here even when I don't copy the probe
						   response to the buffer in order to detect the
						   overflow condition */
						lp->probe_num_aps++;
					}
				}
			}
		}
		break;

	case CFG_LINK_STAT:
#define ls ((LINK_STATUS_STRCT *)ltv)
		DBG_TRACE( DbgInfo, "CFG_LINK_STAT\n" );

		switch( ls->linkStatus ) {
		case 1:
			DBG_TRACE( DbgInfo, "Link Status : Connected\n" );
			wl_wext_event_ap( lp->dev );
			break;

		case 2:
			DBG_TRACE( DbgInfo, "Link Status : Disconnected\n"  );
			break;

		case 3:
			DBG_TRACE( DbgInfo, "Link Status : Access Point Change\n" );
			break;

		case 4:
			DBG_TRACE( DbgInfo, "Link Status : Access Point Out of Range\n" );
			break;

		case 5:
			DBG_TRACE( DbgInfo, "Link Status : Access Point In Range\n" );
			break;

		default:
			DBG_TRACE( DbgInfo, "Link Status : UNKNOWN (0x%04x)\n",
				   ls->linkStatus );
			break;
		}
		break;
#undef ls

	case CFG_ASSOC_STAT:
		DBG_TRACE( DbgInfo, "CFG_ASSOC_STAT\n" );
		{
			ASSOC_STATUS_STRCT *as = (ASSOC_STATUS_STRCT *)ltv;

			switch( as->assocStatus ) {
			case 1:
				DBG_TRACE( DbgInfo, "Association Status : STA Associated\n" );
				break;

			case 2:
				DBG_TRACE( DbgInfo, "Association Status : STA Reassociated\n" );
				break;

			case 3:
				DBG_TRACE( DbgInfo, "Association Status : STA Disassociated\n" );
				break;

			default:
				DBG_TRACE( DbgInfo, "Association Status : UNKNOWN (0x%04x)\n",
					   as->assocStatus );
				break;
			}

			DBG_TRACE(DbgInfo, "STA Address : %pM\n", as->staAddr);

			if (( as->assocStatus == 2 )  && ( as->len == 8 )) {
				DBG_TRACE(DbgInfo, "Old AP Address : %pM\n",
					  as->oldApAddr);
			}
		}
		break;

	case CFG_SECURITY_STAT:
		DBG_TRACE( DbgInfo, "CFG_SECURITY_STAT\n" );
		{
			SECURITY_STATUS_STRCT *ss = (SECURITY_STATUS_STRCT *)ltv;

			switch( ss->securityStatus ) {
			case 1:
				DBG_TRACE( DbgInfo, "Security Status : Dissassociate [AP]\n" );
				break;

			case 2:
				DBG_TRACE( DbgInfo, "Security Status : Deauthenticate [AP]\n" );
				break;

			case 3:
				DBG_TRACE( DbgInfo, "Security Status : Authenticate Fail [STA] or [AP]\n" );
				break;

			case 4:
				DBG_TRACE( DbgInfo, "Security Status : MIC Fail\n" );
				break;

			case 5:
				DBG_TRACE( DbgInfo, "Security Status : Associate Fail\n" );
				break;

			default:
				DBG_TRACE( DbgInfo, "Security Status : UNKNOWN %d\n",
					   ss->securityStatus );
				break;
			}

			DBG_TRACE(DbgInfo, "STA Address : %pM\n", ss->staAddr);
			DBG_TRACE(DbgInfo, "Reason : 0x%04x\n", ss->reason);
		}
		break;

	case CFG_WMP:
		DBG_TRACE( DbgInfo, "CFG_WMP, size is %d bytes\n", ltv->len );
		{
			WMP_RSP_STRCT *wmp_rsp = (WMP_RSP_STRCT *)ltv;

			DBG_TRACE( DbgInfo, "CFG_WMP, pdu type is 0x%x\n",
				   wmp_rsp->wmpRsp.wmpHdr.type );

			switch( wmp_rsp->wmpRsp.wmpHdr.type ) {
			case WVLAN_WMP_PDU_TYPE_LT_RSP:
				{
#if DBG
					LINKTEST_RSP_STRCT  *lt_rsp = (LINKTEST_RSP_STRCT *)ltv;
#endif // DBG
					DBG_TRACE( DbgInfo, "LINK TEST RESULT\n" );
					DBG_TRACE( DbgInfo, "================\n" );
					DBG_TRACE( DbgInfo, "Length : %d.\n",
						   lt_rsp->len );
					DBG_TRACE( DbgInfo, "Name : %s.\n",
						   lt_rsp->ltRsp.ltRsp.name );
					DBG_TRACE( DbgInfo, "Signal Level : 0x%02x.\n",
						   lt_rsp->ltRsp.ltRsp.signal );
					DBG_TRACE( DbgInfo, "Noise Level : 0x%02x.\n",
						   lt_rsp->ltRsp.ltRsp.noise );
					DBG_TRACE( DbgInfo, "Receive Flow : 0x%02x.\n",
						   lt_rsp->ltRsp.ltRsp.rxFlow );
					DBG_TRACE( DbgInfo, "Data Rate : 0x%02x.\n",
						   lt_rsp->ltRsp.ltRsp.dataRate );
					DBG_TRACE( DbgInfo, "Protocol : 0x%04x.\n",
						   lt_rsp->ltRsp.ltRsp.protocol );
					DBG_TRACE( DbgInfo, "Station : 0x%02x.\n",
						   lt_rsp->ltRsp.ltRsp.station );
					DBG_TRACE( DbgInfo, "Data Rate Cap : 0x%02x.\n",
						   lt_rsp->ltRsp.ltRsp.dataRateCap );
					DBG_TRACE( DbgInfo,
						   "Power Mgmt : 0x%02x 0x%02x 0x%02x 0x%02x.\n",
						   lt_rsp->ltRsp.ltRsp.powerMgmt[0],
						   lt_rsp->ltRsp.ltRsp.powerMgmt[1],
						   lt_rsp->ltRsp.ltRsp.powerMgmt[2],
						   lt_rsp->ltRsp.ltRsp.powerMgmt[3] );
					DBG_TRACE( DbgInfo,
						   "Robustness : 0x%02x 0x%02x 0x%02x 0x%02x.\n",
						   lt_rsp->ltRsp.ltRsp.robustness[0],
						   lt_rsp->ltRsp.ltRsp.robustness[1],
						   lt_rsp->ltRsp.ltRsp.robustness[2],
						   lt_rsp->ltRsp.ltRsp.robustness[3] );
					DBG_TRACE( DbgInfo, "Scaling : 0x%02x.\n",
						   lt_rsp->ltRsp.ltRsp.scaling );
				}
				break;

			default:
				break;
			}
		}
		break;

	case CFG_NULL:
		DBG_TRACE( DbgInfo, "CFG_NULL\n" );
		break;

	case CFG_UPDATED_INFO_RECORD:        // Updated Information Record
		DBG_TRACE( DbgInfo, "UPDATED INFORMATION RECORD\n" );

		ltv_val = CNV_INT_TO_LITTLE( ltv->u.u16[0] );

		/* Check and see which RID was updated */
		switch( ltv_val ) {
		case CFG_CUR_COUNTRY_INFO:  // Indicate Passive Scan Completion
			DBG_TRACE( DbgInfo, "Updated country info\n" );

			/* Do I need to hold off on updating RIDs until the process
			   is complete? */
			wl_connect( lp );
			break;

		case CFG_PORT_STAT:    // Wait for Connect Event
			//wl_connect( lp );
			break;

		default:
			DBG_WARNING( DbgInfo, "Unknown RID: 0x%04x\n", ltv_val );
		}
		break;

	default:
		DBG_TRACE( DbgInfo, "UNKNOWN MESSAGE: 0x%04x\n", ltv->typ );
		break;
	}
} // wl_process_mailbox
/*============================================================================*/
#endif  /* ifndef USE_MBOX_SYNC */

#ifdef USE_WDS
/*******************************************************************************
 *	wl_wds_netdev_register()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      This function registers net_device structures with the system's network
 *      layer for use with the WDS ports.
* * * PARAMETERS: * * lp - pointer to the device's private adapter structure * * RETURNS: * * N/A * ******************************************************************************/ void wl_wds_netdev_register( struct wl_private *lp ) { int count; //;?why is there no USE_WDS clause like in wl_enable_wds_ports if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) { for( count = 0; count < NUM_WDS_PORTS; count++ ) { if ( WVLAN_VALID_MAC_ADDRESS( lp->wds_port[count].wdsAddress )) { if ( register_netdev( lp->wds_port[count].dev ) != 0 ) { DBG_WARNING( DbgInfo, "net device for WDS port %d could not be registered\n", ( count + 1 )); } lp->wds_port[count].is_registered = TRUE; /* Fill out the net_device structs with the MAC addr */ memcpy( lp->wds_port[count].dev->dev_addr, lp->MACAddress, ETH_ALEN ); lp->wds_port[count].dev->addr_len = ETH_ALEN; } } } } // wl_wds_netdev_register /*============================================================================*/ /******************************************************************************* * wl_wds_netdev_deregister() ******************************************************************************* * * DESCRIPTION: * * This function deregisters the WDS net_device structures used by the * system's network layer. 
* * * PARAMETERS: * * lp - pointer to the device's private adapter structure * * RETURNS: * * N/A * ******************************************************************************/ void wl_wds_netdev_deregister( struct wl_private *lp ) { int count; if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) { for( count = 0; count < NUM_WDS_PORTS; count++ ) { if ( WVLAN_VALID_MAC_ADDRESS( lp->wds_port[count].wdsAddress )) { unregister_netdev( lp->wds_port[count].dev ); } lp->wds_port[count].is_registered = FALSE; } } } // wl_wds_netdev_deregister /*============================================================================*/ #endif /* USE_WDS */ #if 0 //SCULL_USE_PROC /* don't waste space if unused */ /* * The proc filesystem: function to read and entry */ static void printf_hcf_16(struct seq_file *m, const char *s, hcf_16 *p, int n) { int i, len; seq_printf(m, "%-20.20s: ", s); len = 22; for (i = 0; i < n; i++) { if (len % 80 > 75) seq_putc(m, '\n'); seq_printf(m, "%04X ", p[i]); } seq_putc(m, '\n'); } static void printf_hcf_8(struct seq_file *m, const char *s, hcf_8 *p, int n) { int i, len; seq_printf(m, "%-20.20s: ", s); len = 22; for (i = 0; i <= n; i++) { if (len % 80 > 77) seq_putc(m, '\n'); seq_printf(m, "%02X ", p[i]); } seq_putc(m, '\n'); } static void printf_strct(struct seq_file *m, const char *s, hcf_16 *p) { int i, len; seq_printf(m, "%-20.20s: ", s); len = 22; for ( i = 0; i <= *p; i++ ) { if (len % 80 > 75) seq_putc(m, '\n'); seq_printf(m,"%04X ", p[i]); } seq_putc(m, '\n'); } int scull_read_procmem(struct seq_file *m, void *v) { struct wl_private *lp = m->private; IFBP ifbp; CFG_HERMES_TALLIES_STRCT *p; if (lp == NULL) { seq_puts(m, "No wl_private in scull_read_procmem\n" ); } else if ( lp->wlags49_type == 0 ){ ifbp = &lp->hcfCtx; seq_printf(m, "Magic: 0x%04X\n", ifbp->IFB_Magic ); seq_printf(m, "IOBase: 0x%04X\n", ifbp->IFB_IOBase ); seq_printf(m, "LinkStat: 0x%04X\n", ifbp->IFB_LinkStat ); seq_printf(m, "DSLinkStat: 0x%04X\n", 
ifbp->IFB_DSLinkStat ); seq_printf(m, "TickIni: 0x%08lX\n", ifbp->IFB_TickIni ); seq_printf(m, "TickCnt: 0x%04X\n", ifbp->IFB_TickCnt ); seq_printf(m, "IntOffCnt: 0x%04X\n", ifbp->IFB_IntOffCnt ); printf_hcf_16(m, "IFB_FWIdentity", &ifbp->IFB_FWIdentity.len, ifbp->IFB_FWIdentity.len + 1 ); } else if ( lp->wlags49_type == 1 ) { seq_printf(m, "Channel: 0x%04X\n", lp->Channel ); /****** seq_printf(m, "slock: %d\n", lp->slock ); */ //x struct tq_struct "task: 0x%04X\n", lp->task ); //x struct net_device_stats "stats: 0x%04X\n", lp->stats ); #ifdef WIRELESS_EXT //x struct iw_statistics "wstats: 0x%04X\n", lp->wstats ); //x seq_printf(m, "spy_number: 0x%04X\n", lp->spy_number ); //x u_char spy_address[IW_MAX_SPY][ETH_ALEN]; //x struct iw_quality spy_stat[IW_MAX_SPY]; #endif // WIRELESS_EXT seq_printf(m, "IFB: 0x%p\n", &lp->hcfCtx ); seq_printf(m, "flags: %#.8lX\n", lp->flags ); //;?use this format from now on seq_printf(m, "DebugFlag(wl_private) 0x%04X\n", lp->DebugFlag ); #if DBG seq_printf(m, "DebugFlag (DbgInfo): 0x%08lX\n", DbgInfo->DebugFlag ); #endif // DBG seq_printf(m, "is_registered: 0x%04X\n", lp->is_registered ); //x CFG_DRV_INFO_STRCT "driverInfo: 0x%04X\n", lp->driverInfo ); printf_strct( m, "driverInfo", (hcf_16*)&lp->driverInfo ); //x CFG_IDENTITY_STRCT "driverIdentity: 0x%04X\n", lp->driverIdentity ); printf_strct( m, "driverIdentity", (hcf_16*)&lp->driverIdentity ); //x CFG_FW_IDENTITY_STRCT "StationIdentity: 0x%04X\n", lp->StationIdentity ); printf_strct( m, "StationIdentity", (hcf_16*)&lp->StationIdentity ); //x CFG_PRI_IDENTITY_STRCT "PrimaryIdentity: 0x%04X\n", lp->PrimaryIdentity ); printf_strct( m, "PrimaryIdentity", (hcf_16*)&lp->hcfCtx.IFB_PRIIdentity ); printf_strct( m, "PrimarySupplier", (hcf_16*)&lp->hcfCtx.IFB_PRISup ); //x CFG_PRI_IDENTITY_STRCT "NICIdentity: 0x%04X\n", lp->NICIdentity ); printf_strct( m, "NICIdentity", (hcf_16*)&lp->NICIdentity ); //x ltv_t "ltvRecord: 0x%04X\n", lp->ltvRecord ); seq_printf(m, "txBytes: 0x%08lX\n", 
lp->txBytes ); seq_printf(m, "maxPort: 0x%04X\n", lp->maxPort ); /* 0 for STA, 6 for AP */ /* Elements used for async notification from hardware */ //x RID_LOG_STRCT RidList[10]; //x ltv_t "updatedRecord: 0x%04X\n", lp->updatedRecord ); //x PROBE_RESP "ProbeResp: 0x%04X\n", lp->ProbeResp ); //x ASSOC_STATUS_STRCT "assoc_stat: 0x%04X\n", lp->assoc_stat ); //x SECURITY_STATUS_STRCT "sec_stat: 0x%04X\n", lp->sec_stat ); //x u_char lookAheadBuf[WVLAN_MAX_LOOKAHEAD]; seq_printf(m, "PortType: 0x%04X\n", lp->PortType ); // 1 - 3 (1 [Normal] | 3 [AdHoc]) seq_printf(m, "Channel: 0x%04X\n", lp->Channel ); // 0 - 14 (0) //x hcf_16 TxRateControl[2]; seq_printf(m, "TxRateControl[2]: 0x%04X 0x%04X\n", lp->TxRateControl[0], lp->TxRateControl[1] ); seq_printf(m, "DistanceBetweenAPs: 0x%04X\n", lp->DistanceBetweenAPs ); // 1 - 3 (1) seq_printf(m, "RTSThreshold: 0x%04X\n", lp->RTSThreshold ); // 0 - 2347 (2347) seq_printf(m, "PMEnabled: 0x%04X\n", lp->PMEnabled ); // 0 - 2, 8001 - 8002 (0) seq_printf(m, "MicrowaveRobustness: 0x%04X\n", lp->MicrowaveRobustness );// 0 - 1 (0) seq_printf(m, "CreateIBSS: 0x%04X\n", lp->CreateIBSS ); // 0 - 1 (0) seq_printf(m, "MulticastReceive: 0x%04X\n", lp->MulticastReceive ); // 0 - 1 (1) seq_printf(m, "MaxSleepDuration: 0x%04X\n", lp->MaxSleepDuration ); // 0 - 65535 (100) //x hcf_8 MACAddress[ETH_ALEN]; printf_hcf_8(m, "MACAddress", lp->MACAddress, ETH_ALEN ); //x char NetworkName[HCF_MAX_NAME_LEN+1]; seq_printf(m, "NetworkName: %.32s\n", lp->NetworkName ); //x char StationName[HCF_MAX_NAME_LEN+1]; seq_printf(m, "EnableEncryption: 0x%04X\n", lp->EnableEncryption ); // 0 - 1 (0) //x char Key1[MAX_KEY_LEN+1]; printf_hcf_8( m, "Key1", lp->Key1, MAX_KEY_LEN ); //x char Key2[MAX_KEY_LEN+1]; //x char Key3[MAX_KEY_LEN+1]; //x char Key4[MAX_KEY_LEN+1]; seq_printf(m, "TransmitKeyID: 0x%04X\n", lp->TransmitKeyID ); // 1 - 4 (1) //x CFG_DEFAULT_KEYS_STRCT "DefaultKeys: 0x%04X\n", lp->DefaultKeys ); //x u_char mailbox[MB_SIZE]; //x char 
szEncryption[MAX_ENC_LEN]; seq_printf(m, "driverEnable: 0x%04X\n", lp->driverEnable ); seq_printf(m, "wolasEnable: 0x%04X\n", lp->wolasEnable ); seq_printf(m, "atimWindow: 0x%04X\n", lp->atimWindow ); seq_printf(m, "holdoverDuration: 0x%04X\n", lp->holdoverDuration ); //x hcf_16 MulticastRate[2]; seq_printf(m, "authentication: 0x%04X\n", lp->authentication ); // is this AP specific? seq_printf(m, "promiscuousMode: 0x%04X\n", lp->promiscuousMode ); seq_printf(m, "DownloadFirmware: 0x%04X\n", lp->DownloadFirmware ); // 0 - 2 (0 [None] | 1 [STA] | 2 [AP]) seq_printf(m, "AuthKeyMgmtSuite: 0x%04X\n", lp->AuthKeyMgmtSuite ); seq_printf(m, "loadBalancing: 0x%04X\n", lp->loadBalancing ); seq_printf(m, "mediumDistribution: 0x%04X\n", lp->mediumDistribution ); seq_printf(m, "txPowLevel: 0x%04X\n", lp->txPowLevel ); // seq_printf(m, "shortRetryLimit: 0x%04X\n", lp->shortRetryLimit ); // seq_printf(m, "longRetryLimit: 0x%04X\n", lp->longRetryLimit ); //x hcf_16 srsc[2]; //x hcf_16 brsc[2]; seq_printf(m, "connectionControl: 0x%04X\n", lp->connectionControl ); //x //hcf_16 probeDataRates[2]; seq_printf(m, "ownBeaconInterval: 0x%04X\n", lp->ownBeaconInterval ); seq_printf(m, "coexistence: 0x%04X\n", lp->coexistence ); //x WVLAN_FRAME "txF: 0x%04X\n", lp->txF ); //x WVLAN_LFRAME txList[DEFAULT_NUM_TX_FRAMES]; //x struct list_head "txFree: 0x%04X\n", lp->txFree ); //x struct list_head txQ[WVLAN_MAX_TX_QUEUES]; seq_printf(m, "netif_queue_on: 0x%04X\n", lp->netif_queue_on ); seq_printf(m, "txQ_count: 0x%04X\n", lp->txQ_count ); //x DESC_STRCT "desc_rx: 0x%04X\n", lp->desc_rx ); //x DESC_STRCT "desc_tx: 0x%04X\n", lp->desc_tx ); //x WVLAN_PORT_STATE "portState: 0x%04X\n", lp->portState ); //x ScanResult "scan_results: 0x%04X\n", lp->scan_results ); //x ProbeResult "probe_results: 0x%04X\n", lp->probe_results ); seq_printf(m, "probe_num_aps: 0x%04X\n", lp->probe_num_aps ); seq_printf(m, "use_dma: 0x%04X\n", lp->use_dma ); //x DMA_STRCT "dma: 0x%04X\n", lp->dma ); #ifdef USE_RTS 
seq_printf(m, "useRTS: 0x%04X\n", lp->useRTS ); #endif // USE_RTS #if 1 //;? (HCF_TYPE) & HCF_TYPE_AP //;?should we restore this to allow smaller memory footprint //;?I guess not. This should be brought under Debug mode only seq_printf(m, "DTIMPeriod: 0x%04X\n", lp->DTIMPeriod ); // 1 - 255 (1) seq_printf(m, "multicastPMBuffering: 0x%04X\n", lp->multicastPMBuffering ); seq_printf(m, "RejectAny: 0x%04X\n", lp->RejectAny ); // 0 - 1 (0) seq_printf(m, "ExcludeUnencrypted: 0x%04X\n", lp->ExcludeUnencrypted ); // 0 - 1 (1) seq_printf(m, "intraBSSRelay: 0x%04X\n", lp->intraBSSRelay ); seq_printf(m, "wlags49_type: 0x%08lX\n", lp->wlags49_type ); #ifdef USE_WDS //x WVLAN_WDS_IF wds_port[NUM_WDS_PORTS]; #endif // USE_WDS #endif // HCF_AP } else if ( lp->wlags49_type == 2 ){ seq_printf(m, "tallies to be added\n" ); //Hermes Tallies (IFB substructure) { p = &lp->hcfCtx.IFB_NIC_Tallies; seq_printf(m, "TxUnicastFrames: %08lX\n", p->TxUnicastFrames ); seq_printf(m, "TxMulticastFrames: %08lX\n", p->TxMulticastFrames ); seq_printf(m, "TxFragments: %08lX\n", p->TxFragments ); seq_printf(m, "TxUnicastOctets: %08lX\n", p->TxUnicastOctets ); seq_printf(m, "TxMulticastOctets: %08lX\n", p->TxMulticastOctets ); seq_printf(m, "TxDeferredTransmissions: %08lX\n", p->TxDeferredTransmissions ); seq_printf(m, "TxSingleRetryFrames: %08lX\n", p->TxSingleRetryFrames ); seq_printf(m, "TxMultipleRetryFrames: %08lX\n", p->TxMultipleRetryFrames ); seq_printf(m, "TxRetryLimitExceeded: %08lX\n", p->TxRetryLimitExceeded ); seq_printf(m, "TxDiscards: %08lX\n", p->TxDiscards ); seq_printf(m, "RxUnicastFrames: %08lX\n", p->RxUnicastFrames ); seq_printf(m, "RxMulticastFrames: %08lX\n", p->RxMulticastFrames ); seq_printf(m, "RxFragments: %08lX\n", p->RxFragments ); seq_printf(m, "RxUnicastOctets: %08lX\n", p->RxUnicastOctets ); seq_printf(m, "RxMulticastOctets: %08lX\n", p->RxMulticastOctets ); seq_printf(m, "RxFCSErrors: %08lX\n", p->RxFCSErrors ); seq_printf(m, "RxDiscardsNoBuffer: %08lX\n", 
p->RxDiscardsNoBuffer ); seq_printf(m, "TxDiscardsWrongSA: %08lX\n", p->TxDiscardsWrongSA ); seq_printf(m, "RxWEPUndecryptable: %08lX\n", p->RxWEPUndecryptable ); seq_printf(m, "RxMsgInMsgFragments: %08lX\n", p->RxMsgInMsgFragments ); seq_printf(m, "RxMsgInBadMsgFragments: %08lX\n", p->RxMsgInBadMsgFragments ); seq_printf(m, "RxDiscardsWEPICVError: %08lX\n", p->RxDiscardsWEPICVError ); seq_printf(m, "RxDiscardsWEPExcluded: %08lX\n", p->RxDiscardsWEPExcluded ); #if (HCF_EXT) & HCF_EXT_TALLIES_FW //to be added ;? #endif // HCF_EXT_TALLIES_FW } else if ( lp->wlags49_type & 0x8000 ) { //;?kludgy but it is unclear to me were else to place this #if DBG DbgInfo->DebugFlag = lp->wlags49_type & 0x7FFF; #endif // DBG lp->wlags49_type = 0; //default to IFB again ;? } else { seq_printf(m, "unknown value for wlags49_type: 0x%08lX\n", lp->wlags49_type ); seq_puts(m, "0x0000 - IFB\n" "0x0001 - wl_private\n" "0x0002 - Tallies\n" "0x8xxx - Change debufflag\n" "ERROR 0001\nWARNING 0002\nNOTICE 0004\nTRACE 0008\n" "VERBOSE 0010\nPARAM 0020\nBREAK 0040\nRX 0100\n" "TX 0200\nDS 0400\n"); } return 0; } // scull_read_procmem static int write_int(struct file *file, const char *buffer, unsigned long count, void *data) { static char proc_number[11]; unsigned int nr = 0; if (count > 9) { count = -EINVAL; } else if ( copy_from_user(proc_number, buffer, count) ) { count = -EFAULT; } if (count > 0 ) { proc_number[count] = 0; nr = simple_strtoul(proc_number , NULL, 0); *(unsigned int *)data = nr; if ( nr & 0x8000 ) { //;?kludgy but it is unclear to me were else to place this #if DBG DbgInfo->DebugFlag = nr & 0x7FFF; #endif // DBG } } DBG_PRINT( "value: %08X\n", nr ); return count; } // write_int #endif /* SCULL_USE_PROC */ #ifdef DN554 #define RUN_AT(x) (jiffies+(x)) //"borrowed" from include/pcmcia/k_compat.h #define DS_OOR 0x8000 //Deepsleep OutOfRange Status lp->timer_oor_cnt = DS_OOR; init_timer( &lp->timer_oor ); lp->timer_oor.function = timer_oor; lp->timer_oor.data = (unsigned long)lp; 
	/* Arm the first out-of-range poll 3 seconds out (DN554 experiment;
	   dead code unless DN554 is defined) */
	lp->timer_oor.expires = RUN_AT( 3 * HZ );
	add_timer( &lp->timer_oor );
	printk(KERN_NOTICE "wl_enable: %ld\n", jiffies );		//;?remove me 1 day
#endif //DN554

#ifdef DN554
/*******************************************************************************
 *	timer_oor()
 *******************************************************************************
 *
 *  DESCRIPTION:
 *
 *      Self-re-arming timer callback for the DN554 deep-sleep/out-of-range
 *      experiment. Logs the current counter, grows the poll interval by 10
 *      seconds per expiry (capped at 300 seconds), and re-arms itself.
 *      NOTE(review): compiled out unless DN554 is defined; purpose of the
 *      experiment is presumed from the DS_OOR naming -- confirm before reuse.
 *
 *  PARAMETERS:
 *
 *      arg - a u_long holding a pointer to the device's struct wl_private
 *            (cast back inside the handler).
 *
 *  RETURNS:
 *
 *      N/A
 *
 ******************************************************************************/
void timer_oor( u_long arg )
{
	struct wl_private *lp = (struct wl_private *)arg;

	DBG_PARAM( DbgInfo, "arg", "0x%08lx", arg );

	printk(KERN_NOTICE "timer_oor: %ld 0x%04X\n", jiffies, lp->timer_oor_cnt );	//;?remove me 1 day

	/* timer_oor_cnt keeps the interval (seconds) in the low bits and the
	   DS_OOR status flag (0x8000) in the high bit; mask the flag off when
	   doing arithmetic on the interval */
	lp->timer_oor_cnt += 10;
	if ( (lp->timer_oor_cnt & ~DS_OOR) > 300 ) {
		lp->timer_oor_cnt = 300;	/* cap the backoff at 300 seconds */
	}
	lp->timer_oor_cnt |= DS_OOR;		/* re-assert the out-of-range flag */

	/* Re-arm: next expiry is (interval) seconds from now */
	init_timer( &lp->timer_oor );
	lp->timer_oor.function = timer_oor;
	lp->timer_oor.data = (unsigned long)lp;
	lp->timer_oor.expires = RUN_AT( (lp->timer_oor_cnt & ~DS_OOR) * HZ );
	add_timer( &lp->timer_oor );
} // timer_oor
#endif //DN554

MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
GalaxyTab4/maxicm_kernel_samsung_degaswifi
drivers/block/cciss.c
249
152258
/* * Disk Array driver for HP Smart Array controllers. * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307, USA. * * Questions/Comments/Bugfixes to iss_storagedev@hp.com * */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/pci-aspm.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/bio.h> #include <linux/blkpg.h> #include <linux/timer.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/hdreg.h> #include <linux/spinlock.h> #include <linux/compat.h> #include <linux/mutex.h> #include <linux/bitmap.h> #include <linux/io.h> #include <asm/uaccess.h> #include <linux/dma-mapping.h> #include <linux/blkdev.h> #include <linux/genhd.h> #include <linux/completion.h> #include <scsi/scsi.h> #include <scsi/sg.h> #include <scsi/scsi_ioctl.h> #include <linux/cdrom.h> #include <linux/scatterlist.h> #include <linux/kthread.h> #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) #define DRIVER_NAME "HP CISS Driver (v 3.6.26)" #define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26) /* Embedded module documentation macros - see modules.h */ MODULE_AUTHOR("Hewlett-Packard Company"); 
MODULE_DESCRIPTION("Driver for HP Smart Array Controllers"); MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); MODULE_VERSION("3.6.26"); MODULE_LICENSE("GPL"); static int cciss_tape_cmds = 6; module_param(cciss_tape_cmds, int, 0644); MODULE_PARM_DESC(cciss_tape_cmds, "number of commands to allocate for tape devices (default: 6)"); static int cciss_simple_mode; module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(cciss_simple_mode, "Use 'simple mode' rather than 'performant mode'"); static int cciss_allow_hpsa; module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(cciss_allow_hpsa, "Prevent cciss driver from accessing hardware known to be " " supported by the hpsa driver"); static DEFINE_MUTEX(cciss_mutex); static struct proc_dir_entry *proc_cciss; #include "cciss_cmd.h" #include "cciss.h" #include <linux/cciss_ioctl.h> /* define the PCI info for the cards we can control */ static const struct pci_device_id cciss_pci_device_id[] = { {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212}, {PCI_VENDOR_ID_HP, 
PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D}, {0,} }; MODULE_DEVICE_TABLE(pci, cciss_pci_device_id); /* board_id = Subsystem Device ID & Vendor ID * product = Marketing Name for the board * access = Address of the struct of function pointers */ static struct board_type products[] = { {0x40700E11, "Smart Array 5300", &SA5_access}, {0x40800E11, "Smart Array 5i", &SA5B_access}, {0x40820E11, "Smart Array 532", &SA5B_access}, {0x40830E11, "Smart Array 5312", &SA5B_access}, {0x409A0E11, "Smart Array 641", &SA5_access}, {0x409B0E11, "Smart Array 642", &SA5_access}, {0x409C0E11, "Smart Array 6400", &SA5_access}, {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, {0x40910E11, "Smart Array 6i", &SA5_access}, {0x3225103C, "Smart Array P600", &SA5_access}, {0x3223103C, "Smart Array P800", &SA5_access}, {0x3234103C, "Smart Array P400", &SA5_access}, {0x3235103C, "Smart Array P400i", &SA5_access}, {0x3211103C, "Smart Array E200i", &SA5_access}, {0x3212103C, "Smart Array E200", &SA5_access}, {0x3213103C, "Smart Array E200i", &SA5_access}, {0x3214103C, "Smart Array E200i", &SA5_access}, {0x3215103C, "Smart Array E200i", &SA5_access}, {0x3237103C, "Smart Array E500", &SA5_access}, {0x3223103C, "Smart Array P800", &SA5_access}, {0x3234103C, "Smart Array P400", &SA5_access}, {0x323D103C, "Smart Array P700m", &SA5_access}, }; /* How long to wait (in milliseconds) for board to go into simple mode */ #define MAX_CONFIG_WAIT 30000 #define MAX_IOCTL_CONFIG_WAIT 1000 /*define how many times we will try a command because of bus resets */ #define MAX_CMD_RETRIES 3 #define MAX_CTLR 32 /* Originally cciss driver only supports 8 major numbers */ #define MAX_CTLR_ORIG 8 static ctlr_info_t *hba[MAX_CTLR]; static struct task_struct *cciss_scan_thread; static 
DEFINE_MUTEX(scan_mutex); static LIST_HEAD(scan_q); static void do_cciss_request(struct request_queue *q); static irqreturn_t do_cciss_intx(int irq, void *dev_id); static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id); static int cciss_open(struct block_device *bdev, fmode_t mode); static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode); static void cciss_release(struct gendisk *disk, fmode_t mode); static int cciss_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); static int cciss_revalidate(struct gendisk *disk); static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl); static int deregister_disk(ctlr_info_t *h, int drv_index, int clear_all, int via_ioctl); static void cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size, unsigned int *block_size); static void cciss_read_capacity_16(ctlr_info_t *h, int logvol, sector_t *total_size, unsigned int *block_size); static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, sector_t total_size, unsigned int block_size, InquiryData_struct *inq_buff, drive_info_struct *drv); static void cciss_interrupt_mode(ctlr_info_t *); static int cciss_enter_simple_mode(struct ctlr_info *h); static void start_io(ctlr_info_t *h); static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, __u8 page_code, unsigned char scsi3addr[], int cmd_type); static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, int attempt_retry); static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); static int add_to_scan_list(struct ctlr_info *h); static int scan_thread(void *data); static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); static void cciss_hba_release(struct device *dev); static void cciss_device_release(struct device *dev); static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); static void 
cciss_free_drive_info(ctlr_info_t *h, int drv_index); static inline u32 next_command(ctlr_info_t *h); static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, u64 *cfg_offset); static int cciss_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar); static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag); static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable); /* performant mode helper functions */ static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, int *bucket_map); static void cciss_put_controller_into_performant_mode(ctlr_info_t *h); #ifdef CONFIG_PROC_FS static void cciss_procinit(ctlr_info_t *h); #else static void cciss_procinit(ctlr_info_t *h) { } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_COMPAT static int cciss_compat_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); #endif static const struct block_device_operations cciss_fops = { .owner = THIS_MODULE, .open = cciss_unlocked_open, .release = cciss_release, .ioctl = cciss_ioctl, .getgeo = cciss_getgeo, #ifdef CONFIG_COMPAT .compat_ioctl = cciss_compat_ioctl, #endif .revalidate_disk = cciss_revalidate, }; /* set_performant_mode: Modify the tag for cciss performant * set bit 0 for pull model, bits 3-1 for block fetch * register number */ static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c) { if (likely(h->transMethod & CFGTBL_Trans_Performant)) c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); } /* * Enqueuing and dequeuing functions for cmdlists. */ static inline void addQ(struct list_head *list, CommandList_struct *c) { list_add_tail(&c->list, list); } static inline void removeQ(CommandList_struct *c) { /* * After kexec/dump some commands might still * be in flight, which the firmware will try * to complete. 
Resetting the firmware doesn't work * with old fw revisions, so we have to mark * them off as 'stale' to prevent the driver from * falling over. */ if (WARN_ON(list_empty(&c->list))) { c->cmd_type = CMD_MSG_STALE; return; } list_del_init(&c->list); } static void enqueue_cmd_and_start_io(ctlr_info_t *h, CommandList_struct *c) { unsigned long flags; set_performant_mode(h, c); spin_lock_irqsave(&h->lock, flags); addQ(&h->reqQ, c); h->Qdepth++; if (h->Qdepth > h->maxQsinceinit) h->maxQsinceinit = h->Qdepth; start_io(h); spin_unlock_irqrestore(&h->lock, flags); } static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list, int nr_cmds) { int i; if (!cmd_sg_list) return; for (i = 0; i < nr_cmds; i++) { kfree(cmd_sg_list[i]); cmd_sg_list[i] = NULL; } kfree(cmd_sg_list); } static SGDescriptor_struct **cciss_allocate_sg_chain_blocks( ctlr_info_t *h, int chainsize, int nr_cmds) { int j; SGDescriptor_struct **cmd_sg_list; if (chainsize <= 0) return NULL; cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL); if (!cmd_sg_list) return NULL; /* Build up chain blocks for each command */ for (j = 0; j < nr_cmds; j++) { /* Need a block of chainsized s/g elements. 
*/ cmd_sg_list[j] = kmalloc((chainsize * sizeof(*cmd_sg_list[j])), GFP_KERNEL); if (!cmd_sg_list[j]) { dev_err(&h->pdev->dev, "Cannot get memory " "for s/g chains.\n"); goto clean; } } return cmd_sg_list; clean: cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds); return NULL; } static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c) { SGDescriptor_struct *chain_sg; u64bit temp64; if (c->Header.SGTotal <= h->max_cmd_sgentries) return; chain_sg = &c->SG[h->max_cmd_sgentries - 1]; temp64.val32.lower = chain_sg->Addr.lower; temp64.val32.upper = chain_sg->Addr.upper; pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); } static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c, SGDescriptor_struct *chain_block, int len) { SGDescriptor_struct *chain_sg; u64bit temp64; chain_sg = &c->SG[h->max_cmd_sgentries - 1]; chain_sg->Ext = CCISS_SG_CHAIN; chain_sg->Len = len; temp64.val = pci_map_single(h->pdev, chain_block, len, PCI_DMA_TODEVICE); chain_sg->Addr.lower = temp64.val32.lower; chain_sg->Addr.upper = temp64.val32.upper; } #include "cciss_scsi.c" /* For SCSI tape support */ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", "UNKNOWN" }; #define RAID_UNKNOWN (ARRAY_SIZE(raid_label)-1) #ifdef CONFIG_PROC_FS /* * Report information about this controller. 
*/ #define ENG_GIG 1000000000 #define ENG_GIG_FACTOR (ENG_GIG/512) #define ENGAGE_SCSI "engage scsi" static void cciss_seq_show_header(struct seq_file *seq) { ctlr_info_t *h = seq->private; seq_printf(seq, "%s: HP %s Controller\n" "Board ID: 0x%08lx\n" "Firmware Version: %c%c%c%c\n" "IRQ: %d\n" "Logical drives: %d\n" "Current Q depth: %d\n" "Current # commands on controller: %d\n" "Max Q depth since init: %d\n" "Max # commands on controller since init: %d\n" "Max SG entries since init: %d\n", h->devname, h->product_name, (unsigned long)h->board_id, h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3], (unsigned int)h->intr[h->intr_mode], h->num_luns, h->Qdepth, h->commands_outstanding, h->maxQsinceinit, h->max_outstanding, h->maxSG); #ifdef CONFIG_CISS_SCSI_TAPE cciss_seq_tape_report(seq, h); #endif /* CONFIG_CISS_SCSI_TAPE */ } static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) { ctlr_info_t *h = seq->private; unsigned long flags; /* prevent displaying bogus info during configuration * or deconfiguration of a logical volume */ spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) { spin_unlock_irqrestore(&h->lock, flags); return ERR_PTR(-EBUSY); } h->busy_configuring = 1; spin_unlock_irqrestore(&h->lock, flags); if (*pos == 0) cciss_seq_show_header(seq); return pos; } static int cciss_seq_show(struct seq_file *seq, void *v) { sector_t vol_sz, vol_sz_frac; ctlr_info_t *h = seq->private; unsigned ctlr = h->ctlr; loff_t *pos = v; drive_info_struct *drv = h->drv[*pos]; if (*pos > h->highest_lun) return 0; if (drv == NULL) /* it's possible for h->drv[] to have holes. 
*/ return 0; if (drv->heads == 0) return 0; vol_sz = drv->nr_blocks; vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); vol_sz_frac *= 100; sector_div(vol_sz_frac, ENG_GIG_FACTOR); if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN) drv->raid_level = RAID_UNKNOWN; seq_printf(seq, "cciss/c%dd%d:" "\t%4u.%02uGB\tRAID %s\n", ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, raid_label[drv->raid_level]); return 0; } static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ctlr_info_t *h = seq->private; if (*pos > h->highest_lun) return NULL; *pos += 1; return pos; } static void cciss_seq_stop(struct seq_file *seq, void *v) { ctlr_info_t *h = seq->private; /* Only reset h->busy_configuring if we succeeded in setting * it during cciss_seq_start. */ if (v == ERR_PTR(-EBUSY)) return; h->busy_configuring = 0; } static const struct seq_operations cciss_seq_ops = { .start = cciss_seq_start, .show = cciss_seq_show, .next = cciss_seq_next, .stop = cciss_seq_stop, }; static int cciss_seq_open(struct inode *inode, struct file *file) { int ret = seq_open(file, &cciss_seq_ops); struct seq_file *seq = file->private_data; if (!ret) seq->private = PDE_DATA(inode); return ret; } static ssize_t cciss_proc_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int err; char *buffer; #ifndef CONFIG_CISS_SCSI_TAPE return -EINVAL; #endif if (!buf || length > PAGE_SIZE - 1) return -EINVAL; buffer = (char *)__get_free_page(GFP_KERNEL); if (!buffer) return -ENOMEM; err = -EFAULT; if (copy_from_user(buffer, buf, length)) goto out; buffer[length] = '\0'; #ifdef CONFIG_CISS_SCSI_TAPE if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { struct seq_file *seq = file->private_data; ctlr_info_t *h = seq->private; err = cciss_engage_scsi(h); if (err == 0) err = length; } else #endif /* CONFIG_CISS_SCSI_TAPE */ err = -EINVAL; /* might be nice to have "disengage" too, but it's not safely possible. (only 1 module use count, lock issues.) 
*/

out:
	free_page((unsigned long)buffer);
	return err;
}

static const struct file_operations cciss_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = cciss_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
	.write	 = cciss_proc_write,
};

/* Create /proc/driver/cciss/<devname> for one controller; the shared
 * /proc/driver/cciss directory is created lazily on first use. */
static void cciss_procinit(ctlr_info_t *h)
{
	struct proc_dir_entry *pde;

	if (proc_cciss == NULL)
		proc_cciss = proc_mkdir("driver/cciss", NULL);
	if (!proc_cciss)
		return;
	pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
					S_IROTH, proc_cciss,
					&cciss_proc_fops, h);
}
#endif				/* CONFIG_PROC_FS */

#define MAX_PRODUCT_NAME_LEN 19

#define to_hba(n) container_of(n, struct ctlr_info, dev)
#define to_drv(n) container_of(n, drive_info_struct, dev)

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* SmartArray P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

/* Returns 0 if board_id is on the hard-reset blacklist, 1 otherwise. */
static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

/* Returns 0 if board_id is on the soft-reset blacklist, 1 otherwise. */
static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

/* A board is resettable if either reset method works for it. */
static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

/* sysfs "resettable" attribute: 1 if the controller can be reset. */
static ssize_t host_show_resettable(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct ctlr_info *h = to_hba(dev);

	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);

/* sysfs "rescan" attribute (write-only): queue the controller for a
 * LUN rescan by the scan thread and wait for it to complete. */
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h = to_hba(dev);

	add_to_scan_list(h);
	wake_up_process(cciss_scan_thread);
	wait_for_completion_interruptible(&h->scan_wait);

	return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);

/* sysfs "transport_mode" attribute: "performant" or "simple". */
static ssize_t host_show_transport_mode(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct ctlr_info *h = to_hba(dev);

	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL);

/* sysfs "unique_id": the drive's 16-byte serial number, copied under
 * h->lock to avoid tearing during reconfiguration. */
static ssize_t dev_show_unique_id(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	__u8 sn[16];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring)
		ret = -EBUSY;
	else
		memcpy(sn, drv->serial_no, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);

	if (ret)
		return ret;
	else
		return snprintf(buf, 16 * 2 + 2,
				"%02X%02X%02X%02X%02X%02X%02X%02X"
				"%02X%02X%02X%02X%02X%02X%02X%02X\n",
				sn[0], sn[1], sn[2], sn[3],
				sn[4], sn[5], sn[6], sn[7],
				sn[8], sn[9], sn[10], sn[11],
				sn[12], sn[13], sn[14], sn[15]);
}
static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);

/* sysfs "vendor": SCSI vendor string, snapshotted under h->lock. */
static ssize_t dev_show_vendor(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	char vendor[VENDOR_LEN + 1];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring)
		ret = -EBUSY;
	else
memcpy(vendor, drv->vendor, VENDOR_LEN + 1); spin_unlock_irqrestore(&h->lock, flags); if (ret) return ret; else return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor); } static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL); static ssize_t dev_show_model(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); char model[MODEL_LEN + 1]; unsigned long flags; int ret = 0; spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) ret = -EBUSY; else memcpy(model, drv->model, MODEL_LEN + 1); spin_unlock_irqrestore(&h->lock, flags); if (ret) return ret; else return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model); } static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL); static ssize_t dev_show_rev(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); char rev[REV_LEN + 1]; unsigned long flags; int ret = 0; spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) ret = -EBUSY; else memcpy(rev, drv->rev, REV_LEN + 1); spin_unlock_irqrestore(&h->lock, flags); if (ret) return ret; else return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev); } static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); static ssize_t cciss_show_lunid(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); unsigned long flags; unsigned char lunid[8]; spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) { spin_unlock_irqrestore(&h->lock, flags); return -EBUSY; } if (!drv->heads) { spin_unlock_irqrestore(&h->lock, flags); return -ENOTTY; } memcpy(lunid, drv->LunID, sizeof(lunid)); spin_unlock_irqrestore(&h->lock, flags); return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", lunid[0], lunid[1], lunid[2], lunid[3], lunid[4], lunid[5], lunid[6], lunid[7]); } static DEVICE_ATTR(lunid, 
	S_IRUGO, cciss_show_lunid, NULL);

/* sysfs "raid_level": the drive's RAID level label, read under h->lock
 * and clamped to RAID_UNKNOWN before indexing raid_label[]. */
static ssize_t cciss_show_raid_level(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	int raid;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -EBUSY;
	}
	raid = drv->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (raid < 0 || raid > RAID_UNKNOWN)
		raid = RAID_UNKNOWN;

	return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
			raid_label[raid]);
}
static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);

/* sysfs "usage_count": number of current opens of the drive. */
static ssize_t cciss_show_usage_count(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	unsigned long flags;
	int count;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -EBUSY;
	}
	count = drv->usage_count;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", count);
}
static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);

/* Per-controller (host) attributes. */
static struct attribute *cciss_host_attrs[] = {
	&dev_attr_rescan.attr,
	&dev_attr_resettable.attr,
	&dev_attr_transport_mode.attr,
	NULL
};

static struct attribute_group cciss_host_attr_group = {
	.attrs = cciss_host_attrs,
};

static const struct attribute_group *cciss_host_attr_groups[] = {
	&cciss_host_attr_group,
	NULL
};

static struct device_type cciss_host_type = {
	.name		= "cciss_host",
	.groups		= cciss_host_attr_groups,
	.release	= cciss_hba_release,
};

/* Per-logical-drive attributes. */
static struct attribute *cciss_dev_attrs[] = {
	&dev_attr_unique_id.attr,
	&dev_attr_model.attr,
	&dev_attr_vendor.attr,
	&dev_attr_rev.attr,
	&dev_attr_lunid.attr,
	&dev_attr_raid_level.attr,
	&dev_attr_usage_count.attr,
	NULL
};

static struct attribute_group cciss_dev_attr_group = {
	.attrs = cciss_dev_attrs,
};

static const struct attribute_group *cciss_dev_attr_groups[] = {
	&cciss_dev_attr_group,
	NULL
};

static struct device_type cciss_dev_type = {
	.name		= "cciss_device",
	.groups		= cciss_dev_attr_groups,
	.release	= cciss_device_release,
};

static struct bus_type cciss_bus_type = {
	.name		= "cciss",
};

/*
 * cciss_hba_release is called when the reference count
 * of h->dev goes to zero.
 */
static void cciss_hba_release(struct device *dev)
{
	/*
	 * nothing to do, but need this to avoid a warning
	 * about not having a release handler from lib/kref.c.
	 */
}

/*
 * Initialize sysfs entry for each controller.  This sets up and registers
 * the 'cciss#' directory for each individual controller under
 * /sys/bus/pci/devices/<dev>/.
 */
static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
{
	device_initialize(&h->dev);
	h->dev.type = &cciss_host_type;
	h->dev.bus = &cciss_bus_type;
	dev_set_name(&h->dev, "%s", h->devname);
	h->dev.parent = &h->pdev->dev;

	return device_add(&h->dev);
}

/*
 * Remove sysfs entries for an hba.
 */
static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
{
	device_del(&h->dev);
	put_device(&h->dev); /* final put. */
}

/* cciss_device_release is called when the reference count
 * of h->drv[x]dev goes to zero.
 */
static void cciss_device_release(struct device *dev)
{
	drive_info_struct *drv = to_drv(dev);
	kfree(drv);
}

/*
 * Initialize sysfs for each logical drive.  This sets up and registers
 * the 'c#d#' directory for each individual logical drive under
 * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from
 * /sys/block/cciss!c#d# to this entry.
 */
static long cciss_create_ld_sysfs_entry(struct ctlr_info *h,
				       int drv_index)
{
	struct device *dev;

	/* idempotent: only register the device once per drive */
	if (h->drv[drv_index]->device_initialized)
		return 0;

	dev = &h->drv[drv_index]->dev;
	device_initialize(dev);
	dev->type = &cciss_dev_type;
	dev->bus = &cciss_bus_type;
	dev_set_name(dev, "c%dd%d", h->ctlr, drv_index);
	dev->parent = &h->dev;
	h->drv[drv_index]->device_initialized = 1;
	return device_add(dev);
}

/*
 * Remove sysfs entries for a logical drive.
 */
static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
	int ctlr_exiting)
{
	struct device *dev = &h->drv[drv_index]->dev;

	/* special case for c*d0, we only destroy it on controller exit */
	if (drv_index == 0 && !ctlr_exiting)
		return;

	device_del(dev);
	put_device(dev); /* the "final" put. */
	h->drv[drv_index] = NULL;
}

/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.
 */
static CommandList_struct *cmd_alloc(ctlr_info_t *h)
{
	CommandList_struct *c;
	int i;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* claim a free slot; test_and_set_bit makes the claim atomic, so
	 * retry if another CPU grabbed the same index first */
	do {
		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
		if (i == h->nr_cmds)
			return NULL;
	} while (test_and_set_bit(i, h->cmd_pool_bits) != 0);
	c = h->cmd_pool + i;
	memset(c, 0, sizeof(CommandList_struct));
	cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
	c->err_info = h->errinfo_pool + i;
	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	err_dma_handle = h->errinfo_pool_dhandle
	    + i * sizeof(ErrorInfo_struct);
	h->nr_allocs++;

	c->cmdindex = i;

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (__u32) cmd_dma_handle;
	/* split the 64-bit error-info DMA address into the two 32-bit
	 * halves the controller's descriptor expects */
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	c->ctlr = h->ctlr;
	return c;
}

/* allocate a command using pci_alloc_consistent, used for ioctls,
 * etc., not for the main i/o path.
 */
static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
{
	CommandList_struct *c;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
		sizeof(CommandList_struct), &cmd_dma_handle);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(CommandList_struct));

	c->cmdindex = -1;	/* marks the command as not pool-managed */

	c->err_info = (ErrorInfo_struct *)
	    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
		    &err_dma_handle);

	if (c->err_info == NULL) {
		pci_free_consistent(h->pdev,
			sizeof(CommandList_struct), c, cmd_dma_handle);
		return NULL;
	}
	memset(c->err_info, 0, sizeof(ErrorInfo_struct));

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	c->ctlr = h->ctlr;
	return c;
}

/* Return a pool-managed command (from cmd_alloc) to the bitmap. */
static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
{
	int i;

	i = c - h->cmd_pool;
	clear_bit(i, h->cmd_pool_bits);
	h->nr_frees++;
}

/* Free a command obtained from cmd_special_alloc, including its
 * coherent DMA error-info buffer. */
static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
{
	u64bit temp64;

	temp64.val32.lower = c->ErrDesc.Addr.lower;
	temp64.val32.upper = c->ErrDesc.Addr.upper;
	pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
			    c->err_info, (dma_addr_t) temp64.val);
	pci_free_consistent(h->pdev, sizeof(CommandList_struct), c,
		(dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
}

static inline ctlr_info_t *get_host(struct gendisk *disk)
{
	return disk->queue->queuedata;
}

static inline drive_info_struct *get_drv(struct gendisk *disk)
{
	return disk->private_data;
}

/*
 * Open.  Make sure the device is really there.
 */
static int cciss_open(struct block_device *bdev, fmode_t mode)
{
	ctlr_info_t *h = get_host(bdev->bd_disk);
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
	if (drv->busy_configuring)
		return -EBUSY;
	/*
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work. Root is also allowed to open any
	 * volume that has a LUN ID, so it can issue IOCTL to reread the
	 * disk information.  I don't think I really like this
	 * but I'm already using way to many device nodes to claim another one
	 * for "raw controller".
	 */
	if (drv->heads == 0) {
		if (MINOR(bdev->bd_dev) != 0) {	/* not node 0? */
			/* if not node 0 make sure it is a partition = 0 */
			if (MINOR(bdev->bd_dev) & 0x0f) {
				return -ENXIO;
				/* if it is, make sure we have a LUN ID */
			} else if (memcmp(drv->LunID, CTLR_LUNID,
				sizeof(drv->LunID))) {
				return -ENXIO;
			}
		}
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}
	drv->usage_count++;
	h->usage_count++;
	return 0;
}

/* Serialized wrapper around cciss_open for the block layer. */
static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	mutex_lock(&cciss_mutex);
	ret = cciss_open(bdev, mode);
	mutex_unlock(&cciss_mutex);

	return ret;
}

/*
 * Close.  Sync first.
 */
static void cciss_release(struct gendisk *disk, fmode_t mode)
{
	ctlr_info_t *h;
	drive_info_struct *drv;

	mutex_lock(&cciss_mutex);
	h = get_host(disk);
	drv = get_drv(disk);
	dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
	drv->usage_count--;
	h->usage_count--;
	mutex_unlock(&cciss_mutex);
}

#ifdef CONFIG_COMPAT

static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
				  unsigned cmd, unsigned long arg);
static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
				      unsigned cmd, unsigned long arg);

/* 32-bit compat ioctl entry: layout-identical commands go straight to
 * cciss_ioctl; the two passthru commands need struct translation. */
static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return cciss_ioctl(bdev, mode, cmd, arg);

	case CCISS_PASSTHRU32:
		return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}

/* Translate a 32-bit CCISS_PASSTHRU into the native 64-bit layout on
 * the compat user stack, run it, and copy the error info back. */
static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
				  unsigned cmd, unsigned long arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |=
	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |=
	    copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |=
	    copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);	/* widen the 32-bit user pointer */
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |=
	    copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

/* Same translation as above for CCISS_BIG_PASSTHRU. */
static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
				      unsigned cmd, unsigned long arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |=
	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |=
	    copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |=
	    copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |=
	    copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
#endif

/* Report fake CHS geometry derived from the drive's configuration. */
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	if (!drv->cylinders)
		return -ENXIO;

	geo->heads = drv->heads;
	geo->sectors = drv->sectors;
	geo->cylinders = drv->cylinders;
	return 0;
}

/* After an ioctl-issued command completes, scan for a unit-attention
 * condition unless the target already reported CHECK_CONDITION. */
static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void)check_for_unit_attention(h, c);
}

/* CCISS_GETPCIINFO: report PCI domain/bus/devfn and board id. */
static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
{
	cciss_pci_info_struct pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
		return -EFAULT;
	return 0;
}

/* CCISS_GETINTINFO: read interrupt-coalescing settings from the
 * controller config table. */
static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
{
	cciss_coalint_struct intinfo;
	unsigned long flags;

	if (!argp)
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
	intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
	spin_unlock_irqrestore(&h->lock, flags);
	if (copy_to_user
	    (argp, &intinfo, sizeof(cciss_coalint_struct)))
		return -EFAULT;
	return 0;
}

/* CCISS_SETINTINFO: write coalescing settings, ring the config
 * doorbell, and poll until the controller acknowledges the change. */
static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
{
	cciss_coalint_struct intinfo;
	unsigned long flags;
	int i;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&intinfo, argp, sizeof(intinfo)))
		return -EFAULT;
	if ((intinfo.delay == 0) && (intinfo.count == 0))
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	/* Update the field, and then ring the doorbell */
	writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay));
	writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);

	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
			break;
		udelay(1000);	/* delay and try again */
	}
	spin_unlock_irqrestore(&h->lock, flags);
	if (i >= MAX_IOCTL_CONFIG_WAIT)
		return -EAGAIN;
	return 0;
}

/* CCISS_GETNODENAME: read the 16-byte server name from the config
 * table. */
static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
{
	NodeName_type NodeName;
	unsigned long flags;
	int i;

	if (!argp)
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < 16; i++)
		NodeName[i] = readb(&h->cfgtable->ServerName[i]);
	spin_unlock_irqrestore(&h->lock, flags);
	if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
		return -EFAULT;
	return 0;
}

/* CCISS_SETNODENAME: write the server name and wait for the doorbell
 * acknowledgement, as in cciss_setintinfo. */
static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
{
	NodeName_type NodeName;
	unsigned long flags;
	int i;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(NodeName, argp, sizeof(NodeName_type)))
		return -EFAULT;
	spin_lock_irqsave(&h->lock, flags);
	/* Update the field, and then ring the doorbell */
	for (i = 0; i < 16; i++)
		writeb(NodeName[i], &h->cfgtable->ServerName[i]);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
			break;
		udelay(1000);	/* delay and try again */
	}
	spin_unlock_irqrestore(&h->lock, flags);
	if (i >= MAX_IOCTL_CONFIG_WAIT)
		return -EAGAIN;
	return 0;
}

/* CCISS_GETHEARTBEAT: read the controller heartbeat counter. */
static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
{
	Heartbeat_type heartbeat;
	unsigned long flags;

	if (!argp)
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
		return -EFAULT;
	return 0;
}

/* CCISS_GETBUSTYPES: read the supported bus types bitmask. */
static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
{
	BusTypes_type BusTypes;
	unsigned long flags;

	if (!argp)
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	BusTypes = readl(&h->cfgtable->BusTypes);
	spin_unlock_irqrestore(&h->lock, flags);
	if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
		return -EFAULT;
	return 0;
}

/* CCISS_GETFIRMVER: report the 4-byte firmware version string. */
static int cciss_getfirmver(ctlr_info_t *h, void __user *argp)
{
	FirmwareVer_type firmware;

	if (!argp)
		return -EINVAL;
	memcpy(firmware, h->firm_ver, 4);

	if (copy_to_user
	    (argp, firmware, sizeof(FirmwareVer_type)))
		return -EFAULT;
	return 0;
}

/* CCISS_GETDRIVVER: report the driver version number. */
static int cciss_getdrivver(ctlr_info_t *h, void __user *argp)
{
	DriverVer_type DriverVer = DRIVER_VERSION;

	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}

/* CCISS_GETLUNINFO: report LUN id and open count for one drive.
 * NOTE(review): luninfo is stack-allocated and only partially filled
 * before being copied to userspace — looks like remaining fields of
 * LogvolInfo_struct are copied out uninitialized; verify against the
 * struct definition. */
static int cciss_getluninfo(ctlr_info_t *h,
	struct gendisk *disk, void __user *argp)
{
	LogvolInfo_struct luninfo;
	drive_info_struct *drv = get_drv(disk);

	if (!argp)
		return -EINVAL;
	memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID));
	luninfo.num_opens = drv->usage_count;
luninfo.num_parts = 0; if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct))) return -EFAULT; return 0; } static int cciss_passthru(ctlr_info_t *h, void __user *argp) { IOCTL_Command_struct iocommand; CommandList_struct *c; char *buff = NULL; u64bit temp64; DECLARE_COMPLETION_ONSTACK(wait); if (!argp) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; if (copy_from_user (&iocommand, argp, sizeof(IOCTL_Command_struct))) return -EFAULT; if ((iocommand.buf_size < 1) && (iocommand.Request.Type.Direction != XFER_NONE)) { return -EINVAL; } if (iocommand.buf_size > 0) { buff = kmalloc(iocommand.buf_size, GFP_KERNEL); if (buff == NULL) return -EFAULT; } if (iocommand.Request.Type.Direction == XFER_WRITE) { /* Copy the data into the buffer we created */ if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { kfree(buff); return -EFAULT; } } else { memset(buff, 0, iocommand.buf_size); } c = cmd_special_alloc(h); if (!c) { kfree(buff); return -ENOMEM; } /* Fill in the command type */ c->cmd_type = CMD_IOCTL_PEND; /* Fill in Command Header */ c->Header.ReplyQueue = 0; /* unused in simple mode */ if (iocommand.buf_size > 0) { /* buffer to fill */ c->Header.SGList = 1; c->Header.SGTotal = 1; } else { /* no buffers to fill */ c->Header.SGList = 0; c->Header.SGTotal = 0; } c->Header.LUN = iocommand.LUN_info; /* use the kernel address the cmd block for tag */ c->Header.Tag.lower = c->busaddr; /* Fill in Request block */ c->Request = iocommand.Request; /* Fill in the scatter gather information */ if (iocommand.buf_size > 0) { temp64.val = pci_map_single(h->pdev, buff, iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); c->SG[0].Addr.lower = temp64.val32.lower; c->SG[0].Addr.upper = temp64.val32.upper; c->SG[0].Len = iocommand.buf_size; c->SG[0].Ext = 0; /* we are not chaining */ } c->waiting = &wait; enqueue_cmd_and_start_io(h, c); wait_for_completion(&wait); /* unlock the buffers from DMA */ temp64.val32.lower = c->SG[0].Addr.lower; temp64.val32.upper = 
c->SG[0].Addr.upper; pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); /* Copy the error information out */ iocommand.error_info = *(c->err_info); if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) { kfree(buff); cmd_special_free(h, c); return -EFAULT; } if (iocommand.Request.Type.Direction == XFER_READ) { /* Copy the data out of the buffer we created */ if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { kfree(buff); cmd_special_free(h, c); return -EFAULT; } } kfree(buff); cmd_special_free(h, c); return 0; } static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp) { BIG_IOCTL_Command_struct *ioc; CommandList_struct *c; unsigned char **buff = NULL; int *buff_size = NULL; u64bit temp64; BYTE sg_used = 0; int status = 0; int i; DECLARE_COMPLETION_ONSTACK(wait); __u32 left; __u32 sz; BYTE __user *data_ptr; if (!argp) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); if (!ioc) { status = -ENOMEM; goto cleanup1; } if (copy_from_user(ioc, argp, sizeof(*ioc))) { status = -EFAULT; goto cleanup1; } if ((ioc->buf_size < 1) && (ioc->Request.Type.Direction != XFER_NONE)) { status = -EINVAL; goto cleanup1; } /* Check kmalloc limits using all SGs */ if (ioc->malloc_size > MAX_KMALLOC_SIZE) { status = -EINVAL; goto cleanup1; } if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { status = -EINVAL; goto cleanup1; } buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); if (!buff) { status = -ENOMEM; goto cleanup1; } buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); if (!buff_size) { status = -ENOMEM; goto cleanup1; } left = ioc->buf_size; data_ptr = ioc->buf; while (left) { sz = (left > ioc->malloc_size) ? 
			ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction == XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else {
			memset(buff[sg_used], 0, sz);
		}
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_special_alloc(h);
	if (!c) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = sg_used;
	c->Header.SGTotal = sg_used;
	c->Header.LUN = ioc->LUN_info;
	c->Header.Tag.lower = c->busaddr;

	c->Request = ioc->Request;
	/* map each chunk as one SG entry */
	for (i = 0; i < sg_used; i++) {
		temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i],
				    PCI_DMA_BIDIRECTIONAL);
		c->SG[i].Addr.lower = temp64.val32.lower;
		c->SG[i].Addr.upper = temp64.val32.upper;
		c->SG[i].Len = buff_size[i];
		c->SG[i].Ext = 0;	/* we are not chaining */
	}
	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
	/* unlock the buffers from DMA */
	for (i = 0; i < sg_used; i++) {
		temp64.val32.lower = c->SG[i].Addr.lower;
		temp64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
			buff_size[i], PCI_DMA_BIDIRECTIONAL);
	}
	check_ioctl_unit_attention(h, c);
	/* Copy the error information out */
	ioc->error_info = *(c->err_info);
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		cmd_special_free(h, c);
		status = -EFAULT;
		goto cleanup1;
	}
	if (ioc->Request.Type.Direction == XFER_READ) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				cmd_special_free(h, c);
				status = -EFAULT;
				goto cleanup1;
			}
			ptr += buff_size[i];
		}
	}
	cmd_special_free(h, c);
	status = 0;
cleanup1:
	/* single exit point frees every chunk plus the bookkeeping arrays */
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}

/* Main ioctl dispatcher for the cciss block devices. */
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	ctlr_info_t *h = get_host(disk);
	void __user *argp = (void __user *)arg;

	dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
		cmd, arg);
	switch (cmd) {
	case CCISS_GETPCIINFO:
		return cciss_getpciinfo(h, argp);
	case CCISS_GETINTINFO:
		return cciss_getintinfo(h, argp);
	case CCISS_SETINTINFO:
		return cciss_setintinfo(h, argp);
	case CCISS_GETNODENAME:
		return cciss_getnodename(h, argp);
	case CCISS_SETNODENAME:
		return cciss_setnodename(h, argp);
	case CCISS_GETHEARTBEAT:
		return cciss_getheartbeat(h, argp);
	case CCISS_GETBUSTYPES:
		return cciss_getbustypes(h, argp);
	case CCISS_GETFIRMVER:
		return cciss_getfirmver(h, argp);
	case CCISS_GETDRIVVER:
		return cciss_getdrivver(h, argp);
	case CCISS_DEREGDISK:
	case CCISS_REGNEWD:
	case CCISS_REVALIDVOLS:
		return rebuild_lun_table(h, 0, 1);
	case CCISS_GETLUNINFO:
		return cciss_getluninfo(h, disk, argp);
	case CCISS_PASSTHRU:
		return cciss_passthru(h, argp);
	case CCISS_BIG_PASSTHRU:
		return cciss_bigpassthru(h, argp);

	/* scsi_cmd_blk_ioctl handles these, below, though some are not */
	/* very meaningful for cciss.  SG_IO is the main one people want. */

	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SG_IO:
	case SCSI_IOCTL_SEND_COMMAND:
		return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);

	/* scsi_cmd_blk_ioctl would normally handle these, below, but */
	/* they aren't a good fit for cciss, as CD-ROMs are */
	/* not supported, and we don't have any bus/target/lun */
	/* which we present to the kernel. */

	case CDROM_SEND_PACKET:
	case CDROMCLOSETRAY:
	case CDROMEJECT:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	default:
		return -ENOTTY;
	}
}

/* Round-robin restart of per-drive request queues once command slots
 * free up; called with h->lock held from the completion path. */
static void cciss_check_queues(ctlr_info_t *h)
{
	int start_queue = h->next_to_run;
	int i;

	/* check to see if we have maxed out the number of commands that can
	 * be placed on the queue.  If so then exit.
	 We do this check here
	 * in case the interrupt we serviced was from an ioctl and did not
	 * free any new commands.
	 */
	if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
		return;

	/* We have room on the queue for more commands.  Now we need to queue
	 * them up.  We will also keep track of the next queue to run so
	 * that every queue gets a chance to be started first.
	 */
	for (i = 0; i < h->highest_lun + 1; i++) {
		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
		/* make sure the disk has been added and the drive is real
		 * because this can be called from the middle of init_one.
		 */
		if (!h->drv[curr_queue])
			continue;
		if (!(h->drv[curr_queue]->queue) ||
			!(h->drv[curr_queue]->heads))
			continue;
		blk_start_queue(h->gendisk[curr_queue]->queue);

		/* check to see if we have maxed out the number of commands
		 * that can be placed on the queue.
		 */
		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
			if (curr_queue == start_queue) {
				h->next_to_run =
				    (start_queue + 1) % (h->highest_lun + 1);
				break;
			} else {
				h->next_to_run = curr_queue;
				break;
			}
		}
	}
}

/* Block-layer softirq completion: unmap the request's DMA scatter list
 * (following any chained SG block), end the request, then free the
 * command slot and kick the per-drive queues. */
static void cciss_softirq_done(struct request *rq)
{
	CommandList_struct *c = rq->completion_data;
	ctlr_info_t *h = hba[c->ctlr];
	SGDescriptor_struct *curr_sg = c->SG;
	u64bit temp64;
	unsigned long flags;
	int i, ddir;
	int sg_index = 0;

	if (c->Request.Type.Direction == XFER_READ)
		ddir = PCI_DMA_FROMDEVICE;
	else
		ddir = PCI_DMA_TODEVICE;

	/* command did not need to be retried */
	/* unmap the DMA mapping for all the scatter gather elements */
	for (i = 0; i < c->Header.SGList; i++) {
		if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
			cciss_unmap_sg_chain_block(h, c);
			/* Point to the next block */
			curr_sg = h->cmd_sg_list[c->cmdindex];
			sg_index = 0;
		}
		temp64.val32.lower = curr_sg[sg_index].Addr.lower;
		temp64.val32.upper = curr_sg[sg_index].Addr.upper;
		pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len,
				ddir);
		++sg_index;
	}

	dev_dbg(&h->pdev->dev, "Done with %p\n", rq);

	/* set the residual count for pc requests */
	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
		rq->resid_len = c->err_info->ResidualCnt;

	blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);

	spin_lock_irqsave(&h->lock, flags);
	cmd_free(h, c);
	cciss_check_queues(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

/* Copy a logical drive's 8-byte LUN address into a scsi3addr buffer. */
static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
	unsigned char scsi3addr[], uint32_t log_unit)
{
	memcpy(scsi3addr, h->drv[log_unit]->LunID,
		sizeof(h->drv[log_unit]->LunID));
}

/* This function gets the SCSI vendor, model, and revision of a logical drive
 * via the inquiry page 0.  Model, vendor, and rev are set to empty strings if
 * they cannot be read.
 */
static void cciss_get_device_descr(ctlr_info_t *h, int logvol,
				   char *vendor, char *model, char *rev)
{
	int rc;
	InquiryData_struct *inq_buf;
	unsigned char scsi3addr[8];

	*vendor = '\0';
	*model = '\0';
	*rev = '\0';

	inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (!inq_buf)
		return;

	log_unit_to_scsi3addr(h, scsi3addr, logvol);
	rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0,
			scsi3addr, TYPE_CMD);
	if (rc == IO_OK) {
		/* offsets per standard INQUIRY data layout */
		memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
		vendor[VENDOR_LEN] = '\0';
		memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
		model[MODEL_LEN] = '\0';
		memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
		rev[REV_LEN] = '\0';
	}

	kfree(inq_buf);
	return;
}

/* This function gets the serial number of a logical drive via
 * inquiry page 0x83.  Serial no. is 16 bytes.  If the serial
 * number cannot be had, for whatever reason, 16 bytes of 0xff
 * are returned instead.
 * NOTE(review): the 0xff fill only survives if the kzalloc below fails;
 * after a failed inquiry the buffer is the zeros from the second
 * memset, not 0xff — confirm which behavior callers rely on.
 */
static void
cciss_get_serial_no(ctlr_info_t *h, int logvol,
			unsigned char *serial_no, int buflen)
{
#define PAGE_83_INQ_BYTES 64
	int rc;
	unsigned char *buf;
	unsigned char scsi3addr[8];

	if (buflen > 16)
		buflen = 16;
	memset(serial_no, 0xff, buflen);
	buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
	if (!buf)
		return;
	memset(serial_no, 0, buflen);
	log_unit_to_scsi3addr(h, scsi3addr, logvol);
	rc = sendcmd_withirq(h, CISS_INQUIRY, buf,
		PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
	if (rc == IO_OK)
		memcpy(serial_no, &buf[8], buflen);
	kfree(buf);
	return;
}

/*
 * cciss_add_disk sets up the block device queue for a logical drive
 */
static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
				int drv_index)
{
	disk->queue = blk_init_queue(do_cciss_request, &h->lock);
	if (!disk->queue)
		goto init_queue_failure;
	sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
	disk->major = h->major;
	disk->first_minor = drv_index << NWD_SHIFT;
	disk->fops = &cciss_fops;
	if (cciss_create_ld_sysfs_entry(h, drv_index))
		goto cleanup_queue;
	disk->private_data = h->drv[drv_index];
	disk->driverfs_dev = &h->drv[drv_index]->dev;

	/* Set up queue information */
	blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);

	/* This is a hardware imposed limit. */
	blk_queue_max_segments(disk->queue, h->maxsgentries);

	blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);

	blk_queue_softirq_done(disk->queue, cciss_softirq_done);

	disk->queue->queuedata = h;

	blk_queue_logical_block_size(disk->queue,
				     h->drv[drv_index]->block_size);

	/* Make sure all queue data is written out before */
	/* setting h->drv[drv_index]->queue, as setting this */
	/* allows the interrupt handler to start the queue */
	wmb();
	h->drv[drv_index]->queue = disk->queue;
	add_disk(disk);
	return 0;

cleanup_queue:
	blk_cleanup_queue(disk->queue);
	disk->queue = NULL;
init_queue_failure:
	return -1;
}

/* This function will check the usage_count of the drive to be updated/added.
* If the usage_count is zero and it is a heretofore unknown drive, or,
 * the drive's capacity, geometry, or serial number has changed,
 * then the drive information will be updated and the disk will be
 * re-registered with the kernel.  If these conditions don't hold,
 * then it will be left alone for the next reboot.  The exception to this
 * is disk 0 which will always be left registered with the kernel since it
 * is also the controller node.  Any changes to disk 0 will show up on
 * the next reboot.
 */
static void cciss_update_drive_info(ctlr_info_t *h, int drv_index,
	int first_time, int via_ioctl)
{
	struct gendisk *disk;
	InquiryData_struct *inq_buff = NULL;
	unsigned int block_size;
	sector_t total_size;
	unsigned long flags = 0;
	int ret = 0;
	drive_info_struct *drvinfo;

	/* Get information about the disk and modify the driver structure */
	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL);
	if (inq_buff == NULL || drvinfo == NULL)
		goto mem_msg;

	/* testing to see if 16-byte CDBs are already being used */
	if (h->cciss_read == CCISS_READ_16) {
		cciss_read_capacity_16(h, drv_index, &total_size, &block_size);
	} else {
		cciss_read_capacity(h, drv_index, &total_size, &block_size);
		/* if read_capacity returns all F's this volume is >2TB */
		/* in size so we switch to 16-byte CDB's for all */
		/* read/write ops */
		if (total_size == 0xFFFFFFFFULL) {
			cciss_read_capacity_16(h, drv_index,
				&total_size, &block_size);
			h->cciss_read = CCISS_READ_16;
			h->cciss_write = CCISS_WRITE_16;
		} else {
			h->cciss_read = CCISS_READ_10;
			h->cciss_write = CCISS_WRITE_10;
		}
	}

	cciss_geometry_inquiry(h, drv_index, total_size, block_size,
			       inq_buff, drvinfo);
	drvinfo->block_size = block_size;
	/* READ CAPACITY reports the last addressable block, hence +1 */
	drvinfo->nr_blocks = total_size + 1;

	cciss_get_device_descr(h, drv_index, drvinfo->vendor,
				drvinfo->model, drvinfo->rev);
	cciss_get_serial_no(h, drv_index, drvinfo->serial_no,
			sizeof(drvinfo->serial_no));
	/* Save the lunid in case we deregister the disk, below. */
	memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
		sizeof(drvinfo->LunID));

	/* Is it the same disk we already know, and nothing's changed? */
	if (h->drv[drv_index]->raid_level != -1 &&
		((memcmp(drvinfo->serial_no,
			h->drv[drv_index]->serial_no, 16) == 0) &&
		drvinfo->block_size == h->drv[drv_index]->block_size &&
		drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
		drvinfo->heads == h->drv[drv_index]->heads &&
		drvinfo->sectors == h->drv[drv_index]->sectors &&
		drvinfo->cylinders == h->drv[drv_index]->cylinders))
			/* The disk is unchanged, nothing to update */
			goto freeret;

	/* If we get here it's not the same disk, or something's changed,
	 * so we need to * deregister it, and re-register it, if it's not
	 * in use.
	 * If the disk already exists then deregister it before proceeding
	 * (unless it's the first disk (for the controller node).
	 */
	if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
		dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index);
		spin_lock_irqsave(&h->lock, flags);
		h->drv[drv_index]->busy_configuring = 1;
		spin_unlock_irqrestore(&h->lock, flags);

		/* deregister_disk sets h->drv[drv_index]->queue = NULL
		 * which keeps the interrupt handler from starting
		 * the queue.
		 */
		ret = deregister_disk(h, drv_index, 0, via_ioctl);
	}

	/* If the disk is in use return */
	if (ret)
		goto freeret;

	/* Save the new information from cciss_geometry_inquiry
	 * and serial number inquiry.  If the disk was deregistered
	 * above, then h->drv[drv_index] will be NULL.
	 */
	if (h->drv[drv_index] == NULL) {
		drvinfo->device_initialized = 0;
		h->drv[drv_index] = drvinfo;
		drvinfo = NULL; /* so it won't be freed below. */
	} else {
		/* special case for cxd0 */
		h->drv[drv_index]->block_size = drvinfo->block_size;
		h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
		h->drv[drv_index]->heads = drvinfo->heads;
		h->drv[drv_index]->sectors = drvinfo->sectors;
		h->drv[drv_index]->cylinders = drvinfo->cylinders;
		h->drv[drv_index]->raid_level = drvinfo->raid_level;
		memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
		memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
			VENDOR_LEN + 1);
		memcpy(h->drv[drv_index]->model, drvinfo->model,
			MODEL_LEN + 1);
		memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
	}

	++h->num_luns;
	disk = h->gendisk[drv_index];
	set_capacity(disk, h->drv[drv_index]->nr_blocks);

	/* If it's not disk 0 (drv_index != 0)
	 * or if it was disk 0, but there was previously
	 * no actual corresponding configured logical drive
	 * (raid_leve == -1) then we want to update the
	 * logical drive's information.
	 */
	if (drv_index || first_time) {
		if (cciss_add_disk(h, disk, drv_index) != 0) {
			cciss_free_gendisk(h, drv_index);
			cciss_free_drive_info(h, drv_index);
			dev_warn(&h->pdev->dev, "could not update disk %d\n",
				drv_index);
			--h->num_luns;
		}
	}

freeret:
	kfree(inq_buff);
	kfree(drvinfo);
	return;
mem_msg:
	dev_err(&h->pdev->dev, "out of memory\n");
	goto freeret;
}

/* This function will find the first index of the controllers drive array
 * that has a null drv pointer and allocate the drive info struct and
 * will return that index  This is where new drives will be added.
 * If the index to be returned is greater than the highest_lun index for
 * the controller then highest_lun is set to this new index.
 * If there are no available indexes or if the allocation fails, then -1
 * is returned.
 * "controller_node" is used to know if this is a real
 * logical drive, or just the controller node, which determines if this
 * counts towards highest_lun.
*/ static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node) { int i; drive_info_struct *drv; /* Search for an empty slot for our drive info */ for (i = 0; i < CISS_MAX_LUN; i++) { /* if not cxd0 case, and it's occupied, skip it. */ if (h->drv[i] && i != 0) continue; /* * If it's cxd0 case, and drv is alloc'ed already, and a * disk is configured there, skip it. */ if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1) continue; /* * We've found an empty slot. Update highest_lun * provided this isn't just the fake cxd0 controller node. */ if (i > h->highest_lun && !controller_node) h->highest_lun = i; /* If adding a real disk at cxd0, and it's already alloc'ed */ if (i == 0 && h->drv[i] != NULL) return i; /* * Found an empty slot, not already alloc'ed. Allocate it. * Mark it with raid_level == -1, so we know it's new later on. */ drv = kzalloc(sizeof(*drv), GFP_KERNEL); if (!drv) return -1; drv->raid_level = -1; /* so we know it's new */ h->drv[i] = drv; return i; } return -1; } static void cciss_free_drive_info(ctlr_info_t *h, int drv_index) { kfree(h->drv[drv_index]); h->drv[drv_index] = NULL; } static void cciss_free_gendisk(ctlr_info_t *h, int drv_index) { put_disk(h->gendisk[drv_index]); h->gendisk[drv_index] = NULL; } /* cciss_add_gendisk finds a free hba[]->drv structure * and allocates a gendisk if needed, and sets the lunid * in the drvinfo structure. It returns the index into * the ->drv[] array, or -1 if none are free. * is_controller_node indicates whether highest_lun should * count this disk, or if it's only being added to provide * a means to talk to the controller in case no logical * drives have yet been configured. 
 */
static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
	int controller_node)
{
	int drv_index;

	drv_index = cciss_alloc_drive_info(h, controller_node);
	if (drv_index == -1)
		return -1;

	/*Check if the gendisk needs to be allocated */
	if (!h->gendisk[drv_index]) {
		h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
		if (!h->gendisk[drv_index]) {
			dev_err(&h->pdev->dev,
				"could not allocate a new disk %d\n",
				drv_index);
			goto err_free_drive_info;
		}
	}
	memcpy(h->drv[drv_index]->LunID, lunid,
		sizeof(h->drv[drv_index]->LunID));
	if (cciss_create_ld_sysfs_entry(h, drv_index))
		goto err_free_disk;
	/* Don't need to mark this busy because nobody */
	/* else knows about this disk yet to contend */
	/* for access to it. */
	h->drv[drv_index]->busy_configuring = 0;
	wmb();
	return drv_index;

err_free_disk:
	cciss_free_gendisk(h, drv_index);
err_free_drive_info:
	cciss_free_drive_info(h, drv_index);
	return -1;
}

/* This is for the special case of a controller which
 * has no logical drives.  In this case, we still need
 * to register a disk so the controller can be accessed
 * by the Array Config Utility.
 */
static void cciss_add_controller_node(ctlr_info_t *h)
{
	struct gendisk *disk;
	int drv_index;

	if (h->gendisk[0] != NULL) /* already did this?  Then bail. */
		return;

	drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1);
	if (drv_index == -1)
		goto error;
	/* zero-size placeholder: present only so the ACU can talk to us */
	h->drv[drv_index]->block_size = 512;
	h->drv[drv_index]->nr_blocks = 0;
	h->drv[drv_index]->heads = 0;
	h->drv[drv_index]->sectors = 0;
	h->drv[drv_index]->cylinders = 0;
	h->drv[drv_index]->raid_level = -1;
	memset(h->drv[drv_index]->serial_no, 0, 16);
	disk = h->gendisk[drv_index];
	if (cciss_add_disk(h, disk, drv_index) == 0)
		return;
	cciss_free_gendisk(h, drv_index);
	cciss_free_drive_info(h, drv_index);
error:
	dev_warn(&h->pdev->dev, "could not add disk 0.\n");
	return;
}

/* This function will add and remove logical drives from the Logical
 * drive array of the controller and maintain persistency of ordering
 * so that mount points are preserved until the next reboot.  This allows
 * for the removal of logical drives in the middle of the drive array
 * without a re-ordering of those drives.
 * INPUT
 * h	= The controller to perform the operations on
 */
static int rebuild_lun_table(ctlr_info_t *h, int first_time,
	int via_ioctl)
{
	int num_luns;
	ReportLunData_struct *ld_buff = NULL;
	int return_code;
	int listlength = 0;
	int i;
	int drv_found;
	int drv_index = 0;
	unsigned char lunid[8] = CTLR_LUNID;
	unsigned long flags;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/* Set busy_configuring flag for this operation */
	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -EBUSY;
	}
	h->busy_configuring = 1;
	spin_unlock_irqrestore(&h->lock, flags);

	ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
	if (ld_buff == NULL)
		goto mem_msg;

	return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff,
				sizeof(ReportLunData_struct),
				0, CTLR_LUNID, TYPE_CMD);

	if (return_code == IO_OK)
		listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
	else {	/* reading number of logical volumes failed */
		dev_warn(&h->pdev->dev,
			"report logical volume command failed\n");
		listlength = 0;
		goto freeret;
	}

	num_luns = listlength / 8;	/* 8 bytes per entry */
	if (num_luns > CISS_MAX_LUN) {
		num_luns = CISS_MAX_LUN;
		dev_warn(&h->pdev->dev, "more luns configured"
			" on controller than can be handled by"
			" this driver.\n");
	}

	if (num_luns == 0)
		cciss_add_controller_node(h);

	/* Compare controller drive array to driver's drive array
	 * to see if any drives are missing on the controller due
	 * to action of Array Config Utility (user deletes drive)
	 * and deregister logical drives which have disappeared.
	 */
	for (i = 0; i <= h->highest_lun; i++) {
		int j;
		drv_found = 0;

		/* skip holes in the array from already deleted drives */
		if (h->drv[i] == NULL)
			continue;

		for (j = 0; j < num_luns; j++) {
			memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
			if (memcmp(h->drv[i]->LunID, lunid,
				sizeof(lunid)) == 0) {
				drv_found = 1;
				break;
			}
		}
		if (!drv_found) {
			/* Deregister it from the OS, it's gone. */
			spin_lock_irqsave(&h->lock, flags);
			h->drv[i]->busy_configuring = 1;
			spin_unlock_irqrestore(&h->lock, flags);
			return_code = deregister_disk(h, i, 1, via_ioctl);
			if (h->drv[i] != NULL)
				h->drv[i]->busy_configuring = 0;
		}
	}

	/* Compare controller drive array to driver's drive array.
	 * Check for updates in the drive information and any new drives
	 * on the controller due to ACU adding logical drives, or changing
	 * a logical drive's size, etc.  Reregister any new/changed drives
	 */
	for (i = 0; i < num_luns; i++) {
		int j;

		drv_found = 0;

		memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid));
		/* Find if the LUN is already in the drive array
		 * of the driver.  If so then update its info
		 * if not in use.  If it does not exist then find
		 * the first free index and add it.
		 */
		for (j = 0; j <= h->highest_lun; j++) {
			if (h->drv[j] != NULL &&
				memcmp(h->drv[j]->LunID, lunid,
					sizeof(h->drv[j]->LunID)) == 0) {
				drv_index = j;
				drv_found = 1;
				break;
			}
		}

		/* check if the drive was found already in the array */
		if (!drv_found) {
			drv_index = cciss_add_gendisk(h, lunid, 0);
			if (drv_index == -1)
				goto freeret;
		}
		cciss_update_drive_info(h, drv_index, first_time, via_ioctl);
	}	/* end for */

freeret:
	kfree(ld_buff);
	h->busy_configuring = 0;
	/* We return -1 here to tell the ACU that we have registered/updated
	 * all of the drives that we can and to keep it from calling us
	 * additional times.
	 */
	return -1;
mem_msg:
	dev_err(&h->pdev->dev, "out of memory\n");
	h->busy_configuring = 0;
	goto freeret;
}

/* Reset a drive's size/geometry/identity fields to "unconfigured". */
static void cciss_clear_drive_info(drive_info_struct *drive_info)
{
	/* zero out the disk size info */
	drive_info->nr_blocks = 0;
	drive_info->block_size = 0;
	drive_info->heads = 0;
	drive_info->sectors = 0;
	drive_info->cylinders = 0;
	drive_info->raid_level = -1;
	memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
	memset(drive_info->model, 0, sizeof(drive_info->model));
	memset(drive_info->rev, 0, sizeof(drive_info->rev));
	memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
	/*
	 * don't clear the LUNID though, we need to remember which
	 * one this one is.
	 */
}

/* This function will deregister the disk and it's queue from the
 * kernel.  It must be called with the controller lock held and the
 * drv structures busy_configuring flag set.  It's parameters are:
 *
 * disk = This is the disk to be deregistered
 * drv  = This is the drive_info_struct associated with the disk to be
 *        deregistered.  It contains information about the disk used
 *        by the driver.
 * clear_all = This flag determines whether or not the disk information
 *             is going to be completely cleared out and the highest_lun
 *             reset.  Sometimes we want to clear out information about
 *             the disk in preparation for re-adding it.
In this case * the highest_lun should be left unchanged and the LunID * should not be cleared. * via_ioctl * This indicates whether we've reached this path via ioctl. * This affects the maximum usage count allowed for c0d0 to be messed with. * If this path is reached via ioctl(), then the max_usage_count will * be 1, as the process calling ioctl() has got to have the device open. * If we get here via sysfs, then the max usage count will be zero. */ static int deregister_disk(ctlr_info_t *h, int drv_index, int clear_all, int via_ioctl) { int i; struct gendisk *disk; drive_info_struct *drv; int recalculate_highest_lun; if (!capable(CAP_SYS_RAWIO)) return -EPERM; drv = h->drv[drv_index]; disk = h->gendisk[drv_index]; /* make sure logical volume is NOT is use */ if (clear_all || (h->gendisk[0] == disk)) { if (drv->usage_count > via_ioctl) return -EBUSY; } else if (drv->usage_count > 0) return -EBUSY; recalculate_highest_lun = (drv == h->drv[h->highest_lun]); /* invalidate the devices and deregister the disk. If it is disk * zero do not deregister it but just zero out it's values. This * allows us to delete disk zero but keep the controller registered. */ if (h->gendisk[0] != disk) { struct request_queue *q = disk->queue; if (disk->flags & GENHD_FL_UP) { cciss_destroy_ld_sysfs_entry(h, drv_index, 0); del_gendisk(disk); } if (q) blk_cleanup_queue(q); /* If clear_all is set then we are deleting the logical * drive, not just refreshing its info. For drives * other than disk 0 we will call put_disk. We do not * do this for disk 0 as we need it to be able to * configure the controller. */ if (clear_all){ /* This isn't pretty, but we need to find the * disk in our array and NULL our the pointer. * This is so that we will call alloc_disk if * this index is used again later. 
*/ for (i=0; i < CISS_MAX_LUN; i++){ if (h->gendisk[i] == disk) { h->gendisk[i] = NULL; break; } } put_disk(disk); } } else { set_capacity(disk, 0); cciss_clear_drive_info(drv); } --h->num_luns; /* if it was the last disk, find the new hightest lun */ if (clear_all && recalculate_highest_lun) { int newhighest = -1; for (i = 0; i <= h->highest_lun; i++) { /* if the disk has size > 0, it is available */ if (h->drv[i] && h->drv[i]->heads) newhighest = i; } h->highest_lun = newhighest; } return 0; } static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, int cmd_type) { u64bit buff_dma_handle; int status = IO_OK; c->cmd_type = CMD_IOCTL_PEND; c->Header.ReplyQueue = 0; if (buff != NULL) { c->Header.SGList = 1; c->Header.SGTotal = 1; } else { c->Header.SGList = 0; c->Header.SGTotal = 0; } c->Header.Tag.lower = c->busaddr; memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); c->Request.Type.Type = cmd_type; if (cmd_type == TYPE_CMD) { switch (cmd) { case CISS_INQUIRY: /* are we trying to read a vital product page */ if (page_code != 0) { c->Request.CDB[1] = 0x01; c->Request.CDB[2] = page_code; } c->Request.CDBLen = 6; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; c->Request.Timeout = 0; c->Request.CDB[0] = CISS_INQUIRY; c->Request.CDB[4] = size & 0xFF; break; case CISS_REPORT_LOG: case CISS_REPORT_PHYS: /* Talking to controller so It's a physical command mode = 00 target = 0. Nothing to write. 
*/ c->Request.CDBLen = 12; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0xFF; c->Request.CDB[9] = size & 0xFF; break; case CCISS_READ_CAPACITY: c->Request.CDBLen = 10; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; break; case CCISS_READ_CAPACITY_16: c->Request.CDBLen = 16; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; c->Request.CDB[1] = 0x10; c->Request.CDB[10] = (size >> 24) & 0xFF; c->Request.CDB[11] = (size >> 16) & 0xFF; c->Request.CDB[12] = (size >> 8) & 0xFF; c->Request.CDB[13] = size & 0xFF; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; break; case CCISS_CACHE_FLUSH: c->Request.CDBLen = 12; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_WRITE; c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_WRITE; c->Request.CDB[6] = BMIC_CACHE_FLUSH; c->Request.CDB[7] = (size >> 8) & 0xFF; c->Request.CDB[8] = size & 0xFF; break; case TEST_UNIT_READY: c->Request.CDBLen = 6; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_NONE; c->Request.Timeout = 0; break; default: dev_warn(&h->pdev->dev, "Unknown Command 0x%c\n", cmd); return IO_ERROR; } } else if (cmd_type == TYPE_MSG) { switch (cmd) { case CCISS_ABORT_MSG: c->Request.CDBLen = 12; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_WRITE; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; /* abort */ c->Request.CDB[1] = 0; /* abort a command */ /* buff contains the tag of the command to abort */ memcpy(&c->Request.CDB[4], buff, 8); break; case CCISS_RESET_MSG: c->Request.CDBLen = 16; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_NONE; 
c->Request.Timeout = 0; memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c->Request.CDB[0] = cmd; /* reset */ c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET; break; case CCISS_NOOP_MSG: c->Request.CDBLen = 1; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_WRITE; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; break; default: dev_warn(&h->pdev->dev, "unknown message type %d\n", cmd); return IO_ERROR; } } else { dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); return IO_ERROR; } /* Fill in the scatter gather information */ if (size > 0) { buff_dma_handle.val = (__u64) pci_map_single(h->pdev, buff, size, PCI_DMA_BIDIRECTIONAL); c->SG[0].Addr.lower = buff_dma_handle.val32.lower; c->SG[0].Addr.upper = buff_dma_handle.val32.upper; c->SG[0].Len = size; c->SG[0].Ext = 0; /* we are not chaining */ } return status; } static int cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr, u8 reset_type) { CommandList_struct *c; int return_status; c = cmd_alloc(h); if (!c) return -ENOMEM; return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0, CTLR_LUNID, TYPE_MSG); c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ if (return_status != IO_OK) { cmd_special_free(h, c); return return_status; } c->waiting = NULL; enqueue_cmd_and_start_io(h, c); /* Don't wait for completion, the reset won't complete. Don't free * the command either. This is the last command we will send before * re-initializing everything, so it doesn't matter and won't leak. 
*/ return 0; } static int check_target_status(ctlr_info_t *h, CommandList_struct *c) { switch (c->err_info->ScsiStatus) { case SAM_STAT_GOOD: return IO_OK; case SAM_STAT_CHECK_CONDITION: switch (0xf & c->err_info->SenseInfo[2]) { case 0: return IO_OK; /* no sense */ case 1: return IO_OK; /* recovered error */ default: if (check_for_unit_attention(h, c)) return IO_NEEDS_RETRY; dev_warn(&h->pdev->dev, "cmd 0x%02x " "check condition, sense key = 0x%02x\n", c->Request.CDB[0], c->err_info->SenseInfo[2]); } break; default: dev_warn(&h->pdev->dev, "cmd 0x%02x" "scsi status = 0x%02x\n", c->Request.CDB[0], c->err_info->ScsiStatus); break; } return IO_ERROR; } static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c) { int return_status = IO_OK; if (c->err_info->CommandStatus == CMD_SUCCESS) return IO_OK; switch (c->err_info->CommandStatus) { case CMD_TARGET_STATUS: return_status = check_target_status(h, c); break; case CMD_DATA_UNDERRUN: case CMD_DATA_OVERRUN: /* expected for inquiry and report lun commands */ break; case CMD_INVALID: dev_warn(&h->pdev->dev, "cmd 0x%02x is " "reported invalid\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_PROTOCOL_ERR: dev_warn(&h->pdev->dev, "cmd 0x%02x has " "protocol error\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_HARDWARE_ERR: dev_warn(&h->pdev->dev, "cmd 0x%02x had " " hardware error\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_CONNECTION_LOST: dev_warn(&h->pdev->dev, "cmd 0x%02x had " "connection lost\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_ABORTED: dev_warn(&h->pdev->dev, "cmd 0x%02x was " "aborted\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_ABORT_FAILED: dev_warn(&h->pdev->dev, "cmd 0x%02x reports " "abort failed\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_UNSOLICITED_ABORT: dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n", c->Request.CDB[0]); return_status = IO_NEEDS_RETRY; break; 
case CMD_UNABORTABLE: dev_warn(&h->pdev->dev, "cmd unabortable\n"); return_status = IO_ERROR; break; default: dev_warn(&h->pdev->dev, "cmd 0x%02x returned " "unknown status %x\n", c->Request.CDB[0], c->err_info->CommandStatus); return_status = IO_ERROR; } return return_status; } static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, int attempt_retry) { DECLARE_COMPLETION_ONSTACK(wait); u64bit buff_dma_handle; int return_status = IO_OK; resend_cmd2: c->waiting = &wait; enqueue_cmd_and_start_io(h, c); wait_for_completion(&wait); if (c->err_info->CommandStatus == 0 || !attempt_retry) goto command_done; return_status = process_sendcmd_error(h, c); if (return_status == IO_NEEDS_RETRY && c->retry_count < MAX_CMD_RETRIES) { dev_warn(&h->pdev->dev, "retrying 0x%02x\n", c->Request.CDB[0]); c->retry_count++; /* erase the old error information */ memset(c->err_info, 0, sizeof(ErrorInfo_struct)); return_status = IO_OK; INIT_COMPLETION(wait); goto resend_cmd2; } command_done: /* unlock the buffers from DMA */ buff_dma_handle.val32.lower = c->SG[0].Addr.lower; buff_dma_handle.val32.upper = c->SG[0].Addr.upper; pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); return return_status; } static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, __u8 page_code, unsigned char scsi3addr[], int cmd_type) { CommandList_struct *c; int return_status; c = cmd_special_alloc(h); if (!c) return -ENOMEM; return_status = fill_cmd(h, c, cmd, buff, size, page_code, scsi3addr, cmd_type); if (return_status == IO_OK) return_status = sendcmd_withirq_core(h, c, 1); cmd_special_free(h, c); return return_status; } static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, sector_t total_size, unsigned int block_size, InquiryData_struct *inq_buff, drive_info_struct *drv) { int return_code; unsigned long t; unsigned char scsi3addr[8]; memset(inq_buff, 0, sizeof(InquiryData_struct)); log_unit_to_scsi3addr(h, scsi3addr, 
logvol); return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD); if (return_code == IO_OK) { if (inq_buff->data_byte[8] == 0xFF) { dev_warn(&h->pdev->dev, "reading geometry failed, volume " "does not support reading geometry\n"); drv->heads = 255; drv->sectors = 32; /* Sectors per track */ drv->cylinders = total_size + 1; drv->raid_level = RAID_UNKNOWN; } else { drv->heads = inq_buff->data_byte[6]; drv->sectors = inq_buff->data_byte[7]; drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8; drv->cylinders += inq_buff->data_byte[5]; drv->raid_level = inq_buff->data_byte[8]; } drv->block_size = block_size; drv->nr_blocks = total_size + 1; t = drv->heads * drv->sectors; if (t > 1) { sector_t real_size = total_size + 1; unsigned long rem = sector_div(real_size, t); if (rem) real_size++; drv->cylinders = real_size; } } else { /* Get geometry failed */ dev_warn(&h->pdev->dev, "reading geometry failed\n"); } } static void cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size, unsigned int *block_size) { ReadCapdata_struct *buf; int return_code; unsigned char scsi3addr[8]; buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); if (!buf) { dev_warn(&h->pdev->dev, "out of memory\n"); return; } log_unit_to_scsi3addr(h, scsi3addr, logvol); return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf, sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD); if (return_code == IO_OK) { *total_size = be32_to_cpu(*(__be32 *) buf->total_size); *block_size = be32_to_cpu(*(__be32 *) buf->block_size); } else { /* read capacity command failed */ dev_warn(&h->pdev->dev, "read capacity failed\n"); *total_size = 0; *block_size = BLOCK_SIZE; } kfree(buf); } static void cciss_read_capacity_16(ctlr_info_t *h, int logvol, sector_t *total_size, unsigned int *block_size) { ReadCapdata_struct_16 *buf; int return_code; unsigned char scsi3addr[8]; buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); if (!buf) { dev_warn(&h->pdev->dev, 
"out of memory\n"); return; } log_unit_to_scsi3addr(h, scsi3addr, logvol); return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16, buf, sizeof(ReadCapdata_struct_16), 0, scsi3addr, TYPE_CMD); if (return_code == IO_OK) { *total_size = be64_to_cpu(*(__be64 *) buf->total_size); *block_size = be32_to_cpu(*(__be32 *) buf->block_size); } else { /* read capacity command failed */ dev_warn(&h->pdev->dev, "read capacity failed\n"); *total_size = 0; *block_size = BLOCK_SIZE; } dev_info(&h->pdev->dev, " blocks= %llu block_size= %d\n", (unsigned long long)*total_size+1, *block_size); kfree(buf); } static int cciss_revalidate(struct gendisk *disk) { ctlr_info_t *h = get_host(disk); drive_info_struct *drv = get_drv(disk); int logvol; int FOUND = 0; unsigned int block_size; sector_t total_size; InquiryData_struct *inq_buff = NULL; for (logvol = 0; logvol <= h->highest_lun; logvol++) { if (!h->drv[logvol]) continue; if (memcmp(h->drv[logvol]->LunID, drv->LunID, sizeof(drv->LunID)) == 0) { FOUND = 1; break; } } if (!FOUND) return 1; inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); if (inq_buff == NULL) { dev_warn(&h->pdev->dev, "out of memory\n"); return 1; } if (h->cciss_read == CCISS_READ_10) { cciss_read_capacity(h, logvol, &total_size, &block_size); } else { cciss_read_capacity_16(h, logvol, &total_size, &block_size); } cciss_geometry_inquiry(h, logvol, total_size, block_size, inq_buff, drv); blk_queue_logical_block_size(drv->queue, drv->block_size); set_capacity(disk, drv->nr_blocks); kfree(inq_buff); return 0; } /* * Map (physical) PCI mem into (virtual) kernel space */ static void __iomem *remap_pci_mem(ulong base, ulong size) { ulong page_base = ((ulong) base) & PAGE_MASK; ulong page_offs = ((ulong) base) - page_base; void __iomem *page_remapped = ioremap(page_base, page_offs + size); return page_remapped ? (page_remapped + page_offs) : NULL; } /* * Takes jobs of the Q and sends them to the hardware, then puts it on * the Q to wait for completion. 
 */
/* Drain h->reqQ into the controller until the hardware FIFO fills,
 * moving each submitted command onto h->cmpQ to await its completion
 * interrupt.  Caller must hold h->lock. */
static void start_io(ctlr_info_t *h)
{
	CommandList_struct *c;

	while (!list_empty(&h->reqQ)) {
		c = list_entry(h->reqQ.next, CommandList_struct, list);
		/* can't do anything if fifo is full */
		if ((h->access.fifo_full(h))) {
			dev_warn(&h->pdev->dev, "fifo full\n");
			break;
		}

		/* Get the first entry from the Request Q */
		removeQ(c);
		h->Qdepth--;

		/* Tell the controller execute command */
		h->access.submit_command(h, c);

		/* Put job onto the completed Q */
		addQ(&h->cmpQ, c);
	}
}

/* Assumes that h->lock is held. */
/* Zeros out the error record and then resends the command back */
/* to the controller */
static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
{
	/* erase the old error information */
	memset(c->err_info, 0, sizeof(ErrorInfo_struct));

	/* add it to software queue and then send it to the controller */
	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	start_io(h);
}

/* Pack the four SCSI result bytes into a single request->errors word;
 * inverse of the status/msg/host/driver byte macros in scsi.h. */
static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
	unsigned int msg_byte, unsigned int host_byte,
	unsigned int driver_byte)
{
	/* inverse of macros in scsi.h */
	return (scsi_status_byte & 0xff) |
		((msg_byte & 0xff) << 8) |
		((host_byte & 0xff) << 16) |
		((driver_byte & 0xff) << 24);
}

/* Translate a CMD_TARGET_STATUS error record into a request->errors
 * value.  Sets *retry_cmd to 1 when the command should be resent
 * (unit attention on a non-passthrough request).  For SG_IO-style
 * (REQ_TYPE_BLOCK_PC) requests, sense data is copied back to the
 * request so userspace can inspect it. */
static inline int evaluate_target_status(ctlr_info_t *h,
			CommandList_struct *cmd, int *retry_cmd)
{
	unsigned char sense_key;
	unsigned char status_byte, msg_byte, host_byte, driver_byte;
	int error_value;

	*retry_cmd = 0;
	/* If we get in here, it means we got "target status", that is, scsi status */
	status_byte = cmd->err_info->ScsiStatus;
	driver_byte = DRIVER_OK;
	msg_byte = cmd->err_info->CommandStatus; /* correct?
seems too device specific */

	if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
		host_byte = DID_PASSTHROUGH;
	else
		host_byte = DID_OK;

	error_value = make_status_bytes(status_byte, msg_byte,
		host_byte, driver_byte);

	if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
		if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
			dev_warn(&h->pdev->dev, "cmd %p "
				"has SCSI Status 0x%x\n",
				cmd, cmd->err_info->ScsiStatus);
		return error_value;
	}

	/* check the sense key */
	sense_key = 0xf & cmd->err_info->SenseInfo[2];
	/* no status or recovered error */
	if (((sense_key == 0x0) || (sense_key == 0x1)) &&
	    (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
		error_value = 0;

	if (check_for_unit_attention(h, cmd)) {
		*retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC);
		return 0;
	}

	/* Not SG_IO or similar? */
	if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) {
		if (error_value != 0)
			dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
				" sense key = 0x%x\n", cmd, sense_key);
		return error_value;
	}

	/* SG_IO or similar, copy sense data back */
	if (cmd->rq->sense) {
		if (cmd->rq->sense_len > cmd->err_info->SenseLen)
			cmd->rq->sense_len = cmd->err_info->SenseLen;
		memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
			cmd->rq->sense_len);
	} else
		cmd->rq->sense_len = 0;

	return error_value;
}

/* checks the status of the job and calls complete buffers to mark all
 * buffers for the completed job. Note that this function does not need
 * to hold the hba/queue lock.
 */
static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
	int timeout)
{
	int retry_cmd = 0;
	struct request *rq = cmd->rq;

	rq->errors = 0;

	if (timeout)
		rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);

	if (cmd->err_info->CommandStatus == 0)	/* no error has occurred */
		goto after_error_processing;

	/* Map every controller CommandStatus onto a request->errors value.
	 * Passthrough (REQ_TYPE_BLOCK_PC) requests report DID_PASSTHROUGH
	 * so userspace sees the raw result; filesystem requests report a
	 * conventional host byte. */
	switch (cmd->err_info->CommandStatus) {
	case CMD_TARGET_STATUS:
		rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
		break;
	case CMD_DATA_UNDERRUN:
		if (cmd->rq->cmd_type == REQ_TYPE_FS) {
			dev_warn(&h->pdev->dev, "cmd %p has"
				" completed with data underrun "
				"reported\n", cmd);
			cmd->rq->resid_len = cmd->err_info->ResidualCnt;
		}
		break;
	case CMD_DATA_OVERRUN:
		if (cmd->rq->cmd_type == REQ_TYPE_FS)
			dev_warn(&h->pdev->dev, "cciss: cmd %p has"
				" completed with data overrun "
				"reported\n", cmd);
		break;
	case CMD_INVALID:
		dev_warn(&h->pdev->dev, "cciss: cmd %p is "
			"reported invalid\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cciss: cmd %p has "
			"protocol error\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_HARDWARE_ERR:
		dev_warn(&h->pdev->dev, "cciss: cmd %p had "
			" hardware error\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(&h->pdev->dev, "cciss: cmd %p had "
			"connection lost\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_ABORTED:
		dev_warn(&h->pdev->dev, "cciss: cmd %p was "
			"aborted\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ABORT);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(&h->pdev->dev, "cciss: cmd %p reports "
			"abort failed\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(&h->pdev->dev, "cciss%d: unsolicited "
			"abort %p\n", h->ctlr, cmd);
		/* only unsolicited aborts are retried, and only up to
		 * MAX_CMD_RETRIES times */
		if (cmd->retry_count < MAX_CMD_RETRIES) {
			retry_cmd = 1;
			dev_warn(&h->pdev->dev, "retrying %p\n", cmd);
			cmd->retry_count++;
		} else
			dev_warn(&h->pdev->dev,
				"%p retried too many times\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ABORT);
		break;
	case CMD_TIMEOUT:
		dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_UNABORTABLE:
		dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ?
				DID_PASSTHROUGH : DID_ERROR);
		break;
	default:
		dev_warn(&h->pdev->dev, "cmd %p returned "
			"unknown status %x\n", cmd,
				cmd->err_info->CommandStatus);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				DID_PASSTHROUGH : DID_ERROR);
	}

after_error_processing:

	/* We need to return this command */
	if (retry_cmd) {
		resend_cciss_cmd(h, cmd);
		return;
	}
	cmd->rq->completion_data = cmd;
	blk_complete_request(cmd->rq);
}

/* True if the tag encodes a direct command-pool index (bit 4 set). */
static inline u32 cciss_tag_contains_index(u32 tag)
{
#define DIRECT_LOOKUP_BIT 0x10
	return tag & DIRECT_LOOKUP_BIT;
}

/* Extract the command-pool index from an indexed tag. */
static inline u32 cciss_tag_to_index(u32 tag)
{
#define DIRECT_LOOKUP_SHIFT 5
	return tag >> DIRECT_LOOKUP_SHIFT;
}

/* Strip the controller's error-reporting bits from a completion tag.
 * Performant mode reserves the low DIRECT_LOOKUP_SHIFT bits; simple
 * mode reserves only the low 2 bits. */
static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
{
#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define CCISS_SIMPLE_ERROR_BITS 0x03
	if (likely(h->transMethod & CFGTBL_Trans_Performant))
		return tag & ~CCISS_PERF_ERROR_BITS;
	return tag & ~CCISS_SIMPLE_ERROR_BITS;
}

static inline void cciss_mark_tag_indexed(u32 *tag)
{
	*tag |= DIRECT_LOOKUP_BIT;
}

static inline void cciss_set_tag_index(u32 *tag, u32 index)
{
	*tag |= (index << DIRECT_LOOKUP_SHIFT);
}

/*
 * Get a request and submit it to the controller.
 */
static void do_cciss_request(struct request_queue *q)
{
	ctlr_info_t *h = q->queuedata;
	CommandList_struct *c;
	sector_t start_blk;
	int seg;
	struct request *creq;
	u64bit temp64;
	struct scatterlist *tmp_sg;
	SGDescriptor_struct *curr_sg;
	drive_info_struct *drv;
	int i, dir;
	int sg_index = 0;
	int chained = 0;

queue:
	creq = blk_peek_request(q);
	if (!creq)
		goto startio;

	BUG_ON(creq->nr_phys_segments > h->maxsgentries);

	c = cmd_alloc(h);
	if (!c)
		goto full;

	blk_start_request(creq);

	tmp_sg = h->scatter_list[c->cmdindex];
	spin_unlock_irq(q->queue_lock);

	c->cmd_type = CMD_RWREQ;
	c->rq = creq;

	/* fill in the request */
	drv = creq->rq_disk->private_data;
	c->Header.ReplyQueue = 0;	/* unused in simple mode */
	/* got command from pool, so use the command block index instead */
	/* for direct lookups. */
	/* The first 2 bits are reserved for controller error reporting.
 */
	cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex);
	cciss_mark_tag_indexed(&c->Header.Tag.lower);
	memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
	c->Request.CDBLen = 10;	/* 12 byte commands not in FW yet; */
	c->Request.Type.Type = TYPE_CMD;	/* It is a command. */
	c->Request.Type.Attribute = ATTR_SIMPLE;
	c->Request.Type.Direction =
	    (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
	c->Request.Timeout = 0;	/* Don't time out */
	c->Request.CDB[0] =
	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
	start_blk = blk_rq_pos(creq);
	dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n",
	       (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
	sg_init_table(tmp_sg, h->maxsgentries);
	seg = blk_rq_map_sg(q, creq, tmp_sg);

	/* get the DMA records for the setup */
	if (c->Request.Type.Direction == XFER_READ)
		dir = PCI_DMA_FROMDEVICE;
	else
		dir = PCI_DMA_TODEVICE;

	curr_sg = c->SG;
	sg_index = 0;
	chained = 0;

	/* Fill in-command SG descriptors; if the request needs more than
	 * max_cmd_sgentries, the last in-command slot becomes a pointer
	 * to a separate chain block. */
	for (i = 0; i < seg; i++) {
		if (((sg_index+1) == (h->max_cmd_sgentries)) &&
			!chained && ((seg - i) > 1)) {
			/* Point to next chain block.
			 */
			curr_sg = h->cmd_sg_list[c->cmdindex];
			sg_index = 0;
			chained = 1;
		}
		curr_sg[sg_index].Len = tmp_sg[i].length;
		temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
						tmp_sg[i].offset,
						tmp_sg[i].length, dir);
		curr_sg[sg_index].Addr.lower = temp64.val32.lower;
		curr_sg[sg_index].Addr.upper = temp64.val32.upper;
		curr_sg[sg_index].Ext = 0;  /* we are not chaining */
		++sg_index;
	}
	if (chained)
		cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
			(seg - (h->max_cmd_sgentries - 1)) *
				sizeof(SGDescriptor_struct));

	/* track how many SG entries we are using */
	if (seg > h->maxSG)
		h->maxSG = seg;

	dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments "
			"chained[%d]\n", blk_rq_sectors(creq), seg, chained);

	c->Header.SGTotal = seg + chained;
	if (seg <= h->max_cmd_sgentries)
		c->Header.SGList = c->Header.SGTotal;
	else
		c->Header.SGList = h->max_cmd_sgentries;
	set_performant_mode(h, c);

	/* Build the CDB: 10-byte READ/WRITE for small volumes, 16-byte
	 * for large ones; passthrough requests copy the caller's CDB. */
	if (likely(creq->cmd_type == REQ_TYPE_FS)) {
		if(h->cciss_read == CCISS_READ_10) {
			c->Request.CDB[1] = 0;
			c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
			c->Request.CDB[3] = (start_blk >> 16) & 0xff;
			c->Request.CDB[4] = (start_blk >> 8) & 0xff;
			c->Request.CDB[5] = start_blk & 0xff;
			c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */
			c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
			c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
			c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
		} else {
			u32 upper32 = upper_32_bits(start_blk);

			c->Request.CDBLen = 16;
			c->Request.CDB[1]= 0;
			c->Request.CDB[2]= (upper32 >> 24) & 0xff; /* MSB */
			c->Request.CDB[3]= (upper32 >> 16) & 0xff;
			c->Request.CDB[4]= (upper32 >> 8) & 0xff;
			c->Request.CDB[5]= upper32 & 0xff;
			c->Request.CDB[6]= (start_blk >> 24) & 0xff;
			c->Request.CDB[7]= (start_blk >> 16) & 0xff;
			c->Request.CDB[8]= (start_blk >> 8) & 0xff;
			c->Request.CDB[9]= start_blk & 0xff;
			c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
			c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
			c->Request.CDB[12]=
				(blk_rq_sectors(creq) >> 8) & 0xff;
			c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
			c->Request.CDB[14] = c->Request.CDB[15] = 0;
		}
	} else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
		c->Request.CDBLen = creq->cmd_len;
		memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
	} else {
		dev_warn(&h->pdev->dev, "bad request type %d\n",
			creq->cmd_type);
		BUG();
	}

	spin_lock_irq(q->queue_lock);

	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	goto queue;
full:
	blk_stop_queue(q);
startio:
	/* We will already have the driver lock here so not need
	 * to lock it.
	 */
	start_io(h);
}

static inline unsigned long get_next_completion(ctlr_info_t *h)
{
	return h->access.command_completed(h);
}

static inline int interrupt_pending(ctlr_info_t *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(ctlr_info_t *h)
{
	return ((h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0));
}

/* Reject completion tags whose index is outside the command pool. */
static inline int bad_tag(ctlr_info_t *h, u32 tag_index, u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

/* Dequeue a completed command and dispatch it by type: block request,
 * ioctl waiter, or (optionally) SCSI tape command. */
static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c,
	u32 raw_tag)
{
	removeQ(c);
	if (likely(c->cmd_type == CMD_RWREQ))
		complete_command(h, c, 0);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);
#ifdef CONFIG_CISS_SCSI_TAPE
	else if (c->cmd_type == CMD_SCSI)
		complete_scsi_command(c, 0, raw_tag);
#endif
}

/* Fetch the next completion tag.  In simple mode this reads the
 * hardware FIFO; in performant mode it consumes the DMA'd reply ring,
 * using the low "cycle" bit against reply_pool_wraparound to detect
 * valid new entries.  Returns FIFO_EMPTY when nothing is pending. */
static inline u32 next_command(ctlr_info_t *h)
{
	u32 a;

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h);

	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
		(h->reply_pool_head)++;
		h->commands_outstanding--;
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
		h->reply_pool_head = h->reply_pool;
		h->reply_pool_wraparound ^= 1;
	}
	return a;
}
/* process completion of an indexed ("direct lookup") command */
static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
{
	u32 tag_index;
	CommandList_struct *c;

	tag_index = cciss_tag_to_index(raw_tag);
	if (bad_tag(h, tag_index, raw_tag))
		return next_command(h);
	c = h->cmd_pool + tag_index;
	finish_cmd(h, c, raw_tag);
	return next_command(h);
}

/* process completion of a non-indexed command */
/* Falls back to scanning the completion queue for a command whose bus
 * address matches the tag (with error bits masked off). */
static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
{
	CommandList_struct *c = NULL;
	__u32 busaddr_masked, tag_masked;

	tag_masked = cciss_tag_discard_error_bits(h, raw_tag);
	list_for_each_entry(c, &h->cmpQ, list) {
		busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr);
		if (busaddr_masked == tag_masked) {
			finish_cmd(h, c, raw_tag);
			return next_command(h);
		}
	}
	/* no match: log via bad_tag with a deliberately out-of-range index */
	bad_tag(h, h->nr_cmds + 1, raw_tag);
	return next_command(h);
}

/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the cciss_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(ctlr_info_t *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
		"(known firmware bug.) Ignoring.\n");

	return 1;
}

/* INTx handler used during reset: drains and discards completions. */
static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id)
{
	ctlr_info_t *h = dev_id;
	unsigned long flags;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	spin_lock_irqsave(&h->lock, flags);
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h);
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return IRQ_HANDLED;
}

/* MSI/MSI-X handler used during reset: drains and discards completions. */
static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id)
{
	ctlr_info_t *h = dev_id;
	unsigned long flags;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	spin_lock_irqsave(&h->lock, flags);
	raw_tag = get_next_completion(h);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h);
	spin_unlock_irqrestore(&h->lock, flags);
	return IRQ_HANDLED;
}

/* Normal INTx interrupt handler: processes all pending completions. */
static irqreturn_t do_cciss_intx(int irq, void *dev_id)
{
	ctlr_info_t *h = dev_id;
	unsigned long flags;
	u32 raw_tag;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	spin_lock_irqsave(&h->lock, flags);
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h);
		while (raw_tag != FIFO_EMPTY) {
			if (cciss_tag_contains_index(raw_tag))
				raw_tag = process_indexed_cmd(h, raw_tag);
			else
				raw_tag = process_nonindexed_cmd(h, raw_tag);
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return IRQ_HANDLED;
}

/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never
 * check the interrupt pending register because it is not set.
 */
static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id)
{
	ctlr_info_t *h = dev_id;
	unsigned long flags;
	u32 raw_tag;

	spin_lock_irqsave(&h->lock, flags);
	raw_tag = get_next_completion(h);
	while (raw_tag != FIFO_EMPTY) {
		if (cciss_tag_contains_index(raw_tag))
			raw_tag = process_indexed_cmd(h, raw_tag);
		else
			raw_tag = process_nonindexed_cmd(h, raw_tag);
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return IRQ_HANDLED;
}

/**
 * add_to_scan_list() - add controller to rescan queue
 * @h:		      Pointer to the controller.
 *
 * Adds the controller to the rescan queue if not already on the queue.
 *
 * returns 1 if added to the queue, 0 if skipped (could be on the
 * queue already, or the controller could be initializing or shutting
 * down).
 **/
static int add_to_scan_list(struct ctlr_info *h)
{
	struct ctlr_info *test_h;
	int found = 0;
	int ret = 0;

	if (h->busy_initializing)
		return 0;

	/* trylock: never block a scan request behind a shutdown */
	if (!mutex_trylock(&h->busy_shutting_down))
		return 0;

	mutex_lock(&scan_mutex);
	list_for_each_entry(test_h, &scan_q, scan_list) {
		if (test_h == h) {
			found = 1;
			break;
		}
	}
	if (!found && !h->busy_scanning) {
		INIT_COMPLETION(h->scan_wait);
		list_add_tail(&h->scan_list, &scan_q);
		ret = 1;
	}
	mutex_unlock(&scan_mutex);
	mutex_unlock(&h->busy_shutting_down);

	return ret;
}

/**
 * remove_from_scan_list() - remove controller from rescan queue
 * @h:			   Pointer to the controller.
 *
 * Removes the controller from the rescan queue if present. Blocks if
 * the controller is currently conducting a rescan. The controller
 * can be in one of three states:
 * 1. Doesn't need a scan
 * 2. On the scan list, but not scanning yet (we remove it)
 * 3. Busy scanning (and not on the list). In this case we want to wait for
 *    the scan to complete to make sure the scanning thread for this
 *    controller is completely idle.
 **/
static void remove_from_scan_list(struct ctlr_info *h)
{
	struct ctlr_info *test_h, *tmp_h;

	mutex_lock(&scan_mutex);
	list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
		if (test_h == h) { /* state 2. */
			list_del(&h->scan_list);
			complete_all(&h->scan_wait);
			mutex_unlock(&scan_mutex);
			return;
		}
	}
	if (h->busy_scanning) { /* state 3. */
		mutex_unlock(&scan_mutex);
		wait_for_completion(&h->scan_wait);
	} else { /* state 1, nothing to do. */
		mutex_unlock(&scan_mutex);
	}
}

/**
 * scan_thread() - kernel thread used to rescan controllers
 * @data:	 Ignored.
 *
 * A kernel thread used scan for drive topology changes on
 * controllers. The thread processes only one controller at a time
 * using a queue.  Controllers are added to the queue using
 * add_to_scan_list() and removed from the queue either after done
 * processing or using remove_from_scan_list().
 *
 * returns 0.
 **/
static int scan_thread(void *data)
{
	struct ctlr_info *h;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		if (kthread_should_stop())
			break;

		/* drain the whole queue before sleeping again */
		while (1) {
			mutex_lock(&scan_mutex);
			if (list_empty(&scan_q)) {
				mutex_unlock(&scan_mutex);
				break;
			}

			h = list_entry(scan_q.next,
				       struct ctlr_info,
				       scan_list);
			list_del(&h->scan_list);
			h->busy_scanning = 1;
			mutex_unlock(&scan_mutex);

			rebuild_lun_table(h, 0, 0);
			complete_all(&h->scan_wait);
			mutex_lock(&scan_mutex);
			h->busy_scanning = 0;
			mutex_unlock(&scan_mutex);
		}
	}

	return 0;
}

/* Inspect a command's sense data for a UNIT ATTENTION condition and
 * log the specific cause.  Returns 1 for any unit attention (caller
 * may retry), 0 otherwise. */
static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, "a state change "
			"detected, command retried\n");
		return 1;
	break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, "LUN failure "
			"detected, action required\n");
		return 1;
	break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, "report LUN data changed\n");
	/*
	 * Here, we could call add_to_scan_list and wake up the scan thread,
	 * except that it's quite likely that we will get more than one
	 * REPORT_LUNS_CHANGED condition in quick succession, which means
	 * that those which occur after the first one will likely happen
	 * *during* the scan_thread's rescan.  And the rescan code is not
	 * robust enough to restart in the middle, undoing what it has already
	 * done, and it's not clear that it's even possible to do this, since
	 * part of what it does is notify the block layer, which starts
	 * doing it's own i/o to read partition tables and so on, and the
	 * driver doesn't have visibility to know what might need undoing.
	 * In any event, if possible, it is horribly complicated to get right
	 * so we just don't do it for now.
* * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. */ return 1; break; case POWER_OR_RESET: dev_warn(&h->pdev->dev, "a power on or device reset detected\n"); return 1; break; case UNIT_ATTENTION_CLEARED: dev_warn(&h->pdev->dev, "unit attention cleared by another initiator\n"); return 1; break; default: dev_warn(&h->pdev->dev, "unknown unit attention detected\n"); return 1; } } /* * We cannot read the structure directly, for portability we must use * the io functions. * This is for debug only. */ static void print_cfg_table(ctlr_info_t *h) { int i; char temp_name[17]; CfgTable_struct *tb = h->cfgtable; dev_dbg(&h->pdev->dev, "Controller Configuration information\n"); dev_dbg(&h->pdev->dev, "------------------------------------\n"); for (i = 0; i < 4; i++) temp_name[i] = readb(&(tb->Signature[i])); temp_name[4] = '\0'; dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name); dev_dbg(&h->pdev->dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n", readl(&(tb->TransportSupport))); dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n", readl(&(tb->TransportActive))); dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n", readl(&(tb->HostWrite.TransportRequest))); dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n", readl(&(tb->HostWrite.CoalIntDelay))); dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n", readl(&(tb->HostWrite.CoalIntCount))); dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%d\n", readl(&(tb->CmdsOutMax))); dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); for (i = 0; i < 16; i++) temp_name[i] = readb(&(tb->ServerName[i])); temp_name[16] = '\0'; dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name); dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat))); } static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) { int i, offset, mem_type, bar_type; if 
(pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ return 0; offset = 0; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) offset += 4; else { mem_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_MEM_TYPE_MASK; switch (mem_type) { case PCI_BASE_ADDRESS_MEM_TYPE_32: case PCI_BASE_ADDRESS_MEM_TYPE_1M: offset += 4; /* 32 bit */ break; case PCI_BASE_ADDRESS_MEM_TYPE_64: offset += 8; break; default: /* reserved in PCI 2.2 */ dev_warn(&pdev->dev, "Base address is invalid\n"); return -1; break; } } if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) return i + 1; } return -1; } /* Fill in bucket_map[], given nsgs (the max number of * scatter gather elements supported) and bucket[], * which is an array of 8 integers. The bucket[] array * contains 8 different DMA transfer sizes (in 16 * byte increments) which the controller uses to fetch * commands. This function fills in bucket_map[], which * maps a given number of scatter gather elements to one of * the 8 DMA transfer sizes. The point of it is to allow the * controller to only do as much DMA as needed to fetch the * command, with the DMA transfer size encoded in the lower * bits of the command address. */ static void calc_bucket_map(int bucket[], int num_buckets, int nsgs, int *bucket_map) { int i, j, b, size; /* even a command with 0 SGs requires 4 blocks */ #define MINIMUM_TRANSFER_BLOCKS 4 #define NUM_BUCKETS 8 /* Note, bucket_map must have nsgs+1 entries. */ for (i = 0; i <= nsgs; i++) { /* Compute size of a command with i SG entries */ size = i + MINIMUM_TRANSFER_BLOCKS; b = num_buckets; /* Assume the biggest bucket */ /* Find the bucket that is just big enough */ for (j = 0; j < 8; j++) { if (bucket[j] >= size) { b = j; break; } } /* for a command with i SG entries, use bucket b. 
*/ bucket_map[i] = b; } } static void cciss_wait_for_mode_change_ack(ctlr_info_t *h) { int i; /* under certain very rare conditions, this can take awhile. * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right * as we enter this code.) */ for (i = 0; i < MAX_CONFIG_WAIT; i++) { if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) break; usleep_range(10000, 20000); } } static void cciss_enter_performant_mode(ctlr_info_t *h, u32 use_short_tags) { /* This is a bit complicated. There are 8 registers on * the controller which we write to to tell it 8 different * sizes of commands which there may be. It's a way of * reducing the DMA done to fetch each command. Encoded into * each command's tag are 3 bits which communicate to the controller * which of the eight sizes that command fits within. The size of * each command depends on how many scatter gather entries there are. * Each SG entry requires 16 bytes. The eight registers are programmed * with the number of 16-byte blocks a command of that size requires. * The smallest command possible requires 5 such 16 byte blocks. * the largest command possible requires MAXSGENTRIES + 4 16-byte * blocks. Note, this only extends to the SG entries contained * within the command block, and does not extend to chained blocks * of SG elements. bft[] contains the eight values we write to * the registers. They are not evenly distributed, but have more * sizes for small commands, and fewer sizes for larger commands. */ __u32 trans_offset; int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; /* * 5 = 1 s/g entry or 4k * 6 = 2 s/g entry or 8k * 8 = 4 s/g entry or 16k * 10 = 6 s/g entry or 24k */ unsigned long register_value; BUILD_BUG_ON(28 > MAXSGENTRIES + 4); h->reply_pool_wraparound = 1; /* spec: init to 1 */ /* Controller spec: zero out this buffer. 
 */
	memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
	h->reply_pool_head = h->reply_pool;

	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
	calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
				h->blockFetchTable);
	writel(bft[0], &h->transtable->BlockFetch0);
	writel(bft[1], &h->transtable->BlockFetch1);
	writel(bft[2], &h->transtable->BlockFetch2);
	writel(bft[3], &h->transtable->BlockFetch3);
	writel(bft[4], &h->transtable->BlockFetch4);
	writel(bft[5], &h->transtable->BlockFetch5);
	writel(bft[6], &h->transtable->BlockFetch6);
	writel(bft[7], &h->transtable->BlockFetch7);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(0, &h->transtable->RepQAddr0High32);
	writel(CFGTBL_Trans_Performant | use_short_tags,
			&(h->cfgtable->HostWrite.TransportRequest));

	/* ring the doorbell and wait for the controller to switch modes */
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	cciss_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant))
		dev_warn(&h->pdev->dev, "cciss: unable to get board into"
					" performant mode\n");
}

/* Detect performant-mode support, allocate the reply ring and block
 * fetch table, and switch the controller (and h->access methods) to
 * performant mode.  Silently stays in simple mode when unsupported,
 * disabled via cciss_simple_mode, or on allocation failure. */
static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
{
	__u32 trans_support;

	if (cciss_simple_mode)
		return;

	dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
	/* Attempt to put controller into performant mode if supported */
	/* Does board support performant mode? */
	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
	/* Performant mode demands commands on a 32 byte boundary
	 * pci_alloc_consistent aligns on page boundaries already.
	 * Just need to check if divisible by 32
	 */
	if ((sizeof(CommandList_struct) % 32) != 0) {
		dev_warn(&h->pdev->dev, "%s %d %s\n",
			"cciss info: command size[",
			(int)sizeof(CommandList_struct),
			"] not divisible by 32, no performant mode..\n");
		return;
	}

	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool = (__u64 *)pci_alloc_consistent(
		h->pdev, h->max_commands * sizeof(__u64),
		&(h->reply_pool_dhandle));

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
		sizeof(__u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
		goto clean_up;

	cciss_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);

	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;

	return;
clean_up:
	kfree(h->blockFetchTable);
	if (h->reply_pool)
		pci_free_consistent(h->pdev,
				h->max_commands * sizeof(__u64),
				h->reply_pool,
				h->reply_pool_dhandle);
	return;

} /* cciss_put_controller_into_performant_mode */

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
 */
static void cciss_interrupt_mode(ctlr_info_t *h)
{
#ifdef CONFIG_PCI_MSI
	int err;
	struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
	{0, 2}, {0, 3}
	};

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;

	/* prefer MSI-X, then MSI, then legacy INTx */
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		err = pci_enable_msix(h->pdev, cciss_msix_entries, 4);
		if (!err) {
			h->intr[0] = cciss_msix_entries[0].vector;
			h->intr[1] = cciss_msix_entries[1].vector;
			h->intr[2] = cciss_msix_entries[2].vector;
			h->intr[3] = cciss_msix_entries[3].vector;
			h->msix_vector = 1;
			return;
		}
		if (err > 0) {
			dev_warn(&h->pdev->dev,
				"only %d MSI-X vectors available\n", err);
			goto default_int_mode;
		} else {
			dev_warn(&h->pdev->dev,
				"MSI-X init failed %d\n", err);
			goto default_int_mode;
		}
	}
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
	return;
}

/* Build the 32-bit board ID from the PCI subsystem IDs and look it up
 * in the products[] table.  Returns the table index, or -ENODEV when
 * unknown or when cciss_allow_hpsa defers the board to the hpsa driver. */
static int cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
			subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++) {
		/* Stand aside for hpsa driver on request */
		if (cciss_allow_hpsa)
			return -ENODEV;
		if (*board_id == products[i].board_id)
			return i;
	}
	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
		*board_id);
	return -ENODEV;
}

/* True when the board's PCI memory-space decoding is disabled. */
static inline bool cciss_board_disabled(ctlr_info_t *h)
{
	u16 command;

	(void) pci_read_config_word(h->pdev, PCI_COMMAND, &command);
	return ((command & PCI_COMMAND_MEMORY) == 0);
}

/* Find the first memory BAR and return its bus address in *memory_bar. */
static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

/* Poll the scratchpad register until the firmware reaches (or leaves,
 * per @wait_for_ready) the READY state; -ENODEV on timeout. */
static int cciss_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready)
#define BOARD_READY 1
#define BOARD_NOT_READY 0
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = CCISS_BOARD_READY_ITERATIONS;
	else
		iterations = CCISS_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == CCISS_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != CCISS_FIRMWARE_READY)
				return 0;
		}
		msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

/* Read the config-table BAR offset and byte offset from the board's
 * registers and translate the BAR offset into a resource index. */
static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index, u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
			"*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
		return -ENODEV;
	}
	return 0;
}

/* Map the controller's config table and transport (performant-mode)
 * table into kernel space.  Returns 0, -ENODEV, or -ENOMEM. */
static int cciss_find_cfgtables(ctlr_info_t *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index)+cfg_offset+trans_offset,
		sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

/* Read the controller's performant-mode command limit, clamping it for
 * kdump (memory-limited) and against absurdly low firmware values. */
static void cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16. Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void cciss_find_board_params(ctlr_info_t *h)
{
	cciss_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
	h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
	/*
	 * Limit in-command s/g elements to 32 save dma'able memory.
	 * However spec says if 0, use 31
	 */
	h->max_cmd_sgentries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sgentries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}
}

/* Verify the config table begins with the "CISS" signature. */
static inline bool CISS_signature_present(ctlr_info_t *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
{
#ifdef CONFIG_X86
	u32 prefetch;

	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
	prefetch |= 0x100;
	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}

/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
{
	u32 dma_prefetch;
	__u32 dma_refetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
	pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
	dma_refetch |= 0x1;
	pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
}

/* Top-level PCI bring-up for one controller: identify the board,
 * enable the device, and claim its regions.
 * NOTE(review): function continues beyond this excerpt. */
static int cciss_pci_init(ctlr_info_t *h)
{
	int prod_index, err;

	prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	if (cciss_board_disabled(h)) {
		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
		return -ENODEV;
	}

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
				PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
		return err;
	}

	err = pci_request_regions(h->pdev, "cciss");
	if (err) {
dev_warn(&h->pdev->dev, "Cannot obtain PCI resources, aborting\n"); return err; } dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq); dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id); /* If the kernel supports MSI/MSI-X we will try to enable that functionality, * else we use the IO-APIC interrupt assigned to us by system ROM. */ cciss_interrupt_mode(h); err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) goto err_out_free_res; h->vaddr = remap_pci_mem(h->paddr, 0x250); if (!h->vaddr) { err = -ENOMEM; goto err_out_free_res; } err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); if (err) goto err_out_free_res; err = cciss_find_cfgtables(h); if (err) goto err_out_free_res; print_cfg_table(h); cciss_find_board_params(h); if (!CISS_signature_present(h)) { err = -ENODEV; goto err_out_free_res; } cciss_enable_scsi_prefetch(h); cciss_p600_dma_prefetch_quirk(h); err = cciss_enter_simple_mode(h); if (err) goto err_out_free_res; cciss_put_controller_into_performant_mode(h); return 0; err_out_free_res: /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ if (h->transtable) iounmap(h->transtable); if (h->cfgtable) iounmap(h->cfgtable); if (h->vaddr) iounmap(h->vaddr); pci_release_regions(h->pdev); return err; } /* Function to find the first free pointer into our hba[] array * Returns -1 if no free entries are left. 
*/ static int alloc_cciss_hba(struct pci_dev *pdev) { int i; for (i = 0; i < MAX_CTLR; i++) { if (!hba[i]) { ctlr_info_t *h; h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); if (!h) goto Enomem; hba[i] = h; return i; } } dev_warn(&pdev->dev, "This driver supports a maximum" " of %d controllers.\n", MAX_CTLR); return -1; Enomem: dev_warn(&pdev->dev, "out of memory.\n"); return -1; } static void free_hba(ctlr_info_t *h) { int i; hba[h->ctlr] = NULL; for (i = 0; i < h->highest_lun + 1; i++) if (h->gendisk[i] != NULL) put_disk(h->gendisk[i]); kfree(h); } /* Send a message CDB to the firmware. */ static int cciss_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type) { typedef struct { CommandListHeader_struct CommandHeader; RequestBlock_struct Request; ErrDescriptor_struct ErrorDescriptor; } Command; static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct); Command *cmd; dma_addr_t paddr64; uint32_t paddr32, tag; void __iomem *vaddr; int i, err; vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (vaddr == NULL) return -ENOMEM; /* The Inbound Post Queue only accepts 32-bit physical addresses for the CCISS commands, so they must be allocated from the lower 4GiB of memory. */ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { iounmap(vaddr); return -ENOMEM; } cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); if (cmd == NULL) { iounmap(vaddr); return -ENOMEM; } /* This must fit, because of the 32-bit consistent DMA mask. Also, although there's no guarantee, we assume that the address is at least 4-byte aligned (most likely, it's page-aligned). 
*/ paddr32 = paddr64; cmd->CommandHeader.ReplyQueue = 0; cmd->CommandHeader.SGList = 0; cmd->CommandHeader.SGTotal = 0; cmd->CommandHeader.Tag.lower = paddr32; cmd->CommandHeader.Tag.upper = 0; memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); cmd->Request.CDBLen = 16; cmd->Request.Type.Type = TYPE_MSG; cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; cmd->Request.Type.Direction = XFER_NONE; cmd->Request.Timeout = 0; /* Don't time out */ cmd->Request.CDB[0] = opcode; cmd->Request.CDB[1] = type; memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */ cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command); cmd->ErrorDescriptor.Addr.upper = 0; cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct); writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); for (i = 0; i < 10; i++) { tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); if ((tag & ~3) == paddr32) break; msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS); } iounmap(vaddr); /* we leak the DMA buffer here ... no choice since the controller could still complete the command. */ if (i == 10) { dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", opcode, type); return -ETIMEDOUT; } pci_free_consistent(pdev, cmd_sz, cmd, paddr64); if (tag & 2) { dev_err(&pdev->dev, "controller message %02x:%02x failed\n", opcode, type); return -EIO; } dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", opcode, type); return 0; } #define cciss_noop(p) cciss_message(p, 3, 0) static int cciss_controller_hard_reset(struct pci_dev *pdev, void * __iomem vaddr, u32 use_doorbell) { u16 pmcsr; int pos; if (use_doorbell) { /* For everything after the P600, the PCI power state method * of resetting the controller doesn't work, so we have this * other way using the doorbell register. 
*/ dev_info(&pdev->dev, "using doorbell to reset controller\n"); writel(use_doorbell, vaddr + SA5_DOORBELL); } else { /* Try to do it the PCI power state way */ /* Quoting from the Open CISS Specification: "The Power * Management Control/Status Register (CSR) controls the power * state of the device. The normal operating state is D0, * CSR=00h. The software off state is D3, CSR=03h. To reset * the controller, place the interface device in D3 then to D0, * this causes a secondary PCI reset which will reset the * controller." */ pos = pci_find_capability(pdev, PCI_CAP_ID_PM); if (pos == 0) { dev_err(&pdev->dev, "cciss_controller_hard_reset: " "PCI PM not supported\n"); return -ENODEV; } dev_info(&pdev->dev, "using PCI PM to reset controller\n"); /* enter the D3hot power management state */ pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= PCI_D3hot; pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); msleep(500); /* enter the D0 power management state */ pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= PCI_D0; pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); /* * The P600 requires a small delay when changing states. * Otherwise we may think the board did not reset and we bail. * This for kdump only and is particular to the P600. 
*/ msleep(500); } return 0; } static void init_driver_version(char *driver_version, int len) { memset(driver_version, 0, len); strncpy(driver_version, "cciss " DRIVER_NAME, len - 1); } static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable) { char *driver_version; int i, size = sizeof(cfgtable->driver_version); driver_version = kmalloc(size, GFP_KERNEL); if (!driver_version) return -ENOMEM; init_driver_version(driver_version, size); for (i = 0; i < size; i++) writeb(driver_version[i], &cfgtable->driver_version[i]); kfree(driver_version); return 0; } static void read_driver_ver_from_cfgtable(CfgTable_struct __iomem *cfgtable, unsigned char *driver_ver) { int i; for (i = 0; i < sizeof(cfgtable->driver_version); i++) driver_ver[i] = readb(&cfgtable->driver_version[i]); } static int controller_reset_failed(CfgTable_struct __iomem *cfgtable) { char *driver_ver, *old_driver_ver; int rc, size = sizeof(cfgtable->driver_version); old_driver_ver = kmalloc(2 * size, GFP_KERNEL); if (!old_driver_ver) return -ENOMEM; driver_ver = old_driver_ver + size; /* After a reset, the 32 bytes of "driver version" in the cfgtable * should have been changed, otherwise we know the reset failed. */ init_driver_version(old_driver_ver, size); read_driver_ver_from_cfgtable(cfgtable, driver_ver); rc = !memcmp(driver_ver, old_driver_ver, size); kfree(old_driver_ver); return rc; } /* This does a hard reset of the controller using PCI power management * states or using the doorbell register. 
*/ static int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) { u64 cfg_offset; u32 cfg_base_addr; u64 cfg_base_addr_index; void __iomem *vaddr; unsigned long paddr; u32 misc_fw_support; int rc; CfgTable_struct __iomem *cfgtable; u32 use_doorbell; u32 board_id; u16 command_register; /* For controllers as old a the p600, this is very nearly * the same thing as * * pci_save_state(pci_dev); * pci_set_power_state(pci_dev, PCI_D3hot); * pci_set_power_state(pci_dev, PCI_D0); * pci_restore_state(pci_dev); * * For controllers newer than the P600, the pci power state * method of resetting doesn't work so we have another way * using the doorbell register. */ /* Exclude 640x boards. These are two pci devices in one slot * which share a battery backed cache module. One controls the * cache, the other accesses the cache through the one that controls * it. If we reset the one controlling the cache, the other will * likely not be happy. Just forbid resetting this conjoined mess. */ cciss_lookup_board_id(pdev, &board_id); if (!ctlr_is_resettable(board_id)) { dev_warn(&pdev->dev, "Cannot reset Smart Array 640x " "due to shared cache module."); return -ENODEV; } /* if controller is soft- but not hard resettable... */ if (!ctlr_is_hard_resettable(board_id)) return -ENOTSUPP; /* try soft reset later. */ /* Save the PCI command register */ pci_read_config_word(pdev, 4, &command_register); /* Turn the board off. This is so that later pci_restore_state() * won't turn the board on before the rest of config space is ready. 
*/ pci_disable_device(pdev); pci_save_state(pdev); /* find the first memory BAR, so we can find the cfg table */ rc = cciss_pci_find_memory_BAR(pdev, &paddr); if (rc) return rc; vaddr = remap_pci_mem(paddr, 0x250); if (!vaddr) return -ENOMEM; /* find cfgtable in order to check if reset via doorbell is supported */ rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, &cfg_base_addr_index, &cfg_offset); if (rc) goto unmap_vaddr; cfgtable = remap_pci_mem(pci_resource_start(pdev, cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); if (!cfgtable) { rc = -ENOMEM; goto unmap_vaddr; } rc = write_driver_ver_to_cfgtable(cfgtable); if (rc) goto unmap_vaddr; /* If reset via doorbell register is supported, use that. * There are two such methods. Favor the newest method. */ misc_fw_support = readl(&cfgtable->misc_fw_support); use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; if (use_doorbell) { use_doorbell = DOORBELL_CTLR_RESET2; } else { use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; if (use_doorbell) { dev_warn(&pdev->dev, "Controller claims that " "'Bit 2 doorbell reset' is " "supported, but not 'bit 5 doorbell reset'. " "Firmware update is recommended.\n"); rc = -ENOTSUPP; /* use the soft reset */ goto unmap_cfgtable; } } rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) goto unmap_cfgtable; pci_restore_state(pdev); rc = pci_enable_device(pdev); if (rc) { dev_warn(&pdev->dev, "failed to enable device.\n"); goto unmap_cfgtable; } pci_write_config_word(pdev, 4, command_register); /* Some devices (notably the HP Smart Array 5i Controller) need a little pause here */ msleep(CCISS_POST_RESET_PAUSE_MSECS); /* Wait for board to become not ready, then ready. */ dev_info(&pdev->dev, "Waiting for board to reset.\n"); rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); if (rc) { dev_warn(&pdev->dev, "Failed waiting for board to hard reset." 
" Will try soft reset.\n"); rc = -ENOTSUPP; /* Not expected, but try soft reset later */ goto unmap_cfgtable; } rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY); if (rc) { dev_warn(&pdev->dev, "failed waiting for board to become ready " "after hard reset\n"); goto unmap_cfgtable; } rc = controller_reset_failed(vaddr); if (rc < 0) goto unmap_cfgtable; if (rc) { dev_warn(&pdev->dev, "Unable to successfully hard reset " "controller. Will try soft reset.\n"); rc = -ENOTSUPP; /* Not expected, but try soft reset later */ } else { dev_info(&pdev->dev, "Board ready after hard reset.\n"); } unmap_cfgtable: iounmap(cfgtable); unmap_vaddr: iounmap(vaddr); return rc; } static int cciss_init_reset_devices(struct pci_dev *pdev) { int rc, i; if (!reset_devices) return 0; /* Reset the controller with a PCI power-cycle or via doorbell */ rc = cciss_kdump_hard_reset_controller(pdev); /* -ENOTSUPP here means we cannot reset the controller * but it's already (and still) up and running in * "performant mode". Or, it might be 640x, which can't reset * due to concerns about shared bbwc between 6402/6404 pair. */ if (rc == -ENOTSUPP) return rc; /* just try to do the kdump anyhow. */ if (rc) return -ENODEV; /* Now try to get the controller to respond to a no-op */ dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { if (cciss_noop(pdev) == 0) break; else dev_warn(&pdev->dev, "no-op failed%s\n", (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ? 
"; re-trying" : "")); msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS); } return 0; } static int cciss_allocate_cmd_pool(ctlr_info_t *h) { h->cmd_pool_bits = kmalloc(BITS_TO_LONGS(h->nr_cmds) * sizeof(unsigned long), GFP_KERNEL); h->cmd_pool = pci_alloc_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct), &(h->cmd_pool_dhandle)); h->errinfo_pool = pci_alloc_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct), &(h->errinfo_pool_dhandle)); if ((h->cmd_pool_bits == NULL) || (h->cmd_pool == NULL) || (h->errinfo_pool == NULL)) { dev_err(&h->pdev->dev, "out of memory"); return -ENOMEM; } return 0; } static int cciss_allocate_scatterlists(ctlr_info_t *h) { int i; /* zero it, so that on free we need not know how many were alloc'ed */ h->scatter_list = kzalloc(h->max_commands * sizeof(struct scatterlist *), GFP_KERNEL); if (!h->scatter_list) return -ENOMEM; for (i = 0; i < h->nr_cmds; i++) { h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) * h->maxsgentries, GFP_KERNEL); if (h->scatter_list[i] == NULL) { dev_err(&h->pdev->dev, "could not allocate " "s/g lists\n"); return -ENOMEM; } } return 0; } static void cciss_free_scatterlists(ctlr_info_t *h) { int i; if (h->scatter_list) { for (i = 0; i < h->nr_cmds; i++) kfree(h->scatter_list[i]); kfree(h->scatter_list); } } static void cciss_free_cmd_pool(ctlr_info_t *h) { kfree(h->cmd_pool_bits); if (h->cmd_pool) pci_free_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct), h->cmd_pool, h->cmd_pool_dhandle); if (h->errinfo_pool) pci_free_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct), h->errinfo_pool, h->errinfo_pool_dhandle); } static int cciss_request_irq(ctlr_info_t *h, irqreturn_t (*msixhandler)(int, void *), irqreturn_t (*intxhandler)(int, void *)) { if (h->msix_vector || h->msi_vector) { if (!request_irq(h->intr[h->intr_mode], msixhandler, 0, h->devname, h)) return 0; dev_err(&h->pdev->dev, "Unable to get msi irq %d" " for %s\n", h->intr[h->intr_mode], h->devname); return -1; } if 
(!request_irq(h->intr[h->intr_mode], intxhandler, IRQF_SHARED, h->devname, h)) return 0; dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", h->intr[h->intr_mode], h->devname); return -1; } static int cciss_kdump_soft_reset(ctlr_info_t *h) { if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) { dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); return -EIO; } dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); return -1; } dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { dev_warn(&h->pdev->dev, "Board failed to become ready " "after soft reset.\n"); return -1; } return 0; } static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h) { int ctlr = h->ctlr; free_irq(h->intr[h->intr_mode], h); #ifdef CONFIG_PCI_MSI if (h->msix_vector) pci_disable_msix(h->pdev); else if (h->msi_vector) pci_disable_msi(h->pdev); #endif /* CONFIG_PCI_MSI */ cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); cciss_free_scatterlists(h); cciss_free_cmd_pool(h); kfree(h->blockFetchTable); if (h->reply_pool) pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64), h->reply_pool, h->reply_pool_dhandle); if (h->transtable) iounmap(h->transtable); if (h->cfgtable) iounmap(h->cfgtable); if (h->vaddr) iounmap(h->vaddr); unregister_blkdev(h->major, h->devname); cciss_destroy_hba_sysfs_entry(h); pci_release_regions(h->pdev); kfree(h); hba[ctlr] = NULL; } /* * This is it. Find all the controllers and register them. I really hate * stealing all these major device numbers. * returns the number of block devices registered. 
*/ static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int i; int j = 0; int rc; int try_soft_reset = 0; int dac, return_code; InquiryData_struct *inq_buff; ctlr_info_t *h; unsigned long flags; /* * By default the cciss driver is used for all older HP Smart Array * controllers. There are module paramaters that allow a user to * override this behavior and instead use the hpsa SCSI driver. If * this is the case cciss may be loaded first from the kdump initrd * image and cause a kernel panic. So if reset_devices is true and * cciss_allow_hpsa is set just bail. */ if ((reset_devices) && (cciss_allow_hpsa == 1)) return -ENODEV; rc = cciss_init_reset_devices(pdev); if (rc) { if (rc != -ENOTSUPP) return rc; /* If the reset fails in a particular way (it has no way to do * a proper hard reset, so returns -ENOTSUPP) we can try to do * a soft reset once we get the controller configured up to the * point that it can accept a command. */ try_soft_reset = 1; rc = 0; } reinit_after_soft_reset: i = alloc_cciss_hba(pdev); if (i < 0) return -1; h = hba[i]; h->pdev = pdev; h->busy_initializing = 1; h->intr_mode = cciss_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; INIT_LIST_HEAD(&h->cmpQ); INIT_LIST_HEAD(&h->reqQ); mutex_init(&h->busy_shutting_down); if (cciss_pci_init(h) != 0) goto clean_no_release_regions; sprintf(h->devname, "cciss%d", i); h->ctlr = i; if (cciss_tape_cmds < 2) cciss_tape_cmds = 2; if (cciss_tape_cmds > 16) cciss_tape_cmds = 16; init_completion(&h->scan_wait); if (cciss_create_hba_sysfs_entry(h)) goto clean0; /* configure PCI DMA stuff */ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) dac = 1; else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) dac = 0; else { dev_err(&h->pdev->dev, "no suitable DMA available\n"); goto clean1; } /* * register with the major number, or get a dynamic major number * by passing 0 as argument. This is done for greater than * 8 controller support. 
*/ if (i < MAX_CTLR_ORIG) h->major = COMPAQ_CISS_MAJOR + i; rc = register_blkdev(h->major, h->devname); if (rc == -EBUSY || rc == -EINVAL) { dev_err(&h->pdev->dev, "Unable to get major number %d for %s " "on hba %d\n", h->major, h->devname, i); goto clean1; } else { if (i >= MAX_CTLR_ORIG) h->major = rc; } /* make sure the board interrupts are off */ h->access.set_intr_mask(h, CCISS_INTR_OFF); rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx); if (rc) goto clean2; dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", h->devname, pdev->device, pci_name(pdev), h->intr[h->intr_mode], dac ? "" : " not"); if (cciss_allocate_cmd_pool(h)) goto clean4; if (cciss_allocate_scatterlists(h)) goto clean4; h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, h->chainsize, h->nr_cmds); if (!h->cmd_sg_list && h->chainsize > 0) goto clean4; spin_lock_init(&h->lock); /* Initialize the pdev driver private data. have it point to h. */ pci_set_drvdata(pdev, h); /* command and error info recs zeroed out before they are used */ bitmap_zero(h->cmd_pool_bits, h->nr_cmds); h->num_luns = 0; h->highest_lun = -1; for (j = 0; j < CISS_MAX_LUN; j++) { h->drv[j] = NULL; h->gendisk[j] = NULL; } /* At this point, the controller is ready to take commands. * Now, if reset_devices and the hard reset didn't work, try * the soft reset and see if that works. */ if (try_soft_reset) { /* This is kind of gross. We may or may not get a completion * from the soft reset command, and if we do, then the value * from the fifo may or may not be valid. So, we wait 10 secs * after the reset throwing away any completions we get during * that time. Unregister the interrupt handler and register * fake ones to scoop up any residual completions. 
*/ spin_lock_irqsave(&h->lock, flags); h->access.set_intr_mask(h, CCISS_INTR_OFF); spin_unlock_irqrestore(&h->lock, flags); free_irq(h->intr[h->intr_mode], h); rc = cciss_request_irq(h, cciss_msix_discard_completions, cciss_intx_discard_completions); if (rc) { dev_warn(&h->pdev->dev, "Failed to request_irq after " "soft reset.\n"); goto clean4; } rc = cciss_kdump_soft_reset(h); if (rc) { dev_warn(&h->pdev->dev, "Soft reset failed.\n"); goto clean4; } dev_info(&h->pdev->dev, "Board READY.\n"); dev_info(&h->pdev->dev, "Waiting for stale completions to drain.\n"); h->access.set_intr_mask(h, CCISS_INTR_ON); msleep(10000); h->access.set_intr_mask(h, CCISS_INTR_OFF); rc = controller_reset_failed(h->cfgtable); if (rc) dev_info(&h->pdev->dev, "Soft reset appears to have failed.\n"); /* since the controller's reset, we have to go back and re-init * everything. Easiest to just forget what we've done and do it * all over again. */ cciss_undo_allocations_after_kdump_soft_reset(h); try_soft_reset = 0; if (rc) /* don't go to clean4, we already unallocated */ return -ENODEV; goto reinit_after_soft_reset; } cciss_scsi_setup(h); /* Turn the interrupts on so we can service requests */ h->access.set_intr_mask(h, CCISS_INTR_ON); /* Get the firmware version */ inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); if (inq_buff == NULL) { dev_err(&h->pdev->dev, "out of memory\n"); goto clean4; } return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); if (return_code == IO_OK) { h->firm_ver[0] = inq_buff->data_byte[32]; h->firm_ver[1] = inq_buff->data_byte[33]; h->firm_ver[2] = inq_buff->data_byte[34]; h->firm_ver[3] = inq_buff->data_byte[35]; } else { /* send command failed */ dev_warn(&h->pdev->dev, "unable to determine firmware" " version of controller\n"); } kfree(inq_buff); cciss_procinit(h); h->cciss_max_sectors = 8192; rebuild_lun_table(h, 1, 0); cciss_engage_scsi(h); h->busy_initializing = 0; return 1; clean4: 
cciss_free_cmd_pool(h); cciss_free_scatterlists(h); cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); free_irq(h->intr[h->intr_mode], h); clean2: unregister_blkdev(h->major, h->devname); clean1: cciss_destroy_hba_sysfs_entry(h); clean0: pci_release_regions(pdev); clean_no_release_regions: h->busy_initializing = 0; /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ pci_set_drvdata(pdev, NULL); free_hba(h); return -1; } static void cciss_shutdown(struct pci_dev *pdev) { ctlr_info_t *h; char *flush_buf; int return_code; h = pci_get_drvdata(pdev); flush_buf = kzalloc(4, GFP_KERNEL); if (!flush_buf) { dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n"); return; } /* write all data in the battery backed cache to disk */ return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf, 4, 0, CTLR_LUNID, TYPE_CMD); kfree(flush_buf); if (return_code != IO_OK) dev_warn(&h->pdev->dev, "Error flushing cache\n"); h->access.set_intr_mask(h, CCISS_INTR_OFF); free_irq(h->intr[h->intr_mode], h); } static int cciss_enter_simple_mode(struct ctlr_info *h) { u32 trans_support; trans_support = readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & SIMPLE_MODE)) return -ENOTSUPP; h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); cciss_wait_for_mode_change_ack(h); print_cfg_table(h); if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); return -ENODEV; } h->transMethod = CFGTBL_Trans_Simple; return 0; } static void cciss_remove_one(struct pci_dev *pdev) { ctlr_info_t *h; int i, j; if (pci_get_drvdata(pdev) == NULL) { dev_err(&pdev->dev, "Unable to remove device\n"); return; } h = pci_get_drvdata(pdev); i = h->ctlr; if (hba[i] == NULL) { dev_err(&pdev->dev, "device appears to 
already be removed\n"); return; } mutex_lock(&h->busy_shutting_down); remove_from_scan_list(h); remove_proc_entry(h->devname, proc_cciss); unregister_blkdev(h->major, h->devname); /* remove it from the disk list */ for (j = 0; j < CISS_MAX_LUN; j++) { struct gendisk *disk = h->gendisk[j]; if (disk) { struct request_queue *q = disk->queue; if (disk->flags & GENHD_FL_UP) { cciss_destroy_ld_sysfs_entry(h, j, 1); del_gendisk(disk); } if (q) blk_cleanup_queue(q); } } #ifdef CONFIG_CISS_SCSI_TAPE cciss_unregister_scsi(h); /* unhook from SCSI subsystem */ #endif cciss_shutdown(pdev); #ifdef CONFIG_PCI_MSI if (h->msix_vector) pci_disable_msix(h->pdev); else if (h->msi_vector) pci_disable_msi(h->pdev); #endif /* CONFIG_PCI_MSI */ iounmap(h->transtable); iounmap(h->cfgtable); iounmap(h->vaddr); cciss_free_cmd_pool(h); /* Free up sg elements */ for (j = 0; j < h->nr_cmds; j++) kfree(h->scatter_list[j]); kfree(h->scatter_list); cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); kfree(h->blockFetchTable); if (h->reply_pool) pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64), h->reply_pool, h->reply_pool_dhandle); /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); cciss_destroy_hba_sysfs_entry(h); mutex_unlock(&h->busy_shutting_down); free_hba(h); } static struct pci_driver cciss_pci_driver = { .name = "cciss", .probe = cciss_init_one, .remove = cciss_remove_one, .id_table = cciss_pci_device_id, /* id_table */ .shutdown = cciss_shutdown, }; /* * This is it. Register the PCI driver information for the cards we control * the OS will call our registered routines when it finds one of our cards. */ static int __init cciss_init(void) { int err; /* * The hardware requires that commands are aligned on a 64-bit * boundary. 
Given that we use pci_alloc_consistent() to allocate an * array of them, the size must be a multiple of 8 bytes. */ BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT); printk(KERN_INFO DRIVER_NAME "\n"); err = bus_register(&cciss_bus_type); if (err) return err; /* Start the scan thread */ cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan"); if (IS_ERR(cciss_scan_thread)) { err = PTR_ERR(cciss_scan_thread); goto err_bus_unregister; } /* Register for our PCI devices */ err = pci_register_driver(&cciss_pci_driver); if (err) goto err_thread_stop; return err; err_thread_stop: kthread_stop(cciss_scan_thread); err_bus_unregister: bus_unregister(&cciss_bus_type); return err; } static void __exit cciss_cleanup(void) { int i; pci_unregister_driver(&cciss_pci_driver); /* double check that all controller entrys have been removed */ for (i = 0; i < MAX_CTLR; i++) { if (hba[i] != NULL) { dev_warn(&hba[i]->pdev->dev, "had to remove controller\n"); cciss_remove_one(hba[i]->pdev); } } kthread_stop(cciss_scan_thread); if (proc_cciss) remove_proc_entry("driver/cciss", NULL); bus_unregister(&cciss_bus_type); } module_init(cciss_init); module_exit(cciss_cleanup);
gpl-2.0
Victor-android/kernel_huawei_u8800pro
drivers/staging/comedi/drivers/ni_daq_dio24.c
761
14120
/* comedi/drivers/ni_daq_dio24.c Driver for National Instruments PCMCIA DAQ-Card DIO-24 Copyright (C) 2002 Daniel Vecino Castel <dvecino@able.es> PCMCIA crap at end of file is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13 from the pcmcia package. The initial developer of the pcmcia dummy_cs.c code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: ni_daq_dio24 Description: National Instruments PCMCIA DAQ-Card DIO-24 Author: Daniel Vecino Castel <dvecino@able.es> Devices: [National Instruments] PCMCIA DAQ-Card DIO-24 (ni_daq_dio24) Status: ? Updated: Thu, 07 Nov 2002 21:53:06 -0800 This is just a wrapper around the 8255.o driver to properly handle the PCMCIA interface. 
*/ /* #define LABPC_DEBUG *//* enable debugging messages */ #undef LABPC_DEBUG #include <linux/interrupt.h> #include <linux/slab.h> #include "../comedidev.h" #include <linux/ioport.h> #include "8255.h" #include <pcmcia/cs_types.h> #include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> static struct pcmcia_device *pcmcia_cur_dev = NULL; #define DIO24_SIZE 4 /* size of io region used by board */ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int dio24_detach(struct comedi_device *dev); enum dio24_bustype { pcmcia_bustype }; struct dio24_board_struct { const char *name; int device_id; /* device id for pcmcia board */ enum dio24_bustype bustype; /* PCMCIA */ int have_dio; /* have 8255 chip */ /* function pointers so we can use inb/outb or readb/writeb as appropriate */ unsigned int (*read_byte) (unsigned int address); void (*write_byte) (unsigned int byte, unsigned int address); }; static const struct dio24_board_struct dio24_boards[] = { { .name = "daqcard-dio24", .device_id = 0x475c, /* 0x10b is manufacturer id, 0x475c is device id */ .bustype = pcmcia_bustype, .have_dio = 1, }, { .name = "ni_daq_dio24", .device_id = 0x475c, /* 0x10b is manufacturer id, 0x475c is device id */ .bustype = pcmcia_bustype, .have_dio = 1, }, }; /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct dio24_board_struct *)dev->board_ptr) struct dio24_private { int data; /* number of data points left to be taken */ }; #define devpriv ((struct dio24_private *)dev->private) static struct comedi_driver driver_dio24 = { .driver_name = "ni_daq_dio24", .module = THIS_MODULE, .attach = dio24_attach, .detach = dio24_detach, .num_names = ARRAY_SIZE(dio24_boards), .board_name = &dio24_boards[0].name, .offset = sizeof(struct dio24_board_struct), }; static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; unsigned long 
iobase = 0; #ifdef incomplete unsigned int irq = 0; #endif struct pcmcia_device *link; /* allocate and initialize dev->private */ if (alloc_private(dev, sizeof(struct dio24_private)) < 0) return -ENOMEM; /* get base address, irq etc. based on bustype */ switch (thisboard->bustype) { case pcmcia_bustype: link = pcmcia_cur_dev; /* XXX hack */ if (!link) return -EIO; iobase = link->io.BasePort1; #ifdef incomplete irq = link->irq; #endif break; default: printk("bug! couldn't determine board type\n"); return -EINVAL; break; } printk("comedi%d: ni_daq_dio24: %s, io 0x%lx", dev->minor, thisboard->name, iobase); #ifdef incomplete if (irq) { printk(", irq %u", irq); } #endif printk("\n"); if (iobase == 0) { printk("io base address is zero!\n"); return -EINVAL; } dev->iobase = iobase; #ifdef incomplete /* grab our IRQ */ dev->irq = irq; #endif dev->board_name = thisboard->name; if (alloc_subdevices(dev, 1) < 0) return -ENOMEM; /* 8255 dio */ s = dev->subdevices + 0; subdev_8255_init(dev, s, NULL, dev->iobase); return 0; }; static int dio24_detach(struct comedi_device *dev) { printk("comedi%d: ni_daq_dio24: remove\n", dev->minor); if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 0); if (thisboard->bustype != pcmcia_bustype && dev->iobase) release_region(dev->iobase, DIO24_SIZE); if (dev->irq) free_irq(dev->irq, dev); return 0; }; /* PCMCIA crap -- watch your words! */ static void dio24_config(struct pcmcia_device *link); static void dio24_release(struct pcmcia_device *link); static int dio24_cs_suspend(struct pcmcia_device *p_dev); static int dio24_cs_resume(struct pcmcia_device *p_dev); /* The attach() and detach() entry points are used to create and destroy "instances" of the driver, where each instance represents everything needed to manage one actual PCMCIA card. 
*/ static int dio24_cs_attach(struct pcmcia_device *); static void dio24_cs_detach(struct pcmcia_device *); /* You'll also need to prototype all the functions that will actually be used to talk to your device. See 'memory_cs' for a good example of a fully self-sufficient driver; the other drivers rely more or less on other parts of the kernel. */ /* The dev_info variable is the "key" that is used to match up this device driver with appropriate cards, through the card configuration database. */ static const dev_info_t dev_info = "ni_daq_dio24"; struct local_info_t { struct pcmcia_device *link; int stop; struct bus_operations *bus; }; /*====================================================================== dio24_cs_attach() creates an "instance" of the driver, allocating local data structures for one device. The device is registered with Card Services. The dev_link structure is initialized, but we don't actually configure the card at this point -- we wait until we receive a card insertion event. ======================================================================*/ static int dio24_cs_attach(struct pcmcia_device *link) { struct local_info_t *local; printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - CS-attach!\n"); dev_dbg(&link->dev, "dio24_cs_attach()\n"); /* Allocate space for private device-specific data */ local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); if (!local) return -ENOMEM; local->link = link; link->priv = local; /* General socket configuration defaults can go here. In this client, we assume very little, and rely on the CIS for almost everything. In most clients, many details (i.e., number, sizes, and attributes of IO windows) are fixed by the nature of the device, and can be hard-wired here. */ link->conf.Attributes = 0; link->conf.IntType = INT_MEMORY_AND_IO; pcmcia_cur_dev = link; dio24_config(link); return 0; } /* dio24_cs_attach */ /*====================================================================== This deletes a driver "instance". 
The device is de-registered with Card Services. If it has been released, all local data structures are freed. Otherwise, the structures will be freed when the device is released. ======================================================================*/ static void dio24_cs_detach(struct pcmcia_device *link) { printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - cs-detach!\n"); dev_dbg(&link->dev, "dio24_cs_detach\n"); ((struct local_info_t *)link->priv)->stop = 1; dio24_release(link); /* This points to the parent local_info_t struct */ if (link->priv) kfree(link->priv); } /* dio24_cs_detach */ /*====================================================================== dio24_config() is scheduled to run after a CARD_INSERTION event is received, to configure the PCMCIA socket, and to make the device available to the system. ======================================================================*/ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev, cistpl_cftable_entry_t *cfg, cistpl_cftable_entry_t *dflt, unsigned int vcc, void *priv_data) { win_req_t *req = priv_data; memreq_t map; if (cfg->index == 0) return -ENODEV; /* Does this card need audio output? */ if (cfg->flags & CISTPL_CFTABLE_AUDIO) { p_dev->conf.Attributes |= CONF_ENABLE_SPKR; p_dev->conf.Status = CCSR_AUDIO_ENA; } /* Do we need to allocate an interrupt? */ p_dev->conf.Attributes |= CONF_ENABLE_IRQ; /* IO window settings */ p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; if (!(io->flags & CISTPL_IO_8BIT)) p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16; if (!(io->flags & CISTPL_IO_16BIT)) p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK; p_dev->io.BasePort1 = io->win[0].base; p_dev->io.NumPorts1 = io->win[0].len; if (io->nwin > 1) { p_dev->io.Attributes2 = p_dev->io.Attributes1; p_dev->io.BasePort2 = io->win[1].base; p_dev->io.NumPorts2 = io->win[1].len; } /* This reserves IO space but doesn't actually enable it */ if (pcmcia_request_io(p_dev, &p_dev->io) != 0) return -ENODEV; } if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) { cistpl_mem_t *mem = (cfg->mem.nwin) ? &cfg->mem : &dflt->mem; req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM; req->Attributes |= WIN_ENABLE; req->Base = mem->win[0].host_addr; req->Size = mem->win[0].len; if (req->Size < 0x1000) req->Size = 0x1000; req->AccessSpeed = 0; if (pcmcia_request_window(p_dev, req, &p_dev->win)) return -ENODEV; map.Page = 0; map.CardOffset = mem->win[0].card_addr; if (pcmcia_map_mem_page(p_dev, p_dev->win, &map)) return -ENODEV; } /* If we got this far, we're cool! */ return 0; } static void dio24_config(struct pcmcia_device *link) { int ret; win_req_t req; printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO! - config\n"); dev_dbg(&link->dev, "dio24_config\n"); ret = pcmcia_loop_config(link, dio24_pcmcia_config_loop, &req); if (ret) { dev_warn(&link->dev, "no configuration found\n"); goto failed; } if (!link->irq) goto failed; /* This actually configures the PCMCIA socket -- setting up the I/O windows and the interrupt mapping, and putting the card and host interface into "Memory and IO" mode. 
*/ ret = pcmcia_request_configuration(link, &link->conf); if (ret) goto failed; /* Finally, report what we've done */ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex); if (link->conf.Attributes & CONF_ENABLE_IRQ) printk(", irq %d", link->irq); if (link->io.NumPorts1) printk(", io 0x%04x-0x%04x", link->io.BasePort1, link->io.BasePort1 + link->io.NumPorts1 - 1); if (link->io.NumPorts2) printk(" & 0x%04x-0x%04x", link->io.BasePort2, link->io.BasePort2 + link->io.NumPorts2 - 1); if (link->win) printk(", mem 0x%06lx-0x%06lx", req.Base, req.Base + req.Size - 1); printk("\n"); return; failed: printk(KERN_INFO "Fallo"); dio24_release(link); } /* dio24_config */ static void dio24_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "dio24_release\n"); pcmcia_disable_device(link); } /* dio24_release */ /*====================================================================== The card status event handler. Mostly, this schedules other stuff to run after an event is received. When a CARD_REMOVAL event is received, we immediately set a private flag to block future accesses to this device. All the functions that actually access the device should check this flag to make sure the card is still present. ======================================================================*/ static int dio24_cs_suspend(struct pcmcia_device *link) { struct local_info_t *local = link->priv; /* Mark the device as stopped, to block IO until later */ local->stop = 1; return 0; } /* dio24_cs_suspend */ static int dio24_cs_resume(struct pcmcia_device *link) { struct local_info_t *local = link->priv; local->stop = 0; return 0; } /* dio24_cs_resume */ /*====================================================================*/ static struct pcmcia_device_id dio24_cs_ids[] = { /* N.B. 
These IDs should match those in dio24_boards */ PCMCIA_DEVICE_MANF_CARD(0x010b, 0x475c), /* daqcard-dio24 */ PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, dio24_cs_ids); MODULE_AUTHOR("Daniel Vecino Castel <dvecino@able.es>"); MODULE_DESCRIPTION("Comedi driver for National Instruments " "PCMCIA DAQ-Card DIO-24"); MODULE_LICENSE("GPL"); struct pcmcia_driver dio24_cs_driver = { .probe = dio24_cs_attach, .remove = dio24_cs_detach, .suspend = dio24_cs_suspend, .resume = dio24_cs_resume, .id_table = dio24_cs_ids, .owner = THIS_MODULE, .drv = { .name = dev_info, }, }; static int __init init_dio24_cs(void) { printk("ni_daq_dio24: HOLA SOY YO!\n"); pcmcia_register_driver(&dio24_cs_driver); return 0; } static void __exit exit_dio24_cs(void) { pcmcia_unregister_driver(&dio24_cs_driver); } int __init init_module(void) { int ret; ret = init_dio24_cs(); if (ret < 0) return ret; return comedi_driver_register(&driver_dio24); } void __exit cleanup_module(void) { exit_dio24_cs(); comedi_driver_unregister(&driver_dio24); }
gpl-2.0
sonicxml/Spectrum
net/8021q/vlan.c
761
18515
/* * INET 802.1Q VLAN * Ethernet-type device handling. * * Authors: Ben Greear <greearb@candelatech.com> * Please send support related email to: netdev@vger.kernel.org * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html * * Fixes: * Fix for packet capture - Nick Eggleston <nick@dccinc.com>; * Add HW acceleration hooks - David S. Miller <davem@redhat.com>; * Correct all the locking - David S. Miller <davem@redhat.com>; * Use hash table for VLAN groups - David S. Miller <davem@redhat.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/capability.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/rculist.h> #include <net/p8022.h> #include <net/arp.h> #include <linux/rtnetlink.h> #include <linux/notifier.h> #include <net/rtnetlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <asm/uaccess.h> #include <linux/if_vlan.h> #include "vlan.h" #include "vlanproc.h" #define DRV_VERSION "1.8" /* Global VLAN variables */ int vlan_net_id __read_mostly; /* Our listing of VLAN group(s) */ static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE]; const char vlan_fullname[] = "802.1Q VLAN Support"; const char vlan_version[] = DRV_VERSION; static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>"; static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>"; static struct packet_type vlan_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_8021Q), .func = vlan_skb_recv, /* VLAN receive method */ }; /* End of global variables definitions. 
*/ static inline unsigned int vlan_grp_hashfn(unsigned int idx) { return ((idx >> VLAN_GRP_HASH_SHIFT) ^ idx) & VLAN_GRP_HASH_MASK; } /* Must be invoked with RCU read lock (no preempt) */ static struct vlan_group *__vlan_find_group(struct net_device *real_dev) { struct vlan_group *grp; struct hlist_node *n; int hash = vlan_grp_hashfn(real_dev->ifindex); hlist_for_each_entry_rcu(grp, n, &vlan_group_hash[hash], hlist) { if (grp->real_dev == real_dev) return grp; } return NULL; } /* Find the protocol handler. Assumes VID < VLAN_VID_MASK. * * Must be invoked with RCU read lock (no preempt) */ struct net_device *__find_vlan_dev(struct net_device *real_dev, u16 vlan_id) { struct vlan_group *grp = __vlan_find_group(real_dev); if (grp) return vlan_group_get_device(grp, vlan_id); return NULL; } static void vlan_group_free(struct vlan_group *grp) { int i; for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) kfree(grp->vlan_devices_arrays[i]); kfree(grp); } static struct vlan_group *vlan_group_alloc(struct net_device *real_dev) { struct vlan_group *grp; grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL); if (!grp) return NULL; grp->real_dev = real_dev; hlist_add_head_rcu(&grp->hlist, &vlan_group_hash[vlan_grp_hashfn(real_dev->ifindex)]); return grp; } static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id) { struct net_device **array; unsigned int size; ASSERT_RTNL(); array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; if (array != NULL) return 0; size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN; array = kzalloc(size, GFP_KERNEL); if (array == NULL) return -ENOBUFS; vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN] = array; return 0; } static void vlan_rcu_free(struct rcu_head *rcu) { vlan_group_free(container_of(rcu, struct vlan_group, rcu)); } void unregister_vlan_dev(struct net_device *dev, struct list_head *head) { struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; const 
struct net_device_ops *ops = real_dev->netdev_ops; struct vlan_group *grp; u16 vlan_id = vlan->vlan_id; ASSERT_RTNL(); grp = __vlan_find_group(real_dev); BUG_ON(!grp); /* Take it out of our own structures, but be sure to interlock with * HW accelerating devices or SW vlan input packet processing. */ if (real_dev->features & NETIF_F_HW_VLAN_FILTER) ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id); grp->nr_vlans--; vlan_group_set_device(grp, vlan_id, NULL); if (!grp->killall) synchronize_net(); unregister_netdevice_queue(dev, head); /* If the group is now empty, kill off the group. */ if (grp->nr_vlans == 0) { vlan_gvrp_uninit_applicant(real_dev); if (real_dev->features & NETIF_F_HW_VLAN_RX) ops->ndo_vlan_rx_register(real_dev, NULL); hlist_del_rcu(&grp->hlist); /* Free the group, after all cpu's are done. */ call_rcu(&grp->rcu, vlan_rcu_free); } /* Get rid of the vlan's reference to real_dev */ dev_put(real_dev); } int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id) { const char *name = real_dev->name; const struct net_device_ops *ops = real_dev->netdev_ops; if (real_dev->features & NETIF_F_VLAN_CHALLENGED) { pr_info("8021q: VLANs not supported on %s\n", name); return -EOPNOTSUPP; } if ((real_dev->features & NETIF_F_HW_VLAN_RX) && !ops->ndo_vlan_rx_register) { pr_info("8021q: device %s has buggy VLAN hw accel\n", name); return -EOPNOTSUPP; } if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) && (!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) { pr_info("8021q: Device %s has buggy VLAN hw accel\n", name); return -EOPNOTSUPP; } if (__find_vlan_dev(real_dev, vlan_id) != NULL) return -EEXIST; return 0; } int register_vlan_dev(struct net_device *dev) { struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; u16 vlan_id = vlan->vlan_id; struct vlan_group *grp, *ngrp = NULL; int err; grp = __vlan_find_group(real_dev); if (!grp) { ngrp = grp = 
vlan_group_alloc(real_dev); if (!grp) return -ENOBUFS; err = vlan_gvrp_init_applicant(real_dev); if (err < 0) goto out_free_group; } err = vlan_group_prealloc_vid(grp, vlan_id); if (err < 0) goto out_uninit_applicant; err = register_netdevice(dev); if (err < 0) goto out_uninit_applicant; /* Account for reference in struct vlan_dev_info */ dev_hold(real_dev); netif_stacked_transfer_operstate(real_dev, dev); linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */ /* So, got the sucker initialized, now lets place * it into our local structure. */ vlan_group_set_device(grp, vlan_id, dev); grp->nr_vlans++; if (ngrp && real_dev->features & NETIF_F_HW_VLAN_RX) ops->ndo_vlan_rx_register(real_dev, ngrp); if (real_dev->features & NETIF_F_HW_VLAN_FILTER) ops->ndo_vlan_rx_add_vid(real_dev, vlan_id); return 0; out_uninit_applicant: if (ngrp) vlan_gvrp_uninit_applicant(real_dev); out_free_group: if (ngrp) { hlist_del_rcu(&ngrp->hlist); /* Free the group, after all cpu's are done. */ call_rcu(&ngrp->rcu, vlan_rcu_free); } return err; } /* Attach a VLAN device to a mac address (ie Ethernet Card). * Returns 0 if the device was created or a negative error code otherwise. */ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) { struct net_device *new_dev; struct net *net = dev_net(real_dev); struct vlan_net *vn = net_generic(net, vlan_net_id); char name[IFNAMSIZ]; int err; if (vlan_id >= VLAN_VID_MASK) return -ERANGE; err = vlan_check_real_dev(real_dev, vlan_id); if (err < 0) return err; /* Gotta set up the fields for the device. */ switch (vn->name_type) { case VLAN_NAME_TYPE_RAW_PLUS_VID: /* name will look like: eth1.0005 */ snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id); break; case VLAN_NAME_TYPE_PLUS_VID_NO_PAD: /* Put our vlan.VID in the name. * Name will look like: vlan5 */ snprintf(name, IFNAMSIZ, "vlan%i", vlan_id); break; case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD: /* Put our vlan.VID in the name. 
* Name will look like: eth0.5 */ snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id); break; case VLAN_NAME_TYPE_PLUS_VID: /* Put our vlan.VID in the name. * Name will look like: vlan0005 */ default: snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id); } new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name, vlan_setup, real_dev->num_tx_queues); if (new_dev == NULL) return -ENOBUFS; new_dev->real_num_tx_queues = real_dev->real_num_tx_queues; dev_net_set(new_dev, net); /* need 4 bytes for extra VLAN header info, * hope the underlying device can handle it. */ new_dev->mtu = real_dev->mtu; vlan_dev_info(new_dev)->vlan_id = vlan_id; vlan_dev_info(new_dev)->real_dev = real_dev; vlan_dev_info(new_dev)->dent = NULL; vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR; new_dev->rtnl_link_ops = &vlan_link_ops; err = register_vlan_dev(new_dev); if (err < 0) goto out_free_newdev; return 0; out_free_newdev: free_netdev(new_dev); return err; } static void vlan_sync_address(struct net_device *dev, struct net_device *vlandev) { struct vlan_dev_info *vlan = vlan_dev_info(vlandev); /* May be called without an actual change */ if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr)) return; /* vlan address was different from the old address and is equal to * the new address */ if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) dev_uc_del(dev, vlandev->dev_addr); /* vlan address was equal to the old address and is different from * the new address */ if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) dev_uc_add(dev, vlandev->dev_addr); memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); } static void vlan_transfer_features(struct net_device *dev, struct net_device *vlandev) { unsigned long old_features = vlandev->features; vlandev->features &= ~dev->vlan_features; vlandev->features |= dev->features & dev->vlan_features; 
vlandev->gso_max_size = dev->gso_max_size; #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; #endif vlandev->real_num_tx_queues = dev->real_num_tx_queues; BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues); if (old_features != vlandev->features) netdev_features_change(vlandev); } static void __vlan_device_event(struct net_device *dev, unsigned long event) { switch (event) { case NETDEV_CHANGENAME: vlan_proc_rem_dev(dev); if (vlan_proc_add_dev(dev) < 0) pr_warning("8021q: failed to change proc name for %s\n", dev->name); break; case NETDEV_REGISTER: if (vlan_proc_add_dev(dev) < 0) pr_warning("8021q: failed to add proc entry for %s\n", dev->name); break; case NETDEV_UNREGISTER: vlan_proc_rem_dev(dev); break; } } static int vlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct vlan_group *grp; int i, flgs; struct net_device *vlandev; struct vlan_dev_info *vlan; LIST_HEAD(list); if (is_vlan_dev(dev)) __vlan_device_event(dev, event); grp = __vlan_find_group(dev); if (!grp) goto out; /* It is OK that we do not hold the group lock right now, * as we run under the RTNL lock. 
*/ switch (event) { case NETDEV_CHANGE: /* Propagate real device state to vlan devices */ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { vlandev = vlan_group_get_device(grp, i); if (!vlandev) continue; netif_stacked_transfer_operstate(dev, vlandev); } break; case NETDEV_CHANGEADDR: /* Adjust unicast filters on underlying device */ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { vlandev = vlan_group_get_device(grp, i); if (!vlandev) continue; flgs = vlandev->flags; if (!(flgs & IFF_UP)) continue; vlan_sync_address(dev, vlandev); } break; case NETDEV_CHANGEMTU: for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { vlandev = vlan_group_get_device(grp, i); if (!vlandev) continue; if (vlandev->mtu <= dev->mtu) continue; dev_set_mtu(vlandev, dev->mtu); } break; case NETDEV_FEAT_CHANGE: /* Propagate device features to underlying device */ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { vlandev = vlan_group_get_device(grp, i); if (!vlandev) continue; vlan_transfer_features(dev, vlandev); } break; case NETDEV_DOWN: /* Put all VLANs for this dev in the down state too. */ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { vlandev = vlan_group_get_device(grp, i); if (!vlandev) continue; flgs = vlandev->flags; if (!(flgs & IFF_UP)) continue; vlan = vlan_dev_info(vlandev); if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) dev_change_flags(vlandev, flgs & ~IFF_UP); netif_stacked_transfer_operstate(dev, vlandev); } break; case NETDEV_UP: /* Put all VLANs for this dev in the up state too. */ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { vlandev = vlan_group_get_device(grp, i); if (!vlandev) continue; flgs = vlandev->flags; if (flgs & IFF_UP) continue; vlan = vlan_dev_info(vlandev); if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) dev_change_flags(vlandev, flgs | IFF_UP); netif_stacked_transfer_operstate(dev, vlandev); } break; case NETDEV_UNREGISTER: /* Delete all VLANs for this dev. 
*/ grp->killall = 1; for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { vlandev = vlan_group_get_device(grp, i); if (!vlandev) continue; /* unregistration of last vlan destroys group, abort * afterwards */ if (grp->nr_vlans == 1) i = VLAN_GROUP_ARRAY_LEN; unregister_vlan_dev(vlandev, &list); } unregister_netdevice_many(&list); break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid underlaying device to change its type. */ return NOTIFY_BAD; } out: return NOTIFY_DONE; } static struct notifier_block vlan_notifier_block __read_mostly = { .notifier_call = vlan_device_event, }; /* * VLAN IOCTL handler. * o execute requested action or pass command to the device driver * arg is really a struct vlan_ioctl_args __user *. */ static int vlan_ioctl_handler(struct net *net, void __user *arg) { int err; struct vlan_ioctl_args args; struct net_device *dev = NULL; if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args))) return -EFAULT; /* Null terminate this sucker, just in case. */ args.device1[23] = 0; args.u.device2[23] = 0; rtnl_lock(); switch (args.cmd) { case SET_VLAN_INGRESS_PRIORITY_CMD: case SET_VLAN_EGRESS_PRIORITY_CMD: case SET_VLAN_FLAG_CMD: case ADD_VLAN_CMD: case DEL_VLAN_CMD: case GET_VLAN_REALDEV_NAME_CMD: case GET_VLAN_VID_CMD: err = -ENODEV; dev = __dev_get_by_name(net, args.device1); if (!dev) goto out; err = -EINVAL; if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev)) goto out; } switch (args.cmd) { case SET_VLAN_INGRESS_PRIORITY_CMD: err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; vlan_dev_set_ingress_priority(dev, args.u.skb_priority, args.vlan_qos); err = 0; break; case SET_VLAN_EGRESS_PRIORITY_CMD: err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; err = vlan_dev_set_egress_priority(dev, args.u.skb_priority, args.vlan_qos); break; case SET_VLAN_FLAG_CMD: err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; err = vlan_dev_change_flags(dev, args.vlan_qos ? 
args.u.flag : 0, args.u.flag); break; case SET_VLAN_NAME_TYPE_CMD: err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; if ((args.u.name_type >= 0) && (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { struct vlan_net *vn; vn = net_generic(net, vlan_net_id); vn->name_type = args.u.name_type; err = 0; } else { err = -EINVAL; } break; case ADD_VLAN_CMD: err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; err = register_vlan_device(dev, args.u.VID); break; case DEL_VLAN_CMD: err = -EPERM; if (!capable(CAP_NET_ADMIN)) break; unregister_vlan_dev(dev, NULL); err = 0; break; case GET_VLAN_REALDEV_NAME_CMD: err = 0; vlan_dev_get_realdev_name(dev, args.u.device2); if (copy_to_user(arg, &args, sizeof(struct vlan_ioctl_args))) err = -EFAULT; break; case GET_VLAN_VID_CMD: err = 0; args.u.VID = vlan_dev_vlan_id(dev); if (copy_to_user(arg, &args, sizeof(struct vlan_ioctl_args))) err = -EFAULT; break; default: err = -EOPNOTSUPP; break; } out: rtnl_unlock(); return err; } static int __net_init vlan_init_net(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); int err; vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD; err = vlan_proc_init(net); return err; } static void __net_exit vlan_exit_net(struct net *net) { vlan_proc_cleanup(net); } static struct pernet_operations vlan_net_ops = { .init = vlan_init_net, .exit = vlan_exit_net, .id = &vlan_net_id, .size = sizeof(struct vlan_net), }; static int __init vlan_proto_init(void) { int err; pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright); pr_info("All bugs added by %s\n", vlan_buggyright); err = register_pernet_subsys(&vlan_net_ops); if (err < 0) goto err0; err = register_netdevice_notifier(&vlan_notifier_block); if (err < 0) goto err2; err = vlan_gvrp_init(); if (err < 0) goto err3; err = vlan_netlink_init(); if (err < 0) goto err4; dev_add_pack(&vlan_packet_type); vlan_ioctl_set(vlan_ioctl_handler); return 0; err4: vlan_gvrp_uninit(); err3: unregister_netdevice_notifier(&vlan_notifier_block); err2: 
unregister_pernet_subsys(&vlan_net_ops); err0: return err; } static void __exit vlan_cleanup_module(void) { unsigned int i; vlan_ioctl_set(NULL); vlan_netlink_fini(); unregister_netdevice_notifier(&vlan_notifier_block); dev_remove_pack(&vlan_packet_type); /* This table must be empty if there are no module references left. */ for (i = 0; i < VLAN_GRP_HASH_SIZE; i++) BUG_ON(!hlist_empty(&vlan_group_hash[i])); unregister_pernet_subsys(&vlan_net_ops); rcu_barrier(); /* Wait for completion of call_rcu()'s */ vlan_gvrp_uninit(); } module_init(vlan_proto_init); module_exit(vlan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
nickpack/htc-kernel-saga
drivers/s390/net/qeth_core_main.c
761
133991
/* * drivers/s390/net/qeth_core_main.c * * Copyright IBM Corp. 2007, 2009 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, * Frank Pavlic <fpavlic@de.ibm.com>, * Thomas Spatzier <tspat@de.ibm.com>, * Frank Blaschka <frank.blaschka@de.ibm.com> */ #define KMSG_COMPONENT "qeth" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/mii.h> #include <linux/kthread.h> #include <linux/slab.h> #include <asm/ebcdic.h> #include <asm/io.h> #include "qeth_core.h" struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ /* N P A M L V H */ [QETH_DBF_SETUP] = {"qeth_setup", 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, [QETH_DBF_QERR] = {"qeth_qerr", 2, 1, 8, 2, &debug_hex_ascii_view, NULL}, [QETH_DBF_TRACE] = {"qeth_trace", 4, 1, 8, 3, &debug_hex_ascii_view, NULL}, [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 128, 3, &debug_sprintf_view, NULL}, [QETH_DBF_SENSE] = {"qeth_sense", 2, 1, 64, 2, &debug_hex_ascii_view, NULL}, [QETH_DBF_MISC] = {"qeth_misc", 2, 1, 256, 2, &debug_hex_ascii_view, NULL}, [QETH_DBF_CTRL] = {"qeth_control", 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, }; EXPORT_SYMBOL_GPL(qeth_dbf); struct qeth_card_list_struct qeth_core_card_list; EXPORT_SYMBOL_GPL(qeth_core_card_list); struct kmem_cache *qeth_core_header_cache; EXPORT_SYMBOL_GPL(qeth_core_header_cache); static struct device *qeth_core_root_dev; static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY; static struct lock_class_key qdio_out_skb_queue_key; static void qeth_send_control_data_cb(struct qeth_channel *, struct qeth_cmd_buffer *); static int qeth_issue_next_read(struct qeth_card *); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32); static void 
qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); static inline void __qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill) { struct skb_frag_struct *frag; int fragno; unsigned long addr; int element, cnt, dlen; fragno = skb_shinfo(skb)->nr_frags; element = *next_element_to_fill; dlen = 0; if (is_tso) buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG; else buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; dlen = skb->len - skb->data_len; if (dlen) { buffer->element[element].addr = skb->data; buffer->element[element].length = dlen; element++; } for (cnt = 0; cnt < fragno; cnt++) { frag = &skb_shinfo(skb)->frags[cnt]; addr = (page_to_pfn(frag->page) << PAGE_SHIFT) + frag->page_offset; buffer->element[element].addr = (char *)addr; buffer->element[element].length = frag->size; if (cnt < (fragno - 1)) buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG; else buffer->element[element].flags = SBAL_FLAGS_LAST_FRAG; element++; } *next_element_to_fill = element; } static inline const char *qeth_get_cardname(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return " Guest LAN QDIO"; case QETH_CARD_TYPE_IQD: return " Guest LAN Hiper"; case QETH_CARD_TYPE_OSM: return " Guest LAN QDIO - OSM"; case QETH_CARD_TYPE_OSX: return " Guest LAN QDIO - OSX"; default: return " unknown"; } } else { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return " OSD Express"; case QETH_CARD_TYPE_IQD: return " HiperSockets"; case QETH_CARD_TYPE_OSN: return " OSN QDIO"; case QETH_CARD_TYPE_OSM: return " OSM QDIO"; case QETH_CARD_TYPE_OSX: return " OSX QDIO"; default: return " unknown"; } } return " n/a"; } /* max length to be returned: 14 */ const char *qeth_get_cardname_short(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return "GuestLAN QDIO"; case 
QETH_CARD_TYPE_IQD: return "GuestLAN Hiper"; case QETH_CARD_TYPE_OSM: return "GuestLAN OSM"; case QETH_CARD_TYPE_OSX: return "GuestLAN OSX"; default: return "unknown"; } } else { switch (card->info.type) { case QETH_CARD_TYPE_OSD: switch (card->info.link_type) { case QETH_LINK_TYPE_FAST_ETH: return "OSD_100"; case QETH_LINK_TYPE_HSTR: return "HSTR"; case QETH_LINK_TYPE_GBIT_ETH: return "OSD_1000"; case QETH_LINK_TYPE_10GBIT_ETH: return "OSD_10GIG"; case QETH_LINK_TYPE_LANE_ETH100: return "OSD_FE_LANE"; case QETH_LINK_TYPE_LANE_TR: return "OSD_TR_LANE"; case QETH_LINK_TYPE_LANE_ETH1000: return "OSD_GbE_LANE"; case QETH_LINK_TYPE_LANE: return "OSD_ATM_LANE"; default: return "OSD_Express"; } case QETH_CARD_TYPE_IQD: return "HiperSockets"; case QETH_CARD_TYPE_OSN: return "OSN"; case QETH_CARD_TYPE_OSM: return "OSM_1000"; case QETH_CARD_TYPE_OSX: return "OSX_10GIG"; default: return "unknown"; } } return "n/a"; } void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, int clear_start_mask) { unsigned long flags; spin_lock_irqsave(&card->thread_mask_lock, flags); card->thread_allowed_mask = threads; if (clear_start_mask) card->thread_start_mask &= threads; spin_unlock_irqrestore(&card->thread_mask_lock, flags); wake_up(&card->wait_q); } EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); int qeth_threads_running(struct qeth_card *card, unsigned long threads) { unsigned long flags; int rc = 0; spin_lock_irqsave(&card->thread_mask_lock, flags); rc = (card->thread_running_mask & threads); spin_unlock_irqrestore(&card->thread_mask_lock, flags); return rc; } EXPORT_SYMBOL_GPL(qeth_threads_running); int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) { return wait_event_interruptible(card->wait_q, qeth_threads_running(card, threads) == 0); } EXPORT_SYMBOL_GPL(qeth_wait_for_threads); void qeth_clear_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; QETH_DBF_TEXT(TRACE, 5, "clwrklst"); 
list_for_each_entry_safe(pool_entry, tmp, &card->qdio.in_buf_pool.entry_list, list){ list_del(&pool_entry->list); } } EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list); static int qeth_alloc_buffer_pool(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry; void *ptr; int i, j; QETH_DBF_TEXT(TRACE, 5, "alocpool"); for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL); if (!pool_entry) { qeth_free_buffer_pool(card); return -ENOMEM; } for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { ptr = (void *) __get_free_page(GFP_KERNEL); if (!ptr) { while (j > 0) free_page((unsigned long) pool_entry->elements[--j]); kfree(pool_entry); qeth_free_buffer_pool(card); return -ENOMEM; } pool_entry->elements[j] = ptr; } list_add(&pool_entry->init_list, &card->qdio.init_pool.entry_list); } return 0; } int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) { QETH_DBF_TEXT(TRACE, 2, "realcbp"); if ((card->state != CARD_STATE_DOWN) && (card->state != CARD_STATE_RECOVER)) return -EPERM; /* TODO: steel/add buffers from/to a running card's buffer pool (?) 
*/ qeth_clear_working_pool_list(card); qeth_free_buffer_pool(card); card->qdio.in_buf_pool.buf_count = bufcnt; card->qdio.init_pool.buf_count = bufcnt; return qeth_alloc_buffer_pool(card); } EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); static int qeth_issue_next_read(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(TRACE, 5, "issnxrd"); if (card->read.state != CH_STATE_UP) return -EIO; iob = qeth_get_buffer(&card->read); if (!iob) { dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob " "available\n", dev_name(&card->gdev->dev)); return -ENOMEM; } qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); QETH_DBF_TEXT(TRACE, 6, "noirqpnd"); rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, (addr_t) iob, 0, 0); if (rc) { QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " "rc=%i\n", dev_name(&card->gdev->dev), rc); atomic_set(&card->read.irq_pending, 0); qeth_schedule_recovery(card); wake_up(&card->wait_q); } return rc; } static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) { struct qeth_reply *reply; reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); if (reply) { atomic_set(&reply->refcnt, 1); atomic_set(&reply->received, 0); reply->card = card; }; return reply; } static void qeth_get_reply(struct qeth_reply *reply) { WARN_ON(atomic_read(&reply->refcnt) <= 0); atomic_inc(&reply->refcnt); } static void qeth_put_reply(struct qeth_reply *reply) { WARN_ON(atomic_read(&reply->refcnt) <= 0); if (atomic_dec_and_test(&reply->refcnt)) kfree(reply); } static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, struct qeth_card *card) { char *ipa_name; int com = cmd->hdr.command; ipa_name = qeth_get_ipa_cmd_name(com); if (rc) QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n", ipa_name, com, QETH_CARD_IFNAME(card), rc, qeth_get_ipa_msg(rc)); else QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n", 
				ipa_name, com, QETH_CARD_IFNAME(card));
}

/*
 * Inspect an incoming control buffer.  IPA replies are returned to the
 * caller (after logging); unsolicited IPA commands such as
 * STOPLAN/STARTLAN are handled here and NULL is returned.
 */
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_ipa_cmd *cmd = NULL;

	QETH_DBF_TEXT(TRACE, 5, "chkipad");
	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		if (IS_IPA_REPLY(cmd)) {
			if (cmd->hdr.command != IPA_CMD_SETCCID &&
			    cmd->hdr.command != IPA_CMD_DELCCID &&
			    cmd->hdr.command != IPA_CMD_MODCCID &&
			    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
				qeth_issue_ipa_msg(cmd,
						cmd->hdr.return_code, card);
			return cmd;
		} else {
			switch (cmd->hdr.command) {
			case IPA_CMD_STOPLAN:
				/* link went down: drop carrier */
				dev_warn(&card->gdev->dev,
					   "The link for interface %s on CHPID"
					   " 0x%X failed\n",
					   QETH_CARD_IFNAME(card),
					   card->info.chpid);
				card->lan_online = 0;
				if (card->dev && netif_carrier_ok(card->dev))
					netif_carrier_off(card->dev);
				return NULL;
			case IPA_CMD_STARTLAN:
				/* link restored: trigger recovery */
				dev_info(&card->gdev->dev,
					   "The link for %s on CHPID 0x%X has"
					   " been restored\n",
					   QETH_CARD_IFNAME(card),
					   card->info.chpid);
				netif_carrier_on(card->dev);
				card->lan_online = 1;
				qeth_schedule_recovery(card);
				return NULL;
			case IPA_CMD_MODCCID:
				return cmd;
			case IPA_CMD_REGISTER_LOCAL_ADDR:
				QETH_DBF_TEXT(TRACE, 3, "irla");
				break;
			case IPA_CMD_UNREGISTER_LOCAL_ADDR:
				QETH_DBF_TEXT(TRACE, 3, "urla");
				break;
			default:
				QETH_DBF_MESSAGE(2, "Received data is IPA "
					   "but not a reply!\n");
				break;
			}
		}
	}
	return cmd;
}

/*
 * Fail all pending IPA commands with -EIO and wake their waiters;
 * used when the control channel goes bad.
 */
void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_reply *reply, *r;
	unsigned long flags;

	QETH_DBF_TEXT(TRACE, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		qeth_get_reply(reply);
		reply->rc = -EIO;
		atomic_inc(&reply->received);
		list_del_init(&reply->list);
		wake_up(&reply->wait_q);
		qeth_put_reply(reply);
	}
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

/*
 * Check a raw control buffer for an IDX TERMINATE indication.
 * Returns 0 when the buffer is fine, -EPERM for the z/VM OSI-layer
 * misconfiguration case (cause code 0xf6), -EIO otherwise.
 */
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	if (!buffer)
		return 0;

	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & 0xc0) == 0xc0) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE "
			   "with cause code 0x%02x%s\n",
			   buffer[4],
			   ((buffer[4] == 0x22) ?
			    " -- try another portname" : ""));
		QETH_DBF_TEXT(TRACE, 2, "ckidxres");
		QETH_DBF_TEXT(TRACE, 2, " idxterm");
		QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
		if (buffer[4] == 0xf6) {
			dev_err(&card->gdev->dev,
			"The qeth device is not configured "
			"for the OSI layer required by z/VM\n");
			return -EPERM;
		}
		return -EIO;
	}
	return 0;
}

/* Prepare the channel's CCW (read or write template) for len bytes at iob. */
static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
		__u32 len)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(TRACE, 4, "setupccw");
	card = CARD_FROM_CDEV(channel->ccwdev);
	if (channel == &card->read)
		memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	else
		memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = len;
	channel->ccw.cda = (__u32) __pa(iob);
}

/*
 * Grab a free command buffer from the channel's ring, scanning from
 * io_buf_no; returns NULL when all buffers are in use.  Callers hold
 * channel->iob_lock (see qeth_get_buffer below).
 */
static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
	__u8 index;

	QETH_DBF_TEXT(TRACE, 6, "getbuff");
	index = channel->io_buf_no;
	do {
		if (channel->iob[index].state == BUF_STATE_FREE) {
			channel->iob[index].state = BUF_STATE_LOCKED;
			channel->io_buf_no = (channel->io_buf_no + 1) %
				QETH_CMD_BUFFER_NO;
			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
			return channel->iob + index;
		}
		index = (index + 1) % QETH_CMD_BUFFER_NO;
	} while (index != channel->io_buf_no);

	return NULL;
}

/* Return a command buffer to the free state (callback reset to default). */
void qeth_release_buffer(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	unsigned long flags;

	QETH_DBF_TEXT(TRACE, 6, "relbuff");
	spin_lock_irqsave(&channel->iob_lock, flags);
	memset(iob->data, 0, QETH_BUFSIZE);
	iob->state = BUF_STATE_FREE;
	iob->callback = qeth_send_control_data_cb;
	iob->rc = 0;
	spin_unlock_irqrestore(&channel->iob_lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_release_buffer);

/* Locked wrapper around __qeth_get_buffer(). */
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->iob_lock, flags);
	buffer =
		__qeth_get_buffer(channel);
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	return buffer;
}

/* Block (uninterruptibly) until a command buffer becomes available. */
struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer;

	wait_event(channel->wait_q,
		   ((buffer = qeth_get_buffer(channel)) != NULL));
	return buffer;
}
EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);

/* Reset every command buffer of the channel and rewind both ring indices. */
void qeth_clear_cmd_buffers(struct qeth_channel *channel)
{
	int cnt;

	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
		qeth_release_buffer(channel, &channel->iob[cnt]);
	channel->buf_no = 0;
	channel->io_buf_no = 0;
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);

/*
 * Default completion callback for control-data buffers: validate the
 * IDX response, let qeth_check_ipa_data() handle unsolicited IPA data,
 * then match the buffer to a waiting reply (by seqno) and invoke that
 * reply's callback.  Always stores the PDU ack seqno and releases iob.
 */
static void qeth_send_control_data_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	struct qeth_reply *reply, *r;
	struct qeth_ipa_cmd *cmd;
	unsigned long flags;
	int keep_reply;
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 4, "sndctlcb");
	card = CARD_FROM_CDEV(channel->ccwdev);
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		goto out;
	}

	cmd = qeth_check_ipa_data(card, iob);
	if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
		goto out;
	/*in case of OSN : check if cmd is set */
	if (card->info.type == QETH_CARD_TYPE_OSN &&
	    cmd &&
	    cmd->hdr.command != IPA_CMD_STARTLAN &&
	    card->osn_info.assist_cb != NULL) {
		card->osn_info.assist_cb(card->dev, cmd);
		goto out;
	}

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
			qeth_get_reply(reply);
			list_del_init(&reply->list);
			/* drop the lock across the callback invocation */
			spin_unlock_irqrestore(&card->lock, flags);
			keep_reply = 0;
			if (reply->callback != NULL) {
				if (cmd) {
					reply->offset = (__u16)((char *)cmd -
							(char *)iob->data);
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)cmd);
				} else
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)iob);
			}
			if (cmd)
				reply->rc = (u16) cmd->hdr.return_code;
			else if (iob->rc)
				reply->rc = iob->rc;
			if (keep_reply) {
				/* callback expects more data: requeue */
				spin_lock_irqsave(&card->lock, flags);
				list_add_tail(&reply->list,
					      &card->cmd_waiter_list);
				spin_unlock_irqrestore(&card->lock, flags);
			} else {
				atomic_inc(&reply->received);
				wake_up(&reply->wait_q);
			}
			qeth_put_reply(reply);
			goto out;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	qeth_release_buffer(channel, iob);
}

/*
 * Allocate the QETH_CMD_BUFFER_NO DMA-able command buffers of a
 * channel and initialize its lock and waitqueue.  All-or-nothing:
 * frees what was allocated and returns -ENOMEM on failure.
 */
static int qeth_setup_channel(struct qeth_channel *channel)
{
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "setupch");
	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
		channel->iob[cnt].data =
			kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
		if (channel->iob[cnt].data == NULL)
			break;
		channel->iob[cnt].state = BUF_STATE_FREE;
		channel->iob[cnt].channel = channel;
		channel->iob[cnt].callback = qeth_send_control_data_cb;
		channel->iob[cnt].rc = 0;
	}
	if (cnt < QETH_CMD_BUFFER_NO) {
		while (cnt-- > 0)
			kfree(channel->iob[cnt].data);
		return -ENOMEM;
	}
	channel->buf_no = 0;
	channel->io_buf_no = 0;
	atomic_set(&channel->irq_pending, 0);
	spin_lock_init(&channel->iob_lock);
	init_waitqueue_head(&channel->wait_q);
	return 0;
}

/* Request a thread start; -EPERM if not allowed or already pending. */
static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	      (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
		return -EPERM;
	}
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return 0;
}

/* Clear a pending start bit and wake mask waiters. */
void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);

/* Clear a running bit and wake mask waiters. */
void qeth_clear_thread_running_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long
		flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

/*
 * Try to promote a thread from "start requested" to "running".
 * Returns 1 on success, 0 if no start is pending, -EPERM if the
 * thread is not allowed or is already running.
 */
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

/* Wait until the thread may run (rc == 1) or has nothing to do (rc == 0). */
int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);

/* Kick the recovery thread via the kernel-thread starter work item. */
void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_DBF_TEXT(TRACE, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);

/*
 * Classify an interrupt response block: returns 1 when the channel
 * status or sense data indicates a real problem needing recovery,
 * 0 otherwise.
 */
static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
			dev_name(&cdev->dev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return 1;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_DBF_TEXT(TRACE, 2, "REVIND");
			return 1;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_DBF_TEXT(TRACE, 2, "CMDREJi");
			return 1;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_DBF_TEXT(TRACE, 2, "AFFE");
			return 1;
		}
		/* all-zero sense data: treat as harmless */
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_DBF_TEXT(TRACE, 2, "ZEROSEN");
			return 0;
		}
		QETH_DBF_TEXT(TRACE, 2, "DGENCHK");
		return 1;
	}
	return 0;
}

/*
 * Handle an IRB that is an ERR_PTR (delivered by the CCW layer on
 * i/o errors or timeouts).  Returns 0 for a valid IRB, otherwise the
 * encoded error value.
 */
static long __qeth_check_irb_error(struct ccw_device *cdev,
		unsigned long intparm, struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
			dev_name(&cdev->dev));
		QETH_DBF_TEXT(TRACE, 2, "ckirberr");
		QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
		break;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_DBF_TEXT(TRACE, 2, "ckirberr");
		QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT);
		if (intparm == QETH_RCD_PARM) {
			struct qeth_card *card = CARD_FROM_CDEV(cdev);

			/* RCD timed out: mark data channel down */
			if (card && (card->data.ccwdev == cdev)) {
				card->data.state = CH_STATE_DOWN;
				wake_up(&card->wait_q);
			}
		}
		break;
	default:
		QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
			dev_name(&cdev->dev), PTR_ERR(irb));
		QETH_DBF_TEXT(TRACE, 2, "ckirberr");
		QETH_DBF_TEXT(TRACE, 2, " rc???");
	}
	return PTR_ERR(irb);
}

/*
 * Main CCW interrupt handler for all three qeth channels.  Identifies
 * the channel, updates channel state, reports errors and drives the
 * callbacks of completed command buffers.
 */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *buffer;
	struct qeth_channel *channel;
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	__u8 index;

	QETH_DBF_TEXT(TRACE, 5, "irq");

	if (__qeth_check_irb_error(cdev, intparm, irb))
		return;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	card = CARD_FROM_CDEV(cdev);
	if (!card)
		return;

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_DBF_TEXT(TRACE, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_DBF_TEXT(TRACE, 5, "write");
	} else {
		channel = &card->data;
		QETH_DBF_TEXT(TRACE, 5, "data");
	}
	atomic_set(&channel->irq_pending, 0);

	if
	    (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	/*let's wake up immediately on data channel*/
	if ((channel == &card->data) && (intparm != 0) &&
	    (intparm != QETH_RCD_PARM))
		goto out;

	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
		QETH_DBF_TEXT(TRACE, 6, "clrchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if (intparm == QETH_HALT_CHANNEL_PARM) {
		QETH_DBF_TEXT(TRACE, 6, "hltchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
				"0x%X dstat 0x%X\n",
				dev_name(&channel->ccwdev->dev), cstat, dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}
		if (intparm == QETH_RCD_PARM) {
			channel->state = CH_STATE_DOWN;
			goto out;
		}
		rc = qeth_get_problem(cdev, irb);
		if (rc) {
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (intparm == QETH_RCD_PARM) {
		channel->state = CH_STATE_RCD_DONE;
		goto out;
	}
	if (intparm) {
		/* intparm carries the command buffer's physical address */
		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
		buffer->state = BUF_STATE_PROCESSED;
	}
	if (channel == &card->data)
		return;
	if (channel == &card->read &&
	    channel->state == CH_STATE_UP)
		qeth_issue_next_read(card);

	/* run callbacks for all processed buffers in ring order */
	iob = channel->iob;
	index = channel->buf_no;
	while (iob[index].state == BUF_STATE_PROCESSED) {
		if (iob[index].callback != NULL)
			iob[index].callback(channel, iob + index);

		index = (index + 1) % QETH_CMD_BUFFER_NO;
	}
	channel->buf_no = index;
out:
	wake_up(&card->wait_q);
	return;
}

/*
 * Reset a qdio output buffer: optionally free its queued skbs, free
 * any header-cache allocations, clear all SBAL elements and mark the
 * buffer EMPTY.
 */
static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		unsigned int qeth_skip_skb)
{
	int i;
	struct sk_buff *skb;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].flags & 0x40)
		atomic_dec(&queue->set_pci_flags_count);

	if (!qeth_skip_skb) {
		skb = skb_dequeue(&buf->skb_list);
		while (skb) {
			atomic_dec(&skb->users);
			dev_kfree_skb_any(skb);
			skb = skb_dequeue(&buf->skb_list);
		}
	}
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
		if (buf->buffer->element[i].addr && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
				buf->buffer->element[i].addr);
		buf->is_header[i] = 0;
		buf->buffer->element[i].length = 0;
		buf->buffer->element[i].addr = NULL;
		buf->buffer->element[i].flags = 0;
	}
	buf->buffer->element[15].flags = 0;
	buf->next_element_to_fill = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

/* Clear an output buffer including its queued skbs. */
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf)
{
	__qeth_clear_output_buffer(queue, buf, 0);
}

/* Clear all outbound qdio buffers of the card (frees pending skbs). */
void qeth_clear_qdio_buffers(struct qeth_card *card)
{
	int i, j;

	QETH_DBF_TEXT(TRACE, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i)
		if (card->qdio.out_qs[i]) {
			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
				qeth_clear_output_buffer(card->qdio.out_qs[i],
						&card->qdio.out_qs[i]->bufs[j]);
		}
}
EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);

/* Free all pages and entries of the inbound init buffer pool. */
static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	int i = 0;

	QETH_DBF_TEXT(TRACE, 5, "freepool");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.init_pool.entry_list, init_list){
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);
		kfree(pool_entry);
	}
}

/* Tear down all qdio queues and the inbound buffer pool. */
static void qeth_free_qdio_buffers(struct qeth_card *card)
{
	int i, j;

	QETH_DBF_TEXT(TRACE, 2, "freeqdbf");
	/* only run once: atomically flip state back to UNINITIALIZED */
	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;
	kfree(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	if (card->qdio.out_qs) {
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
				qeth_clear_output_buffer(card->qdio.out_qs[i],
						&card->qdio.out_qs[i]->bufs[j]);
			kfree(card->qdio.out_qs[i]);
		}
		kfree(card->qdio.out_qs);
		card->qdio.out_qs = NULL;
	}
}

/* Free the command buffers of a channel (undoes qeth_setup_channel). */
static void qeth_clean_channel(struct qeth_channel *channel)
{
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "freech");
	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
		kfree(channel->iob[cnt].data);
}

/*
 * Read the channel-path descriptor of the data device and derive the
 * number of output queues and the function level from it.
 */
static void qeth_get_channel_path_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channelPath_dsc {
		u8 flags;
		u8 lsn;
		u8 desc;
		u8 chpid;
		u8 swla;
		u8 zeroes;
		u8 chla;
		u8 chpp;
	} *chp_dsc;

	QETH_DBF_TEXT(SETUP, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
	if (chp_dsc != NULL) {
		/* CHPP field bit 6 == 1 -> single queue */
		if ((chp_dsc->chpp & 0x02) == 0x02)
			card->qdio.no_out_queues = 1;
		card->info.func_level = 0x4100 + chp_dsc->desc;
		kfree(chp_dsc);
	}
	if (card->qdio.no_out_queues == 1) {
		card->qdio.default_out_queue = 0;
		dev_info(&card->gdev->dev,
			"Priority Queueing not supported\n");
	}
	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
	return;
}

/* Set the qdio bookkeeping of a fresh card to its default values. */
static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

/* Initialize card->options with the driver defaults. */
static void qeth_set_intial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
	card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
	card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
	card->options.fake_broadcast = 0;
	card->options.add_hhlen = DEFAULT_ADD_HHLEN;
	card->options.performance_stats = 0;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
	card->options.isolation = ISOLATION_MODE_NONE;
}

/* Nonzero iff a start of the given thread has been requested. */
static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_DBF_TEXT_(TRACE, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

/* Work-queue entry point that spawns the recovery kthread if requested. */
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_DBF_TEXT(TRACE, 2, "strthrd");

	/* no point recovering if no control channel is up */
	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
		kthread_run(card->discipline.recover, (void *) card,
				"qeth_recover");
}

/* One-time initialization of a freshly allocated qeth_card. */
static int qeth_setup_card(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 2, "setupcrd");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->read.state = CH_STATE_DOWN;
	card->write.state = CH_STATE_DOWN;
	card->data.state = CH_STATE_DOWN;
	card->state = CARD_STATE_DOWN;
	card->lan_online = 0;
	card->use_hard_stop = 0;
	card->dev = NULL;
	spin_lock_init(&card->vlanlock);
	spin_lock_init(&card->mclock);
	card->vlangrp = NULL;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->ip_lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	card->thread_start_mask = 0;
	card->thread_allowed_mask = 0;
	card->thread_running_mask = 0;
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->ip_list);
	INIT_LIST_HEAD(card->ip_tbd_list);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	/* initial
	   options */
	qeth_set_intial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	card->ipato.enabled = 0;
	card->ipato.invert4 = 0;
	card->ipato.invert6 = 0;
	/* init QDIO stuff */
	qeth_init_qdio_info(card);
	return 0;
}

/* service_level callback: print the adapter's microcode level. */
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

/*
 * Allocate a qeth_card (DMA-able) and set up its read/write channel
 * buffers and service-level registration.  Returns NULL on any
 * allocation failure; goto-chain unwinds partial allocations.
 */
static struct qeth_card *qeth_alloc_card(void)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
	if (!card->ip_tbd_list) {
		QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
		goto out_card;
	}
	if (qeth_setup_channel(&card->read))
		goto out_ip;
	if (qeth_setup_channel(&card->write))
		goto out_channel;
	card->options.layer2 = -1;
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_channel:
	qeth_clean_channel(&card->read);
out_ip:
	kfree(card->ip_tbd_list);
out_card:
	kfree(card);
out:
	return NULL;
}

/*
 * Match the read device's type/model against the known_devices table
 * to determine card type, queue count and multicast behaviour.
 * Returns -ENOENT for unknown hardware.
 */
static int qeth_determine_card_type(struct qeth_card *card)
{
	int i = 0;

	QETH_DBF_TEXT(SETUP, 2, "detcdtyp");

	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
	while (known_devices[i][QETH_DEV_MODEL_IND]) {
		if ((CARD_RDEV(card)->id.dev_type ==
				known_devices[i][QETH_DEV_TYPE_IND]) &&
		    (CARD_RDEV(card)->id.dev_model ==
				known_devices[i][QETH_DEV_MODEL_IND])) {
			card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
			card->qdio.no_out_queues =
				known_devices[i][QETH_QUEUE_NO_IND];
			card->info.is_multicast_different =
				known_devices[i][QETH_MULTICAST_IND];
			qeth_get_channel_path_desc(card);
			return 0;
		}
		i++;
	}
	card->info.type = QETH_CARD_TYPE_UNKNOWN;
	dev_err(&card->gdev->dev, "The adapter hardware is of an "
		"unknown type\n");
	return -ENOENT;
}

/*
 * Issue a clear on the channel's ccw device and wait (with timeout)
 * for CH_STATE_STOPPED; leaves the channel in CH_STATE_DOWN on success.
 */
static int qeth_clear_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "clearch");
	card = CARD_FROM_CDEV(channel->ccwdev);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

/* Halt a channel and wait (with timeout) for CH_STATE_HALTED. */
static int qeth_halt_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;
	int rc;

	QETH_DBF_TEXT(TRACE, 3, "haltch");
	card = CARD_FROM_CDEV(channel->ccwdev);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

/* Halt read, write and data channels; return the first failure, if any. */
static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_DBF_TEXT(TRACE, 3, "haltchs");
	rc1 = qeth_halt_channel(&card->read);
	rc2 = qeth_halt_channel(&card->write);
	rc3 = qeth_halt_channel(&card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

/* Clear read, write and data channels; return the first failure, if any. */
static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_DBF_TEXT(TRACE, 3, "clearchs");
	rc1 = qeth_clear_channel(&card->read);
	rc2 = qeth_clear_channel(&card->write);
	rc3 = qeth_clear_channel(&card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

/* Optionally halt, then clear, all channels of the card. */
static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc
		= 0;

	QETH_DBF_TEXT(TRACE, 3, "clhacrd");
	QETH_DBF_HEX(TRACE, 3, &card, sizeof(void *));

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

/*
 * Shut down the qdio subsystem of the card (HALT for HiperSockets,
 * CLEAR otherwise), then clear/halt the CCW channels and mark the
 * card DOWN.
 */
int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (card->info.type == QETH_CARD_TYPE_IQD)
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
		qdio_free(CARD_DDEV(card));
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		/* another caller is already cleaning up */
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_DBF_TEXT_(TRACE, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);

/*
 * Read the device's configuration data via the RCD channel command.
 * On success *buffer/*length describe a kzalloc'ed copy the caller
 * must free; on failure they are NULL/0.
 */
static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
		int *length)
{
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct qeth_channel *channel = &card->data;
	unsigned long flags;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf)
		return -ENOMEM;

	channel->ccw.cmd_code = ciw->cmd;
	channel->ccw.cda = (__u32) __pa(rcd_buf);
	channel->ccw.count = ciw->count;
	channel->ccw.flags = CCW_FLAG_SLI;
	channel->state = CH_STATE_RCD;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
				       QETH_RCD_PARM, LPM_ANYPATH, 0,
				       QETH_RCD_TIMEOUT);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (!ret)
		wait_event(card->wait_q,
			   (channel->state == CH_STATE_RCD_DONE ||
			    channel->state == CH_STATE_DOWN));
	if (channel->state == CH_STATE_DOWN)
		ret = -EIO;
	else
		channel->state = CH_STATE_DOWN;
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	return ret;
}

/* Extract CHPID, unit address, CULA and guest-LAN flag from RCD data. */
static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgunit");
	card->info.chpid = prcd[30];
	card->info.unit_addr2 = prcd[31];
	card->info.cula = prcd[63];
	/* EBCDIC "VM" at offset 0x10 marks a guest-LAN device */
	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
			       (prcd[0x11] == _ascebc['M']));
}

/* Choose blocking-timer defaults based on the level bytes in prcd. */
static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");

	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	} else {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	}
}

/* Seed the MPC handshake tokens with their initial values. */
static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}

/* Select the IDX function level matching card type and IPAT setting. */
static void qeth_init_func_level(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		if (card->ipato.enabled)
			card->info.func_level =
				QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
		else
			card->info.func_level =
				QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
		break;
	case QETH_CARD_TYPE_OSD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

/*
 * Start a read CCW to collect the IDX ACTIVATE answer and wait for
 * the channel to come up; idx_reply_cb processes the answer buffer.
 */
static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	int rc;
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "idxanswr");
	card = CARD_FROM_CDEV(channel->ccwdev);
	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	channel->ccw.count = QETH_BUFSIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);

	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			 channel->state == CH_STATE_UP, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_UP) {
		rc = -ETIME;
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		qeth_clear_cmd_buffers(channel);
	} else
		rc = 0;
	return rc;
}

/*
 * Send the IDX ACTIVATE request on the channel (READ or WRITE
 * variant), wait for the ACTIVATING state, then collect the device's
 * answer via qeth_idx_activate_get_answer().
 */
static int qeth_idx_activate_channel(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	__u16 temp;
	__u8 tmp;
	int rc;
	struct ccw_dev_id temp_devid;

	card = CARD_FROM_CDEV(channel->ccwdev);

	QETH_DBF_TEXT(SETUP, 2, "idxactch");

	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = IDX_ACTIVATE_SIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);
	if (channel == &card->write) {
		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
		/* only the write channel advances the transport seqno */
		card->seqno.trans_hdr++;
	} else {
		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		       &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	tmp = ((__u8)card->info.portno) | 0x80;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, sizeof(__u16));
	ccw_device_get_id(CARD_DDEV(card), &temp_devid);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
			rc);
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_ACTIVATING) {
		dev_warn(&channel->ccwdev->dev, "The qeth device driver"
			" failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
			dev_name(&channel->ccwdev->dev));
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
		qeth_clear_cmd_buffers(channel);
		return -ETIME;
	}
	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
}

/* Map the peer's advertised function level to our comparison value. */
static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

/*
 * IDX ACTIVATE completion callback for the write channel: verify the
 * positive reply and the peer function level, then mark the channel UP.
 */
static void qeth_idx_write_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(SETUP, 2, "idxwrcb");

	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}
	card = CARD_FROM_CDEV(channel->ccwdev);

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
			dev_err(&card->write.ccwdev->dev,
				"The adapter is used exclusively by another "
				"host\n");
		else
			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
				" negative reply\n",
				dev_name(&card->write.ccwdev->dev));
		goto out;
	}
	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
			"function level mismatch (sent: 0x%x, received: "
			"0x%x)\n", dev_name(&card->write.ccwdev->dev),
			card->info.func_level, temp);
		goto out;
	}
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}

/*
 * IDX ACTIVATE completion callback for the read channel: validate the
 * reply, record the portname requirement, the peer's issuer token and
 * the microcode level.
 */
static void qeth_idx_read_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}

	card = CARD_FROM_CDEV(channel->ccwdev);
	if (qeth_check_idx_response(card, iob->data))
		goto out;

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
		case QETH_IDX_ACT_ERR_EXCL:
			dev_err(&card->write.ccwdev->dev,
				"The adapter is used exclusively by another "
				"host\n");
			break;
		case QETH_IDX_ACT_ERR_AUTH:
			dev_err(&card->read.ccwdev->dev,
				"Setting the device online failed because of "
				"insufficient LPAR authorization\n");
			break;
		default:
			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
				" negative reply\n",
				dev_name(&card->read.ccwdev->dev));
		}
		goto out;
	}

	/*
	 * temporary fix for microcode bug
	 * to revert it, replace OR by AND
	 */
	if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
	     (card->info.type == QETH_CARD_TYPE_OSD))
		card->info.portname_required = 1;

	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (temp != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
			"level mismatch (sent: 0x%x, received: 0x%x)\n",
			dev_name(&card->read.ccwdev->dev),
			card->info.func_level, temp);
		goto out;
	}
	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
channel->state = CH_STATE_UP; out: qeth_release_buffer(channel, iob); } void qeth_prepare_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob) { qeth_setup_ccw(&card->write, iob->data, len); iob->callback = qeth_release_buffer; memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); card->seqno.trans_hdr++; memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); card->seqno.pdu_hdr++; memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); } EXPORT_SYMBOL_GPL(qeth_prepare_control_data); int qeth_send_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob, int (*reply_cb)(struct qeth_card *, struct qeth_reply *, unsigned long), void *reply_param) { int rc; unsigned long flags; struct qeth_reply *reply = NULL; unsigned long timeout, event_timeout; struct qeth_ipa_cmd *cmd; QETH_DBF_TEXT(TRACE, 2, "sendctl"); reply = qeth_alloc_reply(card); if (!reply) { return -ENOMEM; } reply->callback = reply_cb; reply->param = reply_param; if (card->state == CARD_STATE_DOWN) reply->seqno = QETH_IDX_COMMAND_SEQNO; else reply->seqno = card->seqno.ipa++; init_waitqueue_head(&reply->wait_q); spin_lock_irqsave(&card->lock, flags); list_add_tail(&reply->list, &card->cmd_waiter_list); spin_unlock_irqrestore(&card->lock, flags); QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; qeth_prepare_control_data(card, len, iob); if (IS_IPA(iob->data)) event_timeout = QETH_IPA_TIMEOUT; else event_timeout = QETH_TIMEOUT; timeout = jiffies + event_timeout; QETH_DBF_TEXT(TRACE, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, (addr_t) iob, 0, 0); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "%s 
qeth_send_control_data: " "ccw_device_start rc = %i\n", dev_name(&card->write.ccwdev->dev), rc); QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); spin_lock_irqsave(&card->lock, flags); list_del_init(&reply->list); qeth_put_reply(reply); spin_unlock_irqrestore(&card->lock, flags); qeth_release_buffer(iob->channel, iob); atomic_set(&card->write.irq_pending, 0); wake_up(&card->wait_q); return rc; } /* we have only one long running ipassist, since we can ensure process context of this command we can sleep */ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); if ((cmd->hdr.command == IPA_CMD_SETIP) && (cmd->hdr.prot_version == QETH_PROT_IPV4)) { if (!wait_event_timeout(reply->wait_q, atomic_read(&reply->received), event_timeout)) goto time_err; } else { while (!atomic_read(&reply->received)) { if (time_after(jiffies, timeout)) goto time_err; cpu_relax(); }; } rc = reply->rc; qeth_put_reply(reply); return rc; time_err: spin_lock_irqsave(&reply->card->lock, flags); list_del_init(&reply->list); spin_unlock_irqrestore(&reply->card->lock, flags); reply->rc = -ETIME; atomic_inc(&reply->received); wake_up(&reply->wait_q); rc = reply->rc; qeth_put_reply(reply); return rc; } EXPORT_SYMBOL_GPL(qeth_send_control_data); static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "cmenblcb"); iob = (struct qeth_cmd_buffer *) data; memcpy(&card->token.cm_filter_r, QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } static int qeth_cm_enable(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "cmenable"); iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE); memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 
rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob, qeth_cm_enable_cb, NULL); return rc; } static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "cmsetpcb"); iob = (struct qeth_cmd_buffer *) data; memcpy(&card->token.cm_connection_r, QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), QETH_MPC_TOKEN_LENGTH); QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } static int qeth_cm_setup(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "cmsetup"); iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE); memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob, qeth_cm_setup_cb, NULL); return rc; } static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_UNKNOWN: return 1500; case QETH_CARD_TYPE_IQD: return card->info.max_mtu; case QETH_CARD_TYPE_OSD: switch (card->info.link_type) { case QETH_LINK_TYPE_HSTR: case QETH_LINK_TYPE_LANE_TR: return 2000; default: return 1492; } case QETH_CARD_TYPE_OSM: case QETH_CARD_TYPE_OSX: return 1492; default: return 1500; } } static inline int qeth_get_max_mtu_for_card(int cardtype) { switch (cardtype) { case QETH_CARD_TYPE_UNKNOWN: case QETH_CARD_TYPE_OSD: case QETH_CARD_TYPE_OSN: case QETH_CARD_TYPE_OSM: case QETH_CARD_TYPE_OSX: return 61440; case QETH_CARD_TYPE_IQD: return 57344; default: return 1500; } } static inline int qeth_get_mtu_out_of_mpc(int cardtype) { switch (cardtype) { case QETH_CARD_TYPE_IQD: return 1; default: return 0; } } static inline int qeth_get_mtu_outof_framesize(int framesize) { switch (framesize) { case 
0x4000: return 8192; case 0x6000: return 16384; case 0xa000: return 32768; case 0xffff: return 57344; default: return 0; } } static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: case QETH_CARD_TYPE_OSM: case QETH_CARD_TYPE_OSX: return ((mtu >= 576) && (mtu <= 61440)); case QETH_CARD_TYPE_IQD: return ((mtu >= 576) && (mtu <= card->info.max_mtu + 4096 - 32)); case QETH_CARD_TYPE_OSN: case QETH_CARD_TYPE_UNKNOWN: default: return 1; } } static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { __u16 mtu, framesize; __u16 len; __u8 link_type; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "ulpenacb"); iob = (struct qeth_cmd_buffer *) data; memcpy(&card->token.ulp_filter_r, QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); if (qeth_get_mtu_out_of_mpc(card->info.type)) { memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); mtu = qeth_get_mtu_outof_framesize(framesize); if (!mtu) { iob->rc = -EINVAL; QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } card->info.max_mtu = mtu; card->info.initial_mtu = mtu; card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE; } else { card->info.initial_mtu = qeth_get_initial_mtu_for_card(card); card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type); card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; } memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { memcpy(&link_type, QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); card->info.link_type = link_type; } else card->info.link_type = 0; QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type); QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } static int qeth_ulp_enable(struct qeth_card *card) { int rc; char prot_type; struct qeth_cmd_buffer *iob; /*FIXME: trace view callbacks*/ QETH_DBF_TEXT(SETUP, 2, "ulpenabl"); iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, 
ULP_ENABLE, ULP_ENABLE_SIZE); *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (__u8) card->info.portno; if (card->options.layer2) if (card->info.type == QETH_CARD_TYPE_OSN) prot_type = QETH_PROT_OSN2; else prot_type = QETH_PROT_LAYER2; else prot_type = QETH_PROT_TCPIP; memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data), card->info.portname, 9); rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob, qeth_ulp_enable_cb, NULL); return rc; } static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_cmd_buffer *iob; int rc = 0; QETH_DBF_TEXT(SETUP, 2, "ulpstpcb"); iob = (struct qeth_cmd_buffer *) data; memcpy(&card->token.ulp_connection_r, QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 3)) { QETH_DBF_TEXT(SETUP, 2, "olmlimit"); dev_err(&card->gdev->dev, "A connection could not be " "established because of an OLM limit\n"); rc = -EMLINK; } QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return rc; } static int qeth_ulp_setup(struct qeth_card *card) { int rc; __u16 temp; struct qeth_cmd_buffer *iob; struct ccw_dev_id dev_id; QETH_DBF_TEXT(SETUP, 2, "ulpsetup"); iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE); memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); ccw_device_get_id(CARD_DDEV(card), &dev_id); memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2); temp = (card->info.cula << 8) + 
card->info.unit_addr2; memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob, qeth_ulp_setup_cb, NULL); return rc; } static int qeth_alloc_qdio_buffers(struct qeth_card *card) { int i, j; QETH_DBF_TEXT(SETUP, 2, "allcqdbf"); if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) return 0; card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL); if (!card->qdio.in_q) goto out_nomem; QETH_DBF_TEXT(SETUP, 2, "inq"); QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *)); memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q)); /* give inbound qeth_qdio_buffers their qdio_buffers */ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) card->qdio.in_q->bufs[i].buffer = &card->qdio.in_q->qdio_bufs[i]; /* inbound buffer pool */ if (qeth_alloc_buffer_pool(card)) goto out_freeinq; /* outbound */ card->qdio.out_qs = kmalloc(card->qdio.no_out_queues * sizeof(struct qeth_qdio_out_q *), GFP_KERNEL); if (!card->qdio.out_qs) goto out_freepool; for (i = 0; i < card->qdio.no_out_queues; ++i) { card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), GFP_KERNEL); if (!card->qdio.out_qs[i]) goto out_freeoutq; QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *)); memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q)); card->qdio.out_qs[i]->queue_no = i; /* give outbound qeth_qdio_buffers their qdio_buffers */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { card->qdio.out_qs[i]->bufs[j].buffer = &card->qdio.out_qs[i]->qdio_bufs[j]; skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j]. 
skb_list); lockdep_set_class( &card->qdio.out_qs[i]->bufs[j].skb_list.lock, &qdio_out_skb_queue_key); INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list); } } return 0; out_freeoutq: while (i > 0) kfree(card->qdio.out_qs[--i]); kfree(card->qdio.out_qs); card->qdio.out_qs = NULL; out_freepool: qeth_free_buffer_pool(card); out_freeinq: kfree(card->qdio.in_q); card->qdio.in_q = NULL; out_nomem: atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); return -ENOMEM; } static void qeth_create_qib_param_field(struct qeth_card *card, char *param_field) { param_field[0] = _ascebc['P']; param_field[1] = _ascebc['C']; param_field[2] = _ascebc['I']; param_field[3] = _ascebc['T']; *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card); *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card); *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card); } static void qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field) { param_field[16] = _ascebc['B']; param_field[17] = _ascebc['L']; param_field[18] = _ascebc['K']; param_field[19] = _ascebc['T']; *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total; *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet; *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo; } static int qeth_qdio_activate(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 3, "qdioact"); return qdio_activate(CARD_DDEV(card)); } static int qeth_dm_act(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "dmact"); iob = qeth_wait_for_buffer(&card->write); memcpy(iob->data, DM_ACT, DM_ACT_SIZE); memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL); return rc; } static int qeth_mpc_initialize(struct qeth_card *card) { int 
rc; QETH_DBF_TEXT(SETUP, 2, "mpcinit"); rc = qeth_issue_next_read(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); return rc; } rc = qeth_cm_enable(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); goto out_qdio; } rc = qeth_cm_setup(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); goto out_qdio; } rc = qeth_ulp_enable(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); goto out_qdio; } rc = qeth_ulp_setup(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_qdio; } rc = qeth_alloc_qdio_buffers(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_qdio; } rc = qeth_qdio_establish(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); qeth_free_qdio_buffers(card); goto out_qdio; } rc = qeth_qdio_activate(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); goto out_qdio; } rc = qeth_dm_act(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); goto out_qdio; } return 0; out_qdio: qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); return rc; } static void qeth_print_status_with_portname(struct qeth_card *card) { char dbf_text[15]; int i; sprintf(dbf_text, "%s", card->info.portname + 1); for (i = 0; i < 8; i++) dbf_text[i] = (char) _ebcasc[(__u8) dbf_text[i]]; dbf_text[8] = 0; dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n" "with link type %s (portname: %s)\n", qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? card->info.mcl_level : "", (card->info.mcl_level[0]) ? ")" : "", qeth_get_cardname_short(card), dbf_text); } static void qeth_print_status_no_portname(struct qeth_card *card) { if (card->info.portname[0]) dev_info(&card->gdev->dev, "Device is a%s " "card%s%s%s\nwith link type %s " "(no portname needed by interface).\n", qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? card->info.mcl_level : "", (card->info.mcl_level[0]) ? 
")" : "", qeth_get_cardname_short(card)); else dev_info(&card->gdev->dev, "Device is a%s " "card%s%s%s\nwith link type %s.\n", qeth_get_cardname(card), (card->info.mcl_level[0]) ? " (level: " : "", (card->info.mcl_level[0]) ? card->info.mcl_level : "", (card->info.mcl_level[0]) ? ")" : "", qeth_get_cardname_short(card)); } void qeth_print_status_message(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: case QETH_CARD_TYPE_OSM: case QETH_CARD_TYPE_OSX: /* VM will use a non-zero first character * to indicate a HiperSockets like reporting * of the level OSA sets the first character to zero * */ if (!card->info.mcl_level[0]) { sprintf(card->info.mcl_level, "%02x%02x", card->info.mcl_level[2], card->info.mcl_level[3]); card->info.mcl_level[QETH_MCL_LENGTH] = 0; break; } /* fallthrough */ case QETH_CARD_TYPE_IQD: if ((card->info.guestlan) || (card->info.mcl_level[0] & 0x80)) { card->info.mcl_level[0] = (char) _ebcasc[(__u8) card->info.mcl_level[0]]; card->info.mcl_level[1] = (char) _ebcasc[(__u8) card->info.mcl_level[1]]; card->info.mcl_level[2] = (char) _ebcasc[(__u8) card->info.mcl_level[2]]; card->info.mcl_level[3] = (char) _ebcasc[(__u8) card->info.mcl_level[3]]; card->info.mcl_level[QETH_MCL_LENGTH] = 0; } break; default: memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); } if (card->info.portname_required) qeth_print_status_with_portname(card); else qeth_print_status_no_portname(card); } EXPORT_SYMBOL_GPL(qeth_print_status_message); static void qeth_initialize_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *entry; QETH_DBF_TEXT(TRACE, 5, "inwrklst"); list_for_each_entry(entry, &card->qdio.init_pool.entry_list, init_list) { qeth_put_buffer_pool_entry(card, entry); } } static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( struct qeth_card *card) { struct list_head *plh; struct qeth_buffer_pool_entry *entry; int i, free; struct page *page; if 
(list_empty(&card->qdio.in_buf_pool.entry_list)) return NULL; list_for_each(plh, &card->qdio.in_buf_pool.entry_list) { entry = list_entry(plh, struct qeth_buffer_pool_entry, list); free = 1; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { if (page_count(virt_to_page(entry->elements[i])) > 1) { free = 0; break; } } if (free) { list_del_init(&entry->list); return entry; } } /* no free buffer in pool so take first one and swap pages */ entry = list_entry(card->qdio.in_buf_pool.entry_list.next, struct qeth_buffer_pool_entry, list); for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { if (page_count(virt_to_page(entry->elements[i])) > 1) { page = alloc_page(GFP_ATOMIC); if (!page) { return NULL; } else { free_page((unsigned long)entry->elements[i]); entry->elements[i] = page_address(page); if (card->options.performance_stats) card->perf_stats.sg_alloc_page_rx++; } } } list_del_init(&entry->list); return entry; } static int qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) { struct qeth_buffer_pool_entry *pool_entry; int i; pool_entry = qeth_find_free_buffer_pool_entry(card); if (!pool_entry) return 1; /* * since the buffer is accessed only from the input_tasklet * there shouldn't be a need to synchronize; also, since we use * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off * buffers */ buf->pool_entry = pool_entry; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { buf->buffer->element[i].length = PAGE_SIZE; buf->buffer->element[i].addr = pool_entry->elements[i]; if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY; else buf->buffer->element[i].flags = 0; } return 0; } int qeth_init_qdio_queues(struct qeth_card *card) { int i, j; int rc; QETH_DBF_TEXT(SETUP, 2, "initqdqs"); /* inbound queue */ memset(card->qdio.in_q->qdio_bufs, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); qeth_initialize_working_pool_list(card); /*give only as many buffers to hardware as we 
have buffer pool entries*/ for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1; rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, card->qdio.in_buf_pool.buf_count - 1); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); return rc; } /* outbound queue */ for (i = 0; i < card->qdio.no_out_queues; ++i) { memset(card->qdio.out_qs[i]->qdio_bufs, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { qeth_clear_output_buffer(card->qdio.out_qs[i], &card->qdio.out_qs[i]->bufs[j]); } card->qdio.out_qs[i]->card = card; card->qdio.out_qs[i]->next_buf_to_fill = 0; card->qdio.out_qs[i]->do_pack = 0; atomic_set(&card->qdio.out_qs[i]->used_buffers, 0); atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0); atomic_set(&card->qdio.out_qs[i]->state, QETH_OUT_Q_UNLOCKED); } return 0; } EXPORT_SYMBOL_GPL(qeth_init_qdio_queues); static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) { switch (link_type) { case QETH_LINK_TYPE_HSTR: return 2; default: return 1; } } static void qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd, __u8 command, enum qeth_prot_versions prot) { memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); cmd->hdr.command = command; cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; cmd->hdr.seqno = card->seqno.ipa; cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); cmd->hdr.rel_adapter_no = (__u8) card->info.portno; if (card->options.layer2) cmd->hdr.prim_version_no = 2; else cmd->hdr.prim_version_no = 1; cmd->hdr.param_count = 1; cmd->hdr.prot_version = prot; cmd->hdr.ipa_supported = 0; cmd->hdr.ipa_enabled = 0; } struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; iob = 
qeth_wait_for_buffer(&card->write); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); return iob; } EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer); void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, char prot_type) { memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); } EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), void *reply_param) { int rc; char prot_type; QETH_DBF_TEXT(TRACE, 4, "sendipa"); if (card->options.layer2) if (card->info.type == QETH_CARD_TYPE_OSN) prot_type = QETH_PROT_OSN2; else prot_type = QETH_PROT_LAYER2; else prot_type = QETH_PROT_TCPIP; qeth_prepare_ipa_cmd(card, iob, prot_type); rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob, reply_cb, reply_param); return rc; } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); static int qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot) { int rc; struct qeth_cmd_buffer *iob; iob = qeth_get_ipacmd_buffer(card, ipacmd, prot); rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); return rc; } int qeth_send_startlan(struct qeth_card *card) { int rc; QETH_DBF_TEXT(SETUP, 2, "strtlan"); rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0); return rc; } EXPORT_SYMBOL_GPL(qeth_send_startlan); int qeth_send_stoplan(struct qeth_card *card) { int rc = 0; /* * TODO: according to the IPA format document page 14, * TCP/IP (we!) never issue a STOPLAN * is this right ?!? 
*/ QETH_DBF_TEXT(SETUP, 2, "stoplan"); rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0); return rc; } EXPORT_SYMBOL_GPL(qeth_send_stoplan); int qeth_default_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; QETH_DBF_TEXT(TRACE, 4, "defadpcb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code == 0) cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; return 0; } EXPORT_SYMBOL_GPL(qeth_default_setadapterparms_cb); static int qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd; QETH_DBF_TEXT(TRACE, 3, "quyadpcb"); cmd = (struct qeth_ipa_cmd *) data; if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { card->info.link_type = cmd->data.setadapterparms.data.query_cmds_supp.lan_type; QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type); } card->options.adp.supported_funcs = cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); } struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setadapterparms.hdr.cmdlength = cmdlen; cmd->data.setadapterparms.hdr.command_code = command; cmd->data.setadapterparms.hdr.used_total = 1; cmd->data.setadapterparms.hdr.seq_no = 1; return iob; } EXPORT_SYMBOL_GPL(qeth_get_adapter_cmd); int qeth_query_setadapterparms(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(TRACE, 3, "queryadp"); iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, sizeof(struct qeth_ipacmd_setadpparms)); rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); return rc; } 
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);

/*
 * Inspect a buffer's QDIO error state and log it to the trace/error
 * debug areas. SBALF15 == 0x12 counts as a dropped-rx condition and
 * returns 0; any other error returns 1; no error returns 0.
 */
int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
		unsigned int qdio_error, const char *dbftext)
{
	if (qdio_error) {
		QETH_DBF_TEXT(TRACE, 2, dbftext);
		QETH_DBF_TEXT(QERR, 2, dbftext);
		QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
			       buf->element[15].flags & 0xff);
		QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
			       buf->element[14].flags & 0xff);
		QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
		if ((buf->element[15].flags & 0xff) == 0x12) {
			card->stats.rx_dropped++;
			return 0;
		} else
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);

/*
 * Give processed inbound buffers back to the hardware. The number of
 * requeueable buffers is derived from @index relative to
 * next_buf_to_init (modulo the ring size); buffers are only requeued
 * once the threshold is reached, to save SIGA instructions. A partial
 * refill (memory shortage) switches the card to the force_alloc_skb
 * fallback mode and requeues only what could be initialized.
 */
void qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	int count;
	int i;
	int rc;
	int newcount = 0;

	count = (index < queue->next_buf_to_init)?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		/*
		 * according to old code it should be avoided to requeue all
		 * 128 buffers in order to benefit from PCI avoidance.
		 * this function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer that
		 * will be requeued the next time
		 */
		if (card->options.performance_stats) {
			card->perf_stats.inbound_do_qdio_cnt++;
			card->perf_stats.inbound_do_qdio_start_time =
				qeth_get_micros();
		}
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count);
		if (card->options.performance_stats)
			card->perf_stats.inbound_do_qdio_time +=
				qeth_get_micros() -
				card->perf_stats.inbound_do_qdio_start_time;
		if (rc) {
			dev_warn(&card->gdev->dev,
				"QDIO reported an error, rc=%i\n", rc);
			QETH_DBF_TEXT(TRACE, 2, "qinberr");
			QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
		}
		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
					  QDIO_MAX_BUFFERS_PER_Q;
	}
}
EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);

/*
 * Classify a send error for an outbound buffer. On IQD the QDIO error
 * is re-derived from SBALF15 alone. Returns QETH_SEND_ERROR_NONE,
 * _RETRY (SBALF15 in 15..31) or _LINK_FAILURE.
 */
static int qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].flags & 0xff;

	QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
	if (card->info.type == QETH_CARD_TYPE_IQD) {
		if (sbalf15 == 0) {
			qdio_err = 0;
		} else {
			qdio_err = 1;
		}
	}
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return QETH_SEND_ERROR_NONE;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return QETH_SEND_ERROR_RETRY;

	QETH_DBF_TEXT(TRACE, 1, "lnkfail");
	QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
	QETH_DBF_TEXT_(TRACE, 1, "%04x %02x",
		       (u16)qdio_err, (u8)sbalf15);
	return QETH_SEND_ERROR_LINK_FAILURE;
}

/*
 * Switches to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK){
			/* switch non-PACKING -> PACKING */
			QETH_DBF_TEXT(TRACE, 6, "np->pack");
			if (queue->card->options.performance_stats)
				queue->card->perf_stats.sc_dp_p++;
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;
	int flush_count = 0;

	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_DBF_TEXT(TRACE, 6, "pack->np");
			if (queue->card->options.performance_stats)
				queue->card->perf_stats.sc_p_dp++;
			queue->do_pack = 0;
			/* flush packing buffers: a partially filled buffer
			 * in EMPTY state is promoted to PRIMED so it gets
			 * sent out */
			buffer = &queue->bufs[queue->next_buf_to_fill];
			if ((atomic_read(&buffer->state) ==
						QETH_QDIO_BUF_EMPTY) &&
			    (buffer->next_element_to_fill > 0)) {
				atomic_set(&buffer->state,
						QETH_QDIO_BUF_PRIMED);
				flush_count++;
				queue->next_buf_to_fill =
					(queue->next_buf_to_fill + 1) %
					QDIO_MAX_BUFFERS_PER_Q;
			}
		}
	}
	return flush_count;
}

/*
 * Called to flush a packing buffer if no more pci flags are on the queue.
 * Checks if there is a packing buffer and prepares it to be flushed.
 * In that case returns 1, otherwise zero.
 */
static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = &queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
		return 1;
	}
	return 0;
}

/*
 * Hand 'count' outbound buffers starting at 'index' to the hardware
 * via do_QDIO(), setting PCI request flags as needed and updating the
 * performance counters.  On persistent errors the card is scheduled
 * for recovery.
 */
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_qdio_out_buffer *buf;
	int rc;
	int i;
	unsigned int qdio_flags;

	for (i = index; i < index + count; ++i) {
		buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		buf->buffer->element[buf->next_element_to_fill - 1].flags |=
				SBAL_FLAGS_LAST_ENTRY;

		/* IQD devices never use the PCI-flag logic below */
		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
			continue;

		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].flags |= 0x40;
			}
		}
	}

	queue->sync_iqdio_error = 0;
	queue->card->dev->trans_start = jiffies;
	if (queue->card->options.performance_stats) {
		queue->card->perf_stats.outbound_do_qdio_cnt++;
		queue->card->perf_stats.outbound_do_qdio_start_time =
			qeth_get_micros();
	}
	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	if (atomic_read(&queue->set_pci_flags_count))
		qdio_flags |= QDIO_FLAG_PCI_OUT;
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);
	if
 (queue->card->options.performance_stats)
		queue->card->perf_stats.outbound_do_qdio_time +=
			qeth_get_micros() -
			queue->card->perf_stats.outbound_do_qdio_start_time;
	/* remember a synchronous IQD error condition for the caller */
	if (rc > 0) {
		if (!(rc & QDIO_ERROR_SIGA_BUSY))
			queue->sync_iqdio_error = rc & 3;
	}
	if (rc) {
		queue->card->stats.tx_errors += count;
		/* ignore temporary SIGA errors without busy condition */
		if (rc == QDIO_ERROR_SIGA_TARGET)
			return;
		QETH_DBF_TEXT(TRACE, 2, "flushbuf");
		QETH_DBF_TEXT_(TRACE, 2, " err%d", rc);
		QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_DDEV_ID(queue->card));

		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
	atomic_add(count, &queue->used_buffers);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.bufs_sent += count;
}

/*
 * Called from the output handler when no send is in progress: decide
 * whether to leave packing mode and/or flush a pending packing buffer.
 */
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int index;
	int flush_cnt = 0;
	int q_was_packing = 0;

	/*
	 * check if we'd have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
				QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			netif_stop_queue(queue->card->dev);
			index = queue->next_buf_to_fill;
			q_was_packing = queue->do_pack;
			/* queue->do_pack may change */
			barrier();
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt +=
					qeth_flush_buffers_on_no_pci(queue);
			if (queue->card->options.performance_stats &&
			    q_was_packing)
				queue->card->perf_stats.bufs_sent_pack +=
					flush_cnt;
			if (flush_cnt)
				qeth_flush_buffers(queue, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}

/*
 * QDIO output-queue completion handler: releases completed outbound
 * buffers, classifies send errors, and kicks the queue housekeeping.
 * Runs as a qdio callback with the card pointer passed in card_ptr.
 */
void qeth_qdio_output_handler(struct ccw_device *ccwdev,
		unsigned int qdio_error, int __queue, int first_element,
		int count, unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct qeth_qdio_out_buffer *buffer;
	int i;
	unsigned qeth_send_err;

	QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
	if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
		/* fatal channel condition -> stop traffic and recover */
		QETH_DBF_TEXT(TRACE, 2, "achkcond");
		QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
		netif_stop_queue(card->dev);
		qeth_schedule_recovery(card);
		return;
	}
	if (card->options.performance_stats) {
		card->perf_stats.outbound_handler_cnt++;
		card->perf_stats.outbound_handler_start_time =
			qeth_get_micros();
	}
	for (i = first_element; i < (first_element + count); ++i) {
		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
		qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error);
		__qeth_clear_output_buffer(queue, buffer,
			(qeth_send_err == QETH_SEND_ERROR_RETRY) ?
1 : 0);
	}
	atomic_sub(count, &queue->used_buffers);
	/* check if we need to do something on this outbound queue */
	if (card->info.type != QETH_CARD_TYPE_IQD)
		qeth_check_outbound_queue(queue);

	netif_wake_queue(queue->card->dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_handler_time += qeth_get_micros() -
			card->perf_stats.outbound_handler_start_time;
}
EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);

/*
 * Select the outbound queue for a packet, honoring multicast mapping
 * and TOS/precedence based priority queueing on 4-queue devices.
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	/* non-IP traffic on OSD/OSX always uses the default queue */
	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
		     card->info.type == QETH_CARD_TYPE_OSX))
		return card->qdio.default_out_queue;
	switch (card->qdio.no_out_queues) {
	case 4:
		if (cast_type && card->info.is_multicast_different)
			return card->info.is_multicast_different &
				(card->qdio.no_out_queues - 1);
		if (card->qdio.do_prio_queueing && (ipv == 4)) {
			const u8 tos = ip_hdr(skb)->tos;

			if (card->qdio.do_prio_queueing ==
				QETH_PRIO_Q_ING_TOS) {
				if (tos & IP_TOS_NOTIMPORTANT)
					return 3;
				if (tos & IP_TOS_HIGHRELIABILITY)
					return 2;
				if (tos & IP_TOS_HIGHTHROUGHPUT)
					return 1;
				if (tos & IP_TOS_LOWDELAY)
					return 0;
			}
			/* precedence mode: map the 2 precedence bits to
			 * queues 3..0 */
			if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
				return 3 - (tos >> 6);
		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
			/* TODO: IPv6!!! */
		}
		return card->qdio.default_out_queue;
	case 1: /* fallthrough for single-out-queue 1920-device */
	default:
		return card->qdio.default_out_queue;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

/*
 * Compute the number of SBAL elements a packet will occupy (frags + 1,
 * or page-crossing count of the linear data).  Returns 0 and logs if
 * the packet would not fit into one buffer together with 'elems'.
 */
int qeth_get_elements_no(struct qeth_card *card, void *hdr,
		struct sk_buff *skb, int elems)
{
	int elements_needed = 0;

	if (skb_shinfo(skb)->nr_frags > 0)
		elements_needed = (skb_shinfo(skb)->nr_frags + 1);
	if (elements_needed == 0)
		elements_needed = 1 + (((((unsigned long) skb->data) %
				PAGE_SIZE) + skb->len) >> PAGE_SHIFT);
	if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
			"(Number=%d / Length=%d). Discarded.\n",
			(elements_needed+elems), skb->len);
		return 0;
	}
	return elements_needed;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_no);

/*
 * Map the linear data of an skb into consecutive SBAL elements,
 * splitting at page boundaries and setting FIRST/MIDDLE/LAST frag
 * flags.  'offset' >= 0 skips header bytes already placed elsewhere.
 */
static inline void __qeth_fill_buffer(struct sk_buff *skb,
	struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
	int offset)
{
	int length = skb->len;
	int length_here;
	int element;
	char *data;
	int first_lap ;

	element = *next_element_to_fill;
	data = skb->data;
	first_lap = (is_tso == 0 ? 1 : 0);

	if (offset >= 0) {
		data = skb->data + offset;
		length -= offset;
		first_lap = 0;
	}

	while (length > 0) {
		/* length_here is the remaining amount of data in this page */
		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
		if (length < length_here)
			length_here = length;

		buffer->element[element].addr = data;
		buffer->element[element].length = length_here;
		length -= length_here;
		if (!length) {
			if (first_lap)
				buffer->element[element].flags = 0;
			else
				buffer->element[element].flags =
				    SBAL_FLAGS_LAST_FRAG;
		} else {
			if (first_lap)
				buffer->element[element].flags =
				    SBAL_FLAGS_FIRST_FRAG;
			else
				buffer->element[element].flags =
				    SBAL_FLAGS_MIDDLE_FRAG;
		}
		data += length_here;
		element++;
		first_lap = 0;
	}
	*next_element_to_fill = element;
}

/*
 * Place one skb (plus its qeth header) into an outbound buffer.
 * Returns 1 if the buffer became PRIMED (ready to flush), 0 if it is
 * still being packed.
 */
static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf, struct sk_buff *skb,
		struct qeth_hdr *hdr, int offset, int hd_len)
{
	struct qdio_buffer *buffer;
	int flush_cnt = 0, hdr_len, large_send = 0;

	buffer = buf->buffer;
	/* hold a reference until completion releases the skb */
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);

	/*check first on TSO ....*/
	if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
		int element = buf->next_element_to_fill;

		hdr_len = sizeof(struct qeth_hdr_tso) +
			((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
		/*fill first buffer entry only with header information */
		buffer->element[element].addr = skb->data;
		buffer->element[element].length = hdr_len;
		buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
		buf->next_element_to_fill++;
		skb->data += hdr_len;
		skb->len -= hdr_len;
		large_send = 1;
	}

	if
 (offset >= 0) {
		/* header lives in a separate element in front of the data */
		int element = buf->next_element_to_fill;
		buffer->element[element].addr = hdr;
		buffer->element[element].length = sizeof(struct qeth_hdr) +
							hd_len;
		buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
		buf->is_header[element] = 1;
		buf->next_element_to_fill++;
	}

	if (skb_shinfo(skb)->nr_frags == 0)
		__qeth_fill_buffer(skb, buffer, large_send,
				   (int *)&buf->next_element_to_fill, offset);
	else
		__qeth_fill_buffer_frag(skb, buffer, large_send,
					(int *)&buf->next_element_to_fill);

	if (!queue->do_pack) {
		QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
		flush_cnt = 1;
	} else {
		QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt = 1;
		}
	}
	return flush_cnt;
}

/*
 * Non-packing fast path: claim the next outbound buffer, fill it with
 * this one skb and flush it immediately.  Handles the synchronous IQD
 * CC2 retry case by re-submitting the skb via dev_queue_xmit().
 * Returns 0 on success, -EBUSY if the next buffer is not empty.
 */
int qeth_do_send_packet_fast(struct qeth_card *card,
		struct qeth_qdio_out_q *queue, struct sk_buff *skb,
		struct qeth_hdr *hdr, int elements_needed,
		int offset, int hd_len)
{
	struct qeth_qdio_out_buffer *buffer;
	struct sk_buff *skb1;
	struct qeth_skb_data *retry_ctrl;
	int index;
	int rc;

	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	/* ...
 now we've got the queue */
	index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		goto out;
	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
				  QDIO_MAX_BUFFERS_PER_Q;
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
	qeth_flush_buffers(queue, index, 1);
	/* sync_iqdio_error == 2 means the SIGA reported CC2: drop the
	 * queued skbs and retry the original skb a limited number of
	 * times through the stack */
	if (queue->sync_iqdio_error == 2) {
		skb1 = skb_dequeue(&buffer->skb_list);
		while (skb1) {
			atomic_dec(&skb1->users);
			skb1 = skb_dequeue(&buffer->skb_list);
		}
		/* retry bookkeeping lives in the skb control block */
		retry_ctrl = (struct qeth_skb_data *) &skb->cb[16];
		if (retry_ctrl->magic != QETH_SKB_MAGIC) {
			retry_ctrl->magic = QETH_SKB_MAGIC;
			retry_ctrl->count = 0;
		}
		if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) {
			retry_ctrl->count++;
			rc = dev_queue_xmit(skb);
		} else {
			dev_kfree_skb_any(skb);
			QETH_DBF_TEXT(QERR, 2, "qrdrop");
		}
	}
	return 0;
out:
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);

/*
 * Packing-aware send path: may pack several skbs into one buffer and
 * cooperates with the output handler via the queue->state atomic.
 * Returns 0 on success, -EBUSY if no empty buffer is available.
 */
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		struct sk_buff *skb, struct qeth_hdr *hdr,
		int elements_needed)
{
	struct qeth_qdio_out_buffer *buffer;
	int start_index;
	int flush_count = 0;
	int do_pack = 0;
	int tmp;
	int rc = 0;

	/* spin until we get the queue ...
 */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	start_index = queue->next_buf_to_fill;
	buffer = &queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		return -EBUSY;
	}

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if ((QETH_MAX_BUFFER_ELEMENTS(card) -
		    buffer->next_element_to_fill) < elements_needed) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				(queue->next_buf_to_fill + 1) %
				QDIO_MAX_BUFFERS_PER_Q;
			buffer = &queue->bufs[queue->next_buf_to_fill];
			/* we did a step forward, so check buffer state
			 * again */
			if (atomic_read(&buffer->state) !=
					QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
						   flush_count);
				atomic_set(&queue->state,
						QETH_OUT_Q_UNLOCKED);
				return -EBUSY;
			}
		}
	}
	tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
	queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
				  QDIO_MAX_BUFFERS_PER_Q;
	flush_count += tmp;
	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);
	else if (!atomic_read(&queue->set_pci_flags_count))
		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
	/*
	 * queue->state will go from LOCKED -> UNLOCKED or from
	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
	 * (switch packing state or flush buffer to get another pci flag out).
 * In that case we will enter this loop
	 */
	while (atomic_dec_return(&queue->state)) {
		flush_count = 0;
		start_index = queue->next_buf_to_fill;
		/* check if we can go back to non-packing state */
		flush_count += qeth_switch_to_nonpacking_if_needed(queue);
		/*
		 * check if we need to flush a packing buffer to get a pci
		 * flag out on the queue
		 */
		if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
			flush_count += qeth_flush_buffers_on_no_pci(queue);
		if (flush_count)
			qeth_flush_buffers(queue, start_index, flush_count);
	}
	/* at this point the queue is UNLOCKED again */
	if (queue->card->options.performance_stats && do_pack)
		queue->card->perf_stats.bufs_sent_pack += flush_count;

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);

/*
 * IPA callback for SET_PROMISC_MODE: records the (possibly reverted)
 * promiscuous mode in card->info.
 */
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_ipacmd_setadpparms *setparms;

	QETH_DBF_TEXT(TRACE, 4, "prmadpcb");

	cmd = (struct qeth_ipa_cmd *) data;
	setparms = &(cmd->data.setadapterparms);

	qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
	if (cmd->hdr.return_code) {
		/* command failed -> fall back to promisc OFF */
		QETH_DBF_TEXT_(TRACE, 4, "prmrc%2.2x", cmd->hdr.return_code);
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
	return 0;
}

/*
 * Propagate the net_device IFF_PROMISC flag to the adapter via an IPA
 * SET_PROMISC_MODE command (no-op if the mode is already current).
 */
void qeth_setadp_promisc_mode(struct qeth_card *card)
{
	enum qeth_ipa_promisc_modes mode;
	struct net_device *dev = card->dev;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 4, "setprom");

	if (((dev->flags & IFF_PROMISC) &&
	     (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
	    (!(dev->flags & IFF_PROMISC) &&
	     (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
		return;
	mode = SET_PROMISC_MODE_OFF;
	if (dev->flags & IFF_PROMISC)
		mode = SET_PROMISC_MODE_ON;
	QETH_DBF_TEXT_(TRACE, 4, "mode:%x", mode);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
			sizeof(struct qeth_ipacmd_setadpparms));
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

/*
 * net_device MTU change hook.  Accepts 64..65535 and, for cards
 * without IP fragmentation assist, only MTUs qeth_mtu_is_valid()
 * allows.  Returns 0 or -EINVAL.
 */
int qeth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct qeth_card *card;
	char dbf_text[15];

	card = dev->ml_priv;

	QETH_DBF_TEXT(TRACE, 4, "chgmtu");
	/* "%8x" of an int is at most 8 chars -> fits dbf_text[15] */
	sprintf(dbf_text, "%8x", new_mtu);
	QETH_DBF_TEXT(TRACE, 4, dbf_text);

	if (new_mtu < 64)
		return -EINVAL;
	if (new_mtu > 65535)
		return -EINVAL;
	if ((!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) &&
	    (!qeth_mtu_is_valid(card, new_mtu)))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_change_mtu);

/* net_device statistics hook: return the card's counters. */
struct net_device_stats *qeth_get_stats(struct net_device *dev)
{
	struct qeth_card *card;

	card = dev->ml_priv;

	QETH_DBF_TEXT(TRACE, 5, "getstat");

	return &card->stats;
}
EXPORT_SYMBOL_GPL(qeth_get_stats);

/*
 * IPA callback for ALTER_MAC_ADDRESS (READ_MAC): copies the adapter's
 * MAC into the net_device unless layer2 already read it.
 */
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 4, "chgmaccb");

	cmd = (struct qeth_ipa_cmd *) data;
	if (!card->options.layer2 ||
	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
		memcpy(card->dev->dev_addr,
		       &cmd->data.setadapterparms.data.change_addr.addr,
		       OSA_ADDR_LEN);
		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
	}
	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
	return 0;
}

/* Issue an IPA READ_MAC command to fetch the adapter's MAC address. */
int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(TRACE, 4, "chgmac");

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
				   sizeof(struct qeth_ipacmd_setadpparms));
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
	cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
	memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
	       card->dev->dev_addr, OSA_ADDR_LEN);
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
			       NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);

/*
 * IPA callback for SET_ACCESS_CONTROL: evaluates the adapter's return
 * code, updates card->options.isolation accordingly and reports the
 * outcome to the user via dev_info/dev_err.
 */
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;
	int rc;

	QETH_DBF_TEXT(TRACE, 4, "setaccb");

	cmd = (struct qeth_ipa_cmd *) data;
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	QETH_DBF_TEXT_(SETUP, 2, "setaccb");
	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
	QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
		cmd->data.setadapterparms.hdr.return_code);
	switch (cmd->data.setadapterparms.hdr.return_code) {
	case SET_ACCESS_CTRL_RC_SUCCESS:
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
	{
		/* requested mode is now active */
		card->options.isolation = access_ctrl_req->subcmd_code;
		if (card->options.isolation == ISOLATION_MODE_NONE) {
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is deactivated\n");
		} else {
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is activated\n");
		}
		QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
			card->gdev->dev.kobj.name,
			access_ctrl_req->subcmd_code,
			cmd->data.setadapterparms.hdr.return_code);
		rc = 0;
		break;
	}
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
	{
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
			card->gdev->dev.kobj.name,
			access_ctrl_req->subcmd_code,
			cmd->data.setadapterparms.hdr.return_code);
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");

		/* ensure isolation mode is "none" */
		card->options.isolation = ISOLATION_MODE_NONE;
		rc = -EOPNOTSUPP;
		break;
	}
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
	{
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
			card->gdev->dev.kobj.name,
			access_ctrl_req->subcmd_code,
			cmd->data.setadapterparms.hdr.return_code);
		dev_err(&card->gdev->dev, "Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
		/* ensure isolation mode is "none" */
		card->options.isolation = ISOLATION_MODE_NONE;
		rc = -EOPNOTSUPP;
		break;
	}
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
	{
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
			card->gdev->dev.kobj.name,
			access_ctrl_req->subcmd_code,
			cmd->data.setadapterparms.hdr.return_code);
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
		/* ensure isolation mode is "none" */
		card->options.isolation = ISOLATION_MODE_NONE;
		rc = -EPERM;
		break;
	}
	default:
	{
		/* this should never happen */
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
			"==UNKNOWN\n",
			card->gdev->dev.kobj.name,
			access_ctrl_req->subcmd_code,
			cmd->data.setadapterparms.hdr.return_code);

		/* ensure isolation mode is "none" */
		card->options.isolation = ISOLATION_MODE_NONE;
		rc = 0;
		break;
	}
	}
	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
	return rc;
}

/*
 * Send an IPA SET_ACCESS_CONTROL command requesting the given
 * isolation mode; result evaluation happens in the callback above.
 */
static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
		enum qeth_ipa_isolation_modes isolation)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_DBF_TEXT(TRACE, 4, "setacctl");

	QETH_DBF_TEXT_(SETUP, 2, "setacctl");
	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
				   sizeof(struct qeth_set_access_ctrl));
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	access_ctrl_req->subcmd_code = isolation;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
			       NULL);
	QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
	return rc;
}

/*
 * Apply the configured isolation mode when the card goes online.
 * Only OSD/OSX cards with the SET_ACCESS_CONTROL adapter assist can
 * honor a non-default mode.
 */
int qeth_set_access_ctrl_online(struct qeth_card *card)
{
	int rc = 0;

	QETH_DBF_TEXT(TRACE, 4, "setactlo");

	if ((card->info.type == QETH_CARD_TYPE_OSD ||
	     card->info.type == QETH_CARD_TYPE_OSX) &&
	     qeth_adp_supported(card,
IPA_SETADP_SET_ACCESS_CONTROL)) {
		rc = qeth_setadpparms_set_access_ctrl(card,
			card->options.isolation);
		if (rc) {
			QETH_DBF_MESSAGE(3,
				"IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
				card->gdev->dev.kobj.name, rc);
		}
	} else if (card->options.isolation != ISOLATION_MODE_NONE) {
		/* isolation was requested but cannot be provided */
		card->options.isolation = ISOLATION_MODE_NONE;

		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		rc = -EOPNOTSUPP;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);

/*
 * net_device tx-timeout hook: count the error and trigger a full
 * card recovery.
 */
void qeth_tx_timeout(struct net_device *dev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(TRACE, 4, "txtimeo");
	card = dev->ml_priv;
	card->stats.tx_errors++;
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);

/*
 * Emulated MII register read for ethtool/mii userspace: there is no
 * real PHY, so plausible static values are synthesized from the card
 * state and MAC address.
 */
int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		/* fake a PHY ID from the MAC address */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc =
 card->stats.rx_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_mdio_read);

/*
 * Build the IPA PDU header for an SNMP command of 'len' payload bytes
 * and send it via qeth_send_control_data().
 */
static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
		struct qeth_cmd_buffer *iob, int len,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
			unsigned long),
		void *reply_param)
{
	u16 s1, s2;

	QETH_DBF_TEXT(TRACE, 4, "sendsnmp");

	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	/* adjust PDU length fields in IPA_PDU_HEADER */
	s1 = (u32) IPA_PDU_HEADER_SIZE + len;
	s2 = (u32) len;
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
				      reply_cb, reply_param);
}

/*
 * Reply callback for SNMP commands: copies each reply fragment into
 * the user-result buffer described by reply->param (qeth_arp_query_info).
 * Returns 1 while more fragments are expected, 0 when done.
 */
static int qeth_snmp_command_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long sdata)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_info *qinfo;
	struct qeth_snmp_cmd *snmp;
	unsigned char *data;
	__u16 data_len;

	QETH_DBF_TEXT(TRACE, 3, "snpcmdcb");

	cmd = (struct qeth_ipa_cmd *) sdata;
	data = (unsigned char *)((char *)cmd - reply->offset);
	qinfo = (struct qeth_arp_query_info *) reply->param;
	snmp = &cmd->data.setadapterparms.data.snmp;

	if (cmd->hdr.return_code) {
		QETH_DBF_TEXT_(TRACE, 4, "scer1%i", cmd->hdr.return_code);
		return 0;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_DBF_TEXT_(TRACE, 4, "scer2%i", cmd->hdr.return_code);
		return 0;
	}
	data_len = *((__u16
*)QETH_IPA_PDU_LEN_PDU1(data)); if (cmd->data.setadapterparms.hdr.seq_no == 1) data_len -= (__u16)((char *)&snmp->data - (char *)cmd); else data_len -= (__u16)((char *)&snmp->request - (char *)cmd); /* check if there is enough room in userspace */ if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { QETH_DBF_TEXT_(TRACE, 4, "scer3%i", -ENOMEM); cmd->hdr.return_code = -ENOMEM; return 0; } QETH_DBF_TEXT_(TRACE, 4, "snore%i", cmd->data.setadapterparms.hdr.used_total); QETH_DBF_TEXT_(TRACE, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no); /*copy entries to user buffer*/ if (cmd->data.setadapterparms.hdr.seq_no == 1) { memcpy(qinfo->udata + qinfo->udata_offset, (char *)snmp, data_len + offsetof(struct qeth_snmp_cmd, data)); qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data); } else { memcpy(qinfo->udata + qinfo->udata_offset, (char *)&snmp->request, data_len); } qinfo->udata_offset += data_len; /* check if all replies received ... */ QETH_DBF_TEXT_(TRACE, 4, "srtot%i", cmd->data.setadapterparms.hdr.used_total); QETH_DBF_TEXT_(TRACE, 4, "srseq%i", cmd->data.setadapterparms.hdr.seq_no); if (cmd->data.setadapterparms.hdr.seq_no < cmd->data.setadapterparms.hdr.used_total) return 1; return 0; } int qeth_snmp_command(struct qeth_card *card, char __user *udata) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; struct qeth_snmp_ureq *ureq; int req_len; struct qeth_arp_query_info qinfo = {0, }; int rc = 0; QETH_DBF_TEXT(TRACE, 3, "snmpcmd"); if (card->info.guestlan) return -EOPNOTSUPP; if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && (!card->options.layer2)) { return -EOPNOTSUPP; } /* skip 4 bytes (data_len struct member) to get req_len */ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) return -EFAULT; ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL); if (!ureq) { QETH_DBF_TEXT(TRACE, 2, "snmpnome"); return -ENOMEM; } if (copy_from_user(ureq, udata, req_len + sizeof(struct qeth_snmp_ureq_hdr))) { 
kfree(ureq); return -EFAULT; } qinfo.udata_len = ureq->hdr.data_len; qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); if (!qinfo.udata) { kfree(ureq); return -ENOMEM; } qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, QETH_SNMP_SETADP_CMDLENGTH + req_len); cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, qeth_snmp_command_cb, (void *)&qinfo); if (rc) QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n", QETH_CARD_IFNAME(card), rc); else { if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) rc = -EFAULT; } kfree(ureq); kfree(qinfo.udata); return rc; } EXPORT_SYMBOL_GPL(qeth_snmp_command); static inline int qeth_get_qdio_q_format(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_IQD: return 2; default: return 0; } } static int qeth_qdio_establish(struct qeth_card *card) { struct qdio_initialize init_data; char *qib_param_field; struct qdio_buffer **in_sbal_ptrs; struct qdio_buffer **out_sbal_ptrs; int i, j, k; int rc = 0; QETH_DBF_TEXT(SETUP, 2, "qdioest"); qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), GFP_KERNEL); if (!qib_param_field) return -ENOMEM; qeth_create_qib_param_field(card, qib_param_field); qeth_create_qib_param_field_blkt(card, qib_param_field); in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), GFP_KERNEL); if (!in_sbal_ptrs) { kfree(qib_param_field); return -ENOMEM; } for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) in_sbal_ptrs[i] = (struct qdio_buffer *) virt_to_phys(card->qdio.in_q->bufs[i].buffer); out_sbal_ptrs = kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), GFP_KERNEL); if (!out_sbal_ptrs) { kfree(in_sbal_ptrs); kfree(qib_param_field); return -ENOMEM; } for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) for (j = 0; j < 
QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
			out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
				card->qdio.out_qs[i]->bufs[j].buffer);
		}

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.cdev = CARD_DDEV(card);
	init_data.q_format = qeth_get_qdio_q_format(card);
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field = qib_param_field;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = card->qdio.no_out_queues;
	init_data.input_handler = card->discipline.input_handler;
	init_data.output_handler = card->discipline.output_handler;
	init_data.int_parm = (unsigned long) card;
	init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;

	/* only one caller may move ALLOCATED -> ESTABLISHED */
	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(&init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(&init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
		}
	}
out:
	kfree(out_sbal_ptrs);
	kfree(in_sbal_ptrs);
	kfree(qib_param_field);
	return rc;
}

/* Release all resources held by a card object (final teardown). */
static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 2, "freecrd");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
	qeth_clean_channel(&card->read);
	qeth_clean_channel(&card->write);
	if (card->dev)
		free_netdev(card->dev);
	kfree(card->ip_tbd_list);
	qeth_free_qdio_buffers(card);
	unregister_service_level(&card->qeth_service_level);
	kfree(card);
}

/* CCW device IDs handled by this driver, mapped to card types. */
static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
					.driver_info = QETH_CARD_TYPE_OSN},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids); static struct ccw_driver qeth_ccw_driver = { .name = "qeth", .ids = qeth_ids, .probe = ccwgroup_probe_ccwdev, .remove = ccwgroup_remove_ccwdev, }; static int qeth_core_driver_group(const char *buf, struct device *root_dev, unsigned long driver_id) { return ccwgroup_create_from_string(root_dev, driver_id, &qeth_ccw_driver, 3, buf); } int qeth_core_hardsetup_card(struct qeth_card *card) { int retries = 0; int rc; QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); retry: if (retries) QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", dev_name(&card->gdev->dev)); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); rc = ccw_device_set_online(CARD_RDEV(card)); if (rc) goto retriable; rc = ccw_device_set_online(CARD_WDEV(card)); if (rc) goto retriable; rc = ccw_device_set_online(CARD_DDEV(card)); if (rc) goto retriable; rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); retriable: if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break1"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); if (++retries > 3) goto out; else goto retry; } qeth_init_tokens(card); qeth_init_func_level(card); rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break2"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); if (--retries < 0) goto out; else goto retry; } rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break3"); return rc; } else if (rc) { QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); if (--retries < 0) goto out; else goto retry; } rc = qeth_mpc_initialize(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out; } return 0; out: dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " "an error on the device\n"); QETH_DBF_MESSAGE(2, "%s 
Initialization in hardsetup failed! rc=%d\n", dev_name(&card->gdev->dev), rc); return rc; } EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); static inline int qeth_create_skb_frag(struct qdio_buffer_element *element, struct sk_buff **pskb, int offset, int *pfrag, int data_len) { struct page *page = virt_to_page(element->addr); if (*pskb == NULL) { /* the upper protocol layers assume that there is data in the * skb itself. Copy a small amount (64 bytes) to make them * happy. */ *pskb = dev_alloc_skb(64 + ETH_HLEN); if (!(*pskb)) return -ENOMEM; skb_reserve(*pskb, ETH_HLEN); if (data_len <= 64) { memcpy(skb_put(*pskb, data_len), element->addr + offset, data_len); } else { get_page(page); memcpy(skb_put(*pskb, 64), element->addr + offset, 64); skb_fill_page_desc(*pskb, *pfrag, page, offset + 64, data_len - 64); (*pskb)->data_len += data_len - 64; (*pskb)->len += data_len - 64; (*pskb)->truesize += data_len - 64; (*pfrag)++; } } else { get_page(page); skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len); (*pskb)->data_len += data_len; (*pskb)->len += data_len; (*pskb)->truesize += data_len; (*pfrag)++; } return 0; } struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, struct qdio_buffer_element **__element, int *__offset, struct qeth_hdr **hdr) { struct qdio_buffer_element *element = *__element; int offset = *__offset; struct sk_buff *skb = NULL; int skb_len = 0; void *data_ptr; int data_len; int headroom = 0; int use_rx_sg = 0; int frag = 0; /* qeth_hdr must not cross element boundaries */ if (element->length < offset + sizeof(struct qeth_hdr)) { if (qeth_is_last_sbale(element)) return NULL; element++; offset = 0; if (element->length < sizeof(struct qeth_hdr)) return NULL; } *hdr = element->addr + offset; offset += sizeof(struct qeth_hdr); switch ((*hdr)->hdr.l2.id) { case QETH_HEADER_TYPE_LAYER2: skb_len = (*hdr)->hdr.l2.pkt_length; break; case QETH_HEADER_TYPE_LAYER3: skb_len = (*hdr)->hdr.l3.length; if ((card->info.link_type 
== QETH_LINK_TYPE_LANE_TR) || (card->info.link_type == QETH_LINK_TYPE_HSTR)) headroom = TR_HLEN; else headroom = ETH_HLEN; break; case QETH_HEADER_TYPE_OSN: skb_len = (*hdr)->hdr.osn.pdu_length; headroom = sizeof(struct qeth_hdr); break; default: break; } if (!skb_len) return NULL; if ((skb_len >= card->options.rx_sg_cb) && (!(card->info.type == QETH_CARD_TYPE_OSN)) && (!atomic_read(&card->force_alloc_skb))) { use_rx_sg = 1; } else { skb = dev_alloc_skb(skb_len + headroom); if (!skb) goto no_mem; if (headroom) skb_reserve(skb, headroom); } data_ptr = element->addr + offset; while (skb_len) { data_len = min(skb_len, (int)(element->length - offset)); if (data_len) { if (use_rx_sg) { if (qeth_create_skb_frag(element, &skb, offset, &frag, data_len)) goto no_mem; } else { memcpy(skb_put(skb, data_len), data_ptr, data_len); } } skb_len -= data_len; if (skb_len) { if (qeth_is_last_sbale(element)) { QETH_DBF_TEXT(TRACE, 4, "unexeob"); QETH_DBF_TEXT_(TRACE, 4, "%s", CARD_BUS_ID(card)); QETH_DBF_TEXT(QERR, 2, "unexeob"); QETH_DBF_TEXT_(QERR, 2, "%s", CARD_BUS_ID(card)); QETH_DBF_HEX(MISC, 4, buffer, sizeof(*buffer)); dev_kfree_skb_any(skb); card->stats.rx_errors++; return NULL; } element++; offset = 0; data_ptr = element->addr; } else { offset += data_len; } } *__element = element; *__offset = offset; if (use_rx_sg && card->options.performance_stats) { card->perf_stats.sg_skbs_rx++; card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags; } return skb; no_mem: if (net_ratelimit()) { QETH_DBF_TEXT(TRACE, 2, "noskbmem"); QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); } card->stats.rx_dropped++; return NULL; } EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); static void qeth_unregister_dbf_views(void) { int x; for (x = 0; x < QETH_DBF_INFOS; x++) { debug_unregister(qeth_dbf[x].id); qeth_dbf[x].id = NULL; } } void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...) 
{ char dbf_txt_buf[32]; va_list args; if (level > (qeth_dbf[dbf_nix].id)->level) return; va_start(args, fmt); vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); va_end(args); debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf); } EXPORT_SYMBOL_GPL(qeth_dbf_longtext); static int qeth_register_dbf_views(void) { int ret; int x; for (x = 0; x < QETH_DBF_INFOS; x++) { /* register the areas */ qeth_dbf[x].id = debug_register(qeth_dbf[x].name, qeth_dbf[x].pages, qeth_dbf[x].areas, qeth_dbf[x].len); if (qeth_dbf[x].id == NULL) { qeth_unregister_dbf_views(); return -ENOMEM; } /* register a view */ ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); if (ret) { qeth_unregister_dbf_views(); return ret; } /* set a passing level */ debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); } return 0; } int qeth_core_load_discipline(struct qeth_card *card, enum qeth_discipline_id discipline) { int rc = 0; switch (discipline) { case QETH_DISCIPLINE_LAYER3: card->discipline.ccwgdriver = try_then_request_module( symbol_get(qeth_l3_ccwgroup_driver), "qeth_l3"); break; case QETH_DISCIPLINE_LAYER2: card->discipline.ccwgdriver = try_then_request_module( symbol_get(qeth_l2_ccwgroup_driver), "qeth_l2"); break; } if (!card->discipline.ccwgdriver) { dev_err(&card->gdev->dev, "There is no kernel module to " "support discipline %d\n", discipline); rc = -EINVAL; } return rc; } void qeth_core_free_discipline(struct qeth_card *card) { if (card->options.layer2) symbol_put(qeth_l2_ccwgroup_driver); else symbol_put(qeth_l3_ccwgroup_driver); card->discipline.ccwgdriver = NULL; } static void qeth_determine_capabilities(struct qeth_card *card) { int rc; int length; char *prcd; QETH_DBF_TEXT(SETUP, 2, "detcapab"); rc = ccw_device_set_online(CARD_DDEV(card)); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); goto out; } rc = qeth_read_conf_data(card, (void **) &prcd, &length); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", dev_name(&card->gdev->dev), rc); 
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_offline; } qeth_configure_unitaddr(card, prcd); qeth_configure_blkt_default(card, prcd); kfree(prcd); rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); if (rc) QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); out_offline: ccw_device_set_offline(CARD_DDEV(card)); out: return; } static int qeth_core_probe_device(struct ccwgroup_device *gdev) { struct qeth_card *card; struct device *dev; int rc; unsigned long flags; QETH_DBF_TEXT(SETUP, 2, "probedev"); dev = &gdev->dev; if (!get_device(dev)) return -ENODEV; QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); card = qeth_alloc_card(); if (!card) { QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); rc = -ENOMEM; goto err_dev; } card->read.ccwdev = gdev->cdev[0]; card->write.ccwdev = gdev->cdev[1]; card->data.ccwdev = gdev->cdev[2]; dev_set_drvdata(&gdev->dev, card); card->gdev = gdev; gdev->cdev[0]->handler = qeth_irq; gdev->cdev[1]->handler = qeth_irq; gdev->cdev[2]->handler = qeth_irq; rc = qeth_determine_card_type(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); goto err_card; } rc = qeth_setup_card(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); goto err_card; } if (card->info.type == QETH_CARD_TYPE_OSN) rc = qeth_core_create_osn_attributes(dev); else rc = qeth_core_create_device_attributes(dev); if (rc) goto err_card; switch (card->info.type) { case QETH_CARD_TYPE_OSN: case QETH_CARD_TYPE_OSM: rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2); if (rc) goto err_attr; rc = card->discipline.ccwgdriver->probe(card->gdev); if (rc) goto err_disc; case QETH_CARD_TYPE_OSD: case QETH_CARD_TYPE_OSX: default: break; } write_lock_irqsave(&qeth_core_card_list.rwlock, flags); list_add_tail(&card->list, &qeth_core_card_list.list); write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); qeth_determine_capabilities(card); return 0; err_disc: qeth_core_free_discipline(card); err_attr: if (card->info.type == QETH_CARD_TYPE_OSN) 
qeth_core_remove_osn_attributes(dev); else qeth_core_remove_device_attributes(dev); err_card: qeth_core_free_card(card); err_dev: put_device(dev); return rc; } static void qeth_core_remove_device(struct ccwgroup_device *gdev) { unsigned long flags; struct qeth_card *card = dev_get_drvdata(&gdev->dev); QETH_DBF_TEXT(SETUP, 2, "removedv"); if (card->discipline.ccwgdriver) { card->discipline.ccwgdriver->remove(gdev); qeth_core_free_discipline(card); } if (card->info.type == QETH_CARD_TYPE_OSN) { qeth_core_remove_osn_attributes(&gdev->dev); } else { qeth_core_remove_device_attributes(&gdev->dev); } write_lock_irqsave(&qeth_core_card_list.rwlock, flags); list_del(&card->list); write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); qeth_core_free_card(card); dev_set_drvdata(&gdev->dev, NULL); put_device(&gdev->dev); return; } static int qeth_core_set_online(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); int rc = 0; int def_discipline; if (!card->discipline.ccwgdriver) { if (card->info.type == QETH_CARD_TYPE_IQD) def_discipline = QETH_DISCIPLINE_LAYER3; else def_discipline = QETH_DISCIPLINE_LAYER2; rc = qeth_core_load_discipline(card, def_discipline); if (rc) goto err; rc = card->discipline.ccwgdriver->probe(card->gdev); if (rc) goto err; } rc = card->discipline.ccwgdriver->set_online(gdev); err: return rc; } static int qeth_core_set_offline(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); return card->discipline.ccwgdriver->set_offline(gdev); } static void qeth_core_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline.ccwgdriver && card->discipline.ccwgdriver->shutdown) card->discipline.ccwgdriver->shutdown(gdev); } static int qeth_core_prepare(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline.ccwgdriver && card->discipline.ccwgdriver->prepare) return 
card->discipline.ccwgdriver->prepare(gdev); return 0; } static void qeth_core_complete(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline.ccwgdriver && card->discipline.ccwgdriver->complete) card->discipline.ccwgdriver->complete(gdev); } static int qeth_core_freeze(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline.ccwgdriver && card->discipline.ccwgdriver->freeze) return card->discipline.ccwgdriver->freeze(gdev); return 0; } static int qeth_core_thaw(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline.ccwgdriver && card->discipline.ccwgdriver->thaw) return card->discipline.ccwgdriver->thaw(gdev); return 0; } static int qeth_core_restore(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (card->discipline.ccwgdriver && card->discipline.ccwgdriver->restore) return card->discipline.ccwgdriver->restore(gdev); return 0; } static struct ccwgroup_driver qeth_core_ccwgroup_driver = { .owner = THIS_MODULE, .name = "qeth", .driver_id = 0xD8C5E3C8, .probe = qeth_core_probe_device, .remove = qeth_core_remove_device, .set_online = qeth_core_set_online, .set_offline = qeth_core_set_offline, .shutdown = qeth_core_shutdown, .prepare = qeth_core_prepare, .complete = qeth_core_complete, .freeze = qeth_core_freeze, .thaw = qeth_core_thaw, .restore = qeth_core_restore, }; static ssize_t qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf, size_t count) { int err; err = qeth_core_driver_group(buf, qeth_core_root_dev, qeth_core_ccwgroup_driver.driver_id); if (err) return err; else return count; } static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store); static struct { const char str[ETH_GSTRING_LEN]; } qeth_ethtool_stats_keys[] = { /* 0 */{"rx skbs"}, {"rx buffers"}, {"tx skbs"}, {"tx buffers"}, {"tx skbs no packing"}, {"tx buffers no packing"}, {"tx 
skbs packing"}, {"tx buffers packing"}, {"tx sg skbs"}, {"tx sg frags"}, /* 10 */{"rx sg skbs"}, {"rx sg frags"}, {"rx sg page allocs"}, {"tx large kbytes"}, {"tx large count"}, {"tx pk state ch n->p"}, {"tx pk state ch p->n"}, {"tx pk watermark low"}, {"tx pk watermark high"}, {"queue 0 buffer usage"}, /* 20 */{"queue 1 buffer usage"}, {"queue 2 buffer usage"}, {"queue 3 buffer usage"}, {"rx handler time"}, {"rx handler count"}, {"rx do_QDIO time"}, {"rx do_QDIO count"}, {"tx handler time"}, {"tx handler count"}, {"tx time"}, /* 30 */{"tx count"}, {"tx do_QDIO time"}, {"tx do_QDIO count"}, {"tx csum"}, {"tx lin"}, }; int qeth_core_get_sset_count(struct net_device *dev, int stringset) { switch (stringset) { case ETH_SS_STATS: return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN); default: return -EINVAL; } } EXPORT_SYMBOL_GPL(qeth_core_get_sset_count); void qeth_core_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct qeth_card *card = dev->ml_priv; data[0] = card->stats.rx_packets - card->perf_stats.initial_rx_packets; data[1] = card->perf_stats.bufs_rec; data[2] = card->stats.tx_packets - card->perf_stats.initial_tx_packets; data[3] = card->perf_stats.bufs_sent; data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets - card->perf_stats.skbs_sent_pack; data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack; data[6] = card->perf_stats.skbs_sent_pack; data[7] = card->perf_stats.bufs_sent_pack; data[8] = card->perf_stats.sg_skbs_sent; data[9] = card->perf_stats.sg_frags_sent; data[10] = card->perf_stats.sg_skbs_rx; data[11] = card->perf_stats.sg_frags_rx; data[12] = card->perf_stats.sg_alloc_page_rx; data[13] = (card->perf_stats.large_send_bytes >> 10); data[14] = card->perf_stats.large_send_cnt; data[15] = card->perf_stats.sc_dp_p; data[16] = card->perf_stats.sc_p_dp; data[17] = QETH_LOW_WATERMARK_PACK; data[18] = QETH_HIGH_WATERMARK_PACK; data[19] = 
atomic_read(&card->qdio.out_qs[0]->used_buffers); data[20] = (card->qdio.no_out_queues > 1) ? atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0; data[21] = (card->qdio.no_out_queues > 2) ? atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0; data[22] = (card->qdio.no_out_queues > 3) ? atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0; data[23] = card->perf_stats.inbound_time; data[24] = card->perf_stats.inbound_cnt; data[25] = card->perf_stats.inbound_do_qdio_time; data[26] = card->perf_stats.inbound_do_qdio_cnt; data[27] = card->perf_stats.outbound_handler_time; data[28] = card->perf_stats.outbound_handler_cnt; data[29] = card->perf_stats.outbound_time; data[30] = card->perf_stats.outbound_cnt; data[31] = card->perf_stats.outbound_do_qdio_time; data[32] = card->perf_stats.outbound_do_qdio_cnt; data[33] = card->perf_stats.tx_csum; data[34] = card->perf_stats.tx_lin; } EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: memcpy(data, &qeth_ethtool_stats_keys, sizeof(qeth_ethtool_stats_keys)); break; default: WARN_ON(1); break; } } EXPORT_SYMBOL_GPL(qeth_core_get_strings); void qeth_core_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct qeth_card *card = dev->ml_priv; if (card->options.layer2) strcpy(info->driver, "qeth_l2"); else strcpy(info->driver, "qeth_l3"); strcpy(info->version, "1.0"); strcpy(info->fw_version, card->info.mcl_level); sprintf(info->bus_info, "%s/%s/%s", CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); } EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); int qeth_core_ethtool_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct qeth_card *card = netdev->ml_priv; enum qeth_link_types link_type; if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) link_type = QETH_LINK_TYPE_10GBIT_ETH; else link_type = card->info.link_type; ecmd->transceiver = XCVR_INTERNAL; 
ecmd->supported = SUPPORTED_Autoneg; ecmd->advertising = ADVERTISED_Autoneg; ecmd->duplex = DUPLEX_FULL; ecmd->autoneg = AUTONEG_ENABLE; switch (link_type) { case QETH_LINK_TYPE_FAST_ETH: case QETH_LINK_TYPE_LANE_ETH100: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_TP; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_TP; ecmd->speed = SPEED_100; ecmd->port = PORT_TP; break; case QETH_LINK_TYPE_GBIT_ETH: case QETH_LINK_TYPE_LANE_ETH1000: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE; ecmd->speed = SPEED_1000; ecmd->port = PORT_FIBRE; break; case QETH_LINK_TYPE_10GBIT_ETH: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE; ecmd->speed = SPEED_10000; ecmd->port = PORT_FIBRE; break; default: ecmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_TP; ecmd->advertising |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_TP; ecmd->speed = SPEED_10; ecmd->port = PORT_TP; } return 0; } EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings); static int __init qeth_core_init(void) { int rc; pr_info("loading core functions\n"); 
INIT_LIST_HEAD(&qeth_core_card_list.list); rwlock_init(&qeth_core_card_list.rwlock); rc = qeth_register_dbf_views(); if (rc) goto out_err; rc = ccw_driver_register(&qeth_ccw_driver); if (rc) goto ccw_err; rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); if (rc) goto ccwgroup_err; rc = driver_create_file(&qeth_core_ccwgroup_driver.driver, &driver_attr_group); if (rc) goto driver_err; qeth_core_root_dev = root_device_register("qeth"); rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0; if (rc) goto register_err; qeth_core_header_cache = kmem_cache_create("qeth_hdr", sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL); if (!qeth_core_header_cache) { rc = -ENOMEM; goto slab_err; } return 0; slab_err: root_device_unregister(qeth_core_root_dev); register_err: driver_remove_file(&qeth_core_ccwgroup_driver.driver, &driver_attr_group); driver_err: ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); ccwgroup_err: ccw_driver_unregister(&qeth_ccw_driver); ccw_err: QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc); qeth_unregister_dbf_views(); out_err: pr_err("Initializing the qeth device driver failed\n"); return rc; } static void __exit qeth_core_exit(void) { root_device_unregister(qeth_core_root_dev); driver_remove_file(&qeth_core_ccwgroup_driver.driver, &driver_attr_group); ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); ccw_driver_unregister(&qeth_ccw_driver); kmem_cache_destroy(qeth_core_header_cache); qeth_unregister_dbf_views(); pr_info("core functions removed\n"); } module_init(qeth_core_init); module_exit(qeth_core_exit); MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); MODULE_DESCRIPTION("qeth core functions"); MODULE_LICENSE("GPL");
gpl-2.0
DerTeufel/cm7
drivers/spi/spi_ppc4xx.c
761
14571
/* * SPI_PPC4XX SPI controller driver. * * Copyright (C) 2007 Gary Jennejohn <garyj@denx.de> * Copyright 2008 Stefan Roese <sr@denx.de>, DENX Software Engineering * Copyright 2009 Harris Corporation, Steven A. Falco <sfalco@harris.com> * * Based in part on drivers/spi/spi_s3c24xx.c * * Copyright (c) 2006 Ben Dooks * Copyright (c) 2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ /* * The PPC4xx SPI controller has no FIFO so each sent/received byte will * generate an interrupt to the CPU. This can cause high CPU utilization. * This driver allows platforms to reduce the interrupt load on the CPU * during SPI transfers by setting max_speed_hz via the device tree. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/of_platform.h> #include <linux/of_spi.h> #include <linux/of_gpio.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <asm/io.h> #include <asm/dcr.h> #include <asm/dcr-regs.h> /* bits in mode register - bit 0 is MSb */ /* * SPI_PPC4XX_MODE_SCP = 0 means "data latched on trailing edge of clock" * SPI_PPC4XX_MODE_SCP = 1 means "data latched on leading edge of clock" * Note: This is the inverse of CPHA. */ #define SPI_PPC4XX_MODE_SCP (0x80 >> 3) /* SPI_PPC4XX_MODE_SPE = 1 means "port enabled" */ #define SPI_PPC4XX_MODE_SPE (0x80 >> 4) /* * SPI_PPC4XX_MODE_RD = 0 means "MSB first" - this is the normal mode * SPI_PPC4XX_MODE_RD = 1 means "LSB first" - this is bit-reversed mode * Note: This is identical to SPI_LSB_FIRST. 
*/ #define SPI_PPC4XX_MODE_RD (0x80 >> 5) /* * SPI_PPC4XX_MODE_CI = 0 means "clock idles low" * SPI_PPC4XX_MODE_CI = 1 means "clock idles high" * Note: This is identical to CPOL. */ #define SPI_PPC4XX_MODE_CI (0x80 >> 6) /* * SPI_PPC4XX_MODE_IL = 0 means "loopback disable" * SPI_PPC4XX_MODE_IL = 1 means "loopback enable" */ #define SPI_PPC4XX_MODE_IL (0x80 >> 7) /* bits in control register */ /* starts a transfer when set */ #define SPI_PPC4XX_CR_STR (0x80 >> 7) /* bits in status register */ /* port is busy with a transfer */ #define SPI_PPC4XX_SR_BSY (0x80 >> 6) /* RxD ready */ #define SPI_PPC4XX_SR_RBR (0x80 >> 7) /* clock settings (SCP and CI) for various SPI modes */ #define SPI_CLK_MODE0 (SPI_PPC4XX_MODE_SCP | 0) #define SPI_CLK_MODE1 (0 | 0) #define SPI_CLK_MODE2 (SPI_PPC4XX_MODE_SCP | SPI_PPC4XX_MODE_CI) #define SPI_CLK_MODE3 (0 | SPI_PPC4XX_MODE_CI) #define DRIVER_NAME "spi_ppc4xx_of" struct spi_ppc4xx_regs { u8 mode; u8 rxd; u8 txd; u8 cr; u8 sr; u8 dummy; /* * Clock divisor modulus register * This uses the follwing formula: * SCPClkOut = OPBCLK/(4(CDM + 1)) * or * CDM = (OPBCLK/4*SCPClkOut) - 1 * bit 0 is the MSb! */ u8 cdm; }; /* SPI Controller driver's private data. 
*/ struct ppc4xx_spi { /* bitbang has to be first */ struct spi_bitbang bitbang; struct completion done; u64 mapbase; u64 mapsize; int irqnum; /* need this to set the SPI clock */ unsigned int opb_freq; /* for transfers */ int len; int count; /* data buffers */ const unsigned char *tx; unsigned char *rx; int *gpios; struct spi_ppc4xx_regs __iomem *regs; /* pointer to the registers */ struct spi_master *master; struct device *dev; }; /* need this so we can set the clock in the chipselect routine */ struct spi_ppc4xx_cs { u8 mode; }; static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t) { struct ppc4xx_spi *hw; u8 data; dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", t->tx_buf, t->rx_buf, t->len); hw = spi_master_get_devdata(spi->master); hw->tx = t->tx_buf; hw->rx = t->rx_buf; hw->len = t->len; hw->count = 0; /* send the first byte */ data = hw->tx ? hw->tx[0] : 0; out_8(&hw->regs->txd, data); out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR); wait_for_completion(&hw->done); return hw->count; } static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) { struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master); struct spi_ppc4xx_cs *cs = spi->controller_state; int scr; u8 cdm = 0; u32 speed; u8 bits_per_word; /* Start with the generic configuration for this device. */ bits_per_word = spi->bits_per_word; speed = spi->max_speed_hz; /* * Modify the configuration if the transfer overrides it. Do not allow * the transfer to overwrite the generic configuration with zeros. 
*/ if (t) { if (t->bits_per_word) bits_per_word = t->bits_per_word; if (t->speed_hz) speed = min(t->speed_hz, spi->max_speed_hz); } if (bits_per_word != 8) { dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bits_per_word); return -EINVAL; } if (!speed || (speed > spi->max_speed_hz)) { dev_err(&spi->dev, "invalid speed_hz (%d)\n", speed); return -EINVAL; } /* Write new configration */ out_8(&hw->regs->mode, cs->mode); /* Set the clock */ /* opb_freq was already divided by 4 */ scr = (hw->opb_freq / speed) - 1; if (scr > 0) cdm = min(scr, 0xff); dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", cdm, speed); if (in_8(&hw->regs->cdm) != cdm) out_8(&hw->regs->cdm, cdm); spin_lock(&hw->bitbang.lock); if (!hw->bitbang.busy) { hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); /* Need to ndelay here? */ } spin_unlock(&hw->bitbang.lock); return 0; } static int spi_ppc4xx_setup(struct spi_device *spi) { struct spi_ppc4xx_cs *cs = spi->controller_state; if (spi->bits_per_word != 8) { dev_err(&spi->dev, "invalid bits-per-word (%d)\n", spi->bits_per_word); return -EINVAL; } if (!spi->max_speed_hz) { dev_err(&spi->dev, "invalid max_speed_hz (must be non-zero)\n"); return -EINVAL; } if (cs == NULL) { cs = kzalloc(sizeof *cs, GFP_KERNEL); if (!cs) return -ENOMEM; spi->controller_state = cs; } /* * We set all bits of the SPI0_MODE register, so, * no need to read-modify-write */ cs->mode = SPI_PPC4XX_MODE_SPE; switch (spi->mode & (SPI_CPHA | SPI_CPOL)) { case SPI_MODE_0: cs->mode |= SPI_CLK_MODE0; break; case SPI_MODE_1: cs->mode |= SPI_CLK_MODE1; break; case SPI_MODE_2: cs->mode |= SPI_CLK_MODE2; break; case SPI_MODE_3: cs->mode |= SPI_CLK_MODE3; break; } if (spi->mode & SPI_LSB_FIRST) cs->mode |= SPI_PPC4XX_MODE_RD; return 0; } static void spi_ppc4xx_chipsel(struct spi_device *spi, int value) { struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master); unsigned int cs = spi->chip_select; unsigned int cspol; /* * If there are no chip selects at all, or if this is the 
special * case of a non-existent (dummy) chip select, do nothing. */ if (!hw->master->num_chipselect || hw->gpios[cs] == -EEXIST) return; cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; if (value == BITBANG_CS_INACTIVE) cspol = !cspol; gpio_set_value(hw->gpios[cs], cspol); } static irqreturn_t spi_ppc4xx_int(int irq, void *dev_id) { struct ppc4xx_spi *hw; u8 status; u8 data; unsigned int count; hw = (struct ppc4xx_spi *)dev_id; status = in_8(&hw->regs->sr); if (!status) return IRQ_NONE; /* * BSY de-asserts one cycle after the transfer is complete. The * interrupt is asserted after the transfer is complete. The exact * relationship is not documented, hence this code. */ if (unlikely(status & SPI_PPC4XX_SR_BSY)) { u8 lstatus; int cnt = 0; dev_dbg(hw->dev, "got interrupt but spi still busy?\n"); do { ndelay(10); lstatus = in_8(&hw->regs->sr); } while (++cnt < 100 && lstatus & SPI_PPC4XX_SR_BSY); if (cnt >= 100) { dev_err(hw->dev, "busywait: too many loops!\n"); complete(&hw->done); return IRQ_HANDLED; } else { /* status is always 1 (RBR) here */ status = in_8(&hw->regs->sr); dev_dbg(hw->dev, "loops %d status %x\n", cnt, status); } } count = hw->count; hw->count++; /* RBR triggered this interrupt. Therefore, data must be ready. */ data = in_8(&hw->regs->rxd); if (hw->rx) hw->rx[count] = data; count++; if (count < hw->len) { data = hw->tx ? hw->tx[count] : 0; out_8(&hw->regs->txd, data); out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR); } else { complete(&hw->done); } return IRQ_HANDLED; } static void spi_ppc4xx_cleanup(struct spi_device *spi) { kfree(spi->controller_state); } static void spi_ppc4xx_enable(struct ppc4xx_spi *hw) { /* * On all 4xx PPC's the SPI bus is shared/multiplexed with * the 2nd I2C bus. We need to enable the the SPI bus before * using it. 
*/ /* need to clear bit 14 to enable SPC */ dcri_clrset(SDR0, SDR0_PFC1, 0x80000000 >> 14, 0); } static void free_gpios(struct ppc4xx_spi *hw) { if (hw->master->num_chipselect) { int i; for (i = 0; i < hw->master->num_chipselect; i++) if (gpio_is_valid(hw->gpios[i])) gpio_free(hw->gpios[i]); kfree(hw->gpios); hw->gpios = NULL; } } /* * of_device layer stuff... */ static int __init spi_ppc4xx_of_probe(struct of_device *op, const struct of_device_id *match) { struct ppc4xx_spi *hw; struct spi_master *master; struct spi_bitbang *bbp; struct resource resource; struct device_node *np = op->dev.of_node; struct device *dev = &op->dev; struct device_node *opbnp; int ret; int num_gpios; const unsigned int *clk; master = spi_alloc_master(dev, sizeof *hw); if (master == NULL) return -ENOMEM; dev_set_drvdata(dev, master); hw = spi_master_get_devdata(master); hw->master = spi_master_get(master); hw->dev = dev; init_completion(&hw->done); /* * A count of zero implies a single SPI device without any chip-select. * Note that of_gpio_count counts all gpios assigned to this spi master. * This includes both "null" gpio's and real ones. */ num_gpios = of_gpio_count(np); if (num_gpios) { int i; hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL); if (!hw->gpios) { ret = -ENOMEM; goto free_master; } for (i = 0; i < num_gpios; i++) { int gpio; enum of_gpio_flags flags; gpio = of_get_gpio_flags(np, i, &flags); hw->gpios[i] = gpio; if (gpio_is_valid(gpio)) { /* Real CS - set the initial state. */ ret = gpio_request(gpio, np->name); if (ret < 0) { dev_err(dev, "can't request gpio " "#%d: %d\n", i, ret); goto free_gpios; } gpio_direction_output(gpio, !!(flags & OF_GPIO_ACTIVE_LOW)); } else if (gpio == -EEXIST) { ; /* No CS, but that's OK. 
*/ } else { dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); ret = -EINVAL; goto free_gpios; } } } /* Setup the state for the bitbang driver */ bbp = &hw->bitbang; bbp->master = hw->master; bbp->setup_transfer = spi_ppc4xx_setupxfer; bbp->chipselect = spi_ppc4xx_chipsel; bbp->txrx_bufs = spi_ppc4xx_txrx; bbp->use_dma = 0; bbp->master->setup = spi_ppc4xx_setup; bbp->master->cleanup = spi_ppc4xx_cleanup; /* Allocate bus num dynamically. */ bbp->master->bus_num = -1; /* the spi->mode bits understood by this driver: */ bbp->master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST; /* this many pins in all GPIO controllers */ bbp->master->num_chipselect = num_gpios; /* Get the clock for the OPB */ opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb"); if (opbnp == NULL) { dev_err(dev, "OPB: cannot find node\n"); ret = -ENODEV; goto free_gpios; } /* Get the clock (Hz) for the OPB */ clk = of_get_property(opbnp, "clock-frequency", NULL); if (clk == NULL) { dev_err(dev, "OPB: no clock-frequency property set\n"); of_node_put(opbnp); ret = -ENODEV; goto free_gpios; } hw->opb_freq = *clk; hw->opb_freq >>= 2; of_node_put(opbnp); ret = of_address_to_resource(np, 0, &resource); if (ret) { dev_err(dev, "error while parsing device node resource\n"); goto free_gpios; } hw->mapbase = resource.start; hw->mapsize = resource.end - resource.start + 1; /* Sanity check */ if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) { dev_err(dev, "too small to map registers\n"); ret = -EINVAL; goto free_gpios; } /* Request IRQ */ hw->irqnum = irq_of_parse_and_map(np, 0); ret = request_irq(hw->irqnum, spi_ppc4xx_int, IRQF_DISABLED, "spi_ppc4xx_of", (void *)hw); if (ret) { dev_err(dev, "unable to allocate interrupt\n"); goto free_gpios; } if (!request_mem_region(hw->mapbase, hw->mapsize, DRIVER_NAME)) { dev_err(dev, "resource unavailable\n"); ret = -EBUSY; goto request_mem_error; } hw->regs = ioremap(hw->mapbase, sizeof(struct spi_ppc4xx_regs)); if (!hw->regs) { dev_err(dev, "unable to 
memory map registers\n"); ret = -ENXIO; goto map_io_error; } spi_ppc4xx_enable(hw); /* Finally register our spi controller */ dev->dma_mask = 0; ret = spi_bitbang_start(bbp); if (ret) { dev_err(dev, "failed to register SPI master\n"); goto unmap_regs; } dev_info(dev, "driver initialized\n"); of_register_spi_devices(master, np); return 0; unmap_regs: iounmap(hw->regs); map_io_error: release_mem_region(hw->mapbase, hw->mapsize); request_mem_error: free_irq(hw->irqnum, hw); free_gpios: free_gpios(hw); free_master: dev_set_drvdata(dev, NULL); spi_master_put(master); dev_err(dev, "initialization failed\n"); return ret; } static int __exit spi_ppc4xx_of_remove(struct of_device *op) { struct spi_master *master = dev_get_drvdata(&op->dev); struct ppc4xx_spi *hw = spi_master_get_devdata(master); spi_bitbang_stop(&hw->bitbang); dev_set_drvdata(&op->dev, NULL); release_mem_region(hw->mapbase, hw->mapsize); free_irq(hw->irqnum, hw); iounmap(hw->regs); free_gpios(hw); return 0; } static const struct of_device_id spi_ppc4xx_of_match[] = { { .compatible = "ibm,ppc4xx-spi", }, {}, }; MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match); static struct of_platform_driver spi_ppc4xx_of_driver = { .probe = spi_ppc4xx_of_probe, .remove = __exit_p(spi_ppc4xx_of_remove), .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = spi_ppc4xx_of_match, }, }; static int __init spi_ppc4xx_init(void) { return of_register_platform_driver(&spi_ppc4xx_of_driver); } module_init(spi_ppc4xx_init); static void __exit spi_ppc4xx_exit(void) { of_unregister_platform_driver(&spi_ppc4xx_of_driver); } module_exit(spi_ppc4xx_exit); MODULE_AUTHOR("Gary Jennejohn & Stefan Roese"); MODULE_DESCRIPTION("Simple PPC4xx SPI Driver"); MODULE_LICENSE("GPL");
gpl-2.0
klock-android/linux
fs/befs/btree.c
761
22513
/* * linux/fs/befs/btree.c * * Copyright (C) 2001-2002 Will Dyson <will_dyson@pobox.com> * * Licensed under the GNU GPL. See the file COPYING for details. * * 2002-02-05: Sergey S. Kostyliov added binary search within * btree nodes. * * Many thanks to: * * Dominic Giampaolo, author of "Practical File System * Design with the Be File System", for such a helpful book. * * Marcus J. Ranum, author of the b+tree package in * comp.sources.misc volume 10. This code is not copied from that * work, but it is partially based on it. * * Makoto Kato, author of the original BeFS for linux filesystem * driver. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/buffer_head.h> #include "befs.h" #include "btree.h" #include "datastream.h" /* * The btree functions in this file are built on top of the * datastream.c interface, which is in turn built on top of the * io.c interface. */ /* Befs B+tree structure: * * The first thing in the tree is the tree superblock. It tells you * all kinds of useful things about the tree, like where the rootnode * is located, and the size of the nodes (always 1024 with current version * of BeOS). * * The rest of the tree consists of a series of nodes. Nodes contain a header * (struct befs_btree_nodehead), the packed key data, an array of shorts * containing the ending offsets for each of the keys, and an array of * befs_off_t values. In interior nodes, the keys are the ending keys for * the childnode they point to, and the values are offsets into the * datastream containing the tree. */ /* Note: * * The book states 2 confusing things about befs b+trees. First, * it states that the overflow field of node headers is used by internal nodes * to point to another node that "effectively continues this one". Here is what * I believe that means. Each key in internal nodes points to another node that * contains key values less than itself. 
Inspection reveals that the last key * in the internal node is not the last key in the index. Keys that are * greater than the last key in the internal node go into the overflow node. * I imagine there is a performance reason for this. * * Second, it states that the header of a btree node is sufficient to * distinguish internal nodes from leaf nodes. Without saying exactly how. * After figuring out the first, it becomes obvious that internal nodes have * overflow nodes and leafnodes do not. */ /* * Currently, this code is only good for directory B+trees. * In order to be used for other BFS indexes, it needs to be extended to handle * duplicate keys and non-string keytypes (int32, int64, float, double). */ /* * In memory structure of each btree node */ struct befs_btree_node { befs_host_btree_nodehead head; /* head of node converted to cpu byteorder */ struct buffer_head *bh; befs_btree_nodehead *od_node; /* on disk node */ }; /* local constants */ static const befs_off_t befs_bt_inval = 0xffffffffffffffffULL; /* local functions */ static int befs_btree_seekleaf(struct super_block *sb, befs_data_stream * ds, befs_btree_super * bt_super, struct befs_btree_node *this_node, befs_off_t * node_off); static int befs_bt_read_super(struct super_block *sb, befs_data_stream * ds, befs_btree_super * sup); static int befs_bt_read_node(struct super_block *sb, befs_data_stream * ds, struct befs_btree_node *node, befs_off_t node_off); static int befs_leafnode(struct befs_btree_node *node); static fs16 *befs_bt_keylen_index(struct befs_btree_node *node); static fs64 *befs_bt_valarray(struct befs_btree_node *node); static char *befs_bt_keydata(struct befs_btree_node *node); static int befs_find_key(struct super_block *sb, struct befs_btree_node *node, const char *findkey, befs_off_t * value); static char *befs_bt_get_key(struct super_block *sb, struct befs_btree_node *node, int index, u16 * keylen); static int befs_compare_strings(const void *key1, int keylen1, const void *key2, int 
keylen2); /** * befs_bt_read_super - read in btree superblock convert to cpu byteorder * @sb: Filesystem superblock * @ds: Datastream to read from * @sup: Buffer in which to place the btree superblock * * Calls befs_read_datastream to read in the btree superblock and * makes sure it is in cpu byteorder, byteswapping if necessary. * * On success, returns BEFS_OK and *@sup contains the btree superblock, * in cpu byte order. * * On failure, BEFS_ERR is returned. */ static int befs_bt_read_super(struct super_block *sb, befs_data_stream * ds, befs_btree_super * sup) { struct buffer_head *bh = NULL; befs_disk_btree_super *od_sup = NULL; befs_debug(sb, "---> %s", __func__); bh = befs_read_datastream(sb, ds, 0, NULL); if (!bh) { befs_error(sb, "Couldn't read index header."); goto error; } od_sup = (befs_disk_btree_super *) bh->b_data; befs_dump_index_entry(sb, od_sup); sup->magic = fs32_to_cpu(sb, od_sup->magic); sup->node_size = fs32_to_cpu(sb, od_sup->node_size); sup->max_depth = fs32_to_cpu(sb, od_sup->max_depth); sup->data_type = fs32_to_cpu(sb, od_sup->data_type); sup->root_node_ptr = fs64_to_cpu(sb, od_sup->root_node_ptr); sup->free_node_ptr = fs64_to_cpu(sb, od_sup->free_node_ptr); sup->max_size = fs64_to_cpu(sb, od_sup->max_size); brelse(bh); if (sup->magic != BEFS_BTREE_MAGIC) { befs_error(sb, "Index header has bad magic."); goto error; } befs_debug(sb, "<--- %s", __func__); return BEFS_OK; error: befs_debug(sb, "<--- %s ERROR", __func__); return BEFS_ERR; } /** * befs_bt_read_node - read in btree node and convert to cpu byteorder * @sb: Filesystem superblock * @ds: Datastream to read from * @node: Buffer in which to place the btree node * @node_off: Starting offset (in bytes) of the node in @ds * * Calls befs_read_datastream to read in the indicated btree node and * makes sure its header fields are in cpu byteorder, byteswapping if * necessary. * Note: node->bh must be NULL when this function called first * time. Don't forget brelse(node->bh) after last call. 
* * On success, returns BEFS_OK and *@node contains the btree node that * starts at @node_off, with the node->head fields in cpu byte order. * * On failure, BEFS_ERR is returned. */ static int befs_bt_read_node(struct super_block *sb, befs_data_stream * ds, struct befs_btree_node *node, befs_off_t node_off) { uint off = 0; befs_debug(sb, "---> %s", __func__); if (node->bh) brelse(node->bh); node->bh = befs_read_datastream(sb, ds, node_off, &off); if (!node->bh) { befs_error(sb, "%s failed to read " "node at %llu", __func__, node_off); befs_debug(sb, "<--- %s ERROR", __func__); return BEFS_ERR; } node->od_node = (befs_btree_nodehead *) ((void *) node->bh->b_data + off); befs_dump_index_node(sb, node->od_node); node->head.left = fs64_to_cpu(sb, node->od_node->left); node->head.right = fs64_to_cpu(sb, node->od_node->right); node->head.overflow = fs64_to_cpu(sb, node->od_node->overflow); node->head.all_key_count = fs16_to_cpu(sb, node->od_node->all_key_count); node->head.all_key_length = fs16_to_cpu(sb, node->od_node->all_key_length); befs_debug(sb, "<--- %s", __func__); return BEFS_OK; } /** * befs_btree_find - Find a key in a befs B+tree * @sb: Filesystem superblock * @ds: Datastream containing btree * @key: Key string to lookup in btree * @value: Value stored with @key * * On success, returns BEFS_OK and sets *@value to the value stored * with @key (usually the disk block number of an inode). * * On failure, returns BEFS_ERR or BEFS_BT_NOT_FOUND. * * Algorithm: * Read the superblock and rootnode of the b+tree. * Drill down through the interior nodes using befs_find_key(). * Once at the correct leaf node, use befs_find_key() again to get the * actuall value stored with the key. 
*/ int befs_btree_find(struct super_block *sb, befs_data_stream * ds, const char *key, befs_off_t * value) { struct befs_btree_node *this_node = NULL; befs_btree_super bt_super; befs_off_t node_off; int res; befs_debug(sb, "---> %s Key: %s", __func__, key); if (befs_bt_read_super(sb, ds, &bt_super) != BEFS_OK) { befs_error(sb, "befs_btree_find() failed to read index superblock"); goto error; } this_node = kmalloc(sizeof(struct befs_btree_node), GFP_NOFS); if (!this_node) { befs_error(sb, "befs_btree_find() failed to allocate %zu " "bytes of memory", sizeof(struct befs_btree_node)); goto error; } this_node->bh = NULL; /* read in root node */ node_off = bt_super.root_node_ptr; if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) { befs_error(sb, "befs_btree_find() failed to read " "node at %llu", node_off); goto error_alloc; } while (!befs_leafnode(this_node)) { res = befs_find_key(sb, this_node, key, &node_off); if (res == BEFS_BT_NOT_FOUND) node_off = this_node->head.overflow; /* if no match, go to overflow node */ if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) { befs_error(sb, "befs_btree_find() failed to read " "node at %llu", node_off); goto error_alloc; } } /* at the correct leaf node now */ res = befs_find_key(sb, this_node, key, value); brelse(this_node->bh); kfree(this_node); if (res != BEFS_BT_MATCH) { befs_debug(sb, "<--- %s Key %s not found", __func__, key); *value = 0; return BEFS_BT_NOT_FOUND; } befs_debug(sb, "<--- %s Found key %s, value %llu", __func__, key, *value); return BEFS_OK; error_alloc: kfree(this_node); error: *value = 0; befs_debug(sb, "<--- %s ERROR", __func__); return BEFS_ERR; } /** * befs_find_key - Search for a key within a node * @sb: Filesystem superblock * @node: Node to find the key within * @findkey: Keystring to search for * @value: If key is found, the value stored with the key is put here * * finds exact match if one exists, and returns BEFS_BT_MATCH * If no exact match, finds first key in node that is 
greater * (alphabetically) than the search key and returns BEFS_BT_PARMATCH * (for partial match, I guess). Can you think of something better to * call it? * * If no key was a match or greater than the search key, return * BEFS_BT_NOT_FOUND. * * Use binary search instead of a linear. */ static int befs_find_key(struct super_block *sb, struct befs_btree_node *node, const char *findkey, befs_off_t * value) { int first, last, mid; int eq; u16 keylen; int findkey_len; char *thiskey; fs64 *valarray; befs_debug(sb, "---> %s %s", __func__, findkey); *value = 0; findkey_len = strlen(findkey); /* if node can not contain key, just skeep this node */ last = node->head.all_key_count - 1; thiskey = befs_bt_get_key(sb, node, last, &keylen); eq = befs_compare_strings(thiskey, keylen, findkey, findkey_len); if (eq < 0) { befs_debug(sb, "<--- %s %s not found", __func__, findkey); return BEFS_BT_NOT_FOUND; } valarray = befs_bt_valarray(node); /* simple binary search */ first = 0; mid = 0; while (last >= first) { mid = (last + first) / 2; befs_debug(sb, "first: %d, last: %d, mid: %d", first, last, mid); thiskey = befs_bt_get_key(sb, node, mid, &keylen); eq = befs_compare_strings(thiskey, keylen, findkey, findkey_len); if (eq == 0) { befs_debug(sb, "<--- %s found %s at %d", __func__, thiskey, mid); *value = fs64_to_cpu(sb, valarray[mid]); return BEFS_BT_MATCH; } if (eq > 0) last = mid - 1; else first = mid + 1; } if (eq < 0) *value = fs64_to_cpu(sb, valarray[mid + 1]); else *value = fs64_to_cpu(sb, valarray[mid]); befs_debug(sb, "<--- %s found %s at %d", __func__, thiskey, mid); return BEFS_BT_PARMATCH; } /** * befs_btree_read - Traverse leafnodes of a btree * @sb: Filesystem superblock * @ds: Datastream containing btree * @key_no: Key number (alphabetical order) of key to read * @bufsize: Size of the buffer to return key in * @keybuf: Pointer to a buffer to put the key in * @keysize: Length of the returned key * @value: Value stored with the returned key * * Heres how it works: 
Key_no is the index of the key/value pair to * return in keybuf/value. * Bufsize is the size of keybuf (BEFS_NAME_LEN+1 is a good size). Keysize is * the number of characters in the key (just a convenience). * * Algorithm: * Get the first leafnode of the tree. See if the requested key is in that * node. If not, follow the node->right link to the next leafnode. Repeat * until the (key_no)th key is found or the tree is out of keys. */ int befs_btree_read(struct super_block *sb, befs_data_stream * ds, loff_t key_no, size_t bufsize, char *keybuf, size_t * keysize, befs_off_t * value) { struct befs_btree_node *this_node; befs_btree_super bt_super; befs_off_t node_off = 0; int cur_key; fs64 *valarray; char *keystart; u16 keylen; int res; uint key_sum = 0; befs_debug(sb, "---> %s", __func__); if (befs_bt_read_super(sb, ds, &bt_super) != BEFS_OK) { befs_error(sb, "befs_btree_read() failed to read index superblock"); goto error; } this_node = kmalloc(sizeof(struct befs_btree_node), GFP_NOFS); if (this_node == NULL) { befs_error(sb, "befs_btree_read() failed to allocate %zu " "bytes of memory", sizeof(struct befs_btree_node)); goto error; } node_off = bt_super.root_node_ptr; this_node->bh = NULL; /* seeks down to first leafnode, reads it into this_node */ res = befs_btree_seekleaf(sb, ds, &bt_super, this_node, &node_off); if (res == BEFS_BT_EMPTY) { brelse(this_node->bh); kfree(this_node); *value = 0; *keysize = 0; befs_debug(sb, "<--- %s Tree is EMPTY", __func__); return BEFS_BT_EMPTY; } else if (res == BEFS_ERR) { goto error_alloc; } /* find the leaf node containing the key_no key */ while (key_sum + this_node->head.all_key_count <= key_no) { /* no more nodes to look in: key_no is too large */ if (this_node->head.right == befs_bt_inval) { *keysize = 0; *value = 0; befs_debug(sb, "<--- %s END of keys at %llu", __func__, (unsigned long long) key_sum + this_node->head.all_key_count); brelse(this_node->bh); kfree(this_node); return BEFS_BT_END; } key_sum += 
this_node->head.all_key_count; node_off = this_node->head.right; if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) { befs_error(sb, "%s failed to read node at %llu", __func__, (unsigned long long)node_off); goto error_alloc; } } /* how many keys into this_node is key_no */ cur_key = key_no - key_sum; /* get pointers to datastructures within the node body */ valarray = befs_bt_valarray(this_node); keystart = befs_bt_get_key(sb, this_node, cur_key, &keylen); befs_debug(sb, "Read [%llu,%d]: keysize %d", (long long unsigned int)node_off, (int)cur_key, (int)keylen); if (bufsize < keylen + 1) { befs_error(sb, "%s keybuf too small (%zu) " "for key of size %d", __func__, bufsize, keylen); brelse(this_node->bh); goto error_alloc; } strlcpy(keybuf, keystart, keylen + 1); *value = fs64_to_cpu(sb, valarray[cur_key]); *keysize = keylen; befs_debug(sb, "Read [%llu,%d]: Key \"%.*s\", Value %llu", node_off, cur_key, keylen, keybuf, *value); brelse(this_node->bh); kfree(this_node); befs_debug(sb, "<--- %s", __func__); return BEFS_OK; error_alloc: kfree(this_node); error: *keysize = 0; *value = 0; befs_debug(sb, "<--- %s ERROR", __func__); return BEFS_ERR; } /** * befs_btree_seekleaf - Find the first leafnode in the btree * @sb: Filesystem superblock * @ds: Datastream containing btree * @bt_super: Pointer to the superblock of the btree * @this_node: Buffer to return the leafnode in * @node_off: Pointer to offset of current node within datastream. Modified * by the function. * * * Helper function for btree traverse. Moves the current position to the * start of the first leaf node. * * Also checks for an empty tree. If there are no keys, returns BEFS_BT_EMPTY. 
*/ static int befs_btree_seekleaf(struct super_block *sb, befs_data_stream * ds, befs_btree_super *bt_super, struct befs_btree_node *this_node, befs_off_t * node_off) { befs_debug(sb, "---> %s", __func__); if (befs_bt_read_node(sb, ds, this_node, *node_off) != BEFS_OK) { befs_error(sb, "%s failed to read " "node at %llu", __func__, *node_off); goto error; } befs_debug(sb, "Seekleaf to root node %llu", *node_off); if (this_node->head.all_key_count == 0 && befs_leafnode(this_node)) { befs_debug(sb, "<--- %s Tree is EMPTY", __func__); return BEFS_BT_EMPTY; } while (!befs_leafnode(this_node)) { if (this_node->head.all_key_count == 0) { befs_debug(sb, "%s encountered " "an empty interior node: %llu. Using Overflow " "node: %llu", __func__, *node_off, this_node->head.overflow); *node_off = this_node->head.overflow; } else { fs64 *valarray = befs_bt_valarray(this_node); *node_off = fs64_to_cpu(sb, valarray[0]); } if (befs_bt_read_node(sb, ds, this_node, *node_off) != BEFS_OK) { befs_error(sb, "%s failed to read " "node at %llu", __func__, *node_off); goto error; } befs_debug(sb, "Seekleaf to child node %llu", *node_off); } befs_debug(sb, "Node %llu is a leaf node", *node_off); return BEFS_OK; error: befs_debug(sb, "<--- %s ERROR", __func__); return BEFS_ERR; } /** * befs_leafnode - Determine if the btree node is a leaf node or an * interior node * @node: Pointer to node structure to test * * Return 1 if leaf, 0 if interior */ static int befs_leafnode(struct befs_btree_node *node) { /* all interior nodes (and only interior nodes) have an overflow node */ if (node->head.overflow == befs_bt_inval) return 1; else return 0; } /** * befs_bt_keylen_index - Finds start of keylen index in a node * @node: Pointer to the node structure to find the keylen index within * * Returns a pointer to the start of the key length index array * of the B+tree node *@node * * "The length of all the keys in the node is added to the size of the * header and then rounded up to a multiple of four to 
get the beginning * of the key length index" (p.88, practical filesystem design). * * Except that rounding up to 8 works, and rounding up to 4 doesn't. */ static fs16 * befs_bt_keylen_index(struct befs_btree_node *node) { const int keylen_align = 8; unsigned long int off = (sizeof (befs_btree_nodehead) + node->head.all_key_length); ulong tmp = off % keylen_align; if (tmp) off += keylen_align - tmp; return (fs16 *) ((void *) node->od_node + off); } /** * befs_bt_valarray - Finds the start of value array in a node * @node: Pointer to the node structure to find the value array within * * Returns a pointer to the start of the value array * of the node pointed to by the node header */ static fs64 * befs_bt_valarray(struct befs_btree_node *node) { void *keylen_index_start = (void *) befs_bt_keylen_index(node); size_t keylen_index_size = node->head.all_key_count * sizeof (fs16); return (fs64 *) (keylen_index_start + keylen_index_size); } /** * befs_bt_keydata - Finds start of keydata array in a node * @node: Pointer to the node structure to find the keydata array within * * Returns a pointer to the start of the keydata array * of the node pointed to by the node header */ static char * befs_bt_keydata(struct befs_btree_node *node) { return (char *) ((void *) node->od_node + sizeof (befs_btree_nodehead)); } /** * befs_bt_get_key - returns a pointer to the start of a key * @sb: filesystem superblock * @node: node in which to look for the key * @index: the index of the key to get * @keylen: modified to be the length of the key at @index * * Returns a valid pointer into @node on success. 
* Returns NULL on failure (bad input) and sets *@keylen = 0 */ static char * befs_bt_get_key(struct super_block *sb, struct befs_btree_node *node, int index, u16 * keylen) { int prev_key_end; char *keystart; fs16 *keylen_index; if (index < 0 || index > node->head.all_key_count) { *keylen = 0; return NULL; } keystart = befs_bt_keydata(node); keylen_index = befs_bt_keylen_index(node); if (index == 0) prev_key_end = 0; else prev_key_end = fs16_to_cpu(sb, keylen_index[index - 1]); *keylen = fs16_to_cpu(sb, keylen_index[index]) - prev_key_end; return keystart + prev_key_end; } /** * befs_compare_strings - compare two strings * @key1: pointer to the first key to be compared * @keylen1: length in bytes of key1 * @key2: pointer to the second key to be compared * @keylen2: length in bytes of key2 * * Returns 0 if @key1 and @key2 are equal. * Returns >0 if @key1 is greater. * Returns <0 if @key2 is greater.. */ static int befs_compare_strings(const void *key1, int keylen1, const void *key2, int keylen2) { int len = min_t(int, keylen1, keylen2); int result = strncmp(key1, key2, len); if (result == 0) result = keylen1 - keylen2; return result; } /* These will be used for non-string keyed btrees */ #if 0 static int btree_compare_int32(cont void *key1, int keylen1, const void *key2, int keylen2) { return *(int32_t *) key1 - *(int32_t *) key2; } static int btree_compare_uint32(cont void *key1, int keylen1, const void *key2, int keylen2) { if (*(u_int32_t *) key1 == *(u_int32_t *) key2) return 0; else if (*(u_int32_t *) key1 > *(u_int32_t *) key2) return 1; return -1; } static int btree_compare_int64(cont void *key1, int keylen1, const void *key2, int keylen2) { if (*(int64_t *) key1 == *(int64_t *) key2) return 0; else if (*(int64_t *) key1 > *(int64_t *) key2) return 1; return -1; } static int btree_compare_uint64(cont void *key1, int keylen1, const void *key2, int keylen2) { if (*(u_int64_t *) key1 == *(u_int64_t *) key2) return 0; else if (*(u_int64_t *) key1 > *(u_int64_t *) 
key2) return 1; return -1; } static int btree_compare_float(cont void *key1, int keylen1, const void *key2, int keylen2) { float result = *(float *) key1 - *(float *) key2; if (result == 0.0f) return 0; return (result < 0.0f) ? -1 : 1; } static int btree_compare_double(cont void *key1, int keylen1, const void *key2, int keylen2) { double result = *(double *) key1 - *(double *) key2; if (result == 0.0) return 0; return (result < 0.0) ? -1 : 1; } #endif //0
gpl-2.0
gdyuldin/huawei_u8850_kernel_ics
drivers/staging/vt6655/rc4.c
761
2181
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: rc4.c * * Purpose: * * Functions: * * Revision History: * * Author: Kyle Hsu * * Date: Sep 4, 2002 * */ #include "rc4.h" void rc4_init(PRC4Ext pRC4, PBYTE pbyKey, UINT cbKey_len) { UINT ust1, ust2; UINT keyindex; UINT stateindex; PBYTE pbyst; UINT idx; pbyst = pRC4->abystate; pRC4->ux = 0; pRC4->uy = 0; for (idx = 0; idx < 256; idx++) pbyst[idx] = (BYTE)idx; keyindex = 0; stateindex = 0; for (idx = 0; idx < 256; idx++) { ust1 = pbyst[idx]; stateindex = (stateindex + pbyKey[keyindex] + ust1) & 0xff; ust2 = pbyst[stateindex]; pbyst[stateindex] = (BYTE)ust1; pbyst[idx] = (BYTE)ust2; if (++keyindex >= cbKey_len) keyindex = 0; } } UINT rc4_byte(PRC4Ext pRC4) { UINT ux; UINT uy; UINT ustx, usty; PBYTE pbyst; pbyst = pRC4->abystate; ux = (pRC4->ux + 1) & 0xff; ustx = pbyst[ux]; uy = (ustx + pRC4->uy) & 0xff; usty = pbyst[uy]; pRC4->ux = ux; pRC4->uy = uy; pbyst[uy] = (BYTE)ustx; pbyst[ux] = (BYTE)usty; return pbyst[(ustx + usty) & 0xff]; } void rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest, PBYTE pbySrc, UINT cbData_len) { UINT ii; for (ii = 0; ii < cbData_len; ii++) pbyDest[ii] = (BYTE)(pbySrc[ii] ^ rc4_byte(pRC4)); }
gpl-2.0
CyanogenMod/samsung-kernel-c1
drivers/video/cg14.c
761
15072
/* cg14.c: CGFOURTEEN frame buffer driver * * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net) * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) * * Driver layout based loosely on tgafb.c, see that file for credits. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/fb.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/fbio.h> #include "sbuslib.h" /* * Local functions. */ static int cg14_setcolreg(unsigned, unsigned, unsigned, unsigned, unsigned, struct fb_info *); static int cg14_mmap(struct fb_info *, struct vm_area_struct *); static int cg14_ioctl(struct fb_info *, unsigned int, unsigned long); static int cg14_pan_display(struct fb_var_screeninfo *, struct fb_info *); /* * Frame buffer operations */ static struct fb_ops cg14_ops = { .owner = THIS_MODULE, .fb_setcolreg = cg14_setcolreg, .fb_pan_display = cg14_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_mmap = cg14_mmap, .fb_ioctl = cg14_ioctl, #ifdef CONFIG_COMPAT .fb_compat_ioctl = sbusfb_compat_ioctl, #endif }; #define CG14_MCR_INTENABLE_SHIFT 7 #define CG14_MCR_INTENABLE_MASK 0x80 #define CG14_MCR_VIDENABLE_SHIFT 6 #define CG14_MCR_VIDENABLE_MASK 0x40 #define CG14_MCR_PIXMODE_SHIFT 4 #define CG14_MCR_PIXMODE_MASK 0x30 #define CG14_MCR_TMR_SHIFT 2 #define CG14_MCR_TMR_MASK 0x0c #define CG14_MCR_TMENABLE_SHIFT 1 #define CG14_MCR_TMENABLE_MASK 0x02 #define CG14_MCR_RESET_SHIFT 0 #define CG14_MCR_RESET_MASK 0x01 #define CG14_REV_REVISION_SHIFT 4 #define CG14_REV_REVISION_MASK 0xf0 #define CG14_REV_IMPL_SHIFT 0 #define CG14_REV_IMPL_MASK 0x0f #define CG14_VBR_FRAMEBASE_SHIFT 12 #define CG14_VBR_FRAMEBASE_MASK 0x00fff000 #define CG14_VMCR1_SETUP_SHIFT 0 #define CG14_VMCR1_SETUP_MASK 
0x000001ff #define CG14_VMCR1_VCONFIG_SHIFT 9 #define CG14_VMCR1_VCONFIG_MASK 0x00000e00 #define CG14_VMCR2_REFRESH_SHIFT 0 #define CG14_VMCR2_REFRESH_MASK 0x00000001 #define CG14_VMCR2_TESTROWCNT_SHIFT 1 #define CG14_VMCR2_TESTROWCNT_MASK 0x00000002 #define CG14_VMCR2_FBCONFIG_SHIFT 2 #define CG14_VMCR2_FBCONFIG_MASK 0x0000000c #define CG14_VCR_REFRESHREQ_SHIFT 0 #define CG14_VCR_REFRESHREQ_MASK 0x000003ff #define CG14_VCR1_REFRESHENA_SHIFT 10 #define CG14_VCR1_REFRESHENA_MASK 0x00000400 #define CG14_VCA_CAD_SHIFT 0 #define CG14_VCA_CAD_MASK 0x000003ff #define CG14_VCA_VERS_SHIFT 10 #define CG14_VCA_VERS_MASK 0x00000c00 #define CG14_VCA_RAMSPEED_SHIFT 12 #define CG14_VCA_RAMSPEED_MASK 0x00001000 #define CG14_VCA_8MB_SHIFT 13 #define CG14_VCA_8MB_MASK 0x00002000 #define CG14_MCR_PIXMODE_8 0 #define CG14_MCR_PIXMODE_16 2 #define CG14_MCR_PIXMODE_32 3 struct cg14_regs{ u8 mcr; /* Master Control Reg */ u8 ppr; /* Packed Pixel Reg */ u8 tms[2]; /* Test Mode Status Regs */ u8 msr; /* Master Status Reg */ u8 fsr; /* Fault Status Reg */ u8 rev; /* Revision & Impl */ u8 ccr; /* Clock Control Reg */ u32 tmr; /* Test Mode Read Back */ u8 mod; /* Monitor Operation Data Reg */ u8 acr; /* Aux Control */ u8 xxx0[6]; u16 hct; /* Hor Counter */ u16 vct; /* Vert Counter */ u16 hbs; /* Hor Blank Start */ u16 hbc; /* Hor Blank Clear */ u16 hss; /* Hor Sync Start */ u16 hsc; /* Hor Sync Clear */ u16 csc; /* Composite Sync Clear */ u16 vbs; /* Vert Blank Start */ u16 vbc; /* Vert Blank Clear */ u16 vss; /* Vert Sync Start */ u16 vsc; /* Vert Sync Clear */ u16 xcs; u16 xcc; u16 fsa; /* Fault Status Address */ u16 adr; /* Address Registers */ u8 xxx1[0xce]; u8 pcg[0x100]; /* Pixel Clock Generator */ u32 vbr; /* Frame Base Row */ u32 vmcr; /* VBC Master Control */ u32 vcr; /* VBC refresh */ u32 vca; /* VBC Config */ }; #define CG14_CCR_ENABLE 0x04 #define CG14_CCR_SELECT 0x02 /* HW/Full screen */ struct cg14_cursor { u32 cpl0[32]; /* Enable plane 0 */ u32 cpl1[32]; /* Color selection 
plane */ u8 ccr; /* Cursor Control Reg */ u8 xxx0[3]; u16 cursx; /* Cursor x,y position */ u16 cursy; /* Cursor x,y position */ u32 color0; u32 color1; u32 xxx1[0x1bc]; u32 cpl0i[32]; /* Enable plane 0 autoinc */ u32 cpl1i[32]; /* Color selection autoinc */ }; struct cg14_dac { u8 addr; /* Address Register */ u8 xxx0[255]; u8 glut; /* Gamma table */ u8 xxx1[255]; u8 select; /* Register Select */ u8 xxx2[255]; u8 mode; /* Mode Register */ }; struct cg14_xlut{ u8 x_xlut [256]; u8 x_xlutd [256]; u8 xxx0[0x600]; u8 x_xlut_inc [256]; u8 x_xlutd_inc [256]; }; /* Color look up table (clut) */ /* Each one of these arrays hold the color lookup table (for 256 * colors) for each MDI page (I assume then there should be 4 MDI * pages, I still wonder what they are. I have seen NeXTStep split * the screen in four parts, while operating in 24 bits mode. Each * integer holds 4 values: alpha value (transparency channel, thanks * go to John Stone (johns@umr.edu) from OpenBSD), red, green and blue * * I currently use the clut instead of the Xlut */ struct cg14_clut { u32 c_clut [256]; u32 c_clutd [256]; /* i wonder what the 'd' is for */ u32 c_clut_inc [256]; u32 c_clutd_inc [256]; }; #define CG14_MMAP_ENTRIES 16 struct cg14_par { spinlock_t lock; struct cg14_regs __iomem *regs; struct cg14_clut __iomem *clut; struct cg14_cursor __iomem *cursor; u32 flags; #define CG14_FLAG_BLANKED 0x00000001 unsigned long iospace; struct sbus_mmap_map mmap_map[CG14_MMAP_ENTRIES]; int mode; int ramsize; }; static void __cg14_reset(struct cg14_par *par) { struct cg14_regs __iomem *regs = par->regs; u8 val; val = sbus_readb(&regs->mcr); val &= ~(CG14_MCR_PIXMODE_MASK); sbus_writeb(val, &regs->mcr); } static int cg14_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct cg14_par *par = (struct cg14_par *) info->par; unsigned long flags; /* We just use this to catch switches out of * graphics mode. 
*/ spin_lock_irqsave(&par->lock, flags); __cg14_reset(par); spin_unlock_irqrestore(&par->lock, flags); if (var->xoffset || var->yoffset || var->vmode) return -EINVAL; return 0; } /** * cg14_setcolreg - Optional function. Sets a color register. * @regno: boolean, 0 copy local, 1 get_user() function * @red: frame buffer colormap structure * @green: The green value which can be up to 16 bits wide * @blue: The blue value which can be up to 16 bits wide. * @transp: If supported the alpha value which can be up to 16 bits wide. * @info: frame buffer info structure */ static int cg14_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct cg14_par *par = (struct cg14_par *) info->par; struct cg14_clut __iomem *clut = par->clut; unsigned long flags; u32 val; if (regno >= 256) return 1; red >>= 8; green >>= 8; blue >>= 8; val = (red | (green << 8) | (blue << 16)); spin_lock_irqsave(&par->lock, flags); sbus_writel(val, &clut->c_clut[regno]); spin_unlock_irqrestore(&par->lock, flags); return 0; } static int cg14_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct cg14_par *par = (struct cg14_par *) info->par; return sbusfb_mmap_helper(par->mmap_map, info->fix.smem_start, info->fix.smem_len, par->iospace, vma); } static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct cg14_par *par = (struct cg14_par *) info->par; struct cg14_regs __iomem *regs = par->regs; struct mdi_cfginfo kmdi, __user *mdii; unsigned long flags; int cur_mode, mode, ret = 0; switch (cmd) { case MDI_RESET: spin_lock_irqsave(&par->lock, flags); __cg14_reset(par); spin_unlock_irqrestore(&par->lock, flags); break; case MDI_GET_CFGINFO: memset(&kmdi, 0, sizeof(kmdi)); spin_lock_irqsave(&par->lock, flags); kmdi.mdi_type = FBTYPE_MDICOLOR; kmdi.mdi_height = info->var.yres; kmdi.mdi_width = info->var.xres; kmdi.mdi_mode = par->mode; kmdi.mdi_pixfreq = 72; /* FIXME */ kmdi.mdi_size = par->ramsize; 
spin_unlock_irqrestore(&par->lock, flags); mdii = (struct mdi_cfginfo __user *) arg; if (copy_to_user(mdii, &kmdi, sizeof(kmdi))) ret = -EFAULT; break; case MDI_SET_PIXELMODE: if (get_user(mode, (int __user *) arg)) { ret = -EFAULT; break; } spin_lock_irqsave(&par->lock, flags); cur_mode = sbus_readb(&regs->mcr); cur_mode &= ~CG14_MCR_PIXMODE_MASK; switch(mode) { case MDI_32_PIX: cur_mode |= (CG14_MCR_PIXMODE_32 << CG14_MCR_PIXMODE_SHIFT); break; case MDI_16_PIX: cur_mode |= (CG14_MCR_PIXMODE_16 << CG14_MCR_PIXMODE_SHIFT); break; case MDI_8_PIX: break; default: ret = -ENOSYS; break; }; if (!ret) { sbus_writeb(cur_mode, &regs->mcr); par->mode = mode; } spin_unlock_irqrestore(&par->lock, flags); break; default: ret = sbusfb_ioctl_helper(cmd, arg, info, FBTYPE_MDICOLOR, 8, info->fix.smem_len); break; }; return ret; } /* * Initialisation */ static void __devinit cg14_init_fix(struct fb_info *info, int linebytes, struct device_node *dp) { const char *name = dp->name; strlcpy(info->fix.id, name, sizeof(info->fix.id)); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->fix.line_length = linebytes; info->fix.accel = FB_ACCEL_SUN_CG14; } static struct sbus_mmap_map __cg14_mmap_map[CG14_MMAP_ENTRIES] __devinitdata = { { .voff = CG14_REGS, .poff = 0x80000000, .size = 0x1000 }, { .voff = CG14_XLUT, .poff = 0x80003000, .size = 0x1000 }, { .voff = CG14_CLUT1, .poff = 0x80004000, .size = 0x1000 }, { .voff = CG14_CLUT2, .poff = 0x80005000, .size = 0x1000 }, { .voff = CG14_CLUT3, .poff = 0x80006000, .size = 0x1000 }, { .voff = CG3_MMAP_OFFSET - 0x7000, .poff = 0x80000000, .size = 0x7000 }, { .voff = CG3_MMAP_OFFSET, .poff = 0x00000000, .size = SBUS_MMAP_FBSIZE(1) }, { .voff = MDI_CURSOR_MAP, .poff = 0x80001000, .size = 0x1000 }, { .voff = MDI_CHUNKY_BGR_MAP, .poff = 0x01000000, .size = 0x400000 }, { .voff = MDI_PLANAR_X16_MAP, .poff = 0x02000000, .size = 0x200000 }, { .voff = MDI_PLANAR_C16_MAP, .poff = 0x02800000, .size = 0x200000 }, { .voff = 
MDI_PLANAR_X32_MAP, .poff = 0x03000000, .size = 0x100000 }, { .voff = MDI_PLANAR_B32_MAP, .poff = 0x03400000, .size = 0x100000 }, { .voff = MDI_PLANAR_G32_MAP, .poff = 0x03800000, .size = 0x100000 }, { .voff = MDI_PLANAR_R32_MAP, .poff = 0x03c00000, .size = 0x100000 }, { .size = 0 } }; static void cg14_unmap_regs(struct of_device *op, struct fb_info *info, struct cg14_par *par) { if (par->regs) of_iounmap(&op->resource[0], par->regs, sizeof(struct cg14_regs)); if (par->clut) of_iounmap(&op->resource[0], par->clut, sizeof(struct cg14_clut)); if (par->cursor) of_iounmap(&op->resource[0], par->cursor, sizeof(struct cg14_cursor)); if (info->screen_base) of_iounmap(&op->resource[1], info->screen_base, info->fix.smem_len); } static int __devinit cg14_probe(struct of_device *op, const struct of_device_id *match) { struct device_node *dp = op->dev.of_node; struct fb_info *info; struct cg14_par *par; int is_8mb, linebytes, i, err; info = framebuffer_alloc(sizeof(struct cg14_par), &op->dev); err = -ENOMEM; if (!info) goto out_err; par = info->par; spin_lock_init(&par->lock); sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; linebytes = of_getintprop_default(dp, "linebytes", info->var.xres); info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres); if (!strcmp(dp->parent->name, "sbus") || !strcmp(dp->parent->name, "sbi")) { info->fix.smem_start = op->resource[0].start; par->iospace = op->resource[0].flags & IORESOURCE_BITS; } else { info->fix.smem_start = op->resource[1].start; par->iospace = op->resource[0].flags & IORESOURCE_BITS; } par->regs = of_ioremap(&op->resource[0], 0, sizeof(struct cg14_regs), "cg14 regs"); par->clut = of_ioremap(&op->resource[0], CG14_CLUT1, sizeof(struct cg14_clut), "cg14 clut"); par->cursor = of_ioremap(&op->resource[0], CG14_CURSORREGS, sizeof(struct cg14_cursor), "cg14 cursor"); info->screen_base = of_ioremap(&op->resource[1], 0, info->fix.smem_len, "cg14 ram"); if 
(!par->regs || !par->clut || !par->cursor || !info->screen_base) goto out_unmap_regs; is_8mb = (((op->resource[1].end - op->resource[1].start) + 1) == (8 * 1024 * 1024)); BUILD_BUG_ON(sizeof(par->mmap_map) != sizeof(__cg14_mmap_map)); memcpy(&par->mmap_map, &__cg14_mmap_map, sizeof(par->mmap_map)); for (i = 0; i < CG14_MMAP_ENTRIES; i++) { struct sbus_mmap_map *map = &par->mmap_map[i]; if (!map->size) break; if (map->poff & 0x80000000) map->poff = (map->poff & 0x7fffffff) + (op->resource[0].start - op->resource[1].start); if (is_8mb && map->size >= 0x100000 && map->size <= 0x400000) map->size *= 2; } par->mode = MDI_8_PIX; par->ramsize = (is_8mb ? 0x800000 : 0x400000); info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; info->fbops = &cg14_ops; __cg14_reset(par); if (fb_alloc_cmap(&info->cmap, 256, 0)) goto out_unmap_regs; fb_set_cmap(&info->cmap, info); cg14_init_fix(info, linebytes, dp); err = register_framebuffer(info); if (err < 0) goto out_dealloc_cmap; dev_set_drvdata(&op->dev, info); printk(KERN_INFO "%s: cgfourteen at %lx:%lx, %dMB\n", dp->full_name, par->iospace, info->fix.smem_start, par->ramsize >> 20); return 0; out_dealloc_cmap: fb_dealloc_cmap(&info->cmap); out_unmap_regs: cg14_unmap_regs(op, info, par); out_err: return err; } static int __devexit cg14_remove(struct of_device *op) { struct fb_info *info = dev_get_drvdata(&op->dev); struct cg14_par *par = info->par; unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); cg14_unmap_regs(op, info, par); framebuffer_release(info); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id cg14_match[] = { { .name = "cgfourteen", }, {}, }; MODULE_DEVICE_TABLE(of, cg14_match); static struct of_platform_driver cg14_driver = { .driver = { .name = "cg14", .owner = THIS_MODULE, .of_match_table = cg14_match, }, .probe = cg14_probe, .remove = __devexit_p(cg14_remove), }; static int __init cg14_init(void) { if (fb_get_options("cg14fb", NULL)) return -ENODEV; return 
of_register_driver(&cg14_driver, &of_bus_type); } /* tail of cg14_init(): register the OF platform driver */

/* Module unload hook: undo the registration done in cg14_init(). */
static void __exit cg14_exit(void)
{
	of_unregister_driver(&cg14_driver);
}

module_init(cg14_init);
module_exit(cg14_exit);

MODULE_DESCRIPTION("framebuffer driver for CGfourteen chipsets");
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
gpl-2.0
cmenard/GB_Bullet
drivers/staging/vt6655/datarate.c
761
12181
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: datarate.c * * Purpose: Handles the auto fallback & data rates functions * * Author: Lyndon Chen * * Date: July 17, 2002 * * Functions: * RATEvParseMaxRate - Parsing the highest basic & support rate in rate field of frame * RATEvTxRateFallBack - Rate fallback Algorithm Implementation * RATEuSetIE - Set rate IE field. 
* * Revision History: * */

#include "ttype.h"
#include "tmacro.h"
#include "mac.h"
#include "80211mgr.h"
#include "bssdb.h"
#include "datarate.h"
#include "card.h"
#include "baseband.h"
#include "srom.h"

/*---------------------  Static Definitions -------------------------*/

/*---------------------  Static Classes  ----------------------------*/

extern WORD TxRate_iwconfig; //2008-5-8 <add> by chester
/*---------------------  Static Variables  --------------------------*/
//static int          msglevel                =MSG_LEVEL_DEBUG;
static int          msglevel                =MSG_LEVEL_INFO;

/* Canonical IE rate bytes (802.11 units of 500 kbps, basic-rate flag
 * cleared): 1, 2, 5.5, 11 Mbps CCK followed by 6..54 Mbps OFDM.
 * Table index doubles as the driver's rate index everywhere below. */
const BYTE acbyIERate[MAX_RATE] = {0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};

#define AUTORATE_TXOK_CNT       0x0400
#define AUTORATE_TXFAIL_CNT     0x0064
#define AUTORATE_TIMEOUT        10

/*---------------------  Static Functions  --------------------------*/

void s_vResetCounter (
    PKnownNodeDB psNodeDBTable
    );

/*
 * Reset the per-rate TX statistics of one node-database entry.
 *
 * The loop deliberately runs to index MAX_RATE inclusive: uTxOk[] and
 * uTxFail[] carry one extra slot ([MAX_RATE]) that the fallback
 * algorithm (RATEvTxRateFallBack) uses as an aggregate counter, and it
 * must be cleared together with the per-rate slots.
 */
void s_vResetCounter (
    PKnownNodeDB psNodeDBTable
    )
{
    BYTE            ii;

    // clear statistic counter for auto_rate
    for(ii=0;ii<=MAX_RATE;ii++) {
        psNodeDBTable->uTxOk[ii] = 0;
        psNodeDBTable->uTxFail[ii] = 0;
    }
}

/*---------------------  Export Variables  --------------------------*/

/*---------------------  Export Functions  --------------------------*/

/*+
 *
 * Description:
 *      Get RateIdx from the value in SuppRates IE or ExtSuppRates IE
 *
 * Parameters:
 *  In:
 *      BYTE    - Rate value in SuppRates IE or ExtSuppRates IE
 *  Out:
 *      none
 *
 * Return Value: RateIdx
 *
-*/
BYTE
DATARATEbyGetRateIdx (
    BYTE byRate
    )
{
    BYTE    ii;

    //Erase basicRate flag.
    /* tail of DATARATEbyGetRateIdx(): mask off the 802.11 basic-rate
       flag (bit 0x80), then linear-search the canonical rate table. */
    byRate = byRate & 0x7F;//0111 1111
    for (ii = 0; ii < MAX_RATE; ii ++) {
        if (acbyIERate[ii] == byRate)
            return ii;
    }
    /* unknown rate byte: fall back to index 0 (1 Mbps) */
    return 0;
}

/*+
 *
 * Routine Description:
 *      Rate fallback Algorithm Implementaion
 *
 * Parameters:
 *  In:
 *      pDevice         - Pointer to the adapter
 *      psNodeDBTable   - Pointer to Node Data Base
 *  Out:
 *      none
 *
 * Return Value: none
 *
-*/
/* NOTE(review): these two thresholds are #defined again, with identical
   values, immediately before RATEvTxRateFallBack() later in this file.
   The benign re-definition compiles cleanly, but one copy could be
   dropped. */
#define AUTORATE_TXCNT_THRESHOLD        20
#define AUTORATE_INC_THRESHOLD          30

/*+
 *
 * Description:
 *      Get RateIdx from the value in SuppRates IE or ExtSuppRates IE
 *
 * Parameters:
 *  In:
 *      BYTE    - Rate value in SuppRates IE or ExtSuppRates IE
 *  Out:
 *      none
 *
 * Return Value: RateIdx
 *
-*/
/* NOTE(review): duplicates DATARATEbyGetRateIdx() above except for the
   WORD return/index type; a single helper would suffice. */
WORD
wGetRateIdx(
    BYTE byRate
    )
{
    WORD    ii;

    //Erase basicRate flag.
    byRate = byRate & 0x7F;//0111 1111
    for (ii = 0; ii < MAX_RATE; ii ++) {
        if (acbyIERate[ii] == byRate)
            return ii;
    }
    /* unknown rate byte: fall back to index 0 (1 Mbps) */
    return 0;
}

/*+
 *
 * Description:
 *      Parsing the highest basic & support rate in rate field of frame.
 *
 * Parameters:
 *  In:
 *      pDevice         - Pointer to the adapter
 *      pItemRates      - Pointer to Rate field defined in 802.11 spec.
 *      pItemExtRates   - Pointer to Extended Rate field defined in 802.11 spec.
* Out: * pwMaxBasicRate - Maximum Basic Rate * pwMaxSuppRate - Maximum Supported Rate * pbyTopCCKRate - Maximum Basic Rate in CCK mode * pbyTopOFDMRate - Maximum Basic Rate in OFDM mode * * Return Value: none * -*/ void RATEvParseMaxRate ( void *pDeviceHandler, PWLAN_IE_SUPP_RATES pItemRates, PWLAN_IE_SUPP_RATES pItemExtRates, BOOL bUpdateBasicRate, PWORD pwMaxBasicRate, PWORD pwMaxSuppRate, PWORD pwSuppRate, PBYTE pbyTopCCKRate, PBYTE pbyTopOFDMRate ) { PSDevice pDevice = (PSDevice) pDeviceHandler; UINT ii; BYTE byHighSuppRate = 0; BYTE byRate = 0; WORD wOldBasicRate = pDevice->wBasicRate; UINT uRateLen; if (pItemRates == NULL) return; *pwSuppRate = 0; uRateLen = pItemRates->len; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate Len: %d\n", uRateLen); if (pDevice->eCurrentPHYType != PHY_TYPE_11B) { if (uRateLen > WLAN_RATES_MAXLEN) uRateLen = WLAN_RATES_MAXLEN; } else { if (uRateLen > WLAN_RATES_MAXLEN_11B) uRateLen = WLAN_RATES_MAXLEN_11B; } for (ii = 0; ii < uRateLen; ii++) { byRate = (BYTE)(pItemRates->abyRates[ii]); if (WLAN_MGMT_IS_BASICRATE(byRate) && (bUpdateBasicRate == TRUE)) { // Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate)); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", wGetRateIdx(byRate)); } byRate = (BYTE)(pItemRates->abyRates[ii]&0x7F); if (byHighSuppRate == 0) byHighSuppRate = byRate; if (byRate > byHighSuppRate) byHighSuppRate = byRate; *pwSuppRate |= (1<<wGetRateIdx(byRate)); } if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) && (pDevice->eCurrentPHYType != PHY_TYPE_11B)) { UINT uExtRateLen = pItemExtRates->len; if (uExtRateLen > WLAN_RATES_MAXLEN) uExtRateLen = WLAN_RATES_MAXLEN; for (ii = 0; ii < uExtRateLen ; ii++) { byRate = (BYTE)(pItemExtRates->abyRates[ii]); // select highest basic rate if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) { // Add to basic rate set, update 
pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate)); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", wGetRateIdx(byRate)); } byRate = (BYTE)(pItemExtRates->abyRates[ii]&0x7F); if (byHighSuppRate == 0) byHighSuppRate = byRate; if (byRate > byHighSuppRate) byHighSuppRate = byRate; *pwSuppRate |= (1<<wGetRateIdx(byRate)); //DBG_PRN_GRP09(("ParseMaxRate : HighSuppRate: %d, %X\n", wGetRateIdx(byRate), byRate)); } } //if(pItemExtRates != NULL) if ((pDevice->byPacketType == PK_TYPE_11GB) && CARDbIsOFDMinBasicRate((void *)pDevice)) { pDevice->byPacketType = PK_TYPE_11GA; } *pbyTopCCKRate = pDevice->byTopCCKBasicRate; *pbyTopOFDMRate = pDevice->byTopOFDMBasicRate; *pwMaxSuppRate = wGetRateIdx(byHighSuppRate); if ((pDevice->byPacketType==PK_TYPE_11B) || (pDevice->byPacketType==PK_TYPE_11GB)) *pwMaxBasicRate = pDevice->byTopCCKBasicRate; else *pwMaxBasicRate = pDevice->byTopOFDMBasicRate; if (wOldBasicRate != pDevice->wBasicRate) CARDvSetRSPINF((void *)pDevice, pDevice->eCurrentPHYType); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Exit ParseMaxRate\n"); } /*+ * * Routine Description: * Rate fallback Algorithm Implementaion * * Parameters: * In: * pDevice - Pointer to the adapter * psNodeDBTable - Pointer to Node Data Base * Out: * none * * Return Value: none * -*/ #define AUTORATE_TXCNT_THRESHOLD 20 #define AUTORATE_INC_THRESHOLD 30 void RATEvTxRateFallBack ( void *pDeviceHandler, PKnownNodeDB psNodeDBTable ) { PSDevice pDevice = (PSDevice) pDeviceHandler; WORD wIdxDownRate = 0; UINT ii; //DWORD dwRateTable[MAX_RATE] = {1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54}; BOOL bAutoRate[MAX_RATE] = {TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE}; DWORD dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540}; DWORD dwThroughput = 0; WORD wIdxUpRate = 0; DWORD dwTxDiff = 0; if (pDevice->pMgmt->eScanState != WMAC_NO_SCANNING) { // Don't do Fallback when scanning Channel 
return; } psNodeDBTable->uTimeCount ++; if (psNodeDBTable->uTxFail[MAX_RATE] > psNodeDBTable->uTxOk[MAX_RATE]) dwTxDiff = psNodeDBTable->uTxFail[MAX_RATE] - psNodeDBTable->uTxOk[MAX_RATE]; if ((psNodeDBTable->uTxOk[MAX_RATE] < AUTORATE_TXOK_CNT) && (dwTxDiff < AUTORATE_TXFAIL_CNT) && (psNodeDBTable->uTimeCount < AUTORATE_TIMEOUT)) { return; } if (psNodeDBTable->uTimeCount >= AUTORATE_TIMEOUT) { psNodeDBTable->uTimeCount = 0; } for(ii=0;ii<MAX_RATE;ii++) { if (psNodeDBTable->wSuppRate & (0x0001<<ii)) { if (bAutoRate[ii] == TRUE) { wIdxUpRate = (WORD) ii; } } else { bAutoRate[ii] = FALSE; } } for(ii=0;ii<=psNodeDBTable->wTxDataRate;ii++) { if ( (psNodeDBTable->uTxOk[ii] != 0) || (psNodeDBTable->uTxFail[ii] != 0) ) { dwThroughputTbl[ii] *= psNodeDBTable->uTxOk[ii]; if (ii < RATE_11M) { psNodeDBTable->uTxFail[ii] *= 4; } dwThroughputTbl[ii] /= (psNodeDBTable->uTxOk[ii] + psNodeDBTable->uTxFail[ii]); } // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate %d,Ok: %d, Fail:%d, Throughput:%d\n", // ii, psNodeDBTable->uTxOk[ii], psNodeDBTable->uTxFail[ii], dwThroughputTbl[ii]); } dwThroughput = dwThroughputTbl[psNodeDBTable->wTxDataRate]; wIdxDownRate = psNodeDBTable->wTxDataRate; for(ii = psNodeDBTable->wTxDataRate; ii > 0;) { ii--; if ( (dwThroughputTbl[ii] > dwThroughput) && (bAutoRate[ii]==TRUE) ) { dwThroughput = dwThroughputTbl[ii]; wIdxDownRate = (WORD) ii; } } psNodeDBTable->wTxDataRate = wIdxDownRate; if (psNodeDBTable->uTxOk[MAX_RATE]) { if (psNodeDBTable->uTxOk[MAX_RATE] > (psNodeDBTable->uTxFail[MAX_RATE] * 4) ) { psNodeDBTable->wTxDataRate = wIdxUpRate; } }else { // adhoc, if uTxOk =0 & uTxFail = 0 if (psNodeDBTable->uTxFail[MAX_RATE] == 0) psNodeDBTable->wTxDataRate = wIdxUpRate; } //2008-5-8 <add> by chester TxRate_iwconfig=psNodeDBTable->wTxDataRate; s_vResetCounter(psNodeDBTable); // DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate: %d, U:%d, D:%d\n", psNodeDBTable->wTxDataRate, wIdxUpRate, wIdxDownRate); return; } /*+ * * Description: * This routine is used to assemble 
available Rate IE. * * Parameters: * In: * pDevice * Out: * * Return Value: None * -*/
/*
 * Assemble a rate IE: copy into pDstRates every rate from pSrcRates
 * that also appears in the canonical table acbyIERate, preserving the
 * source byte unchanged (so the 802.11 basic-rate flag bit survives).
 * Returns the number of rates copied; 0 when either pointer is NULL or
 * the source IE is empty.
 */
BYTE
RATEuSetIE (
    PWLAN_IE_SUPP_RATES pSrcRates,   /* source SuppRates/ExtSuppRates IE */
    PWLAN_IE_SUPP_RATES pDstRates,   /* destination IE being assembled */
    UINT                uRateLen     /* number of acbyIERate[] entries to scan */
    )
{
    UINT ii, uu, uRateCnt = 0;

    if ((pSrcRates == NULL) || (pDstRates == NULL))
        return 0;
    if (pSrcRates->len == 0)
        return 0;

    /* Outer loop walks the canonical table in order, so the assembled
       IE comes out sorted by rate; 0x7F masks the basic-rate flag only
       for the comparison, not for the stored byte. */
    for (ii = 0; ii < uRateLen; ii++) {
        for (uu = 0; uu < pSrcRates->len; uu++) {
            if ((pSrcRates->abyRates[uu] & 0x7F) == acbyIERate[ii]) {
                pDstRates->abyRates[uRateCnt ++] = pSrcRates->abyRates[uu];
                break;
            }
        }
    }
    return (BYTE)uRateCnt;
}
gpl-2.0
DRHAX34/android_kernel_zte_sailboat
drivers/video/platinumfb.c
761
20260
/* * platinumfb.c -- frame buffer device for the PowerMac 'platinum' display * * Copyright (C) 1998 Franz Sirl * * Frame buffer structure from: * drivers/video/controlfb.c -- frame buffer device for * Apple 'control' display chip. * Copyright (C) 1998 Dan Jacobowitz * * Hardware information from: * platinum.c: Console support for PowerMac "platinum" display adaptor. * Copyright (C) 1996 Paul Mackerras and Mark Abene * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #undef DEBUG #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/nvram.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pgtable.h> #include "macmodes.h" #include "platinumfb.h" static int default_vmode = VMODE_NVRAM; static int default_cmode = CMODE_NVRAM; struct fb_info_platinum { struct fb_info *info; int vmode, cmode; int xres, yres; int vxres, vyres; int xoffset, yoffset; struct { __u8 red, green, blue; } palette[256]; u32 pseudo_palette[16]; volatile struct cmap_regs __iomem *cmap_regs; unsigned long cmap_regs_phys; volatile struct platinum_regs __iomem *platinum_regs; unsigned long platinum_regs_phys; __u8 __iomem *frame_buffer; volatile __u8 __iomem *base_frame_buffer; unsigned long frame_buffer_phys; unsigned long total_vram; int clktype; int dactype; struct resource rsrc_fb, rsrc_reg; }; /* * Frame buffer device API */ static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int platinumfb_blank(int blank_mode, struct fb_info *info); static int platinumfb_set_par (struct fb_info *info); static int platinumfb_check_var (struct 
fb_var_screeninfo *var, struct fb_info *info); /* * internal functions */ static inline int platinum_vram_reqd(int video_mode, int color_mode); static int read_platinum_sense(struct fb_info_platinum *pinfo); static void set_platinum_clock(struct fb_info_platinum *pinfo); static void platinum_set_hardware(struct fb_info_platinum *pinfo); static int platinum_var_to_par(struct fb_var_screeninfo *var, struct fb_info_platinum *pinfo, int check_only); /* * Interface used by the world */ static struct fb_ops platinumfb_ops = { .owner = THIS_MODULE, .fb_check_var = platinumfb_check_var, .fb_set_par = platinumfb_set_par, .fb_setcolreg = platinumfb_setcolreg, .fb_blank = platinumfb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Checks a var structure */ static int platinumfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info) { return platinum_var_to_par(var, info->par, 1); } /* * Applies current var to display */ static int platinumfb_set_par (struct fb_info *info) { struct fb_info_platinum *pinfo = info->par; struct platinum_regvals *init; int err, offset = 0x20; if((err = platinum_var_to_par(&info->var, pinfo, 0))) { printk (KERN_ERR "platinumfb_set_par: error calling" " platinum_var_to_par: %d.\n", err); return err; } platinum_set_hardware(pinfo); init = platinum_reg_init[pinfo->vmode-1]; if ((pinfo->vmode == VMODE_832_624_75) && (pinfo->cmode > CMODE_8)) offset = 0x10; info->screen_base = pinfo->frame_buffer + init->fb_offset + offset; mutex_lock(&info->mm_lock); info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset; mutex_unlock(&info->mm_lock); info->fix.visual = (pinfo->cmode == CMODE_8) ? 
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode) + offset; printk("line_length: %x\n", info->fix.line_length); return 0; } static int platinumfb_blank(int blank, struct fb_info *fb) { /* * Blank the screen if blank_mode != 0, else unblank. If blank == NULL * then the caller blanks by setting the CLUT (Color Look Up Table) to all * black. Return 0 if blanking succeeded, != 0 if un-/blanking failed due * to e.g. a video mode which doesn't support it. Implements VESA suspend * and powerdown modes on hardware that supports disabling hsync/vsync: * blank_mode == 2: suspend vsync * blank_mode == 3: suspend hsync * blank_mode == 4: powerdown */ /* [danj] I think there's something fishy about those constants... */ /* struct fb_info_platinum *info = (struct fb_info_platinum *) fb; int ctrl; ctrl = ld_le32(&info->platinum_regs->ctrl.r) | 0x33; if (blank) --blank_mode; if (blank & VESA_VSYNC_SUSPEND) ctrl &= ~3; if (blank & VESA_HSYNC_SUSPEND) ctrl &= ~0x30; out_le32(&info->platinum_regs->ctrl.r, ctrl); */ /* TODO: Figure out how the heck to powerdown this thing! */ return 0; } static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct fb_info_platinum *pinfo = info->par; volatile struct cmap_regs __iomem *cmap_regs = pinfo->cmap_regs; if (regno > 255) return 1; red >>= 8; green >>= 8; blue >>= 8; pinfo->palette[regno].red = red; pinfo->palette[regno].green = green; pinfo->palette[regno].blue = blue; out_8(&cmap_regs->addr, regno); /* tell clut what addr to fill */ out_8(&cmap_regs->lut, red); /* send one color channel at */ out_8(&cmap_regs->lut, green); /* a time... 
*/ out_8(&cmap_regs->lut, blue); if (regno < 16) { int i; u32 *pal = info->pseudo_palette; switch (pinfo->cmode) { case CMODE_16: pal[regno] = (regno << 10) | (regno << 5) | regno; break; case CMODE_32: i = (regno << 8) | regno; pal[regno] = (i << 16) | i; break; } } return 0; } static inline int platinum_vram_reqd(int video_mode, int color_mode) { int baseval = vmode_attrs[video_mode-1].hres * (1<<color_mode); if ((video_mode == VMODE_832_624_75) && (color_mode > CMODE_8)) baseval += 0x10; else baseval += 0x20; return vmode_attrs[video_mode-1].vres * baseval + 0x1000; } #define STORE_D2(a, d) { \ out_8(&cmap_regs->addr, (a+32)); \ out_8(&cmap_regs->d2, (d)); \ } static void set_platinum_clock(struct fb_info_platinum *pinfo) { volatile struct cmap_regs __iomem *cmap_regs = pinfo->cmap_regs; struct platinum_regvals *init; init = platinum_reg_init[pinfo->vmode-1]; STORE_D2(6, 0xc6); out_8(&cmap_regs->addr,3+32); if (in_8(&cmap_regs->d2) == 2) { STORE_D2(7, init->clock_params[pinfo->clktype][0]); STORE_D2(8, init->clock_params[pinfo->clktype][1]); STORE_D2(3, 3); } else { STORE_D2(4, init->clock_params[pinfo->clktype][0]); STORE_D2(5, init->clock_params[pinfo->clktype][1]); STORE_D2(3, 2); } __delay(5000); STORE_D2(9, 0xa6); } /* Now how about actually saying, Make it so! */ /* Some things in here probably don't need to be done each time. */ static void platinum_set_hardware(struct fb_info_platinum *pinfo) { volatile struct platinum_regs __iomem *platinum_regs = pinfo->platinum_regs; volatile struct cmap_regs __iomem *cmap_regs = pinfo->cmap_regs; struct platinum_regvals *init; int i; int vmode, cmode; vmode = pinfo->vmode; cmode = pinfo->cmode; init = platinum_reg_init[vmode - 1]; /* Initialize display timing registers */ out_be32(&platinum_regs->reg[24].r, 7); /* turn display off */ for (i = 0; i < 26; ++i) out_be32(&platinum_regs->reg[i+32].r, init->regs[i]); out_be32(&platinum_regs->reg[26+32].r, (pinfo->total_vram == 0x100000 ? 
init->offset[cmode] + 4 - cmode : init->offset[cmode])); out_be32(&platinum_regs->reg[16].r, (unsigned) pinfo->frame_buffer_phys+init->fb_offset+0x10); out_be32(&platinum_regs->reg[18].r, init->pitch[cmode]); out_be32(&platinum_regs->reg[19].r, (pinfo->total_vram == 0x100000 ? init->mode[cmode+1] : init->mode[cmode])); out_be32(&platinum_regs->reg[20].r, (pinfo->total_vram == 0x100000 ? 0x11 : 0x1011)); out_be32(&platinum_regs->reg[21].r, 0x100); out_be32(&platinum_regs->reg[22].r, 1); out_be32(&platinum_regs->reg[23].r, 1); out_be32(&platinum_regs->reg[26].r, 0xc00); out_be32(&platinum_regs->reg[27].r, 0x235); /* out_be32(&platinum_regs->reg[27].r, 0x2aa); */ STORE_D2(0, (pinfo->total_vram == 0x100000 ? init->dacula_ctrl[cmode] & 0xf : init->dacula_ctrl[cmode])); STORE_D2(1, 4); STORE_D2(2, 0); set_platinum_clock(pinfo); out_be32(&platinum_regs->reg[24].r, 0); /* turn display on */ } /* * Set misc info vars for this driver */ static void __devinit platinum_init_info(struct fb_info *info, struct fb_info_platinum *pinfo) { /* Fill fb_info */ info->fbops = &platinumfb_ops; info->pseudo_palette = pinfo->pseudo_palette; info->flags = FBINFO_DEFAULT; info->screen_base = pinfo->frame_buffer + 0x20; fb_alloc_cmap(&info->cmap, 256, 0); /* Fill fix common fields */ strcpy(info->fix.id, "platinum"); info->fix.mmio_start = pinfo->platinum_regs_phys; info->fix.mmio_len = 0x1000; info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.smem_start = pinfo->frame_buffer_phys + 0x20; /* will be updated later */ info->fix.smem_len = pinfo->total_vram - 0x20; info->fix.ywrapstep = 0; info->fix.xpanstep = 0; info->fix.ypanstep = 0; info->fix.type_aux = 0; info->fix.accel = FB_ACCEL_NONE; } static int __devinit platinum_init_fb(struct fb_info *info) { struct fb_info_platinum *pinfo = info->par; struct fb_var_screeninfo var; int sense, rc; sense = read_platinum_sense(pinfo); printk(KERN_INFO "platinumfb: Monitor sense value = 0x%x, ", sense); if (default_vmode == VMODE_NVRAM) { #ifdef 
CONFIG_NVRAM default_vmode = nvram_read_byte(NV_VMODE); if (default_vmode <= 0 || default_vmode > VMODE_MAX || !platinum_reg_init[default_vmode-1]) #endif default_vmode = VMODE_CHOOSE; } if (default_vmode == VMODE_CHOOSE) { default_vmode = mac_map_monitor_sense(sense); } if (default_vmode <= 0 || default_vmode > VMODE_MAX) default_vmode = VMODE_640_480_60; #ifdef CONFIG_NVRAM if (default_cmode == CMODE_NVRAM) default_cmode = nvram_read_byte(NV_CMODE); #endif if (default_cmode < CMODE_8 || default_cmode > CMODE_32) default_cmode = CMODE_8; /* * Reduce the pixel size if we don't have enough VRAM. */ while(default_cmode > CMODE_8 && platinum_vram_reqd(default_vmode, default_cmode) > pinfo->total_vram) default_cmode--; printk("platinumfb: Using video mode %d and color mode %d.\n", default_vmode, default_cmode); /* Setup default var */ if (mac_vmode_to_var(default_vmode, default_cmode, &var) < 0) { /* This shouldn't happen! */ printk("mac_vmode_to_var(%d, %d,) failed\n", default_vmode, default_cmode); try_again: default_vmode = VMODE_640_480_60; default_cmode = CMODE_8; if (mac_vmode_to_var(default_vmode, default_cmode, &var) < 0) { printk(KERN_ERR "platinumfb: mac_vmode_to_var() failed\n"); return -ENXIO; } } /* Initialize info structure */ platinum_init_info(info, pinfo); /* Apply default var */ info->var = var; var.activate = FB_ACTIVATE_NOW; rc = fb_set_var(info, &var); if (rc && (default_vmode != VMODE_640_480_60 || default_cmode != CMODE_8)) goto try_again; /* Register with fbdev layer */ rc = register_framebuffer(info); if (rc < 0) return rc; printk(KERN_INFO "fb%d: Apple Platinum frame buffer device\n", info->node); return 0; } /* * Get the monitor sense value. * Note that this can be called before calibrate_delay, * so we can't use udelay. 
*/ static int read_platinum_sense(struct fb_info_platinum *info) { volatile struct platinum_regs __iomem *platinum_regs = info->platinum_regs; int sense; out_be32(&platinum_regs->reg[23].r, 7); /* turn off drivers */ __delay(2000); sense = (~in_be32(&platinum_regs->reg[23].r) & 7) << 8; /* drive each sense line low in turn and collect the other 2 */ out_be32(&platinum_regs->reg[23].r, 3); /* drive A low */ __delay(2000); sense |= (~in_be32(&platinum_regs->reg[23].r) & 3) << 4; out_be32(&platinum_regs->reg[23].r, 5); /* drive B low */ __delay(2000); sense |= (~in_be32(&platinum_regs->reg[23].r) & 4) << 1; sense |= (~in_be32(&platinum_regs->reg[23].r) & 1) << 2; out_be32(&platinum_regs->reg[23].r, 6); /* drive C low */ __delay(2000); sense |= (~in_be32(&platinum_regs->reg[23].r) & 6) >> 1; out_be32(&platinum_regs->reg[23].r, 7); /* turn off drivers */ return sense; } /* * This routine takes a user-supplied var, and picks the best vmode/cmode from it. * It also updates the var structure to the actual mode data obtained */ static int platinum_var_to_par(struct fb_var_screeninfo *var, struct fb_info_platinum *pinfo, int check_only) { int vmode, cmode; if (mac_var_to_vmode(var, &vmode, &cmode) != 0) { printk(KERN_ERR "platinum_var_to_par: mac_var_to_vmode unsuccessful.\n"); printk(KERN_ERR "platinum_var_to_par: var->xres = %d\n", var->xres); printk(KERN_ERR "platinum_var_to_par: var->yres = %d\n", var->yres); printk(KERN_ERR "platinum_var_to_par: var->xres_virtual = %d\n", var->xres_virtual); printk(KERN_ERR "platinum_var_to_par: var->yres_virtual = %d\n", var->yres_virtual); printk(KERN_ERR "platinum_var_to_par: var->bits_per_pixel = %d\n", var->bits_per_pixel); printk(KERN_ERR "platinum_var_to_par: var->pixclock = %d\n", var->pixclock); printk(KERN_ERR "platinum_var_to_par: var->vmode = %d\n", var->vmode); return -EINVAL; } if (!platinum_reg_init[vmode-1]) { printk(KERN_ERR "platinum_var_to_par, vmode %d not valid.\n", vmode); return -EINVAL; } if 
(platinum_vram_reqd(vmode, cmode) > pinfo->total_vram) { printk(KERN_ERR "platinum_var_to_par, not enough ram for vmode %d, cmode %d.\n", vmode, cmode); return -EINVAL; } if (mac_vmode_to_var(vmode, cmode, var)) return -EINVAL; if (check_only) return 0; pinfo->vmode = vmode; pinfo->cmode = cmode; pinfo->xres = vmode_attrs[vmode-1].hres; pinfo->yres = vmode_attrs[vmode-1].vres; pinfo->xoffset = 0; pinfo->yoffset = 0; pinfo->vxres = pinfo->xres; pinfo->vyres = pinfo->yres; return 0; } /* * Parse user speficied options (`video=platinumfb:') */ static int __init platinumfb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!strncmp(this_opt, "vmode:", 6)) { int vmode = simple_strtoul(this_opt+6, NULL, 0); if (vmode > 0 && vmode <= VMODE_MAX) default_vmode = vmode; } else if (!strncmp(this_opt, "cmode:", 6)) { int depth = simple_strtoul(this_opt+6, NULL, 0); switch (depth) { case 0: case 8: default_cmode = CMODE_8; break; case 15: case 16: default_cmode = CMODE_16; break; case 24: case 32: default_cmode = CMODE_32; break; } } } return 0; } #ifdef __powerpc__ #define invalidate_cache(addr) \ asm volatile("eieio; dcbf 0,%1" \ : "=m" (*(addr)) : "r" (addr) : "memory"); #else #define invalidate_cache(addr) #endif static int __devinit platinumfb_probe(struct of_device* odev, const struct of_device_id *match) { struct device_node *dp = odev->dev.of_node; struct fb_info *info; struct fb_info_platinum *pinfo; volatile __u8 *fbuffer; int bank0, bank1, bank2, bank3, rc; dev_info(&odev->dev, "Found Apple Platinum video hardware\n"); info = framebuffer_alloc(sizeof(*pinfo), &odev->dev); if (info == NULL) { dev_err(&odev->dev, "Failed to allocate fbdev !\n"); return -ENOMEM; } pinfo = info->par; if (of_address_to_resource(dp, 0, &pinfo->rsrc_reg) || of_address_to_resource(dp, 1, &pinfo->rsrc_fb)) { dev_err(&odev->dev, "Can't get resources\n"); framebuffer_release(info); return -ENXIO; } 
dev_dbg(&odev->dev, " registers : 0x%llx...0x%llx\n", (unsigned long long)pinfo->rsrc_reg.start, (unsigned long long)pinfo->rsrc_reg.end); dev_dbg(&odev->dev, " framebuffer: 0x%llx...0x%llx\n", (unsigned long long)pinfo->rsrc_fb.start, (unsigned long long)pinfo->rsrc_fb.end); /* Do not try to request register space, they overlap with the * northbridge and that can fail. Only request framebuffer */ if (!request_mem_region(pinfo->rsrc_fb.start, pinfo->rsrc_fb.end - pinfo->rsrc_fb.start + 1, "platinumfb framebuffer")) { printk(KERN_ERR "platinumfb: Can't request framebuffer !\n"); framebuffer_release(info); return -ENXIO; } /* frame buffer - map only 4MB */ pinfo->frame_buffer_phys = pinfo->rsrc_fb.start; pinfo->frame_buffer = __ioremap(pinfo->rsrc_fb.start, 0x400000, _PAGE_WRITETHRU); pinfo->base_frame_buffer = pinfo->frame_buffer; /* registers */ pinfo->platinum_regs_phys = pinfo->rsrc_reg.start; pinfo->platinum_regs = ioremap(pinfo->rsrc_reg.start, 0x1000); pinfo->cmap_regs_phys = 0xf301b000; /* XXX not in prom? 
*/ request_mem_region(pinfo->cmap_regs_phys, 0x1000, "platinumfb cmap"); pinfo->cmap_regs = ioremap(pinfo->cmap_regs_phys, 0x1000); /* Grok total video ram */ out_be32(&pinfo->platinum_regs->reg[16].r, (unsigned)pinfo->frame_buffer_phys); out_be32(&pinfo->platinum_regs->reg[20].r, 0x1011); /* select max vram */ out_be32(&pinfo->platinum_regs->reg[24].r, 0); /* switch in vram */ fbuffer = pinfo->base_frame_buffer; fbuffer[0x100000] = 0x34; fbuffer[0x100008] = 0x0; invalidate_cache(&fbuffer[0x100000]); fbuffer[0x200000] = 0x56; fbuffer[0x200008] = 0x0; invalidate_cache(&fbuffer[0x200000]); fbuffer[0x300000] = 0x78; fbuffer[0x300008] = 0x0; invalidate_cache(&fbuffer[0x300000]); bank0 = 1; /* builtin 1MB vram, always there */ bank1 = fbuffer[0x100000] == 0x34; bank2 = fbuffer[0x200000] == 0x56; bank3 = fbuffer[0x300000] == 0x78; pinfo->total_vram = (bank0 + bank1 + bank2 + bank3) * 0x100000; printk(KERN_INFO "platinumfb: Total VRAM = %dMB (%d%d%d%d)\n", (unsigned int) (pinfo->total_vram / 1024 / 1024), bank3, bank2, bank1, bank0); /* * Try to determine whether we have an old or a new DACula. 
*/ out_8(&pinfo->cmap_regs->addr, 0x40); pinfo->dactype = in_8(&pinfo->cmap_regs->d2); switch (pinfo->dactype) { case 0x3c: pinfo->clktype = 1; printk(KERN_INFO "platinumfb: DACula type 0x3c\n"); break; case 0x84: pinfo->clktype = 0; printk(KERN_INFO "platinumfb: DACula type 0x84\n"); break; default: pinfo->clktype = 0; printk(KERN_INFO "platinumfb: Unknown DACula type: %x\n", pinfo->dactype); break; } dev_set_drvdata(&odev->dev, info); rc = platinum_init_fb(info); if (rc != 0) { iounmap(pinfo->frame_buffer); iounmap(pinfo->platinum_regs); iounmap(pinfo->cmap_regs); dev_set_drvdata(&odev->dev, NULL); framebuffer_release(info); } return rc; } static int __devexit platinumfb_remove(struct of_device* odev) { struct fb_info *info = dev_get_drvdata(&odev->dev); struct fb_info_platinum *pinfo = info->par; unregister_framebuffer (info); /* Unmap frame buffer and registers */ iounmap(pinfo->frame_buffer); iounmap(pinfo->platinum_regs); iounmap(pinfo->cmap_regs); release_mem_region(pinfo->rsrc_fb.start, pinfo->rsrc_fb.end - pinfo->rsrc_fb.start + 1); release_mem_region(pinfo->cmap_regs_phys, 0x1000); framebuffer_release(info); return 0; } static struct of_device_id platinumfb_match[] = { { .name = "platinum", }, {}, }; static struct of_platform_driver platinum_driver = { .driver = { .name = "platinumfb", .owner = THIS_MODULE, .of_match_table = platinumfb_match, }, .probe = platinumfb_probe, .remove = platinumfb_remove, }; static int __init platinumfb_init(void) { #ifndef MODULE char *option = NULL; if (fb_get_options("platinumfb", &option)) return -ENODEV; platinumfb_setup(option); #endif of_register_platform_driver(&platinum_driver); return 0; } static void __exit platinumfb_exit(void) { of_unregister_platform_driver(&platinum_driver); } MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("framebuffer driver for Apple Platinum video"); module_init(platinumfb_init); #ifdef MODULE module_exit(platinumfb_exit); #endif
gpl-2.0
escalator2015/linux
arch/arm/mach-omap1/time.c
1017
6573
/* * linux/arch/arm/mach-omap1/time.c * * OMAP Timers * * Copyright (C) 2004 Nokia Corporation * Partial timer rewrite and additional dynamic tick timer support by * Tony Lindgen <tony@atomide.com> and * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> * * MPU timer code based on the older MPU timer code for OMAP * Copyright (C) 2000 RidgeRun, Inc. * Author: Greg Lonnon <glonnon@ridgerun.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/io.h> #include <linux/sched_clock.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> #include "iomap.h" #include "common.h" #ifdef CONFIG_OMAP_MPU_TIMER #define OMAP_MPU_TIMER_BASE OMAP_MPU_TIMER1_BASE #define OMAP_MPU_TIMER_OFFSET 0x100 typedef struct { u32 cntl; /* CNTL_TIMER, R/W */ u32 load_tim; /* LOAD_TIM, W */ u32 read_tim; /* READ_TIM, R */ } omap_mpu_timer_regs_t; #define omap_mpu_timer_base(n) \ ((omap_mpu_timer_regs_t __iomem *)OMAP1_IO_ADDRESS(OMAP_MPU_TIMER_BASE + \ (n)*OMAP_MPU_TIMER_OFFSET)) static inline unsigned long notrace omap_mpu_timer_read(int nr) { omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr); return readl(&timer->read_tim); } static inline void omap_mpu_set_autoreset(int nr) { omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr); writel(readl(&timer->cntl) | MPU_TIMER_AR, &timer->cntl); } static inline void omap_mpu_remove_autoreset(int nr) { omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr); writel(readl(&timer->cntl) & ~MPU_TIMER_AR, &timer->cntl); } static inline void omap_mpu_timer_start(int nr, unsigned long load_val, int autoreset) { omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr); unsigned int timerflags = MPU_TIMER_CLOCK_ENABLE | MPU_TIMER_ST; if (autoreset) timerflags |= MPU_TIMER_AR; writel(MPU_TIMER_CLOCK_ENABLE, &timer->cntl); udelay(1); writel(load_val, &timer->load_tim); udelay(1); writel(timerflags, &timer->cntl); } static inline void omap_mpu_timer_stop(int nr) { omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr); writel(readl(&timer->cntl) & ~MPU_TIMER_ST, &timer->cntl); } /* * --------------------------------------------------------------------------- 
* MPU timer 1 ... count down to zero, interrupt, reload * --------------------------------------------------------------------------- */ static int omap_mpu_set_next_event(unsigned long cycles, struct clock_event_device *evt) { omap_mpu_timer_start(0, cycles, 0); return 0; } static int omap_mpu_set_oneshot(struct clock_event_device *evt) { omap_mpu_timer_stop(0); omap_mpu_remove_autoreset(0); return 0; } static int omap_mpu_set_periodic(struct clock_event_device *evt) { omap_mpu_set_autoreset(0); return 0; } static struct clock_event_device clockevent_mpu_timer1 = { .name = "mpu_timer1", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_next_event = omap_mpu_set_next_event, .set_state_periodic = omap_mpu_set_periodic, .set_state_oneshot = omap_mpu_set_oneshot, }; static irqreturn_t omap_mpu_timer1_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = &clockevent_mpu_timer1; evt->event_handler(evt); return IRQ_HANDLED; } static struct irqaction omap_mpu_timer1_irq = { .name = "mpu_timer1", .flags = IRQF_TIMER | IRQF_IRQPOLL, .handler = omap_mpu_timer1_interrupt, }; static __init void omap_init_mpu_timer(unsigned long rate) { setup_irq(INT_TIMER1, &omap_mpu_timer1_irq); omap_mpu_timer_start(0, (rate / HZ) - 1, 1); clockevent_mpu_timer1.cpumask = cpumask_of(0); clockevents_config_and_register(&clockevent_mpu_timer1, rate, 1, -1); } /* * --------------------------------------------------------------------------- * MPU timer 2 ... 
free running 32-bit clock source and scheduler clock * --------------------------------------------------------------------------- */ static u64 notrace omap_mpu_read_sched_clock(void) { return ~omap_mpu_timer_read(1); } static void __init omap_init_clocksource(unsigned long rate) { omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(1); static char err[] __initdata = KERN_ERR "%s: can't register clocksource!\n"; omap_mpu_timer_start(1, ~0, 1); sched_clock_register(omap_mpu_read_sched_clock, 32, rate); if (clocksource_mmio_init(&timer->read_tim, "mpu_timer2", rate, 300, 32, clocksource_mmio_readl_down)) printk(err, "mpu_timer2"); } static void __init omap_mpu_timer_init(void) { struct clk *ck_ref = clk_get(NULL, "ck_ref"); unsigned long rate; BUG_ON(IS_ERR(ck_ref)); rate = clk_get_rate(ck_ref); clk_put(ck_ref); /* PTV = 0 */ rate /= 2; omap_init_mpu_timer(rate); omap_init_clocksource(rate); } #else static inline void omap_mpu_timer_init(void) { pr_err("Bogus timer, should not happen\n"); } #endif /* CONFIG_OMAP_MPU_TIMER */ /* * --------------------------------------------------------------------------- * Timer initialization * --------------------------------------------------------------------------- */ void __init omap1_timer_init(void) { if (omap_32k_timer_init() != 0) omap_mpu_timer_init(); }
gpl-2.0
ztemt/NX512J_kernel
net/bluetooth/hci_sock.c
1529
23082
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI sockets. 
*/ #include <linux/export.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/hci_mon.h> static atomic_t monitor_promisc = ATOMIC_INIT(0); /* ----- HCI socket interface ----- */ static inline int hci_test_bit(int nr, void *addr) { return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31)); } /* Security filter */ static struct hci_sec_filter hci_sec_filter = { /* Packet types */ 0x10, /* Events */ { 0x1000d9fe, 0x0000b00c }, /* Commands */ { { 0x0 }, /* OGF_LINK_CTL */ { 0xbe000006, 0x00000001, 0x00000000, 0x00 }, /* OGF_LINK_POLICY */ { 0x00005200, 0x00000000, 0x00000000, 0x00 }, /* OGF_HOST_CTL */ { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 }, /* OGF_INFO_PARAM */ { 0x000002be, 0x00000000, 0x00000000, 0x00 }, /* OGF_STATUS_PARAM */ { 0x000000ea, 0x00000000, 0x00000000, 0x00 } } }; static struct bt_sock_list hci_sk_list = { .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock) }; /* Send frame to RAW socket */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) { struct sock *sk; struct sk_buff *skb_copy = NULL; BT_DBG("hdev %p len %d", hdev, skb->len); read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { struct hci_filter *flt; struct sk_buff *nskb; if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) continue; /* Don't send frame to the socket it came from */ if (skb->sk == sk) continue; if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) continue; /* Apply filter */ flt = &hci_pi(sk)->filter; if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? 
0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask)) continue; if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); if (!hci_test_bit(evt, &flt->event_mask)) continue; if (flt->opcode && ((evt == HCI_EV_CMD_COMPLETE && flt->opcode != get_unaligned((__le16 *)(skb->data + 3))) || (evt == HCI_EV_CMD_STATUS && flt->opcode != get_unaligned((__le16 *)(skb->data + 4))))) continue; } if (!skb_copy) { /* Create a private copy with headroom */ skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC); if (!skb_copy) continue; /* Put type byte before the data */ memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1); } nskb = skb_clone(skb_copy, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&hci_sk_list.lock); kfree_skb(skb_copy); } /* Send frame to control socket */ void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk) { struct sock *sk; BT_DBG("len %d", skb->len); read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { struct sk_buff *nskb; /* Skip the original socket */ if (sk == skip_sk) continue; if (sk->sk_state != BT_BOUND) continue; if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL) continue; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&hci_sk_list.lock); } /* Send frame to monitor socket */ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) { struct sock *sk; struct sk_buff *skb_copy = NULL; __le16 opcode; if (!atomic_read(&monitor_promisc)) return; BT_DBG("hdev %p len %d", hdev, skb->len); switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT); break; case HCI_EVENT_PKT: opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT); break; case HCI_ACLDATA_PKT: if (bt_cb(skb)->incoming) opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT); else opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT); break; case 
HCI_SCODATA_PKT: if (bt_cb(skb)->incoming) opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT); else opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT); break; default: return; } read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { struct sk_buff *nskb; if (sk->sk_state != BT_BOUND) continue; if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR) continue; if (!skb_copy) { struct hci_mon_hdr *hdr; /* Create a private copy with headroom */ skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC); if (!skb_copy) continue; /* Put header before the data */ hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE); hdr->opcode = opcode; hdr->index = cpu_to_le16(hdev->id); hdr->len = cpu_to_le16(skb->len); } nskb = skb_clone(skb_copy, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&hci_sk_list.lock); kfree_skb(skb_copy); } static void send_monitor_event(struct sk_buff *skb) { struct sock *sk; BT_DBG("len %d", skb->len); read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { struct sk_buff *nskb; if (sk->sk_state != BT_BOUND) continue; if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR) continue; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&hci_sk_list.lock); } static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) { struct hci_mon_hdr *hdr; struct hci_mon_new_index *ni; struct sk_buff *skb; __le16 opcode; switch (event) { case HCI_DEV_REG: skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC); if (!skb) return NULL; ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE); ni->type = hdev->dev_type; ni->bus = hdev->bus; bacpy(&ni->bdaddr, &hdev->bdaddr); memcpy(ni->name, hdev->name, 8); opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX); break; case HCI_DEV_UNREG: skb = bt_skb_alloc(0, GFP_ATOMIC); if (!skb) return NULL; opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX); break; default: return NULL; } 
__net_timestamp(skb); hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = opcode; hdr->index = cpu_to_le16(hdev->id); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); return skb; } static void send_monitor_replay(struct sock *sk) { struct hci_dev *hdev; read_lock(&hci_dev_list_lock); list_for_each_entry(hdev, &hci_dev_list, list) { struct sk_buff *skb; skb = create_monitor_event(hdev, HCI_DEV_REG); if (!skb) continue; if (sock_queue_rcv_skb(sk, skb)) kfree_skb(skb); } read_unlock(&hci_dev_list_lock); } /* Generate internal stack event */ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data) { struct hci_event_hdr *hdr; struct hci_ev_stack_internal *ev; struct sk_buff *skb; skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC); if (!skb) return; hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE); hdr->evt = HCI_EV_STACK_INTERNAL; hdr->plen = sizeof(*ev) + dlen; ev = (void *) skb_put(skb, sizeof(*ev) + dlen); ev->type = type; memcpy(ev->data, data, dlen); bt_cb(skb)->incoming = 1; __net_timestamp(skb); bt_cb(skb)->pkt_type = HCI_EVENT_PKT; skb->dev = (void *) hdev; hci_send_to_sock(hdev, skb); kfree_skb(skb); } void hci_sock_dev_event(struct hci_dev *hdev, int event) { struct hci_ev_si_device ev; BT_DBG("hdev %s event %d", hdev->name, event); /* Send event to monitor */ if (atomic_read(&monitor_promisc)) { struct sk_buff *skb; skb = create_monitor_event(hdev, event); if (skb) { send_monitor_event(skb); kfree_skb(skb); } } /* Send event to sockets */ ev.event = event; ev.dev_id = hdev->id; hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev); if (event == HCI_DEV_UNREG) { struct sock *sk; /* Detach sockets from device */ read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { bh_lock_sock_nested(sk); if (hci_pi(sk)->hdev == hdev) { hci_pi(sk)->hdev = NULL; sk->sk_err = EPIPE; sk->sk_state = BT_OPEN; sk->sk_state_change(sk); hci_dev_put(hdev); } bh_unlock_sock(sk); } read_unlock(&hci_sk_list.lock); } } 
static int hci_sock_release(struct socket *sock) { struct sock *sk = sock->sk; struct hci_dev *hdev; BT_DBG("sock %p sk %p", sock, sk); if (!sk) return 0; hdev = hci_pi(sk)->hdev; if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR) atomic_dec(&monitor_promisc); bt_sock_unlink(&hci_sk_list, sk); if (hdev) { atomic_dec(&hdev->promisc); hci_dev_put(hdev); } sock_orphan(sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); sock_put(sk); return 0; } static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg) { bdaddr_t bdaddr; int err; if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) return -EFAULT; hci_dev_lock(hdev); err = hci_blacklist_add(hdev, &bdaddr, 0); hci_dev_unlock(hdev); return err; } static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg) { bdaddr_t bdaddr; int err; if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) return -EFAULT; hci_dev_lock(hdev); err = hci_blacklist_del(hdev, &bdaddr, 0); hci_dev_unlock(hdev); return err; } /* Ioctls that require bound socket */ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { struct hci_dev *hdev = hci_pi(sk)->hdev; if (!hdev) return -EBADFD; switch (cmd) { case HCISETRAW: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return -EPERM; if (arg) set_bit(HCI_RAW, &hdev->flags); else clear_bit(HCI_RAW, &hdev->flags); return 0; case HCIGETCONNINFO: return hci_get_conn_info(hdev, (void __user *) arg); case HCIGETAUTHINFO: return hci_get_auth_info(hdev, (void __user *) arg); case HCIBLOCKADDR: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_sock_blacklist_add(hdev, (void __user *) arg); case HCIUNBLOCKADDR: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_sock_blacklist_del(hdev, (void __user *) arg); default: if (hdev->ioctl) return hdev->ioctl(hdev, cmd, arg); return -EINVAL; } } static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { 
struct sock *sk = sock->sk; void __user *argp = (void __user *) arg; int err; BT_DBG("cmd %x arg %lx", cmd, arg); switch (cmd) { case HCIGETDEVLIST: return hci_get_dev_list(argp); case HCIGETDEVINFO: return hci_get_dev_info(argp); case HCIGETCONNLIST: return hci_get_conn_list(argp); case HCIDEVUP: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_open(arg); case HCIDEVDOWN: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_close(arg); case HCIDEVRESET: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_reset(arg); case HCIDEVRESTAT: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_reset_stat(arg); case HCISETSCAN: case HCISETAUTH: case HCISETENCRYPT: case HCISETPTYPE: case HCISETLINKPOL: case HCISETLINKMODE: case HCISETACLMTU: case HCISETSCOMTU: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_cmd(cmd, argp); case HCIINQUIRY: return hci_inquiry(argp); default: lock_sock(sk); err = hci_sock_bound_ioctl(sk, cmd, arg); release_sock(sk); return err; } } static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_hci haddr; struct sock *sk = sock->sk; struct hci_dev *hdev = NULL; int len, err = 0; BT_DBG("sock %p sk %p", sock, sk); if (!addr) return -EINVAL; memset(&haddr, 0, sizeof(haddr)); len = min_t(unsigned int, sizeof(haddr), addr_len); memcpy(&haddr, addr, len); if (haddr.hci_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); if (sk->sk_state == BT_BOUND) { err = -EALREADY; goto done; } switch (haddr.hci_channel) { case HCI_CHANNEL_RAW: if (hci_pi(sk)->hdev) { err = -EALREADY; goto done; } if (haddr.hci_dev != HCI_DEV_NONE) { hdev = hci_dev_get(haddr.hci_dev); if (!hdev) { err = -ENODEV; goto done; } atomic_inc(&hdev->promisc); } hci_pi(sk)->hdev = hdev; break; case HCI_CHANNEL_CONTROL: if (haddr.hci_dev != HCI_DEV_NONE) { err = -EINVAL; goto done; } if (!capable(CAP_NET_ADMIN)) { err = -EPERM; goto done; } break; case HCI_CHANNEL_MONITOR: if (haddr.hci_dev != HCI_DEV_NONE) 
{ err = -EINVAL; goto done; } if (!capable(CAP_NET_RAW)) { err = -EPERM; goto done; } send_monitor_replay(sk); atomic_inc(&monitor_promisc); break; default: err = -EINVAL; goto done; } hci_pi(sk)->channel = haddr.hci_channel; sk->sk_state = BT_BOUND; done: release_sock(sk); return err; } static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) { struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; struct sock *sk = sock->sk; struct hci_dev *hdev = hci_pi(sk)->hdev; BT_DBG("sock %p sk %p", sock, sk); if (!hdev) return -EBADFD; lock_sock(sk); *addr_len = sizeof(*haddr); haddr->hci_family = AF_BLUETOOTH; haddr->hci_dev = hdev->id; haddr->hci_channel= 0; release_sock(sk); return 0; } static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { __u32 mask = hci_pi(sk)->cmsg_mask; if (mask & HCI_CMSG_DIR) { int incoming = bt_cb(skb)->incoming; put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); } if (mask & HCI_CMSG_TSTAMP) { #ifdef CONFIG_COMPAT struct compat_timeval ctv; #endif struct timeval tv; void *data; int len; skb_get_timestamp(skb, &tv); data = &tv; len = sizeof(tv); #ifdef CONFIG_COMPAT if (!COMPAT_USE_64BIT_TIME && (msg->msg_flags & MSG_CMSG_COMPAT)) { ctv.tv_sec = tv.tv_sec; ctv.tv_usec = tv.tv_usec; data = &ctv; len = sizeof(ctv); } #endif put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data); } } static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; BT_DBG("sock %p, sk %p", sock, sk); if (flags & (MSG_OOB)) return -EOPNOTSUPP; if (sk->sk_state == BT_CLOSED) return 0; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) return err; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 
switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: hci_sock_cmsg(sk, msg, skb); break; case HCI_CHANNEL_CONTROL: case HCI_CHANNEL_MONITOR: sock_recv_timestamp(msg, sk, skb); break; } skb_free_datagram(sk, skb); return err ? : copied; } static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct hci_dev *hdev; struct sk_buff *skb; int err; BT_DBG("sock %p sk %p", sock, sk); if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE)) return -EINVAL; if (len < 4 || len > HCI_MAX_FRAME_SIZE) return -EINVAL; lock_sock(sk); switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: break; case HCI_CHANNEL_CONTROL: err = mgmt_control(sk, msg, len); goto done; case HCI_CHANNEL_MONITOR: err = -EOPNOTSUPP; goto done; default: err = -EINVAL; goto done; } hdev = hci_pi(sk)->hdev; if (!hdev) { err = -EBADFD; goto done; } if (!test_bit(HCI_UP, &hdev->flags)) { err = -ENETDOWN; goto done; } skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto done; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { err = -EFAULT; goto drop; } bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); skb_pull(skb, 1); skb->dev = (void *) hdev; if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { u16 opcode = get_unaligned_le16(skb->data); u16 ogf = hci_opcode_ogf(opcode); u16 ocf = hci_opcode_ocf(opcode); if (((ogf > HCI_SFLT_MAX_OGF) || !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && !capable(CAP_NET_RAW)) { err = -EPERM; goto drop; } if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) { skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } else { /* Stand-alone HCI commands must be flaged as * single-command requests. 
*/ bt_cb(skb)->req.start = true; skb_queue_tail(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); } } else { if (!capable(CAP_NET_RAW)) { err = -EPERM; goto drop; } skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } err = len; done: release_sock(sk); return err; drop: kfree_skb(skb); goto done; } static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len) { struct hci_ufilter uf = { .opcode = 0 }; struct sock *sk = sock->sk; int err = 0, opt = 0; BT_DBG("sk %p, opt %d", sk, optname); lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { err = -EINVAL; goto done; } switch (optname) { case HCI_DATA_DIR: if (get_user(opt, (int __user *)optval)) { err = -EFAULT; break; } if (opt) hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR; else hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR; break; case HCI_TIME_STAMP: if (get_user(opt, (int __user *)optval)) { err = -EFAULT; break; } if (opt) hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP; else hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP; break; case HCI_FILTER: { struct hci_filter *f = &hci_pi(sk)->filter; uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); uf.event_mask[1] = *((u32 *) f->event_mask + 1); } len = min_t(unsigned int, len, sizeof(uf)); if (copy_from_user(&uf, optval, len)) { err = -EFAULT; break; } if (!capable(CAP_NET_RAW)) { uf.type_mask &= hci_sec_filter.type_mask; uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0); uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1); } { struct hci_filter *f = &hci_pi(sk)->filter; f->type_mask = uf.type_mask; f->opcode = uf.opcode; *((u32 *) f->event_mask + 0) = uf.event_mask[0]; *((u32 *) f->event_mask + 1) = uf.event_mask[1]; } break; default: err = -ENOPROTOOPT; break; } done: release_sock(sk); return err; } static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { 
struct hci_ufilter uf; struct sock *sk = sock->sk; int len, opt, err = 0; BT_DBG("sk %p, opt %d", sk, optname); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { err = -EINVAL; goto done; } switch (optname) { case HCI_DATA_DIR: if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) opt = 1; else opt = 0; if (put_user(opt, optval)) err = -EFAULT; break; case HCI_TIME_STAMP: if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP) opt = 1; else opt = 0; if (put_user(opt, optval)) err = -EFAULT; break; case HCI_FILTER: { struct hci_filter *f = &hci_pi(sk)->filter; memset(&uf, 0, sizeof(uf)); uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); uf.event_mask[1] = *((u32 *) f->event_mask + 1); } len = min_t(unsigned int, len, sizeof(uf)); if (copy_to_user(optval, &uf, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } done: release_sock(sk); return err; } static const struct proto_ops hci_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = hci_sock_release, .bind = hci_sock_bind, .getname = hci_sock_getname, .sendmsg = hci_sock_sendmsg, .recvmsg = hci_sock_recvmsg, .ioctl = hci_sock_ioctl, .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = hci_sock_setsockopt, .getsockopt = hci_sock_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static struct proto hci_sk_proto = { .name = "HCI", .owner = THIS_MODULE, .obj_size = sizeof(struct hci_pinfo) }; static int hci_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sock->ops = &hci_sock_ops; sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sock->state = 
SS_UNCONNECTED; sk->sk_state = BT_OPEN; bt_sock_link(&hci_sk_list, sk); return 0; } static const struct net_proto_family hci_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = hci_sock_create, }; int __init hci_sock_init(void) { int err; err = proto_register(&hci_sk_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops); if (err < 0) { BT_ERR("HCI socket registration failed"); goto error; } err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create HCI proc file"); bt_sock_unregister(BTPROTO_HCI); goto error; } BT_INFO("HCI socket layer initialized"); return 0; error: proto_unregister(&hci_sk_proto); return err; } void hci_sock_cleanup(void) { bt_procfs_cleanup(&init_net, "hci"); bt_sock_unregister(BTPROTO_HCI); proto_unregister(&hci_sk_proto); }
gpl-2.0
aatjitra/PR25
fs/sysfs/inode.c
1529
8675
/* * fs/sysfs/inode.c - basic sysfs inode and dentry operations * * Copyright (c) 2001-3 Patrick Mochel * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * * This file is released under the GPLv2. * * Please see Documentation/filesystems/sysfs.txt for more information. */ #undef DEBUG #include <linux/pagemap.h> #include <linux/namei.h> #include <linux/backing-dev.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/xattr.h> #include <linux/security.h> #include "sysfs.h" extern struct super_block * sysfs_sb; static const struct address_space_operations sysfs_aops = { .readpage = simple_readpage, .write_begin = simple_write_begin, .write_end = simple_write_end, }; static struct backing_dev_info sysfs_backing_dev_info = { .name = "sysfs", .ra_pages = 0, /* No readahead */ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, }; static const struct inode_operations sysfs_inode_operations ={ .permission = sysfs_permission, .setattr = sysfs_setattr, .getattr = sysfs_getattr, .setxattr = sysfs_setxattr, }; int __init sysfs_inode_init(void) { return bdi_init(&sysfs_backing_dev_info); } static struct sysfs_inode_attrs *sysfs_init_inode_attrs(struct sysfs_dirent *sd) { struct sysfs_inode_attrs *attrs; struct iattr *iattrs; attrs = kzalloc(sizeof(struct sysfs_inode_attrs), GFP_KERNEL); if (!attrs) return NULL; iattrs = &attrs->ia_iattr; /* assign default attributes */ iattrs->ia_mode = sd->s_mode; iattrs->ia_uid = 0; iattrs->ia_gid = 0; iattrs->ia_atime = iattrs->ia_mtime = iattrs->ia_ctime = CURRENT_TIME; return attrs; } int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr) { struct sysfs_inode_attrs *sd_attrs; struct iattr *iattrs; unsigned int ia_valid = iattr->ia_valid; sd_attrs = sd->s_iattr; if (!sd_attrs) { /* setting attributes for the first time, allocate now */ sd_attrs = sysfs_init_inode_attrs(sd); if (!sd_attrs) return 
-ENOMEM; sd->s_iattr = sd_attrs; } /* attributes were changed at least once in past */ iattrs = &sd_attrs->ia_iattr; if (ia_valid & ATTR_UID) iattrs->ia_uid = iattr->ia_uid; if (ia_valid & ATTR_GID) iattrs->ia_gid = iattr->ia_gid; if (ia_valid & ATTR_ATIME) iattrs->ia_atime = iattr->ia_atime; if (ia_valid & ATTR_MTIME) iattrs->ia_mtime = iattr->ia_mtime; if (ia_valid & ATTR_CTIME) iattrs->ia_ctime = iattr->ia_ctime; if (ia_valid & ATTR_MODE) { umode_t mode = iattr->ia_mode; iattrs->ia_mode = sd->s_mode = mode; } return 0; } int sysfs_setattr(struct dentry *dentry, struct iattr *iattr) { struct inode *inode = dentry->d_inode; struct sysfs_dirent *sd = dentry->d_fsdata; int error; if (!sd) return -EINVAL; mutex_lock(&sysfs_mutex); error = inode_change_ok(inode, iattr); if (error) goto out; error = sysfs_sd_setattr(sd, iattr); if (error) goto out; /* this ignores size changes */ setattr_copy(inode, iattr); out: mutex_unlock(&sysfs_mutex); return error; } static int sysfs_sd_setsecdata(struct sysfs_dirent *sd, void **secdata, u32 *secdata_len) { struct sysfs_inode_attrs *iattrs; void *old_secdata; size_t old_secdata_len; if (!sd->s_iattr) { sd->s_iattr = sysfs_init_inode_attrs(sd); if (!sd->s_iattr) return -ENOMEM; } iattrs = sd->s_iattr; old_secdata = iattrs->ia_secdata; old_secdata_len = iattrs->ia_secdata_len; iattrs->ia_secdata = *secdata; iattrs->ia_secdata_len = *secdata_len; *secdata = old_secdata; *secdata_len = old_secdata_len; return 0; } int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct sysfs_dirent *sd = dentry->d_fsdata; void *secdata; int error; u32 secdata_len = 0; if (!sd) return -EINVAL; if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) { const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; error = security_inode_setsecurity(dentry->d_inode, suffix, value, size, flags); if (error) goto out; error = security_inode_getsecctx(dentry->d_inode, &secdata, &secdata_len); if 
(error) goto out; mutex_lock(&sysfs_mutex); error = sysfs_sd_setsecdata(sd, &secdata, &secdata_len); mutex_unlock(&sysfs_mutex); if (secdata) security_release_secctx(secdata, secdata_len); } else return -EINVAL; out: return error; } static inline void set_default_inode_attr(struct inode * inode, mode_t mode) { inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; } static inline void set_inode_attr(struct inode * inode, struct iattr * iattr) { inode->i_uid = iattr->ia_uid; inode->i_gid = iattr->ia_gid; inode->i_atime = iattr->ia_atime; inode->i_mtime = iattr->ia_mtime; inode->i_ctime = iattr->ia_ctime; } static int sysfs_count_nlink(struct sysfs_dirent *sd) { struct sysfs_dirent *child; int nr = 0; for (child = sd->s_dir.children; child; child = child->s_sibling) if (sysfs_type(child) == SYSFS_DIR) nr++; return nr + 2; } static void sysfs_refresh_inode(struct sysfs_dirent *sd, struct inode *inode) { struct sysfs_inode_attrs *iattrs = sd->s_iattr; inode->i_mode = sd->s_mode; if (iattrs) { /* sysfs_dirent has non-default attributes * get them from persistent copy in sysfs_dirent */ set_inode_attr(inode, &iattrs->ia_iattr); security_inode_notifysecctx(inode, iattrs->ia_secdata, iattrs->ia_secdata_len); } if (sysfs_type(sd) == SYSFS_DIR) inode->i_nlink = sysfs_count_nlink(sd); } int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct sysfs_dirent *sd = dentry->d_fsdata; struct inode *inode = dentry->d_inode; mutex_lock(&sysfs_mutex); sysfs_refresh_inode(sd, inode); mutex_unlock(&sysfs_mutex); generic_fillattr(inode, stat); return 0; } static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode) { struct bin_attribute *bin_attr; inode->i_private = sysfs_get(sd); inode->i_mapping->a_ops = &sysfs_aops; inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info; inode->i_op = &sysfs_inode_operations; set_default_inode_attr(inode, sd->s_mode); sysfs_refresh_inode(sd, inode); /* initialize 
inode according to type */ switch (sysfs_type(sd)) { case SYSFS_DIR: inode->i_op = &sysfs_dir_inode_operations; inode->i_fop = &sysfs_dir_operations; break; case SYSFS_KOBJ_ATTR: inode->i_size = PAGE_SIZE; inode->i_fop = &sysfs_file_operations; break; case SYSFS_KOBJ_BIN_ATTR: bin_attr = sd->s_bin_attr.bin_attr; inode->i_size = bin_attr->size; inode->i_fop = &bin_fops; break; case SYSFS_KOBJ_LINK: inode->i_op = &sysfs_symlink_inode_operations; break; default: BUG(); } unlock_new_inode(inode); } /** * sysfs_get_inode - get inode for sysfs_dirent * @sb: super block * @sd: sysfs_dirent to allocate inode for * * Get inode for @sd. If such inode doesn't exist, a new inode * is allocated and basics are initialized. New inode is * returned locked. * * LOCKING: * Kernel thread context (may sleep). * * RETURNS: * Pointer to allocated inode on success, NULL on failure. */ struct inode * sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd) { struct inode *inode; inode = iget_locked(sb, sd->s_ino); if (inode && (inode->i_state & I_NEW)) sysfs_init_inode(sd, inode); return inode; } /* * The sysfs_dirent serves as both an inode and a directory entry for sysfs. * To prevent the sysfs inode numbers from being freed prematurely we take a * reference to sysfs_dirent from the sysfs inode. A * super_operations.evict_inode() implementation is needed to drop that * reference upon inode destruction. 
*/ void sysfs_evict_inode(struct inode *inode) { struct sysfs_dirent *sd = inode->i_private; truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); sysfs_put(sd); } int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const char *name) { struct sysfs_addrm_cxt acxt; struct sysfs_dirent *sd; if (!dir_sd) return -ENOENT; sysfs_addrm_start(&acxt, dir_sd); sd = sysfs_find_dirent(dir_sd, ns, name); if (sd && (sd->s_ns != ns)) sd = NULL; if (sd) sysfs_remove_one(&acxt, sd); sysfs_addrm_finish(&acxt); if (sd) return 0; else return -ENOENT; } int sysfs_permission(struct inode *inode, int mask, unsigned int flags) { struct sysfs_dirent *sd; if (flags & IPERM_FLAG_RCU) return -ECHILD; sd = inode->i_private; mutex_lock(&sysfs_mutex); sysfs_refresh_inode(sd, inode); mutex_unlock(&sysfs_mutex); return generic_permission(inode, mask, flags, NULL); }
gpl-2.0
ztemt/NX508J_kernel
net/netfilter/nf_conntrack_acct.c
2297
3071
/* Accouting handling for netfilter. */ /* * (C) 2008 Krzysztof Piotr Oledzki <ole@ans.pl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netfilter.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/moduleparam.h> #include <linux/export.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_acct.h> static bool nf_ct_acct __read_mostly; module_param_named(acct, nf_ct_acct, bool, 0644); MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting."); #ifdef CONFIG_SYSCTL static struct ctl_table acct_sysctl_table[] = { { .procname = "nf_conntrack_acct", .data = &init_net.ct.sysctl_acct, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, {} }; #endif /* CONFIG_SYSCTL */ unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir) { struct nf_conn_counter *acct; acct = nf_conn_acct_find(ct); if (!acct) return 0; return seq_printf(s, "packets=%llu bytes=%llu ", (unsigned long long)atomic64_read(&acct[dir].packets), (unsigned long long)atomic64_read(&acct[dir].bytes)); }; EXPORT_SYMBOL_GPL(seq_print_acct); static struct nf_ct_ext_type acct_extend __read_mostly = { .len = sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]), .align = __alignof__(struct nf_conn_counter[IP_CT_DIR_MAX]), .id = NF_CT_EXT_ACCT, }; #ifdef CONFIG_SYSCTL static int nf_conntrack_acct_init_sysctl(struct net *net) { struct ctl_table *table; table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table), GFP_KERNEL); if (!table) goto out; table[0].data = &net->ct.sysctl_acct; /* Don't export sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) table[0].procname = NULL; net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter", table); if (!net->ct.acct_sysctl_header) { printk(KERN_ERR 
"nf_conntrack_acct: can't register to sysctl.\n"); goto out_register; } return 0; out_register: kfree(table); out: return -ENOMEM; } static void nf_conntrack_acct_fini_sysctl(struct net *net) { struct ctl_table *table; table = net->ct.acct_sysctl_header->ctl_table_arg; unregister_net_sysctl_table(net->ct.acct_sysctl_header); kfree(table); } #else static int nf_conntrack_acct_init_sysctl(struct net *net) { return 0; } static void nf_conntrack_acct_fini_sysctl(struct net *net) { } #endif int nf_conntrack_acct_pernet_init(struct net *net) { net->ct.sysctl_acct = nf_ct_acct; return nf_conntrack_acct_init_sysctl(net); } void nf_conntrack_acct_pernet_fini(struct net *net) { nf_conntrack_acct_fini_sysctl(net); } int nf_conntrack_acct_init(void) { int ret = nf_ct_extend_register(&acct_extend); if (ret < 0) pr_err("nf_conntrack_acct: Unable to register extension\n"); return ret; } void nf_conntrack_acct_fini(void) { nf_ct_extend_unregister(&acct_extend); }
gpl-2.0
darklord4822/android_kernel_lg_l60
arch/arm/mach-pxa/colibri-pxa300.c
4089
4671
/*
 * arch/arm/mach-pxa/colibri-pxa300.c
 *
 * Support for Toradex PXA300/310 based Colibri module
 *
 * Daniel Mack <daniel@caiaq.de>
 * Matthias Meier <matthias.j.meier@gmx.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>

#include <asm/mach-types.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>

#include <mach/pxa300.h>
#include <mach/colibri.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/video-pxafb.h>
#include <mach/audio.h>

#include "generic.h"
#include "devices.h"

#ifdef CONFIG_MACH_COLIBRI_EVALBOARD
/* Evalboard pin mux: SD/MMC, USB host and I2C lines. */
static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = {
	/* MMC */
	GPIO7_MMC1_CLK,
	GPIO14_MMC1_CMD,
	GPIO3_MMC1_DAT0,
	GPIO4_MMC1_DAT1,
	GPIO5_MMC1_DAT2,
	GPIO6_MMC1_DAT3,
	GPIO13_GPIO,	/* GPIO13_COLIBRI_PXA300_SD_DETECT */

	/* UHC */
	GPIO0_2_USBH_PEN,
	GPIO1_2_USBH_PWR,
	GPIO77_USB_P3_1,
	GPIO78_USB_P3_2,
	GPIO79_USB_P3_3,
	GPIO80_USB_P3_4,
	GPIO81_USB_P3_5,
	GPIO82_USB_P3_6,

	/* I2C */
	GPIO21_I2C_SCL,
	GPIO22_I2C_SDA,
};
#else
static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = {};
#endif

#if defined(CONFIG_AX88796)
#define COLIBRI_ETH_IRQ_GPIO	mfp_to_gpio(GPIO26_GPIO)

/*
 * Asix AX88796 Ethernet
 */
static struct ax_plat_data colibri_asix_platdata = {
	.flags		= 0, /* defined later */
	.wordlength	= 2,
};

static struct resource colibri_asix_resource[] = {
	[0] = {
		.start	= PXA3xx_CS2_PHYS,
		.end	= PXA3xx_CS2_PHYS + (0x20 * 2) - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO),
		.end	= PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO),
		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
	}
};

static struct platform_device asix_device = {
	.name		= "ax88796",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(colibri_asix_resource),
	.resource	= colibri_asix_resource,
	.dev		= {
		.platform_data = &colibri_asix_platdata
	}
};

static mfp_cfg_t colibri_pxa300_eth_pin_config[] __initdata = {
	GPIO1_nCS2,			/* AX88796 chip select */
	GPIO26_GPIO | MFP_PULL_HIGH	/* AX88796 IRQ */
};

static void __init colibri_pxa300_init_eth(void)
{
	colibri_pxa3xx_init_eth(&colibri_asix_platdata);
	pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa300_eth_pin_config));
	platform_device_register(&asix_device);
}
#else
static inline void __init colibri_pxa300_init_eth(void) {}
#endif /* CONFIG_AX88796 */

#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
/* LCD data, control and sync pin mux. */
static mfp_cfg_t colibri_pxa300_lcd_pin_config[] __initdata = {
	GPIO54_LCD_LDD_0,
	GPIO55_LCD_LDD_1,
	GPIO56_LCD_LDD_2,
	GPIO57_LCD_LDD_3,
	GPIO58_LCD_LDD_4,
	GPIO59_LCD_LDD_5,
	GPIO60_LCD_LDD_6,
	GPIO61_LCD_LDD_7,
	GPIO62_LCD_LDD_8,
	GPIO63_LCD_LDD_9,
	GPIO64_LCD_LDD_10,
	GPIO65_LCD_LDD_11,
	GPIO66_LCD_LDD_12,
	GPIO67_LCD_LDD_13,
	GPIO68_LCD_LDD_14,
	GPIO69_LCD_LDD_15,
	GPIO70_LCD_LDD_16,
	GPIO71_LCD_LDD_17,
	GPIO62_LCD_CS_N,
	GPIO72_LCD_FCLK,
	GPIO73_LCD_LCLK,
	GPIO74_LCD_PCLK,
	GPIO75_LCD_BIAS,
	GPIO76_LCD_VSYNC,
};

static void __init colibri_pxa300_init_lcd(void)
{
	pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa300_lcd_pin_config));
}
#else
static inline void colibri_pxa300_init_lcd(void) {}
#endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */

#if defined(CONFIG_SND_AC97_CODEC) || defined(CONFIG_SND_AC97_CODEC_MODULE)
static mfp_cfg_t colibri_pxa310_ac97_pin_config[] __initdata = {
	GPIO24_AC97_SYSCLK,
	GPIO23_AC97_nACRESET,
	GPIO25_AC97_SDATA_IN_0,
	GPIO27_AC97_SDATA_OUT,
	GPIO28_AC97_SYNC,
	GPIO29_AC97_BITCLK
};

static inline void __init colibri_pxa310_init_ac97(void)
{
	/* no AC97 codec on Colibri PXA300 */
	if (!cpu_is_pxa310())
		return;

	pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa310_ac97_pin_config));
	pxa_set_ac97_info(NULL);
}
#else
static inline void colibri_pxa310_init_ac97(void) {}
#endif

void __init colibri_pxa300_init(void)
{
	colibri_pxa300_init_eth();
	colibri_pxa3xx_init_nand();
	colibri_pxa300_init_lcd();
	colibri_pxa3xx_init_lcd(mfp_to_gpio(GPIO39_GPIO));
	colibri_pxa310_init_ac97();

	/* Evalboard init */
	pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa300_evalboard_pin_config));
	colibri_evalboard_init();
}

MACHINE_START(COLIBRI300, "Toradex Colibri PXA300")
	.atag_offset	= 0x100,
	.init_machine	= colibri_pxa300_init,
	.map_io		= pxa3xx_map_io,
	.nr_irqs	= PXA_NR_IRQS,
	.init_irq	= pxa3xx_init_irq,
	.handle_irq	= pxa3xx_handle_irq,
	.init_time	= pxa_timer_init,
	.restart	= pxa_restart,
MACHINE_END
gpl-2.0
ravikirancg/android_kernel_gionee_msm8974
drivers/net/can/mscan/mscan.c
4857
18567
/* * CAN bus driver for the alone generic (as possible as) MSCAN controller. * * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>, * Varma Electronics Oy * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/list.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/io.h> #include "mscan.h" static struct can_bittiming_const mscan_bittiming_const = { .name = "mscan", .tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; struct mscan_state { u8 mode; u8 canrier; u8 cantier; }; static enum can_state state_map[] = { CAN_STATE_ERROR_ACTIVE, CAN_STATE_ERROR_WARNING, CAN_STATE_ERROR_PASSIVE, CAN_STATE_BUS_OFF }; static int mscan_set_mode(struct net_device *dev, u8 mode) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; int ret = 0; int i; u8 canctl1; if (mode != MSCAN_NORMAL_MODE) { if (priv->tx_active) { /* Abort transfers before going to sleep */# out_8(&regs->cantarq, priv->tx_active); /* Suppress TX done interrupts 
*/ out_8(&regs->cantier, 0); } canctl1 = in_8(&regs->canctl1); if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) { setbits8(&regs->canctl0, MSCAN_SLPRQ); for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) { if (in_8(&regs->canctl1) & MSCAN_SLPAK) break; udelay(100); } /* * The mscan controller will fail to enter sleep mode, * while there are irregular activities on bus, like * somebody keeps retransmitting. This behavior is * undocumented and seems to differ between mscan built * in mpc5200b and mpc5200. We proceed in that case, * since otherwise the slprq will be kept set and the * controller will get stuck. NOTE: INITRQ or CSWAI * will abort all active transmit actions, if still * any, at once. */ if (i >= MSCAN_SET_MODE_RETRIES) netdev_dbg(dev, "device failed to enter sleep mode. " "We proceed anyhow.\n"); else priv->can.state = CAN_STATE_SLEEPING; } if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) { setbits8(&regs->canctl0, MSCAN_INITRQ); for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) { if (in_8(&regs->canctl1) & MSCAN_INITAK) break; } if (i >= MSCAN_SET_MODE_RETRIES) ret = -ENODEV; } if (!ret) priv->can.state = CAN_STATE_STOPPED; if (mode & MSCAN_CSWAI) setbits8(&regs->canctl0, MSCAN_CSWAI); } else { canctl1 = in_8(&regs->canctl1); if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) { clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ); for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) { canctl1 = in_8(&regs->canctl1); if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK))) break; } if (i >= MSCAN_SET_MODE_RETRIES) ret = -ENODEV; else priv->can.state = CAN_STATE_ERROR_ACTIVE; } } return ret; } static int mscan_start(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; u8 canrflg; int err; out_8(&regs->canrier, 0); INIT_LIST_HEAD(&priv->tx_head); priv->prev_buf_id = 0; priv->cur_pri = 0; priv->tx_active = 0; priv->shadow_canrier = 0; priv->flags = 0; if (priv->type == MSCAN_TYPE_MPC5121) { /* Clear pending 
bus-off condition */ if (in_8(&regs->canmisc) & MSCAN_BOHOLD) out_8(&regs->canmisc, MSCAN_BOHOLD); } err = mscan_set_mode(dev, MSCAN_NORMAL_MODE); if (err) return err; canrflg = in_8(&regs->canrflg); priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg), MSCAN_STATE_TX(canrflg))]; out_8(&regs->cantier, 0); /* Enable receive interrupts. */ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE); return 0; } static int mscan_restart(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); if (priv->type == MSCAN_TYPE_MPC5121) { struct mscan_regs __iomem *regs = priv->reg_base; priv->can.state = CAN_STATE_ERROR_ACTIVE; WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD), "bus-off state expected\n"); out_8(&regs->canmisc, MSCAN_BOHOLD); /* Re-enable receive interrupts. */ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE); } else { if (priv->can.state <= CAN_STATE_BUS_OFF) mscan_set_mode(dev, MSCAN_INIT_MODE); return mscan_start(dev); } return 0; } static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct can_frame *frame = (struct can_frame *)skb->data; struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; int i, rtr, buf_id; u32 can_id; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; out_8(&regs->cantier, 0); i = ~priv->tx_active & MSCAN_TXE; buf_id = ffs(i) - 1; switch (hweight8(i)) { case 0: netif_stop_queue(dev); netdev_err(dev, "Tx Ring full when queue awake!\n"); return NETDEV_TX_BUSY; case 1: /* * if buf_id < 3, then current frame will be send out of order, * since buffer with lower id have higher priority (hell..) 
*/ netif_stop_queue(dev); case 2: if (buf_id < priv->prev_buf_id) { priv->cur_pri++; if (priv->cur_pri == 0xff) { set_bit(F_TX_WAIT_ALL, &priv->flags); netif_stop_queue(dev); } } set_bit(F_TX_PROGRESS, &priv->flags); break; } priv->prev_buf_id = buf_id; out_8(&regs->cantbsel, i); rtr = frame->can_id & CAN_RTR_FLAG; /* RTR is always the lowest bit of interest, then IDs follow */ if (frame->can_id & CAN_EFF_FLAG) { can_id = (frame->can_id & CAN_EFF_MASK) << (MSCAN_EFF_RTR_SHIFT + 1); if (rtr) can_id |= 1 << MSCAN_EFF_RTR_SHIFT; out_be16(&regs->tx.idr3_2, can_id); can_id >>= 16; /* EFF_FLAGS are between the IDs :( */ can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0) | MSCAN_EFF_FLAGS; } else { can_id = (frame->can_id & CAN_SFF_MASK) << (MSCAN_SFF_RTR_SHIFT + 1); if (rtr) can_id |= 1 << MSCAN_SFF_RTR_SHIFT; } out_be16(&regs->tx.idr1_0, can_id); if (!rtr) { void __iomem *data = &regs->tx.dsr1_0; u16 *payload = (u16 *)frame->data; for (i = 0; i < frame->can_dlc / 2; i++) { out_be16(data, *payload++); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } /* write remaining byte if necessary */ if (frame->can_dlc & 1) out_8(data, frame->data[frame->can_dlc - 1]); } out_8(&regs->tx.dlr, frame->can_dlc); out_8(&regs->tx.tbpr, priv->cur_pri); /* Start transmission. */ out_8(&regs->cantflg, 1 << buf_id); if (!test_bit(F_TX_PROGRESS, &priv->flags)) dev->trans_start = jiffies; list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head); can_put_echo_skb(skb, dev, buf_id); /* Enable interrupt. 
*/ priv->tx_active |= 1 << buf_id; out_8(&regs->cantier, priv->tx_active); return NETDEV_TX_OK; } /* This function returns the old state to see where we came from */ static enum can_state check_set_state(struct net_device *dev, u8 canrflg) { struct mscan_priv *priv = netdev_priv(dev); enum can_state state, old_state = priv->can.state; if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) { state = state_map[max(MSCAN_STATE_RX(canrflg), MSCAN_STATE_TX(canrflg))]; priv->can.state = state; } return old_state; } static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; u32 can_id; int i; can_id = in_be16(&regs->rx.idr1_0); if (can_id & (1 << 3)) { frame->can_id = CAN_EFF_FLAG; can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2)); can_id = ((can_id & 0xffe00000) | ((can_id & 0x7ffff) << 2)) >> 2; } else { can_id >>= 4; frame->can_id = 0; } frame->can_id |= can_id >> 1; if (can_id & 1) frame->can_id |= CAN_RTR_FLAG; frame->can_dlc = get_can_dlc(in_8(&regs->rx.dlr) & 0xf); if (!(frame->can_id & CAN_RTR_FLAG)) { void __iomem *data = &regs->rx.dsr1_0; u16 *payload = (u16 *)frame->data; for (i = 0; i < frame->can_dlc / 2; i++) { *payload++ = in_be16(data); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } /* read remaining byte if necessary */ if (frame->can_dlc & 1) frame->data[frame->can_dlc - 1] = in_8(data); } out_8(&regs->canrflg, MSCAN_RXF); } static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, u8 canrflg) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; enum can_state old_state; netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg); frame->can_id = CAN_ERR_FLAG; if (canrflg & MSCAN_OVRIF) { frame->can_id |= CAN_ERR_CRTL; frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; } else { 
frame->data[1] = 0; } old_state = check_set_state(dev, canrflg); /* State changed */ if (old_state != priv->can.state) { switch (priv->can.state) { case CAN_STATE_ERROR_WARNING: frame->can_id |= CAN_ERR_CRTL; priv->can.can_stats.error_warning++; if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) < (canrflg & MSCAN_RSTAT_MSK)) frame->data[1] |= CAN_ERR_CRTL_RX_WARNING; if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) < (canrflg & MSCAN_TSTAT_MSK)) frame->data[1] |= CAN_ERR_CRTL_TX_WARNING; break; case CAN_STATE_ERROR_PASSIVE: frame->can_id |= CAN_ERR_CRTL; priv->can.can_stats.error_passive++; frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; break; case CAN_STATE_BUS_OFF: frame->can_id |= CAN_ERR_BUSOFF; /* * The MSCAN on the MPC5200 does recover from bus-off * automatically. To avoid that we stop the chip doing * a light-weight stop (we are in irq-context). */ if (priv->type != MSCAN_TYPE_MPC5121) { out_8(&regs->cantier, 0); out_8(&regs->canrier, 0); setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ); } can_bus_off(dev); break; default: break; } } priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; frame->can_dlc = CAN_ERR_DLC; out_8(&regs->canrflg, MSCAN_ERR_IF); } static int mscan_rx_poll(struct napi_struct *napi, int quota) { struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi); struct net_device *dev = napi->dev; struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; int npackets = 0; int ret = 1; struct sk_buff *skb; struct can_frame *frame; u8 canrflg; while (npackets < quota) { canrflg = in_8(&regs->canrflg); if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF))) break; skb = alloc_can_skb(dev, &frame); if (!skb) { if (printk_ratelimit()) netdev_notice(dev, "packet dropped\n"); stats->rx_dropped++; out_8(&regs->canrflg, canrflg); continue; } if (canrflg & MSCAN_RXF) mscan_get_rx_frame(dev, frame); else if (canrflg & MSCAN_ERR_IF) mscan_get_err_frame(dev, frame, canrflg); stats->rx_packets++; stats->rx_bytes += frame->can_dlc; 
npackets++; netif_receive_skb(skb); } if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) { napi_complete(&priv->napi); clear_bit(F_RX_PROGRESS, &priv->flags); if (priv->can.state < CAN_STATE_BUS_OFF) out_8(&regs->canrier, priv->shadow_canrier); ret = 0; } return ret; } static irqreturn_t mscan_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; u8 cantier, cantflg, canrflg; irqreturn_t ret = IRQ_NONE; cantier = in_8(&regs->cantier) & MSCAN_TXE; cantflg = in_8(&regs->cantflg) & cantier; if (cantier && cantflg) { struct list_head *tmp, *pos; list_for_each_safe(pos, tmp, &priv->tx_head) { struct tx_queue_entry *entry = list_entry(pos, struct tx_queue_entry, list); u8 mask = entry->mask; if (!(cantflg & mask)) continue; out_8(&regs->cantbsel, mask); stats->tx_bytes += in_8(&regs->tx.dlr); stats->tx_packets++; can_get_echo_skb(dev, entry->id); priv->tx_active &= ~mask; list_del(pos); } if (list_empty(&priv->tx_head)) { clear_bit(F_TX_WAIT_ALL, &priv->flags); clear_bit(F_TX_PROGRESS, &priv->flags); priv->cur_pri = 0; } else { dev->trans_start = jiffies; } if (!test_bit(F_TX_WAIT_ALL, &priv->flags)) netif_wake_queue(dev); out_8(&regs->cantier, priv->tx_active); ret = IRQ_HANDLED; } canrflg = in_8(&regs->canrflg); if ((canrflg & ~MSCAN_STAT_MSK) && !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) { if (canrflg & ~MSCAN_STAT_MSK) { priv->shadow_canrier = in_8(&regs->canrier); out_8(&regs->canrier, 0); napi_schedule(&priv->napi); ret = IRQ_HANDLED; } else { clear_bit(F_RX_PROGRESS, &priv->flags); } } return ret; } static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode) { struct mscan_priv *priv = netdev_priv(dev); int ret = 0; if (!priv->open_time) return -EINVAL; switch (mode) { case CAN_MODE_START: ret = mscan_restart(dev); if (ret) break; if (netif_queue_stopped(dev)) 
netif_wake_queue(dev); break; default: ret = -EOPNOTSUPP; break; } return ret; } static int mscan_do_set_bittiming(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; struct can_bittiming *bt = &priv->can.bittiming; u8 btr0, btr1; btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw); btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) | BTR1_SET_TSEG2(bt->phase_seg2) | BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)); netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); out_8(&regs->canbtr0, btr0); out_8(&regs->canbtr1, btr1); return 0; } static int mscan_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; bec->txerr = in_8(&regs->cantxerr); bec->rxerr = in_8(&regs->canrxerr); return 0; } static int mscan_open(struct net_device *dev) { int ret; struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; /* common open */ ret = open_candev(dev); if (ret) return ret; napi_enable(&priv->napi); ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev); if (ret < 0) { netdev_err(dev, "failed to attach interrupt\n"); goto exit_napi_disable; } priv->open_time = jiffies; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) setbits8(&regs->canctl1, MSCAN_LISTEN); else clrbits8(&regs->canctl1, MSCAN_LISTEN); ret = mscan_start(dev); if (ret) goto exit_free_irq; netif_start_queue(dev); return 0; exit_free_irq: priv->open_time = 0; free_irq(dev->irq, dev); exit_napi_disable: napi_disable(&priv->napi); close_candev(dev); return ret; } static int mscan_close(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; netif_stop_queue(dev); napi_disable(&priv->napi); out_8(&regs->cantier, 0); out_8(&regs->canrier, 0); mscan_set_mode(dev, MSCAN_INIT_MODE); close_candev(dev); 
free_irq(dev->irq, dev); priv->open_time = 0; return 0; } static const struct net_device_ops mscan_netdev_ops = { .ndo_open = mscan_open, .ndo_stop = mscan_close, .ndo_start_xmit = mscan_start_xmit, }; int register_mscandev(struct net_device *dev, int mscan_clksrc) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; u8 ctl1; ctl1 = in_8(&regs->canctl1); if (mscan_clksrc) ctl1 |= MSCAN_CLKSRC; else ctl1 &= ~MSCAN_CLKSRC; if (priv->type == MSCAN_TYPE_MPC5121) { priv->can.do_get_berr_counter = mscan_get_berr_counter; ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */ } ctl1 |= MSCAN_CANE; out_8(&regs->canctl1, ctl1); udelay(100); /* acceptance mask/acceptance code (accept everything) */ out_be16(&regs->canidar1_0, 0); out_be16(&regs->canidar3_2, 0); out_be16(&regs->canidar5_4, 0); out_be16(&regs->canidar7_6, 0); out_be16(&regs->canidmr1_0, 0xffff); out_be16(&regs->canidmr3_2, 0xffff); out_be16(&regs->canidmr5_4, 0xffff); out_be16(&regs->canidmr7_6, 0xffff); /* Two 32 bit Acceptance Filters */ out_8(&regs->canidac, MSCAN_AF_32BIT); mscan_set_mode(dev, MSCAN_INIT_MODE); return register_candev(dev); } void unregister_mscandev(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; mscan_set_mode(dev, MSCAN_INIT_MODE); clrbits8(&regs->canctl1, MSCAN_CANE); unregister_candev(dev); } struct net_device *alloc_mscandev(void) { struct net_device *dev; struct mscan_priv *priv; int i; dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX); if (!dev) return NULL; priv = netdev_priv(dev); dev->netdev_ops = &mscan_netdev_ops; dev->flags |= IFF_ECHO; /* we support local echo */ netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8); priv->can.bittiming_const = &mscan_bittiming_const; priv->can.do_set_bittiming = mscan_do_set_bittiming; priv->can.do_set_mode = mscan_do_set_mode; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY; for (i 
= 0; i < TX_QUEUE_SIZE; i++) { priv->tx_queue[i].id = i; priv->tx_queue[i].mask = 1 << i; } return dev; } MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CAN port driver for a MSCAN based chips");
gpl-2.0
sebirdman/m7_kernel_dev
drivers/hwmon/ds1621.c
4857
9218
/* * ds1621.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring * Christian W. Zuckschwerdt <zany@triq.net> 2000-11-23 * based on lm75.c by Frodo Looijaard <frodol@dds.nl> * Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with * the help of Jean Delvare <khali@linux-fr.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include "lm75.h" /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; /* Insmod parameters */ static int polarity = -1; module_param(polarity, int, 0); MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low"); /* Many DS1621 constants specified below */ /* Config register used for detection */ /* 7 6 5 4 3 2 1 0 */ /* |Done|THF |TLF |NVB | X | X |POL |1SHOT| */ #define DS1621_REG_CONFIG_NVB 0x10 #define DS1621_REG_CONFIG_POLARITY 0x02 #define DS1621_REG_CONFIG_1SHOT 0x01 #define DS1621_REG_CONFIG_DONE 0x80 /* The DS1621 registers */ static const u8 DS1621_REG_TEMP[3] = { 0xAA, /* input, word, RO */ 0xA2, /* min, word, RW */ 0xA1, /* 
max, word, RW */ }; #define DS1621_REG_CONF 0xAC /* byte, RW */ #define DS1621_COM_START 0xEE /* no data */ #define DS1621_COM_STOP 0x22 /* no data */ /* The DS1621 configuration register */ #define DS1621_ALARM_TEMP_HIGH 0x40 #define DS1621_ALARM_TEMP_LOW 0x20 /* Conversions */ #define ALARMS_FROM_REG(val) ((val) & \ (DS1621_ALARM_TEMP_HIGH | DS1621_ALARM_TEMP_LOW)) /* Each client has this additional data */ struct ds1621_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u16 temp[3]; /* Register values, word */ u8 conf; /* Register encoding, combined */ }; static void ds1621_init_client(struct i2c_client *client) { u8 conf, new_conf; new_conf = conf = i2c_smbus_read_byte_data(client, DS1621_REG_CONF); /* switch to continuous conversion mode */ new_conf &= ~DS1621_REG_CONFIG_1SHOT; /* setup output polarity */ if (polarity == 0) new_conf &= ~DS1621_REG_CONFIG_POLARITY; else if (polarity == 1) new_conf |= DS1621_REG_CONFIG_POLARITY; if (conf != new_conf) i2c_smbus_write_byte_data(client, DS1621_REG_CONF, new_conf); /* start conversion */ i2c_smbus_write_byte(client, DS1621_COM_START); } static struct ds1621_data *ds1621_update_client(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct ds1621_data *data = i2c_get_clientdata(client); u8 new_conf; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { int i; dev_dbg(&client->dev, "Starting ds1621 update\n"); data->conf = i2c_smbus_read_byte_data(client, DS1621_REG_CONF); for (i = 0; i < ARRAY_SIZE(data->temp); i++) data->temp[i] = i2c_smbus_read_word_swapped(client, DS1621_REG_TEMP[i]); /* reset alarms if necessary */ new_conf = data->conf; if (data->temp[0] > data->temp[1]) /* input > min */ new_conf &= ~DS1621_ALARM_TEMP_LOW; if (data->temp[0] < data->temp[2]) /* input < max */ new_conf &= ~DS1621_ALARM_TEMP_HIGH; if (data->conf != 
new_conf) i2c_smbus_write_byte_data(client, DS1621_REG_CONF, new_conf); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static ssize_t show_temp(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct ds1621_data *data = ds1621_update_client(dev); return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->temp[attr->index])); } static ssize_t set_temp(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct i2c_client *client = to_i2c_client(dev); struct ds1621_data *data = i2c_get_clientdata(client); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp[attr->index] = LM75_TEMP_TO_REG(val); i2c_smbus_write_word_swapped(client, DS1621_REG_TEMP[attr->index], data->temp[attr->index]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_alarms(struct device *dev, struct device_attribute *da, char *buf) { struct ds1621_data *data = ds1621_update_client(dev); return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->conf)); } static ssize_t show_alarm(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct ds1621_data *data = ds1621_update_client(dev); return sprintf(buf, "%d\n", !!(data->conf & attr->index)); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp, 1); static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp, 2); static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, DS1621_ALARM_TEMP_LOW); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, DS1621_ALARM_TEMP_HIGH); static struct attribute 
*ds1621_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &dev_attr_alarms.attr, NULL }; static const struct attribute_group ds1621_group = { .attrs = ds1621_attributes, }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int ds1621_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int conf, temp; int i; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_WRITE_BYTE)) return -ENODEV; /* * Now, we do the remaining detection. It is lousy. * * The NVB bit should be low if no EEPROM write has been requested * during the latest 10ms, which is highly improbable in our case. */ conf = i2c_smbus_read_byte_data(client, DS1621_REG_CONF); if (conf < 0 || conf & DS1621_REG_CONFIG_NVB) return -ENODEV; /* The 7 lowest bits of a temperature should always be 0. 
*/ for (i = 0; i < ARRAY_SIZE(DS1621_REG_TEMP); i++) { temp = i2c_smbus_read_word_data(client, DS1621_REG_TEMP[i]); if (temp < 0 || (temp & 0x7f00)) return -ENODEV; } strlcpy(info->type, "ds1621", I2C_NAME_SIZE); return 0; } static int ds1621_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ds1621_data *data; int err; data = kzalloc(sizeof(struct ds1621_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Initialize the DS1621 chip */ ds1621_init_client(client); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &ds1621_group); if (err) goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: sysfs_remove_group(&client->dev.kobj, &ds1621_group); exit_free: kfree(data); exit: return err; } static int ds1621_remove(struct i2c_client *client) { struct ds1621_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &ds1621_group); kfree(data); return 0; } static const struct i2c_device_id ds1621_id[] = { { "ds1621", 0 }, { "ds1625", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ds1621_id); /* This is the driver that will be inserted */ static struct i2c_driver ds1621_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "ds1621", }, .probe = ds1621_probe, .remove = ds1621_remove, .id_table = ds1621_id, .detect = ds1621_detect, .address_list = normal_i2c, }; module_i2c_driver(ds1621_driver); MODULE_AUTHOR("Christian W. Zuckschwerdt <zany@triq.net>"); MODULE_DESCRIPTION("DS1621 driver"); MODULE_LICENSE("GPL");
gpl-2.0
imang/gcore_kernel
drivers/hwmon/vt1211.c
4857
38600
/* * vt1211.c - driver for the VIA VT1211 Super-I/O chip integrated hardware * monitoring features * Copyright (C) 2006 Juerg Haefliger <juergh@gmail.com> * * This driver is based on the driver for kernel 2.4 by Mark D. Studebaker * and its port to kernel 2.6 by Lars Ekman. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/ioport.h> #include <linux/acpi.h> #include <linux/io.h> static int uch_config = -1; module_param(uch_config, int, 0); MODULE_PARM_DESC(uch_config, "Initialize the universal channel configuration"); static int int_mode = -1; module_param(int_mode, int, 0); MODULE_PARM_DESC(int_mode, "Force the temperature interrupt mode"); static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); static struct platform_device *pdev; #define DRVNAME "vt1211" /* --------------------------------------------------------------------- * Registers * * The sensors are defined as follows. 
* * Sensor Voltage Mode Temp Mode Notes (from the datasheet) * -------- ------------ --------- -------------------------- * Reading 1 temp1 Intel thermal diode * Reading 3 temp2 Internal thermal diode * UCH1/Reading2 in0 temp3 NTC type thermistor * UCH2 in1 temp4 +2.5V * UCH3 in2 temp5 VccP * UCH4 in3 temp6 +5V * UCH5 in4 temp7 +12V * 3.3V in5 Internal VDD (+3.3V) * * --------------------------------------------------------------------- */ /* Voltages (in) numbered 0-5 (ix) */ #define VT1211_REG_IN(ix) (0x21 + (ix)) #define VT1211_REG_IN_MIN(ix) ((ix) == 0 ? 0x3e : 0x2a + 2 * (ix)) #define VT1211_REG_IN_MAX(ix) ((ix) == 0 ? 0x3d : 0x29 + 2 * (ix)) /* Temperatures (temp) numbered 0-6 (ix) */ static u8 regtemp[] = {0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25}; static u8 regtempmax[] = {0x39, 0x1d, 0x3d, 0x2b, 0x2d, 0x2f, 0x31}; static u8 regtemphyst[] = {0x3a, 0x1e, 0x3e, 0x2c, 0x2e, 0x30, 0x32}; /* Fans numbered 0-1 (ix) */ #define VT1211_REG_FAN(ix) (0x29 + (ix)) #define VT1211_REG_FAN_MIN(ix) (0x3b + (ix)) #define VT1211_REG_FAN_DIV 0x47 /* PWMs numbered 0-1 (ix) */ /* Auto points numbered 0-3 (ap) */ #define VT1211_REG_PWM(ix) (0x60 + (ix)) #define VT1211_REG_PWM_CLK 0x50 #define VT1211_REG_PWM_CTL 0x51 #define VT1211_REG_PWM_AUTO_TEMP(ap) (0x55 - (ap)) #define VT1211_REG_PWM_AUTO_PWM(ix, ap) (0x58 + 2 * (ix) - (ap)) /* Miscellaneous registers */ #define VT1211_REG_CONFIG 0x40 #define VT1211_REG_ALARM1 0x41 #define VT1211_REG_ALARM2 0x42 #define VT1211_REG_VID 0x45 #define VT1211_REG_UCH_CONFIG 0x4a #define VT1211_REG_TEMP1_CONFIG 0x4b #define VT1211_REG_TEMP2_CONFIG 0x4c /* In, temp & fan alarm bits */ static const u8 bitalarmin[] = {11, 0, 1, 3, 8, 2, 9}; static const u8 bitalarmtemp[] = {4, 15, 11, 0, 1, 3, 8}; static const u8 bitalarmfan[] = {6, 7}; /* --------------------------------------------------------------------- * Data structures and manipulation thereof * --------------------------------------------------------------------- */ struct vt1211_data { 
unsigned short addr; const char *name; struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ /* Register values */ u8 in[6]; u8 in_max[6]; u8 in_min[6]; u8 temp[7]; u8 temp_max[7]; u8 temp_hyst[7]; u8 fan[2]; u8 fan_min[2]; u8 fan_div[2]; u8 fan_ctl; u8 pwm[2]; u8 pwm_ctl[2]; u8 pwm_clk; u8 pwm_auto_temp[4]; u8 pwm_auto_pwm[2][4]; u8 vid; /* Read once at init time */ u8 vrm; u8 uch_config; /* Read once at init time */ u16 alarms; }; /* ix = [0-5] */ #define ISVOLT(ix, uch_config) ((ix) > 4 ? 1 : \ !(((uch_config) >> ((ix) + 2)) & 1)) /* ix = [0-6] */ #define ISTEMP(ix, uch_config) ((ix) < 2 ? 1 : \ ((uch_config) >> (ix)) & 1) /* * in5 (ix = 5) is special. It's the internal 3.3V so it's scaled in the * driver according to the VT1211 BIOS porting guide */ #define IN_FROM_REG(ix, reg) ((reg) < 3 ? 0 : (ix) == 5 ? \ (((reg) - 3) * 15882 + 479) / 958 : \ (((reg) - 3) * 10000 + 479) / 958) #define IN_TO_REG(ix, val) (SENSORS_LIMIT((ix) == 5 ? \ ((val) * 958 + 7941) / 15882 + 3 : \ ((val) * 958 + 5000) / 10000 + 3, 0, 255)) /* * temp1 (ix = 0) is an intel thermal diode which is scaled in user space. * temp2 (ix = 1) is the internal temp diode so it's scaled in the driver * according to some measurements that I took on an EPIA M10000. * temp3-7 are thermistor based so the driver returns the voltage measured at * the pin (range 0V - 2.2V). */ #define TEMP_FROM_REG(ix, reg) ((ix) == 0 ? (reg) * 1000 : \ (ix) == 1 ? (reg) < 51 ? 0 : \ ((reg) - 51) * 1000 : \ ((253 - (reg)) * 2200 + 105) / 210) #define TEMP_TO_REG(ix, val) SENSORS_LIMIT( \ ((ix) == 0 ? ((val) + 500) / 1000 : \ (ix) == 1 ? ((val) + 500) / 1000 + 51 : \ 253 - ((val) * 210 + 1100) / 2200), 0, 255) #define DIV_FROM_REG(reg) (1 << (reg)) #define RPM_FROM_REG(reg, div) (((reg) == 0) || ((reg) == 255) ? 0 : \ 1310720 / (reg) / DIV_FROM_REG(div)) #define RPM_TO_REG(val, div) ((val) == 0 ? 
255 : \ SENSORS_LIMIT((1310720 / (val) / \ DIV_FROM_REG(div)), 1, 254)) /* --------------------------------------------------------------------- * Super-I/O constants and functions * --------------------------------------------------------------------- */ /* * Configuration index port registers * The vt1211 can live at 2 different addresses so we need to probe both */ #define SIO_REG_CIP1 0x2e #define SIO_REG_CIP2 0x4e /* Configuration registers */ #define SIO_VT1211_LDN 0x07 /* logical device number */ #define SIO_VT1211_DEVID 0x20 /* device ID */ #define SIO_VT1211_DEVREV 0x21 /* device revision */ #define SIO_VT1211_ACTIVE 0x30 /* HW monitor active */ #define SIO_VT1211_BADDR 0x60 /* base I/O address */ #define SIO_VT1211_ID 0x3c /* VT1211 device ID */ /* VT1211 logical device numbers */ #define SIO_VT1211_LDN_HWMON 0x0b /* HW monitor */ static inline void superio_outb(int sio_cip, int reg, int val) { outb(reg, sio_cip); outb(val, sio_cip + 1); } static inline int superio_inb(int sio_cip, int reg) { outb(reg, sio_cip); return inb(sio_cip + 1); } static inline void superio_select(int sio_cip, int ldn) { outb(SIO_VT1211_LDN, sio_cip); outb(ldn, sio_cip + 1); } static inline void superio_enter(int sio_cip) { outb(0x87, sio_cip); outb(0x87, sio_cip); } static inline void superio_exit(int sio_cip) { outb(0xaa, sio_cip); } /* --------------------------------------------------------------------- * Device I/O access * --------------------------------------------------------------------- */ static inline u8 vt1211_read8(struct vt1211_data *data, u8 reg) { return inb(data->addr + reg); } static inline void vt1211_write8(struct vt1211_data *data, u8 reg, u8 val) { outb(val, data->addr + reg); } static struct vt1211_data *vt1211_update_device(struct device *dev) { struct vt1211_data *data = dev_get_drvdata(dev); int ix, val; mutex_lock(&data->update_lock); /* registers cache is refreshed after 1 second */ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { 
/* read VID */ data->vid = vt1211_read8(data, VT1211_REG_VID) & 0x1f; /* voltage (in) registers */ for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) { if (ISVOLT(ix, data->uch_config)) { data->in[ix] = vt1211_read8(data, VT1211_REG_IN(ix)); data->in_min[ix] = vt1211_read8(data, VT1211_REG_IN_MIN(ix)); data->in_max[ix] = vt1211_read8(data, VT1211_REG_IN_MAX(ix)); } } /* temp registers */ for (ix = 0; ix < ARRAY_SIZE(data->temp); ix++) { if (ISTEMP(ix, data->uch_config)) { data->temp[ix] = vt1211_read8(data, regtemp[ix]); data->temp_max[ix] = vt1211_read8(data, regtempmax[ix]); data->temp_hyst[ix] = vt1211_read8(data, regtemphyst[ix]); } } /* fan & pwm registers */ for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) { data->fan[ix] = vt1211_read8(data, VT1211_REG_FAN(ix)); data->fan_min[ix] = vt1211_read8(data, VT1211_REG_FAN_MIN(ix)); data->pwm[ix] = vt1211_read8(data, VT1211_REG_PWM(ix)); } val = vt1211_read8(data, VT1211_REG_FAN_DIV); data->fan_div[0] = (val >> 4) & 3; data->fan_div[1] = (val >> 6) & 3; data->fan_ctl = val & 0xf; val = vt1211_read8(data, VT1211_REG_PWM_CTL); data->pwm_ctl[0] = val & 0xf; data->pwm_ctl[1] = (val >> 4) & 0xf; data->pwm_clk = vt1211_read8(data, VT1211_REG_PWM_CLK); /* pwm & temp auto point registers */ data->pwm_auto_pwm[0][1] = vt1211_read8(data, VT1211_REG_PWM_AUTO_PWM(0, 1)); data->pwm_auto_pwm[0][2] = vt1211_read8(data, VT1211_REG_PWM_AUTO_PWM(0, 2)); data->pwm_auto_pwm[1][1] = vt1211_read8(data, VT1211_REG_PWM_AUTO_PWM(1, 1)); data->pwm_auto_pwm[1][2] = vt1211_read8(data, VT1211_REG_PWM_AUTO_PWM(1, 2)); for (ix = 0; ix < ARRAY_SIZE(data->pwm_auto_temp); ix++) { data->pwm_auto_temp[ix] = vt1211_read8(data, VT1211_REG_PWM_AUTO_TEMP(ix)); } /* alarm registers */ data->alarms = (vt1211_read8(data, VT1211_REG_ALARM2) << 8) | vt1211_read8(data, VT1211_REG_ALARM1); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* --------------------------------------------------------------------- * Voltage 
sysfs interfaces * ix = [0-5] * --------------------------------------------------------------------- */ #define SHOW_IN_INPUT 0 #define SHOW_SET_IN_MIN 1 #define SHOW_SET_IN_MAX 2 #define SHOW_IN_ALARM 3 static ssize_t show_in(struct device *dev, struct device_attribute *attr, char *buf) { struct vt1211_data *data = vt1211_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; int res; switch (fn) { case SHOW_IN_INPUT: res = IN_FROM_REG(ix, data->in[ix]); break; case SHOW_SET_IN_MIN: res = IN_FROM_REG(ix, data->in_min[ix]); break; case SHOW_SET_IN_MAX: res = IN_FROM_REG(ix, data->in_max[ix]); break; case SHOW_IN_ALARM: res = (data->alarms >> bitalarmin[ix]) & 1; break; default: res = 0; dev_dbg(dev, "Unknown attr fetch (%d)\n", fn); } return sprintf(buf, "%d\n", res); } static ssize_t set_in(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt1211_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); switch (fn) { case SHOW_SET_IN_MIN: data->in_min[ix] = IN_TO_REG(ix, val); vt1211_write8(data, VT1211_REG_IN_MIN(ix), data->in_min[ix]); break; case SHOW_SET_IN_MAX: data->in_max[ix] = IN_TO_REG(ix, val); vt1211_write8(data, VT1211_REG_IN_MAX(ix), data->in_max[ix]); break; default: dev_dbg(dev, "Unknown attr fetch (%d)\n", fn); } mutex_unlock(&data->update_lock); return count; } /* --------------------------------------------------------------------- * Temperature sysfs interfaces * ix = [0-6] * --------------------------------------------------------------------- */ #define SHOW_TEMP_INPUT 0 #define SHOW_SET_TEMP_MAX 1 #define SHOW_SET_TEMP_MAX_HYST 2 #define SHOW_TEMP_ALARM 3 static ssize_t show_temp(struct 
device *dev, struct device_attribute *attr, char *buf) { struct vt1211_data *data = vt1211_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; int res; switch (fn) { case SHOW_TEMP_INPUT: res = TEMP_FROM_REG(ix, data->temp[ix]); break; case SHOW_SET_TEMP_MAX: res = TEMP_FROM_REG(ix, data->temp_max[ix]); break; case SHOW_SET_TEMP_MAX_HYST: res = TEMP_FROM_REG(ix, data->temp_hyst[ix]); break; case SHOW_TEMP_ALARM: res = (data->alarms >> bitalarmtemp[ix]) & 1; break; default: res = 0; dev_dbg(dev, "Unknown attr fetch (%d)\n", fn); } return sprintf(buf, "%d\n", res); } static ssize_t set_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt1211_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); switch (fn) { case SHOW_SET_TEMP_MAX: data->temp_max[ix] = TEMP_TO_REG(ix, val); vt1211_write8(data, regtempmax[ix], data->temp_max[ix]); break; case SHOW_SET_TEMP_MAX_HYST: data->temp_hyst[ix] = TEMP_TO_REG(ix, val); vt1211_write8(data, regtemphyst[ix], data->temp_hyst[ix]); break; default: dev_dbg(dev, "Unknown attr fetch (%d)\n", fn); } mutex_unlock(&data->update_lock); return count; } /* --------------------------------------------------------------------- * Fan sysfs interfaces * ix = [0-1] * --------------------------------------------------------------------- */ #define SHOW_FAN_INPUT 0 #define SHOW_SET_FAN_MIN 1 #define SHOW_SET_FAN_DIV 2 #define SHOW_FAN_ALARM 3 static ssize_t show_fan(struct device *dev, struct device_attribute *attr, char *buf) { struct vt1211_data *data = vt1211_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = 
sensor_attr_2->index; int fn = sensor_attr_2->nr; int res; switch (fn) { case SHOW_FAN_INPUT: res = RPM_FROM_REG(data->fan[ix], data->fan_div[ix]); break; case SHOW_SET_FAN_MIN: res = RPM_FROM_REG(data->fan_min[ix], data->fan_div[ix]); break; case SHOW_SET_FAN_DIV: res = DIV_FROM_REG(data->fan_div[ix]); break; case SHOW_FAN_ALARM: res = (data->alarms >> bitalarmfan[ix]) & 1; break; default: res = 0; dev_dbg(dev, "Unknown attr fetch (%d)\n", fn); } return sprintf(buf, "%d\n", res); } static ssize_t set_fan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt1211_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; int reg; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); /* sync the data cache */ reg = vt1211_read8(data, VT1211_REG_FAN_DIV); data->fan_div[0] = (reg >> 4) & 3; data->fan_div[1] = (reg >> 6) & 3; data->fan_ctl = reg & 0xf; switch (fn) { case SHOW_SET_FAN_MIN: data->fan_min[ix] = RPM_TO_REG(val, data->fan_div[ix]); vt1211_write8(data, VT1211_REG_FAN_MIN(ix), data->fan_min[ix]); break; case SHOW_SET_FAN_DIV: switch (val) { case 1: data->fan_div[ix] = 0; break; case 2: data->fan_div[ix] = 1; break; case 4: data->fan_div[ix] = 2; break; case 8: data->fan_div[ix] = 3; break; default: count = -EINVAL; dev_warn(dev, "fan div value %ld not supported. 
" "Choose one of 1, 2, 4, or 8.\n", val); goto EXIT; } vt1211_write8(data, VT1211_REG_FAN_DIV, ((data->fan_div[1] << 6) | (data->fan_div[0] << 4) | data->fan_ctl)); break; default: dev_dbg(dev, "Unknown attr fetch (%d)\n", fn); } EXIT: mutex_unlock(&data->update_lock); return count; } /* --------------------------------------------------------------------- * PWM sysfs interfaces * ix = [0-1] * --------------------------------------------------------------------- */ #define SHOW_PWM 0 #define SHOW_SET_PWM_ENABLE 1 #define SHOW_SET_PWM_FREQ 2 #define SHOW_SET_PWM_AUTO_CHANNELS_TEMP 3 static ssize_t show_pwm(struct device *dev, struct device_attribute *attr, char *buf) { struct vt1211_data *data = vt1211_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; int res; switch (fn) { case SHOW_PWM: res = data->pwm[ix]; break; case SHOW_SET_PWM_ENABLE: res = ((data->pwm_ctl[ix] >> 3) & 1) ? 2 : 0; break; case SHOW_SET_PWM_FREQ: res = 90000 >> (data->pwm_clk & 7); break; case SHOW_SET_PWM_AUTO_CHANNELS_TEMP: res = (data->pwm_ctl[ix] & 7) + 1; break; default: res = 0; dev_dbg(dev, "Unknown attr fetch (%d)\n", fn); } return sprintf(buf, "%d\n", res); } static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt1211_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int fn = sensor_attr_2->nr; int tmp, reg; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); switch (fn) { case SHOW_SET_PWM_ENABLE: /* sync the data cache */ reg = vt1211_read8(data, VT1211_REG_FAN_DIV); data->fan_div[0] = (reg >> 4) & 3; data->fan_div[1] = (reg >> 6) & 3; data->fan_ctl = reg & 0xf; reg = vt1211_read8(data, VT1211_REG_PWM_CTL); data->pwm_ctl[0] = reg & 0xf; data->pwm_ctl[1] = (reg >> 4) & 
0xf; switch (val) { case 0: data->pwm_ctl[ix] &= 7; /* * disable SmartGuardian if both PWM outputs are * disabled */ if ((data->pwm_ctl[ix ^ 1] & 1) == 0) data->fan_ctl &= 0xe; break; case 2: data->pwm_ctl[ix] |= 8; data->fan_ctl |= 1; break; default: count = -EINVAL; dev_warn(dev, "pwm mode %ld not supported. " "Choose one of 0 or 2.\n", val); goto EXIT; } vt1211_write8(data, VT1211_REG_PWM_CTL, ((data->pwm_ctl[1] << 4) | data->pwm_ctl[0])); vt1211_write8(data, VT1211_REG_FAN_DIV, ((data->fan_div[1] << 6) | (data->fan_div[0] << 4) | data->fan_ctl)); break; case SHOW_SET_PWM_FREQ: val = 135000 / SENSORS_LIMIT(val, 135000 >> 7, 135000); /* calculate tmp = log2(val) */ tmp = 0; for (val >>= 1; val > 0; val >>= 1) tmp++; /* sync the data cache */ reg = vt1211_read8(data, VT1211_REG_PWM_CLK); data->pwm_clk = (reg & 0xf8) | tmp; vt1211_write8(data, VT1211_REG_PWM_CLK, data->pwm_clk); break; case SHOW_SET_PWM_AUTO_CHANNELS_TEMP: if (val < 1 || val > 7) { count = -EINVAL; dev_warn(dev, "temp channel %ld not supported. " "Choose a value between 1 and 7.\n", val); goto EXIT; } if (!ISTEMP(val - 1, data->uch_config)) { count = -EINVAL; dev_warn(dev, "temp channel %ld is not available.\n", val); goto EXIT; } /* sync the data cache */ reg = vt1211_read8(data, VT1211_REG_PWM_CTL); data->pwm_ctl[0] = reg & 0xf; data->pwm_ctl[1] = (reg >> 4) & 0xf; data->pwm_ctl[ix] = (data->pwm_ctl[ix] & 8) | (val - 1); vt1211_write8(data, VT1211_REG_PWM_CTL, ((data->pwm_ctl[1] << 4) | data->pwm_ctl[0])); break; default: dev_dbg(dev, "Unknown attr fetch (%d)\n", fn); } EXIT: mutex_unlock(&data->update_lock); return count; } /* --------------------------------------------------------------------- * PWM auto point definitions * ix = [0-1] * ap = [0-3] * --------------------------------------------------------------------- */ /* * pwm[ix+1]_auto_point[ap+1]_temp mapping table: * Note that there is only a single set of temp auto points that controls both * PWM controllers. 
We still create 2 sets of sysfs files to make it look * more consistent even though they map to the same registers. * * ix ap : description * ------------------- * 0 0 : pwm1/2 off temperature (pwm_auto_temp[0]) * 0 1 : pwm1/2 low speed temperature (pwm_auto_temp[1]) * 0 2 : pwm1/2 high speed temperature (pwm_auto_temp[2]) * 0 3 : pwm1/2 full speed temperature (pwm_auto_temp[3]) * 1 0 : pwm1/2 off temperature (pwm_auto_temp[0]) * 1 1 : pwm1/2 low speed temperature (pwm_auto_temp[1]) * 1 2 : pwm1/2 high speed temperature (pwm_auto_temp[2]) * 1 3 : pwm1/2 full speed temperature (pwm_auto_temp[3]) */ static ssize_t show_pwm_auto_point_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct vt1211_data *data = vt1211_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int ap = sensor_attr_2->nr; return sprintf(buf, "%d\n", TEMP_FROM_REG(data->pwm_ctl[ix] & 7, data->pwm_auto_temp[ap])); } static ssize_t set_pwm_auto_point_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt1211_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int ap = sensor_attr_2->nr; int reg; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); /* sync the data cache */ reg = vt1211_read8(data, VT1211_REG_PWM_CTL); data->pwm_ctl[0] = reg & 0xf; data->pwm_ctl[1] = (reg >> 4) & 0xf; data->pwm_auto_temp[ap] = TEMP_TO_REG(data->pwm_ctl[ix] & 7, val); vt1211_write8(data, VT1211_REG_PWM_AUTO_TEMP(ap), data->pwm_auto_temp[ap]); mutex_unlock(&data->update_lock); return count; } /* * pwm[ix+1]_auto_point[ap+1]_pwm mapping table: * Note that the PWM auto points 0 & 3 are hard-wired in the VT1211 and can't * be changed. 
* * ix ap : description * ------------------- * 0 0 : pwm1 off (pwm_auto_pwm[0][0], hard-wired to 0) * 0 1 : pwm1 low speed duty cycle (pwm_auto_pwm[0][1]) * 0 2 : pwm1 high speed duty cycle (pwm_auto_pwm[0][2]) * 0 3 : pwm1 full speed (pwm_auto_pwm[0][3], hard-wired to 255) * 1 0 : pwm2 off (pwm_auto_pwm[1][0], hard-wired to 0) * 1 1 : pwm2 low speed duty cycle (pwm_auto_pwm[1][1]) * 1 2 : pwm2 high speed duty cycle (pwm_auto_pwm[1][2]) * 1 3 : pwm2 full speed (pwm_auto_pwm[1][3], hard-wired to 255) */ static ssize_t show_pwm_auto_point_pwm(struct device *dev, struct device_attribute *attr, char *buf) { struct vt1211_data *data = vt1211_update_device(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int ap = sensor_attr_2->nr; return sprintf(buf, "%d\n", data->pwm_auto_pwm[ix][ap]); } static ssize_t set_pwm_auto_point_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt1211_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr_2 = to_sensor_dev_attr_2(attr); int ix = sensor_attr_2->index; int ap = sensor_attr_2->nr; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->pwm_auto_pwm[ix][ap] = SENSORS_LIMIT(val, 0, 255); vt1211_write8(data, VT1211_REG_PWM_AUTO_PWM(ix, ap), data->pwm_auto_pwm[ix][ap]); mutex_unlock(&data->update_lock); return count; } /* --------------------------------------------------------------------- * Miscellaneous sysfs interfaces (VRM, VID, name, and (legacy) alarms) * --------------------------------------------------------------------- */ static ssize_t show_vrm(struct device *dev, struct device_attribute *attr, char *buf) { struct vt1211_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", data->vrm); } static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt1211_data 
*data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	/* Remember the VRM version; used by show_vid() to decode the VID pins */
	data->vrm = val;

	return count;
}

/* Report the decoded CPU core voltage (cpu0_vid attribute) */
static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct vt1211_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}

/* Report the driver name (name attribute, required by libsensors) */
static ssize_t show_name(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct vt1211_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", data->name);
}

/* Report the raw combined alarm bitvector (alarms attribute) */
static ssize_t show_alarms(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct vt1211_data *data = vt1211_update_device(dev);

	return sprintf(buf, "%d\n", data->alarms);
}

/* ---------------------------------------------------------------------
 * Device attribute structs
 * --------------------------------------------------------------------- */

/* 4 sysfs nodes (input/min/max/alarm) per voltage input 'ix' */
#define SENSOR_ATTR_IN(ix) \
{	SENSOR_ATTR_2(in##ix##_input, S_IRUGO, \
		show_in, NULL, SHOW_IN_INPUT, ix), \
	SENSOR_ATTR_2(in##ix##_min, S_IRUGO | S_IWUSR, \
		show_in, set_in, SHOW_SET_IN_MIN, ix), \
	SENSOR_ATTR_2(in##ix##_max, S_IRUGO | S_IWUSR, \
		show_in, set_in, SHOW_SET_IN_MAX, ix), \
	SENSOR_ATTR_2(in##ix##_alarm, S_IRUGO, \
		show_in, NULL, SHOW_IN_ALARM, ix) \
}

static struct sensor_device_attribute_2 vt1211_sysfs_in[][4] = {
	SENSOR_ATTR_IN(0),
	SENSOR_ATTR_IN(1),
	SENSOR_ATTR_IN(2),
	SENSOR_ATTR_IN(3),
	SENSOR_ATTR_IN(4),
	SENSOR_ATTR_IN(5)
};

#define IN_UNIT_ATTRS(X) \
{	&vt1211_sysfs_in[X][0].dev_attr.attr, \
	&vt1211_sysfs_in[X][1].dev_attr.attr, \
	&vt1211_sysfs_in[X][2].dev_attr.attr, \
	&vt1211_sysfs_in[X][3].dev_attr.attr, \
	NULL \
}

static struct attribute *vt1211_in_attr[][5] = {
	IN_UNIT_ATTRS(0),
	IN_UNIT_ATTRS(1),
	IN_UNIT_ATTRS(2),
	IN_UNIT_ATTRS(3),
	IN_UNIT_ATTRS(4),
	IN_UNIT_ATTRS(5)
};

/* One attribute group per voltage input, registered conditionally in probe */
static const struct attribute_group vt1211_in_attr_group[] = {
	{ .attrs = vt1211_in_attr[0] },
	{ .attrs = vt1211_in_attr[1] },
	{ .attrs = vt1211_in_attr[2] },
	{ .attrs = vt1211_in_attr[3] },
	{ .attrs = vt1211_in_attr[4] },
	{ .attrs = vt1211_in_attr[5] }
};

/*
 * 4 sysfs nodes per temperature channel 'ix'. Note the ix-1: sysfs names
 * are 1-based while the driver's internal channel index is 0-based.
 */
#define SENSOR_ATTR_TEMP(ix) \
{	SENSOR_ATTR_2(temp##ix##_input, S_IRUGO, \
		show_temp, NULL, SHOW_TEMP_INPUT, ix-1), \
	SENSOR_ATTR_2(temp##ix##_max, S_IRUGO | S_IWUSR, \
		show_temp, set_temp, SHOW_SET_TEMP_MAX, ix-1), \
	SENSOR_ATTR_2(temp##ix##_max_hyst, S_IRUGO | S_IWUSR, \
		show_temp, set_temp, SHOW_SET_TEMP_MAX_HYST, ix-1), \
	SENSOR_ATTR_2(temp##ix##_alarm, S_IRUGO, \
		show_temp, NULL, SHOW_TEMP_ALARM, ix-1) \
}

static struct sensor_device_attribute_2 vt1211_sysfs_temp[][4] = {
	SENSOR_ATTR_TEMP(1),
	SENSOR_ATTR_TEMP(2),
	SENSOR_ATTR_TEMP(3),
	SENSOR_ATTR_TEMP(4),
	SENSOR_ATTR_TEMP(5),
	SENSOR_ATTR_TEMP(6),
	SENSOR_ATTR_TEMP(7),
};

#define TEMP_UNIT_ATTRS(X) \
{	&vt1211_sysfs_temp[X][0].dev_attr.attr, \
	&vt1211_sysfs_temp[X][1].dev_attr.attr, \
	&vt1211_sysfs_temp[X][2].dev_attr.attr, \
	&vt1211_sysfs_temp[X][3].dev_attr.attr, \
	NULL \
}

static struct attribute *vt1211_temp_attr[][5] = {
	TEMP_UNIT_ATTRS(0),
	TEMP_UNIT_ATTRS(1),
	TEMP_UNIT_ATTRS(2),
	TEMP_UNIT_ATTRS(3),
	TEMP_UNIT_ATTRS(4),
	TEMP_UNIT_ATTRS(5),
	TEMP_UNIT_ATTRS(6)
};

/* One attribute group per temperature channel, registered conditionally */
static const struct attribute_group vt1211_temp_attr_group[] = {
	{ .attrs = vt1211_temp_attr[0] },
	{ .attrs = vt1211_temp_attr[1] },
	{ .attrs = vt1211_temp_attr[2] },
	{ .attrs = vt1211_temp_attr[3] },
	{ .attrs = vt1211_temp_attr[4] },
	{ .attrs = vt1211_temp_attr[5] },
	{ .attrs = vt1211_temp_attr[6] }
};

/* Fan and PWM attributes; these are registered unconditionally in probe */
#define SENSOR_ATTR_FAN(ix) \
	SENSOR_ATTR_2(fan##ix##_input, S_IRUGO, \
		show_fan, NULL, SHOW_FAN_INPUT, ix-1), \
	SENSOR_ATTR_2(fan##ix##_min, S_IRUGO | S_IWUSR, \
		show_fan, set_fan, SHOW_SET_FAN_MIN, ix-1), \
	SENSOR_ATTR_2(fan##ix##_div, S_IRUGO | S_IWUSR, \
		show_fan, set_fan, SHOW_SET_FAN_DIV, ix-1), \
	SENSOR_ATTR_2(fan##ix##_alarm, S_IRUGO, \
		show_fan, NULL, SHOW_FAN_ALARM, ix-1)

#define SENSOR_ATTR_PWM(ix) \
	SENSOR_ATTR_2(pwm##ix, S_IRUGO, \
		show_pwm, NULL, SHOW_PWM, ix-1), \
	SENSOR_ATTR_2(pwm##ix##_enable, S_IRUGO | S_IWUSR, \
		show_pwm, set_pwm, SHOW_SET_PWM_ENABLE, ix-1), \
	SENSOR_ATTR_2(pwm##ix##_auto_channels_temp, S_IRUGO | S_IWUSR, \
		show_pwm, set_pwm, SHOW_SET_PWM_AUTO_CHANNELS_TEMP, ix-1)

#define SENSOR_ATTR_PWM_FREQ(ix) \
	SENSOR_ATTR_2(pwm##ix##_freq, S_IRUGO | S_IWUSR, \
		show_pwm, set_pwm, SHOW_SET_PWM_FREQ, ix-1)

/* Read-only variant: pwm2 shares pwm1's frequency setting */
#define SENSOR_ATTR_PWM_FREQ_RO(ix) \
	SENSOR_ATTR_2(pwm##ix##_freq, S_IRUGO, \
		show_pwm, NULL, SHOW_SET_PWM_FREQ, ix-1)

#define SENSOR_ATTR_PWM_AUTO_POINT_TEMP(ix, ap) \
	SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_temp, S_IRUGO | S_IWUSR, \
		show_pwm_auto_point_temp, set_pwm_auto_point_temp, \
		ap-1, ix-1)

#define SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(ix, ap) \
	SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_temp, S_IRUGO, \
		show_pwm_auto_point_temp, NULL, \
		ap-1, ix-1)

#define SENSOR_ATTR_PWM_AUTO_POINT_PWM(ix, ap) \
	SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_pwm, S_IRUGO | S_IWUSR, \
		show_pwm_auto_point_pwm, set_pwm_auto_point_pwm, \
		ap-1, ix-1)

#define SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(ix, ap) \
	SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_pwm, S_IRUGO, \
		show_pwm_auto_point_pwm, NULL, \
		ap-1, ix-1)

static struct sensor_device_attribute_2 vt1211_sysfs_fan_pwm[] = {
	SENSOR_ATTR_FAN(1),
	SENSOR_ATTR_FAN(2),
	SENSOR_ATTR_PWM(1),
	SENSOR_ATTR_PWM(2),
	SENSOR_ATTR_PWM_FREQ(1),
	SENSOR_ATTR_PWM_FREQ_RO(2),
	SENSOR_ATTR_PWM_AUTO_POINT_TEMP(1, 1),
	SENSOR_ATTR_PWM_AUTO_POINT_TEMP(1, 2),
	SENSOR_ATTR_PWM_AUTO_POINT_TEMP(1, 3),
	SENSOR_ATTR_PWM_AUTO_POINT_TEMP(1, 4),
	SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(2, 1),
	SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(2, 2),
	SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(2, 3),
	SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(2, 4),
	SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(1, 1),
	SENSOR_ATTR_PWM_AUTO_POINT_PWM(1, 2),
	SENSOR_ATTR_PWM_AUTO_POINT_PWM(1, 3),
	SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(1, 4),
	SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(2, 1),
	SENSOR_ATTR_PWM_AUTO_POINT_PWM(2, 2),
	SENSOR_ATTR_PWM_AUTO_POINT_PWM(2, 3),
	SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(2, 4),
};

/* Miscellaneous (non-sensor) attributes */
static struct device_attribute vt1211_sysfs_misc[] = {
	__ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm),
	__ATTR(cpu0_vid, S_IRUGO, show_vid, NULL),
	__ATTR(name, S_IRUGO, show_name, NULL),
	__ATTR(alarms, S_IRUGO, show_alarms, NULL),
};

/* ---------------------------------------------------------------------
 * Device registration and initialization
 * --------------------------------------------------------------------- */

/* One-time chip setup: VRM, UCH config, interrupt mode, hard-wired values */
static void __devinit vt1211_init_device(struct vt1211_data *data)
{
	/* set VRM */
	data->vrm = vid_which_vrm();

	/*
	 * Read (and initialize) UCH config. If the uch_config module
	 * parameter was given (> -1), splice it into bits 2..6 while
	 * preserving the other bits (mask 0x83) and write it back.
	 */
	data->uch_config = vt1211_read8(data, VT1211_REG_UCH_CONFIG);
	if (uch_config > -1) {
		data->uch_config = (data->uch_config & 0x83) |
				   (uch_config << 2);
		vt1211_write8(data, VT1211_REG_UCH_CONFIG, data->uch_config);
	}

	/*
	 * Initialize the interrupt mode (if request at module load time).
	 * The VT1211 implements 3 different modes for clearing interrupts:
	 * 0: Clear INT when status register is read. Regenerate INT as long
	 *    as temp stays above hysteresis limit.
	 * 1: Clear INT when status register is read. DON'T regenerate INT
	 *    until temp falls below hysteresis limit and exceeds hot limit
	 *    again.
	 * 2: Clear INT when temp falls below max limit.
	 *
	 * The driver only allows to force mode 0 since that's the only one
	 * that makes sense for 'sensors'
	 */
	if (int_mode == 0) {
		vt1211_write8(data, VT1211_REG_TEMP1_CONFIG, 0);
		vt1211_write8(data, VT1211_REG_TEMP2_CONFIG, 0);
	}

	/* Fill in some hard wired values into our data struct */
	data->pwm_auto_pwm[0][3] = 255;
	data->pwm_auto_pwm[1][3] = 255;
}

/* Tear down every sysfs node probe may have created (safe to over-remove) */
static void vt1211_remove_sysfs(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i;

	for (i = 0; i < ARRAY_SIZE(vt1211_in_attr_group); i++)
		sysfs_remove_group(&dev->kobj, &vt1211_in_attr_group[i]);
	for (i = 0; i < ARRAY_SIZE(vt1211_temp_attr_group); i++)
		sysfs_remove_group(&dev->kobj, &vt1211_temp_attr_group[i]);
	for (i = 0; i < ARRAY_SIZE(vt1211_sysfs_fan_pwm); i++) {
		device_remove_file(dev,
				   &vt1211_sysfs_fan_pwm[i].dev_attr);
	}
	for (i = 0; i < ARRAY_SIZE(vt1211_sysfs_misc); i++)
		device_remove_file(dev, &vt1211_sysfs_misc[i]);
}

/*
 * Bind to the platform device: claim the I/O region, initialize the chip,
 * create the sysfs interface and register with the hwmon class.
 */
static int __devinit vt1211_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vt1211_data *data;
	struct resource *res;
	int i, err;

	data = kzalloc(sizeof(struct vt1211_data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		dev_err(dev, "Out of memory\n");
		goto EXIT;
	}

	/* NOTE(review): res is not checked for NULL before use — relies on
	 * vt1211_device_add() always having attached an IO resource */
	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!request_region(res->start, resource_size(res), DRVNAME)) {
		err = -EBUSY;
		dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
			(unsigned long)res->start, (unsigned long)res->end);
		goto EXIT_KFREE;
	}
	data->addr = res->start;
	data->name = DRVNAME;
	mutex_init(&data->update_lock);

	platform_set_drvdata(pdev, data);

	/* Initialize the VT1211 chip */
	vt1211_init_device(data);

	/* Create sysfs interface files */
	for (i = 0; i < ARRAY_SIZE(vt1211_in_attr_group); i++) {
		if (ISVOLT(i, data->uch_config)) {
			err = sysfs_create_group(&dev->kobj,
						 &vt1211_in_attr_group[i]);
			if (err)
				goto EXIT_DEV_REMOVE;
		}
	}
	for (i = 0; i < ARRAY_SIZE(vt1211_temp_attr_group); i++) {
		if (ISTEMP(i, data->uch_config)) {
			err = sysfs_create_group(&dev->kobj,
						 &vt1211_temp_attr_group[i]);
			if (err)
				goto EXIT_DEV_REMOVE;
		}
	}
	for (i = 0; i < ARRAY_SIZE(vt1211_sysfs_fan_pwm); i++) {
		err = device_create_file(dev,
					 &vt1211_sysfs_fan_pwm[i].dev_attr);
		if (err)
			goto EXIT_DEV_REMOVE;
	}
	for (i = 0; i < ARRAY_SIZE(vt1211_sysfs_misc); i++) {
		err = device_create_file(dev,
					 &vt1211_sysfs_misc[i]);
		if (err)
			goto EXIT_DEV_REMOVE;
	}

	/* Register device */
	data->hwmon_dev = hwmon_device_register(dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		dev_err(dev, "Class registration failed (%d)\n", err);
		goto EXIT_DEV_REMOVE_SILENT;
	}

	return 0;

EXIT_DEV_REMOVE:
	dev_err(dev, "Sysfs interface creation failed (%d)\n", err);
EXIT_DEV_REMOVE_SILENT:
	vt1211_remove_sysfs(pdev);
	release_region(res->start, resource_size(res));
EXIT_KFREE:
	platform_set_drvdata(pdev, NULL);
	kfree(data);
EXIT:
	return err;
}

/* Unbind: unregister from hwmon, remove sysfs, free data and I/O region */
static int __devexit vt1211_remove(struct platform_device *pdev)
{
	struct vt1211_data *data = platform_get_drvdata(pdev);
	struct resource *res;

	hwmon_device_unregister(data->hwmon_dev);
	vt1211_remove_sysfs(pdev);
	platform_set_drvdata(pdev, NULL);
	kfree(data);

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	release_region(res->start, resource_size(res));

	return 0;
}

static struct platform_driver vt1211_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = DRVNAME,
	},
	.probe  = vt1211_probe,
	.remove = __devexit_p(vt1211_remove),
};

/*
 * Create and register the platform device for the chip found at 'address'.
 * Sets the file-scope 'pdev' as a side effect (used again in vt1211_exit).
 */
static int __init vt1211_device_add(unsigned short address)
{
	struct resource res = {
		.start	= address,
		.end	= address + 0x7f,
		.flags	= IORESOURCE_IO,
	};
	int err;

	pdev = platform_device_alloc(DRVNAME, address);
	if (!pdev) {
		err = -ENOMEM;
		pr_err("Device allocation failed (%d)\n", err);
		goto EXIT;
	}

	res.name = pdev->name;
	err = acpi_check_resource_conflict(&res);
	if (err)
		goto EXIT_DEV_PUT;

	err = platform_device_add_resources(pdev, &res, 1);
	if (err) {
		pr_err("Device resource addition failed (%d)\n", err);
		goto EXIT_DEV_PUT;
	}

	err = platform_device_add(pdev);
	if (err) {
		pr_err("Device addition failed (%d)\n", err);
		goto EXIT_DEV_PUT;
	}

	return 0;

EXIT_DEV_PUT:
	platform_device_put(pdev);
EXIT:
	return err;
}

/*
 * Probe the Super-I/O configuration space at 'sio_cip' for a VT1211 and,
 * if found and enabled, return its hardware-monitor base address via
 * *address. Returns 0 on success, -ENODEV otherwise.
 */
static int __init vt1211_find(int sio_cip, unsigned short *address)
{
	int err = -ENODEV;
	int devid;

	superio_enter(sio_cip);

	devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID);
	if (devid != SIO_VT1211_ID)
		goto EXIT;

	superio_select(sio_cip, SIO_VT1211_LDN_HWMON);

	if ((superio_inb(sio_cip, SIO_VT1211_ACTIVE) & 1) == 0) {
		pr_warn("HW monitor is disabled, skipping\n");
		goto EXIT;
	}

	/* Base address is 16 bits wide but must be 256-byte aligned */
	*address = ((superio_inb(sio_cip, SIO_VT1211_BADDR) << 8) |
		    (superio_inb(sio_cip, SIO_VT1211_BADDR + 1))) & 0xff00;
	if (*address == 0) {
		pr_warn("Base address is not set, skipping\n");
		goto EXIT;
	}

	err = 0;
	pr_info("Found VT1211 chip at 0x%04x, revision %u\n",
		*address, superio_inb(sio_cip, SIO_VT1211_DEVREV));

EXIT:
	superio_exit(sio_cip);
	return err;
}

/* Module entry: locate the chip, validate parameters, register driver+device */
static int __init vt1211_init(void)
{
	int err;
	unsigned short address = 0;

	/* Try both possible Super-I/O configuration ports */
	err = vt1211_find(SIO_REG_CIP1, &address);
	if (err) {
		err = vt1211_find(SIO_REG_CIP2, &address);
		if (err)
			goto EXIT;
	}

	if ((uch_config < -1) || (uch_config > 31)) {
		err = -EINVAL;
		pr_warn("Invalid UCH configuration %d. "
			"Choose a value between 0 and 31.\n", uch_config);
		goto EXIT;
	}

	if ((int_mode < -1) || (int_mode > 0)) {
		err = -EINVAL;
		pr_warn("Invalid interrupt mode %d. "
			"Only mode 0 is supported.\n", int_mode);
		goto EXIT;
	}

	err = platform_driver_register(&vt1211_driver);
	if (err)
		goto EXIT;

	/* Sets global pdev as a side effect */
	err = vt1211_device_add(address);
	if (err)
		goto EXIT_DRV_UNREGISTER;

	return 0;

EXIT_DRV_UNREGISTER:
	platform_driver_unregister(&vt1211_driver);
EXIT:
	return err;
}

static void __exit vt1211_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&vt1211_driver);
}

MODULE_AUTHOR("Juerg Haefliger <juergh@gmail.com>");
MODULE_DESCRIPTION("VT1211 sensors");
MODULE_LICENSE("GPL");

module_init(vt1211_init);
module_exit(vt1211_exit);
gpl-2.0
MoKee/android_kernel_zte_nx507j
drivers/hwmon/lm83.c
4857
12807
/*
 * lm83.c - Part of lm_sensors, Linux kernel modules for hardware
 *          monitoring
 * Copyright (C) 2003-2009  Jean Delvare <khali@linux-fr.org>
 *
 * Heavily inspired from the lm78, lm75 and adm1021 drivers. The LM83 is
 * a sensor chip made by National Semiconductor. It reports up to four
 * temperatures (its own plus up to three external ones) with a 1 deg
 * resolution and a 3-4 deg accuracy. Complete datasheet can be obtained
 * from National's website at:
 *   http://www.national.com/pf/LM/LM83.html
 * Since the datasheet omits to give the chip stepping code, I give it
 * here: 0x03 (at register 0xff).
 *
 * Also supports the LM82 temp sensor, which is basically a stripped down
 * model of the LM83.  Datasheet is here:
 *   http://www.national.com/pf/LM/LM82.html
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>

/*
 * Addresses to scan
 * Address is selected using 2 three-level pins, resulting in 9 possible
 * addresses.
 */

static const unsigned short normal_i2c[] = {
	0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };

enum chips { lm83, lm82 };

/*
 * The LM83 registers
 * Manufacturer ID is 0x01 for National Semiconductor.
 */

#define LM83_REG_R_MAN_ID		0xFE
#define LM83_REG_R_CHIP_ID		0xFF
#define LM83_REG_R_CONFIG		0x03
#define LM83_REG_W_CONFIG		0x09
#define LM83_REG_R_STATUS1		0x02
#define LM83_REG_R_STATUS2		0x35
#define LM83_REG_R_LOCAL_TEMP		0x00
#define LM83_REG_R_LOCAL_HIGH		0x05
#define LM83_REG_W_LOCAL_HIGH		0x0B
#define LM83_REG_R_REMOTE1_TEMP		0x30
#define LM83_REG_R_REMOTE1_HIGH		0x38
#define LM83_REG_W_REMOTE1_HIGH		0x50
#define LM83_REG_R_REMOTE2_TEMP		0x01
#define LM83_REG_R_REMOTE2_HIGH		0x07
#define LM83_REG_W_REMOTE2_HIGH		0x0D
#define LM83_REG_R_REMOTE3_TEMP		0x31
#define LM83_REG_R_REMOTE3_HIGH		0x3A
#define LM83_REG_W_REMOTE3_HIGH		0x52
#define LM83_REG_R_TCRIT		0x42
#define LM83_REG_W_TCRIT		0x5A

/*
 * Conversions and various macros
 * The LM83 uses signed 8-bit values with LSB = 1 degree Celsius.
 * TEMP_TO_REG clamps to [-128, 127] and rounds to the nearest degree.
 */

#define TEMP_FROM_REG(val)	((val) * 1000)
#define TEMP_TO_REG(val)	((val) <= -128000 ? -128 : \
				 (val) >= 127000 ? 127 : \
				 (val) < 0 ? ((val) - 500) / 1000 : \
				 ((val) + 500) / 1000)

/* Read registers, in the same order as lm83_data.temp[] (see below) */
static const u8 LM83_REG_R_TEMP[] = {
	LM83_REG_R_LOCAL_TEMP,
	LM83_REG_R_REMOTE1_TEMP,
	LM83_REG_R_REMOTE2_TEMP,
	LM83_REG_R_REMOTE3_TEMP,
	LM83_REG_R_LOCAL_HIGH,
	LM83_REG_R_REMOTE1_HIGH,
	LM83_REG_R_REMOTE2_HIGH,
	LM83_REG_R_REMOTE3_HIGH,
	LM83_REG_R_TCRIT,
};

/* Write registers for the limit values (indexed by temp[] index minus 4) */
static const u8 LM83_REG_W_HIGH[] = {
	LM83_REG_W_LOCAL_HIGH,
	LM83_REG_W_REMOTE1_HIGH,
	LM83_REG_W_REMOTE2_HIGH,
	LM83_REG_W_REMOTE3_HIGH,
	LM83_REG_W_TCRIT,
};

/*
 * Functions declaration
 */

static int lm83_detect(struct i2c_client *new_client,
		       struct i2c_board_info *info);
static int lm83_probe(struct i2c_client *client,
		      const struct i2c_device_id *id);
static int lm83_remove(struct i2c_client *client);
static struct lm83_data *lm83_update_device(struct device *dev);

/*
 * Driver data (common to all clients)
 */

static const struct i2c_device_id lm83_id[] = {
	{ "lm83", lm83 },
	{ "lm82", lm82 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, lm83_id);

static struct i2c_driver lm83_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= "lm83",
	},
	.probe		= lm83_probe,
	.remove		= lm83_remove,
	.id_table	= lm83_id,
	.detect		= lm83_detect,
	.address_list	= normal_i2c,
};

/*
 * Client data (each client gets its own)
 */

struct lm83_data {
	struct device *hwmon_dev;
	struct mutex update_lock;
	char valid; /* zero until following fields are valid */
	unsigned long last_updated; /* in jiffies */

	/* registers values */
	s8 temp[9];	/* 0..3: input 1-4,
			   4..7: high limit 1-4,
			   8   : critical limit */
	u16 alarms; /* bitvector, combined */
};

/*
 * Sysfs stuff
 */

/* Show one temperature value in millidegrees; index selects temp[] entry */
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct lm83_data *data = lm83_update_device(dev);
	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
}

/*
 * Store a limit value. Only indexes >= 4 are wired to writable attributes,
 * hence the "nr - 4" mapping into LM83_REG_W_HIGH[].
 */
static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct lm83_data *data = i2c_get_clientdata(client);
	long val;
	int nr = attr->index;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err < 0)
		return err;

	mutex_lock(&data->update_lock);
	data->temp[nr] = TEMP_TO_REG(val);
	i2c_smbus_write_byte_data(client, LM83_REG_W_HIGH[nr - 4],
				  data->temp[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Show the raw 16-bit combined alarm bitvector (legacy interface) */
static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
			   char *buf)
{
	struct lm83_data *data = lm83_update_device(dev);
	return sprintf(buf, "%d\n", data->alarms);
}

/* Show one alarm bit; index is the bit number within data->alarms */
static ssize_t show_alarm(struct device *dev, struct device_attribute
			  *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct lm83_data *data = lm83_update_device(dev);
	int bitnr = attr->index;

	return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp,
	set_temp, 4);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp,
	set_temp, 5);
static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_temp,
	set_temp, 6);
static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_temp,
	set_temp, 7);
/* All four *_crit attributes map to the single shared TCRIT register
   (temp[8]); only temp3_crit is writable */
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL, 8);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp, NULL, 8);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IWUSR | S_IRUGO, show_temp,
	set_temp, 8);
static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO, show_temp, NULL, 8);

/* Individual alarm files */
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_alarm, NULL, 10);
static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 15);
/* Raw alarm file for compatibility */
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);

/* Attributes common to LM83 and LM82 (local sensor + one remote) */
static struct attribute *lm83_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	&sensor_dev_attr_temp3_max.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,

	&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_fault.dev_attr.attr,
	&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
	&dev_attr_alarms.attr,
	NULL
};

static const struct attribute_group lm83_group = {
	.attrs = lm83_attributes,
};

/* Attributes for the two extra remote sensors only the LM83 has */
static struct attribute *lm83_attributes_opt[] = {
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp4_input.dev_attr.attr,
	&sensor_dev_attr_temp2_max.dev_attr.attr,
	&sensor_dev_attr_temp4_max.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp4_crit.dev_attr.attr,

	&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
	&sensor_dev_attr_temp4_fault.dev_attr.attr,
	&sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_fault.dev_attr.attr,
	&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
	NULL
};

static const struct attribute_group lm83_group_opt = {
	.attrs = lm83_attributes_opt,
};

/*
 * Real code
 */

/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm83_detect(struct i2c_client *new_client,
		       struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = new_client->adapter;
	const char *name;
	u8 man_id, chip_id;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/* Detection: reserved bits in the status/config registers must read 0 */
	if ((i2c_smbus_read_byte_data(new_client, LM83_REG_R_STATUS1) & 0xA8) ||
	    (i2c_smbus_read_byte_data(new_client, LM83_REG_R_STATUS2) & 0x48) ||
	    (i2c_smbus_read_byte_data(new_client, LM83_REG_R_CONFIG) & 0x41)) {
		dev_dbg(&adapter->dev, "LM83 detection failed at 0x%02x\n",
			new_client->addr);
		return -ENODEV;
	}

	/* Identification */
	man_id = i2c_smbus_read_byte_data(new_client, LM83_REG_R_MAN_ID);
	if (man_id != 0x01)	/* National Semiconductor */
		return -ENODEV;

	chip_id = i2c_smbus_read_byte_data(new_client, LM83_REG_R_CHIP_ID);
	switch (chip_id) {
	case 0x03:
		name = "lm83";
		break;
	case 0x01:
		name = "lm82";
		break;
	default:
		/* identification failed */
		dev_info(&adapter->dev,
			 "Unsupported chip (man_id=0x%02X, chip_id=0x%02X)\n",
			 man_id, chip_id);
		return -ENODEV;
	}

	strlcpy(info->type, name, I2C_NAME_SIZE);

	return 0;
}

/* Allocate client data and create the sysfs interface / hwmon device */
static int lm83_probe(struct i2c_client *new_client,
		      const struct i2c_device_id *id)
{
	struct lm83_data *data;
	int err;

	data = kzalloc(sizeof(struct lm83_data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		goto exit;
	}

	i2c_set_clientdata(new_client, data);
	data->valid = 0;
	mutex_init(&data->update_lock);

	/*
	 * Register sysfs hooks
	 * The LM82 can only monitor one external diode which is
	 * at the same register as the LM83 temp3 entry - so we
	 * declare 1 and 3 common, and then 2 and 4 only for the LM83.
	 */
	err = sysfs_create_group(&new_client->dev.kobj, &lm83_group);
	if (err)
		goto exit_free;

	if (id->driver_data == lm83) {
		err = sysfs_create_group(&new_client->dev.kobj,
					 &lm83_group_opt);
		if (err)
			goto exit_remove_files;
	}

	data->hwmon_dev = hwmon_device_register(&new_client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto exit_remove_files;
	}

	return 0;

exit_remove_files:
	/* Removing the opt group is harmless even if it was never created */
	sysfs_remove_group(&new_client->dev.kobj, &lm83_group);
	sysfs_remove_group(&new_client->dev.kobj, &lm83_group_opt);
exit_free:
	kfree(data);
exit:
	return err;
}

static int lm83_remove(struct i2c_client *client)
{
	struct lm83_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &lm83_group);
	sysfs_remove_group(&client->dev.kobj, &lm83_group_opt);

	kfree(data);
	return 0;
}

/*
 * Refresh the register cache if it is older than 2 seconds (or never
 * filled); returns the per-client data with up-to-date values.
 */
static struct lm83_data *lm83_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct lm83_data *data = i2c_get_clientdata(client);

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
		int nr;

		dev_dbg(&client->dev, "Updating lm83 data.\n");
		for (nr = 0; nr < 9; nr++) {
			data->temp[nr] =
			    i2c_smbus_read_byte_data(client,
						     LM83_REG_R_TEMP[nr]);
		}
		/* STATUS1 fills the low byte, STATUS2 the high byte */
		data->alarms =
		    i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS1)
		    + (i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS2)
		       << 8);

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}

module_i2c_driver(lm83_driver);

MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("LM83 driver");
MODULE_LICENSE("GPL");
gpl-2.0
cr1exe/android_kernel_sony_taoshan
drivers/hwmon/thmc50.c
4857
14303
/*
 * thmc50.c - Part of lm_sensors, Linux kernel modules for hardware
 *            monitoring
 * Copyright (C) 2007 Krzysztof Helt <krzysztof.h1@wp.pl>
 * Based on 2.4 driver by Frodo Looijaard <frodol@dds.nl> and
 * Philip Edelbrock <phil@netroedge.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>

MODULE_LICENSE("GPL");

/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };

/* Insmod parameters */
enum chips { thmc50, adm1022 };

static unsigned short adm1022_temp3[16];
static unsigned int adm1022_temp3_num;
module_param_array(adm1022_temp3, ushort, &adm1022_temp3_num, 0);
MODULE_PARM_DESC(adm1022_temp3, "List of adapter,address pairs "
			"to enable 3rd temperature (ADM1022 only)");

/* Many THMC50 constants specified below */

/* The THMC50 registers */
#define THMC50_REG_CONF				0x40
#define THMC50_REG_COMPANY_ID			0x3E
#define THMC50_REG_DIE_CODE			0x3F
#define THMC50_REG_ANALOG_OUT			0x19
/*
 * The mirror status register cannot be used as
 * reading it does not clear alarms.
 */
#define THMC50_REG_INTR				0x41

static const u8 THMC50_REG_TEMP[] = { 0x27, 0x26, 0x20 };
static const u8 THMC50_REG_TEMP_MIN[] = { 0x3A, 0x38, 0x2C };
static const u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B };
static const u8 THMC50_REG_TEMP_CRITICAL[] = { 0x13, 0x14, 0x14 };
static const u8 THMC50_REG_TEMP_DEFAULT[] = { 0x17, 0x18, 0x18 };

#define THMC50_REG_CONF_nFANOFF			0x20
#define THMC50_REG_CONF_PROGRAMMED		0x08

/* Each client has this additional data */
struct thmc50_data {
	struct device *hwmon_dev;

	struct mutex update_lock;
	enum chips type;
	unsigned long last_updated;	/* In jiffies */
	char has_temp3;		/* !=0 if it is ADM1022 in temp3 mode */
	char valid;		/* !=0 if following fields are valid */

	/* Register values */
	s8 temp_input[3];
	s8 temp_max[3];
	s8 temp_min[3];
	s8 temp_critical[3];
	u8 analog_out;
	u8 alarms;
};

static int thmc50_detect(struct i2c_client *client,
			 struct i2c_board_info *info);
static int thmc50_probe(struct i2c_client *client,
			const struct i2c_device_id *id);
static int thmc50_remove(struct i2c_client *client);
static void thmc50_init_client(struct i2c_client *client);
static struct thmc50_data *thmc50_update_device(struct device *dev);

static const struct i2c_device_id thmc50_id[] = {
	{ "adm1022", adm1022 },
	{ "thmc50", thmc50 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, thmc50_id);

static struct i2c_driver thmc50_driver = {
	.class = I2C_CLASS_HWMON,
	.driver = {
		.name = "thmc50",
	},
	.probe = thmc50_probe,
	.remove = thmc50_remove,
	.id_table = thmc50_id,
	.detect = thmc50_detect,
	.address_list = normal_i2c,
};

/* Show the cached analog output (pwm1) value */
static ssize_t show_analog_out(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct thmc50_data *data = thmc50_update_device(dev);
	return sprintf(buf, "%d\n", data->analog_out);
}

/*
 * Store the analog output value; also toggles the nFANOFF configuration
 * bit so the fan is switched off when the output is 0.
 */
static ssize_t set_analog_out(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct thmc50_data *data = i2c_get_clientdata(client);
	int config;
	unsigned long tmp;
	int err;

	err = kstrtoul(buf, 10, &tmp);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->analog_out = SENSORS_LIMIT(tmp, 0, 255);
	i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT,
				  data->analog_out);

	config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF);
	if (data->analog_out == 0)
		config &= ~THMC50_REG_CONF_nFANOFF;
	else
		config |= THMC50_REG_CONF_nFANOFF;
	i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config);

	mutex_unlock(&data->update_lock);
	return count;
}

/* There is only one PWM mode = DC */
static ssize_t show_pwm_mode(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0\n");
}

/* Temperatures */
/* Show a temperature reading in millidegrees (register LSB = 1 degC) */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct thmc50_data *data = thmc50_update_device(dev);
	return sprintf(buf, "%d\n", data->temp_input[nr] * 1000);
}

static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct thmc50_data *data = thmc50_update_device(dev);
	return sprintf(buf, "%d\n", data->temp_min[nr] * 1000);
}

/* Store a low limit; clamped to the chip's signed 8-bit degC range */
static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct thmc50_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_min[nr] = SENSORS_LIMIT(val / 1000, -128, 127);
	i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MIN[nr],
				  data->temp_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct thmc50_data *data = thmc50_update_device(dev);
	return sprintf(buf, "%d\n", data->temp_max[nr] * 1000);
}

/* Store a high limit; clamped to the chip's signed 8-bit degC range */
static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct thmc50_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_max[nr] = SENSORS_LIMIT(val / 1000, -128, 127);
	i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MAX[nr],
				  data->temp_max[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

static ssize_t show_temp_critical(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;
	struct thmc50_data *data = thmc50_update_device(dev);
	return sprintf(buf, "%d\n", data->temp_critical[nr] * 1000);
}

/* Show one bit of the alarm register; index is the bit number */
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	int index = to_sensor_dev_attr(attr)->index;
	struct thmc50_data *data = thmc50_update_device(dev);

	return sprintf(buf, "%u\n", (data->alarms >> index) & 1);
}

/* input/min/max/crit sysfs nodes for temperature channel 'offset' (1-based) */
#define temp_reg(offset) \
static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp, \
			  NULL, offset - 1); \
static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
			  show_temp_min, set_temp_min, offset - 1); \
static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
			  show_temp_max, set_temp_max, offset - 1); \
static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO, \
			  show_temp_critical, NULL, offset - 1);

temp_reg(1);
temp_reg(2);
temp_reg(3);

static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);

static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_analog_out,
			  set_analog_out, 0);
static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL, 0);

static struct attribute *thmc50_attributes[] = {
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	&sensor_dev_attr_temp1_min.dev_attr.attr,
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_max.dev_attr.attr,
	&sensor_dev_attr_temp2_min.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_fault.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_mode.dev_attr.attr,
	NULL
};

static const struct attribute_group thmc50_group = {
	.attrs = thmc50_attributes,
};

/* for ADM1022 3rd temperature mode */
static struct attribute *temp3_attributes[] = {
	&sensor_dev_attr_temp3_max.dev_attr.attr,
	&sensor_dev_attr_temp3_min.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_fault.dev_attr.attr,
	NULL
};

static const struct attribute_group temp3_group = {
	.attrs = temp3_attributes,
};

/* Return 0 if detection is successful, -ENODEV otherwise */
static int thmc50_detect(struct i2c_client *client,
			 struct i2c_board_info *info)
{
	unsigned company;
	unsigned revision;
	unsigned config;
	struct i2c_adapter *adapter = client->adapter;
	const char *type_name;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		pr_debug("thmc50: detect failed, "
			 "smbus byte data not supported!\n");
		return -ENODEV;
	}

	pr_debug("thmc50: Probing for THMC50 at 0x%2X on bus %d\n",
		 client->addr, i2c_adapter_id(client->adapter));

	company = i2c_smbus_read_byte_data(client, THMC50_REG_COMPANY_ID);
	revision = i2c_smbus_read_byte_data(client, THMC50_REG_DIE_CODE);
	config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF);
	if (revision < 0xc0 || (config & 0x10))
		return -ENODEV;

	if (company == 0x41) {
		int id = i2c_adapter_id(client->adapter);
		int i;

		type_name = "adm1022";
for (i = 0; i + 1 < adm1022_temp3_num; i += 2) if (adm1022_temp3[i] == id && adm1022_temp3[i + 1] == client->addr) { /* enable 2nd remote temp */ config |= (1 << 7); i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config); break; } } else if (company == 0x49) { type_name = "thmc50"; } else { pr_debug("thmc50: Detection of THMC50/ADM1022 failed\n"); return -ENODEV; } pr_debug("thmc50: Detected %s (version %x, revision %x)\n", type_name, (revision >> 4) - 0xc, revision & 0xf); strlcpy(info->type, type_name, I2C_NAME_SIZE); return 0; } static int thmc50_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct thmc50_data *data; int err; data = kzalloc(sizeof(struct thmc50_data), GFP_KERNEL); if (!data) { pr_debug("thmc50: detect failed, kzalloc failed!\n"); err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); data->type = id->driver_data; mutex_init(&data->update_lock); thmc50_init_client(client); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &thmc50_group); if (err) goto exit_free; /* Register ADM1022 sysfs hooks */ if (data->has_temp3) { err = sysfs_create_group(&client->dev.kobj, &temp3_group); if (err) goto exit_remove_sysfs_thmc50; } /* Register a new directory entry with module sensors */ data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_sysfs; } return 0; exit_remove_sysfs: if (data->has_temp3) sysfs_remove_group(&client->dev.kobj, &temp3_group); exit_remove_sysfs_thmc50: sysfs_remove_group(&client->dev.kobj, &thmc50_group); exit_free: kfree(data); exit: return err; } static int thmc50_remove(struct i2c_client *client) { struct thmc50_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &thmc50_group); if (data->has_temp3) sysfs_remove_group(&client->dev.kobj, &temp3_group); kfree(data); return 0; } static void thmc50_init_client(struct i2c_client *client) { 
struct thmc50_data *data = i2c_get_clientdata(client); int config; data->analog_out = i2c_smbus_read_byte_data(client, THMC50_REG_ANALOG_OUT); /* set up to at least 1 */ if (data->analog_out == 0) { data->analog_out = 1; i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT, data->analog_out); } config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF); config |= 0x1; /* start the chip if it is in standby mode */ if (data->type == adm1022 && (config & (1 << 7))) data->has_temp3 = 1; i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config); } static struct thmc50_data *thmc50_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct thmc50_data *data = i2c_get_clientdata(client); int timeout = HZ / 5 + (data->type == thmc50 ? HZ : 0); mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + timeout) || !data->valid) { int temps = data->has_temp3 ? 3 : 2; int i; int prog = i2c_smbus_read_byte_data(client, THMC50_REG_CONF); prog &= THMC50_REG_CONF_PROGRAMMED; for (i = 0; i < temps; i++) { data->temp_input[i] = i2c_smbus_read_byte_data(client, THMC50_REG_TEMP[i]); data->temp_max[i] = i2c_smbus_read_byte_data(client, THMC50_REG_TEMP_MAX[i]); data->temp_min[i] = i2c_smbus_read_byte_data(client, THMC50_REG_TEMP_MIN[i]); data->temp_critical[i] = i2c_smbus_read_byte_data(client, prog ? THMC50_REG_TEMP_CRITICAL[i] : THMC50_REG_TEMP_DEFAULT[i]); } data->analog_out = i2c_smbus_read_byte_data(client, THMC50_REG_ANALOG_OUT); data->alarms = i2c_smbus_read_byte_data(client, THMC50_REG_INTR); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } module_i2c_driver(thmc50_driver); MODULE_AUTHOR("Krzysztof Helt <krzysztof.h1@wp.pl>"); MODULE_DESCRIPTION("THMC50 driver");
gpl-2.0
lollipop-og/kernel_msm
drivers/hwmon/w83791d.c
4857
51605
/* * w83791d.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring * * Copyright (C) 2006-2007 Charles Spirakis <bezaur@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Supports following chips: * * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA * w83791d 10 5 5 3 0x71 0x5ca3 yes no * * The w83791d chip appears to be part way between the 83781d and the * 83792d. Thus, this file is derived from both the w83792d.c and * w83781d.c files. * * The w83791g chip is the same as the w83791d but lead-free. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-vid.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #define NUMBER_OF_VIN 10 #define NUMBER_OF_FANIN 5 #define NUMBER_OF_TEMPIN 3 #define NUMBER_OF_PWM 5 /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; /* Insmod parameters */ static unsigned short force_subclients[4]; module_param_array(force_subclients, short, NULL, 0); MODULE_PARM_DESC(force_subclients, "List of subclient addresses: " "{bus, clientaddr, subclientaddr1, subclientaddr2}"); static bool reset; module_param(reset, bool, 0); MODULE_PARM_DESC(reset, "Set to one to force a hardware chip reset"); static bool init; module_param(init, bool, 0); MODULE_PARM_DESC(init, "Set to one to force extra software initialization"); /* The W83791D registers */ static const u8 W83791D_REG_IN[NUMBER_OF_VIN] = { 0x20, /* VCOREA in DataSheet */ 0x21, /* VINR0 in DataSheet */ 0x22, /* +3.3VIN in DataSheet */ 0x23, /* VDD5V in DataSheet */ 0x24, /* +12VIN in DataSheet */ 0x25, /* -12VIN in DataSheet */ 0x26, /* -5VIN in DataSheet */ 0xB0, /* 5VSB in DataSheet */ 0xB1, /* VBAT in DataSheet */ 0xB2 /* VINR1 in DataSheet */ }; static const u8 W83791D_REG_IN_MAX[NUMBER_OF_VIN] = { 0x2B, /* VCOREA High Limit in DataSheet */ 0x2D, /* VINR0 High Limit in DataSheet */ 0x2F, /* +3.3VIN High Limit in DataSheet */ 0x31, /* VDD5V High Limit in DataSheet */ 0x33, /* +12VIN High Limit in DataSheet */ 0x35, /* -12VIN High Limit in DataSheet */ 0x37, /* -5VIN High Limit in DataSheet */ 0xB4, /* 5VSB High Limit in DataSheet */ 0xB6, /* VBAT High Limit in DataSheet */ 0xB8 /* VINR1 High Limit in DataSheet */ }; static const u8 W83791D_REG_IN_MIN[NUMBER_OF_VIN] = { 0x2C, /* VCOREA Low Limit in DataSheet */ 0x2E, /* VINR0 Low Limit in DataSheet */ 0x30, /* +3.3VIN Low Limit in DataSheet */ 0x32, /* VDD5V 
Low Limit in DataSheet */ 0x34, /* +12VIN Low Limit in DataSheet */ 0x36, /* -12VIN Low Limit in DataSheet */ 0x38, /* -5VIN Low Limit in DataSheet */ 0xB5, /* 5VSB Low Limit in DataSheet */ 0xB7, /* VBAT Low Limit in DataSheet */ 0xB9 /* VINR1 Low Limit in DataSheet */ }; static const u8 W83791D_REG_FAN[NUMBER_OF_FANIN] = { 0x28, /* FAN 1 Count in DataSheet */ 0x29, /* FAN 2 Count in DataSheet */ 0x2A, /* FAN 3 Count in DataSheet */ 0xBA, /* FAN 4 Count in DataSheet */ 0xBB, /* FAN 5 Count in DataSheet */ }; static const u8 W83791D_REG_FAN_MIN[NUMBER_OF_FANIN] = { 0x3B, /* FAN 1 Count Low Limit in DataSheet */ 0x3C, /* FAN 2 Count Low Limit in DataSheet */ 0x3D, /* FAN 3 Count Low Limit in DataSheet */ 0xBC, /* FAN 4 Count Low Limit in DataSheet */ 0xBD, /* FAN 5 Count Low Limit in DataSheet */ }; static const u8 W83791D_REG_PWM[NUMBER_OF_PWM] = { 0x81, /* PWM 1 duty cycle register in DataSheet */ 0x83, /* PWM 2 duty cycle register in DataSheet */ 0x94, /* PWM 3 duty cycle register in DataSheet */ 0xA0, /* PWM 4 duty cycle register in DataSheet */ 0xA1, /* PWM 5 duty cycle register in DataSheet */ }; static const u8 W83791D_REG_TEMP_TARGET[3] = { 0x85, /* PWM 1 target temperature for temp 1 */ 0x86, /* PWM 2 target temperature for temp 2 */ 0x96, /* PWM 3 target temperature for temp 3 */ }; static const u8 W83791D_REG_TEMP_TOL[2] = { 0x87, /* PWM 1/2 temperature tolerance */ 0x97, /* PWM 3 temperature tolerance */ }; static const u8 W83791D_REG_FAN_CFG[2] = { 0x84, /* FAN 1/2 configuration */ 0x95, /* FAN 3 configuration */ }; static const u8 W83791D_REG_FAN_DIV[3] = { 0x47, /* contains FAN1 and FAN2 Divisor */ 0x4b, /* contains FAN3 Divisor */ 0x5C, /* contains FAN4 and FAN5 Divisor */ }; #define W83791D_REG_BANK 0x4E #define W83791D_REG_TEMP2_CONFIG 0xC2 #define W83791D_REG_TEMP3_CONFIG 0xCA static const u8 W83791D_REG_TEMP1[3] = { 0x27, /* TEMP 1 in DataSheet */ 0x39, /* TEMP 1 Over in DataSheet */ 0x3A, /* TEMP 1 Hyst in DataSheet */ }; static const u8 
W83791D_REG_TEMP_ADD[2][6] = { {0xC0, /* TEMP 2 in DataSheet */ 0xC1, /* TEMP 2(0.5 deg) in DataSheet */ 0xC5, /* TEMP 2 Over High part in DataSheet */ 0xC6, /* TEMP 2 Over Low part in DataSheet */ 0xC3, /* TEMP 2 Thyst High part in DataSheet */ 0xC4}, /* TEMP 2 Thyst Low part in DataSheet */ {0xC8, /* TEMP 3 in DataSheet */ 0xC9, /* TEMP 3(0.5 deg) in DataSheet */ 0xCD, /* TEMP 3 Over High part in DataSheet */ 0xCE, /* TEMP 3 Over Low part in DataSheet */ 0xCB, /* TEMP 3 Thyst High part in DataSheet */ 0xCC} /* TEMP 3 Thyst Low part in DataSheet */ }; #define W83791D_REG_BEEP_CONFIG 0x4D static const u8 W83791D_REG_BEEP_CTRL[3] = { 0x56, /* BEEP Control Register 1 */ 0x57, /* BEEP Control Register 2 */ 0xA3, /* BEEP Control Register 3 */ }; #define W83791D_REG_GPIO 0x15 #define W83791D_REG_CONFIG 0x40 #define W83791D_REG_VID_FANDIV 0x47 #define W83791D_REG_DID_VID4 0x49 #define W83791D_REG_WCHIPID 0x58 #define W83791D_REG_CHIPMAN 0x4F #define W83791D_REG_PIN 0x4B #define W83791D_REG_I2C_SUBADDR 0x4A #define W83791D_REG_ALARM1 0xA9 /* realtime status register1 */ #define W83791D_REG_ALARM2 0xAA /* realtime status register2 */ #define W83791D_REG_ALARM3 0xAB /* realtime status register3 */ #define W83791D_REG_VBAT 0x5D #define W83791D_REG_I2C_ADDR 0x48 /* * The SMBus locks itself. The Winbond W83791D has a bank select register * (index 0x4e), but the driver only accesses registers in bank 0. Since * we don't switch banks, we don't need any special code to handle * locking access between bank switches */ static inline int w83791d_read(struct i2c_client *client, u8 reg) { return i2c_smbus_read_byte_data(client, reg); } static inline int w83791d_write(struct i2c_client *client, u8 reg, u8 value) { return i2c_smbus_write_byte_data(client, reg, value); } /* * The analog voltage inputs have 16mV LSB. Since the sysfs output is * in mV as would be measured on the chip input pin, need to just * multiply/divide by 16 to translate from/to register values. 
*/ #define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8) / 16), 0, 255)) #define IN_FROM_REG(val) ((val) * 16) static u8 fan_to_reg(long rpm, int div) { if (rpm == 0) return 255; rpm = SENSORS_LIMIT(rpm, 1, 1000000); return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); } #define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \ ((val) == 255 ? 0 : \ 1350000 / ((val) * (div)))) /* for temp1 which is 8-bit resolution, LSB = 1 degree Celsius */ #define TEMP1_FROM_REG(val) ((val) * 1000) #define TEMP1_TO_REG(val) ((val) <= -128000 ? -128 : \ (val) >= 127000 ? 127 : \ (val) < 0 ? ((val) - 500) / 1000 : \ ((val) + 500) / 1000) /* * for temp2 and temp3 which are 9-bit resolution, LSB = 0.5 degree Celsius * Assumes the top 8 bits are the integral amount and the bottom 8 bits * are the fractional amount. Since we only have 0.5 degree resolution, * the bottom 7 bits will always be zero */ #define TEMP23_FROM_REG(val) ((val) / 128 * 500) #define TEMP23_TO_REG(val) ((val) <= -128000 ? 0x8000 : \ (val) >= 127500 ? 0x7F80 : \ (val) < 0 ? ((val) - 250) / 500 * 128 : \ ((val) + 250) / 500 * 128) /* for thermal cruise target temp, 7-bits, LSB = 1 degree Celsius */ #define TARGET_TEMP_TO_REG(val) ((val) < 0 ? 0 : \ (val) >= 127000 ? 127 : \ ((val) + 500) / 1000) /* for thermal cruise temp tolerance, 4-bits, LSB = 1 degree Celsius */ #define TOL_TEMP_TO_REG(val) ((val) < 0 ? 0 : \ (val) >= 15000 ? 
15 : \ ((val) + 500) / 1000) #define BEEP_MASK_TO_REG(val) ((val) & 0xffffff) #define BEEP_MASK_FROM_REG(val) ((val) & 0xffffff) #define DIV_FROM_REG(val) (1 << (val)) static u8 div_to_reg(int nr, long val) { int i; /* fan divisors max out at 128 */ val = SENSORS_LIMIT(val, 1, 128) >> 1; for (i = 0; i < 7; i++) { if (val == 0) break; val >>= 1; } return (u8) i; } struct w83791d_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ /* array of 2 pointers to subclients */ struct i2c_client *lm75[2]; /* volts */ u8 in[NUMBER_OF_VIN]; /* Register value */ u8 in_max[NUMBER_OF_VIN]; /* Register value */ u8 in_min[NUMBER_OF_VIN]; /* Register value */ /* fans */ u8 fan[NUMBER_OF_FANIN]; /* Register value */ u8 fan_min[NUMBER_OF_FANIN]; /* Register value */ u8 fan_div[NUMBER_OF_FANIN]; /* Register encoding, shifted right */ /* Temperature sensors */ s8 temp1[3]; /* current, over, thyst */ s16 temp_add[2][3]; /* fixed point value. Top 8 bits are the * integral part, bottom 8 bits are the * fractional part. We only use the top * 9 bits as the resolution is only * to the 0.5 degree C... 
* two sensors with three values * (cur, over, hyst) */ /* PWMs */ u8 pwm[5]; /* pwm duty cycle */ u8 pwm_enable[3]; /* pwm enable status for fan 1-3 * (fan 4-5 only support manual mode) */ u8 temp_target[3]; /* pwm 1-3 target temperature */ u8 temp_tolerance[3]; /* pwm 1-3 temperature tolerance */ /* Misc */ u32 alarms; /* realtime status register encoding,combined */ u8 beep_enable; /* Global beep enable */ u32 beep_mask; /* Mask off specific beeps */ u8 vid; /* Register encoding, combined */ u8 vrm; /* hwmon-vid */ }; static int w83791d_probe(struct i2c_client *client, const struct i2c_device_id *id); static int w83791d_detect(struct i2c_client *client, struct i2c_board_info *info); static int w83791d_remove(struct i2c_client *client); static int w83791d_read(struct i2c_client *client, u8 reg); static int w83791d_write(struct i2c_client *client, u8 reg, u8 value); static struct w83791d_data *w83791d_update_device(struct device *dev); #ifdef DEBUG static void w83791d_print_debug(struct w83791d_data *data, struct device *dev); #endif static void w83791d_init_client(struct i2c_client *client); static const struct i2c_device_id w83791d_id[] = { { "w83791d", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, w83791d_id); static struct i2c_driver w83791d_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "w83791d", }, .probe = w83791d_probe, .remove = w83791d_remove, .id_table = w83791d_id, .detect = w83791d_detect, .address_list = normal_i2c, }; /* following are the sysfs callback functions */ #define show_in_reg(reg) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ struct w83791d_data *data = w83791d_update_device(dev); \ int nr = sensor_attr->index; \ return sprintf(buf, "%d\n", IN_FROM_REG(data->reg[nr])); \ } show_in_reg(in); show_in_reg(in_min); show_in_reg(in_max); #define store_in_reg(REG, reg) \ static ssize_t store_in_##reg(struct device *dev, 
\ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ struct i2c_client *client = to_i2c_client(dev); \ struct w83791d_data *data = i2c_get_clientdata(client); \ int nr = sensor_attr->index; \ unsigned long val; \ int err = kstrtoul(buf, 10, &val); \ if (err) \ return err; \ mutex_lock(&data->update_lock); \ data->in_##reg[nr] = IN_TO_REG(val); \ w83791d_write(client, W83791D_REG_IN_##REG[nr], data->in_##reg[nr]); \ mutex_unlock(&data->update_lock); \ \ return count; \ } store_in_reg(MIN, min); store_in_reg(MAX, max); static struct sensor_device_attribute sda_in_input[] = { SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0), SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1), SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2), SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3), SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4), SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5), SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6), SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7), SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8), SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9), }; static struct sensor_device_attribute sda_in_min[] = { SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0), SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1), SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2), SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3), SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4), SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5), SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6), SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7), SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8), SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9), }; static struct sensor_device_attribute 
sda_in_max[] = { SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0), SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1), SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2), SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3), SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4), SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5), SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6), SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7), SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8), SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9), }; static ssize_t show_beep(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct w83791d_data *data = w83791d_update_device(dev); int bitnr = sensor_attr->index; return sprintf(buf, "%d\n", (data->beep_mask >> bitnr) & 1); } static ssize_t store_beep(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int bitnr = sensor_attr->index; int bytenr = bitnr / 8; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; val = val ? 
1 : 0; mutex_lock(&data->update_lock); data->beep_mask &= ~(0xff << (bytenr * 8)); data->beep_mask |= w83791d_read(client, W83791D_REG_BEEP_CTRL[bytenr]) << (bytenr * 8); data->beep_mask &= ~(1 << bitnr); data->beep_mask |= val << bitnr; w83791d_write(client, W83791D_REG_BEEP_CTRL[bytenr], (data->beep_mask >> (bytenr * 8)) & 0xff); mutex_unlock(&data->update_lock); return count; } static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct w83791d_data *data = w83791d_update_device(dev); int bitnr = sensor_attr->index; return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1); } /* * Note: The bitmask for the beep enable/disable is different than * the bitmask for the alarm. */ static struct sensor_device_attribute sda_in_beep[] = { SENSOR_ATTR(in0_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 0), SENSOR_ATTR(in1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 13), SENSOR_ATTR(in2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 2), SENSOR_ATTR(in3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 3), SENSOR_ATTR(in4_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 8), SENSOR_ATTR(in5_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 9), SENSOR_ATTR(in6_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 10), SENSOR_ATTR(in7_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 16), SENSOR_ATTR(in8_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 17), SENSOR_ATTR(in9_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 14), }; static struct sensor_device_attribute sda_in_alarm[] = { SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0), SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1), SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2), SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3), SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8), SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9), SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10), SENSOR_ATTR(in7_alarm, 
S_IRUGO, show_alarm, NULL, 19), SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 20), SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 14), }; #define show_fan_reg(reg) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ struct w83791d_data *data = w83791d_update_device(dev); \ int nr = sensor_attr->index; \ return sprintf(buf, "%d\n", \ FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \ } show_fan_reg(fan); show_fan_reg(fan_min); static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int nr = sensor_attr->index; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->fan_min[nr] = fan_to_reg(val, DIV_FROM_REG(data->fan_div[nr])); w83791d_write(client, W83791D_REG_FAN_MIN[nr], data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%u\n", DIV_FROM_REG(data->fan_div[nr])); } /* * Note: we save and restore the fan minimum here, because its value is * determined in part by the fan divisor. This follows the principle of * least surprise; the user doesn't expect the fan minimum to change just * because the divisor changed. 
*/ static ssize_t store_fan_div(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int nr = sensor_attr->index; unsigned long min; u8 tmp_fan_div; u8 fan_div_reg; u8 vbat_reg; int indx = 0; u8 keep_mask = 0; u8 new_shift = 0; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; /* Save fan_min */ min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); mutex_lock(&data->update_lock); data->fan_div[nr] = div_to_reg(nr, val); switch (nr) { case 0: indx = 0; keep_mask = 0xcf; new_shift = 4; break; case 1: indx = 0; keep_mask = 0x3f; new_shift = 6; break; case 2: indx = 1; keep_mask = 0x3f; new_shift = 6; break; case 3: indx = 2; keep_mask = 0xf8; new_shift = 0; break; case 4: indx = 2; keep_mask = 0x8f; new_shift = 4; break; #ifdef DEBUG default: dev_warn(dev, "store_fan_div: Unexpected nr seen: %d\n", nr); count = -EINVAL; goto err_exit; #endif } fan_div_reg = w83791d_read(client, W83791D_REG_FAN_DIV[indx]) & keep_mask; tmp_fan_div = (data->fan_div[nr] << new_shift) & ~keep_mask; w83791d_write(client, W83791D_REG_FAN_DIV[indx], fan_div_reg | tmp_fan_div); /* Bit 2 of fans 0-2 is stored in the vbat register (bits 5-7) */ if (nr < 3) { keep_mask = ~(1 << (nr + 5)); vbat_reg = w83791d_read(client, W83791D_REG_VBAT) & keep_mask; tmp_fan_div = (data->fan_div[nr] << (3 + nr)) & ~keep_mask; w83791d_write(client, W83791D_REG_VBAT, vbat_reg | tmp_fan_div); } /* Restore fan_min */ data->fan_min[nr] = fan_to_reg(min, DIV_FROM_REG(data->fan_div[nr])); w83791d_write(client, W83791D_REG_FAN_MIN[nr], data->fan_min[nr]); #ifdef DEBUG err_exit: #endif mutex_unlock(&data->update_lock); return count; } static struct sensor_device_attribute sda_fan_input[] = { SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0), SENSOR_ATTR(fan2_input, 
S_IRUGO, show_fan, NULL, 1), SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2), SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3), SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4), }; static struct sensor_device_attribute sda_fan_min[] = { SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 0), SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 1), SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 2), SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 3), SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 4), }; static struct sensor_device_attribute sda_fan_div[] = { SENSOR_ATTR(fan1_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 0), SENSOR_ATTR(fan2_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 1), SENSOR_ATTR(fan3_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 2), SENSOR_ATTR(fan4_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 3), SENSOR_ATTR(fan5_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 4), }; static struct sensor_device_attribute sda_fan_beep[] = { SENSOR_ATTR(fan1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 6), SENSOR_ATTR(fan2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 7), SENSOR_ATTR(fan3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 11), SENSOR_ATTR(fan4_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 21), SENSOR_ATTR(fan5_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 22), }; static struct sensor_device_attribute sda_fan_alarm[] = { SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6), SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7), SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11), SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 21), SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 22), }; /* read/write PWMs */ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = 
sensor_attr->index; struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%u\n", data->pwm[nr]); } static ssize_t store_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int nr = sensor_attr->index; unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; mutex_lock(&data->update_lock); data->pwm[nr] = SENSORS_LIMIT(val, 0, 255); w83791d_write(client, W83791D_REG_PWM[nr], data->pwm[nr]); mutex_unlock(&data->update_lock); return count; } static struct sensor_device_attribute sda_pwm[] = { SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0), SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1), SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2), SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3), SENSOR_ATTR(pwm5, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 4), }; static ssize_t show_pwmenable(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%u\n", data->pwm_enable[nr] + 1); } static ssize_t store_pwmenable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int nr = sensor_attr->index; unsigned long val; u8 reg_cfg_tmp; u8 reg_idx = 0; u8 val_shift = 0; u8 keep_mask = 0; int ret = kstrtoul(buf, 10, &val); if (ret || val < 1 || val > 3) return -EINVAL; mutex_lock(&data->update_lock); data->pwm_enable[nr] = val - 1; switch (nr) { case 0: reg_idx = 0; val_shift = 2; keep_mask = 0xf3; break; case 1: 
reg_idx = 0; val_shift = 4; keep_mask = 0xcf; break; case 2: reg_idx = 1; val_shift = 2; keep_mask = 0xf3; break; } reg_cfg_tmp = w83791d_read(client, W83791D_REG_FAN_CFG[reg_idx]); reg_cfg_tmp = (reg_cfg_tmp & keep_mask) | data->pwm_enable[nr] << val_shift; w83791d_write(client, W83791D_REG_FAN_CFG[reg_idx], reg_cfg_tmp); mutex_unlock(&data->update_lock); return count; } static struct sensor_device_attribute sda_pwmenable[] = { SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwmenable, store_pwmenable, 0), SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwmenable, store_pwmenable, 1), SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwmenable, store_pwmenable, 2), }; /* For Smart Fan I / Thermal Cruise */ static ssize_t show_temp_target(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct w83791d_data *data = w83791d_update_device(dev); int nr = sensor_attr->index; return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp_target[nr])); } static ssize_t store_temp_target(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int nr = sensor_attr->index; unsigned long val; u8 target_mask; if (kstrtoul(buf, 10, &val)) return -EINVAL; mutex_lock(&data->update_lock); data->temp_target[nr] = TARGET_TEMP_TO_REG(val); target_mask = w83791d_read(client, W83791D_REG_TEMP_TARGET[nr]) & 0x80; w83791d_write(client, W83791D_REG_TEMP_TARGET[nr], data->temp_target[nr] | target_mask); mutex_unlock(&data->update_lock); return count; } static struct sensor_device_attribute sda_temp_target[] = { SENSOR_ATTR(temp1_target, S_IWUSR | S_IRUGO, show_temp_target, store_temp_target, 0), SENSOR_ATTR(temp2_target, S_IWUSR | S_IRUGO, show_temp_target, store_temp_target, 1), SENSOR_ATTR(temp3_target, 
S_IWUSR | S_IRUGO, show_temp_target, store_temp_target, 2), }; static ssize_t show_temp_tolerance(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct w83791d_data *data = w83791d_update_device(dev); int nr = sensor_attr->index; return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp_tolerance[nr])); } static ssize_t store_temp_tolerance(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int nr = sensor_attr->index; unsigned long val; u8 target_mask; u8 reg_idx = 0; u8 val_shift = 0; u8 keep_mask = 0; if (kstrtoul(buf, 10, &val)) return -EINVAL; switch (nr) { case 0: reg_idx = 0; val_shift = 0; keep_mask = 0xf0; break; case 1: reg_idx = 0; val_shift = 4; keep_mask = 0x0f; break; case 2: reg_idx = 1; val_shift = 0; keep_mask = 0xf0; break; } mutex_lock(&data->update_lock); data->temp_tolerance[nr] = TOL_TEMP_TO_REG(val); target_mask = w83791d_read(client, W83791D_REG_TEMP_TOL[reg_idx]) & keep_mask; w83791d_write(client, W83791D_REG_TEMP_TOL[reg_idx], (data->temp_tolerance[nr] << val_shift) | target_mask); mutex_unlock(&data->update_lock); return count; } static struct sensor_device_attribute sda_temp_tolerance[] = { SENSOR_ATTR(temp1_tolerance, S_IWUSR | S_IRUGO, show_temp_tolerance, store_temp_tolerance, 0), SENSOR_ATTR(temp2_tolerance, S_IWUSR | S_IRUGO, show_temp_tolerance, store_temp_tolerance, 1), SENSOR_ATTR(temp3_tolerance, S_IWUSR | S_IRUGO, show_temp_tolerance, store_temp_tolerance, 2), }; /* read/write the temperature1, includes measured value and limits */ static ssize_t show_temp1(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct w83791d_data *data = 
w83791d_update_device(dev); return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp1[attr->index])); } static ssize_t store_temp1(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int nr = attr->index; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp1[nr] = TEMP1_TO_REG(val); w83791d_write(client, W83791D_REG_TEMP1[nr], data->temp1[nr]); mutex_unlock(&data->update_lock); return count; } /* read/write temperature2-3, includes measured value and limits */ static ssize_t show_temp23(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr); struct w83791d_data *data = w83791d_update_device(dev); int nr = attr->nr; int index = attr->index; return sprintf(buf, "%d\n", TEMP23_FROM_REG(data->temp_add[nr][index])); } static ssize_t store_temp23(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr); struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); long val; int err; int nr = attr->nr; int index = attr->index; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_add[nr][index] = TEMP23_TO_REG(val); w83791d_write(client, W83791D_REG_TEMP_ADD[nr][index * 2], data->temp_add[nr][index] >> 8); w83791d_write(client, W83791D_REG_TEMP_ADD[nr][index * 2 + 1], data->temp_add[nr][index] & 0x80); mutex_unlock(&data->update_lock); return count; } static struct sensor_device_attribute_2 sda_temp_input[] = { SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp1, NULL, 0, 0), SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp23, NULL, 0, 0), 
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp23, NULL, 1, 0), }; static struct sensor_device_attribute_2 sda_temp_max[] = { SENSOR_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp1, store_temp1, 0, 1), SENSOR_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 0, 1), SENSOR_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 1, 1), }; static struct sensor_device_attribute_2 sda_temp_max_hyst[] = { SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp1, store_temp1, 0, 2), SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 0, 2), SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp23, store_temp23, 1, 2), }; /* * Note: The bitmask for the beep enable/disable is different than * the bitmask for the alarm. */ static struct sensor_device_attribute sda_temp_beep[] = { SENSOR_ATTR(temp1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 4), SENSOR_ATTR(temp2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 5), SENSOR_ATTR(temp3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 1), }; static struct sensor_device_attribute sda_temp_alarm[] = { SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4), SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5), SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13), }; /* get reatime status of all sensors items: voltage, temp, fan */ static ssize_t show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%u\n", data->alarms); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL); /* Beep control */ #define GLOBAL_BEEP_ENABLE_SHIFT 15 #define GLOBAL_BEEP_ENABLE_MASK (1 << GLOBAL_BEEP_ENABLE_SHIFT) static ssize_t show_beep_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%d\n", data->beep_enable); } static ssize_t show_beep_mask(struct device *dev, struct device_attribute *attr, 
char *buf) { struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%d\n", BEEP_MASK_FROM_REG(data->beep_mask)); } static ssize_t store_beep_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int i; long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); /* * The beep_enable state overrides any enabling request from * the masks */ data->beep_mask = BEEP_MASK_TO_REG(val) & ~GLOBAL_BEEP_ENABLE_MASK; data->beep_mask |= (data->beep_enable << GLOBAL_BEEP_ENABLE_SHIFT); val = data->beep_mask; for (i = 0; i < 3; i++) { w83791d_write(client, W83791D_REG_BEEP_CTRL[i], (val & 0xff)); val >>= 8; } mutex_unlock(&data->update_lock); return count; } static ssize_t store_beep_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->beep_enable = val ? 
1 : 0; /* Keep the full mask value in sync with the current enable */ data->beep_mask &= ~GLOBAL_BEEP_ENABLE_MASK; data->beep_mask |= (data->beep_enable << GLOBAL_BEEP_ENABLE_SHIFT); /* * The global control is in the second beep control register * so only need to update that register */ val = (data->beep_mask >> 8) & 0xff; w83791d_write(client, W83791D_REG_BEEP_CTRL[1], val); mutex_unlock(&data->update_lock); return count; } static struct sensor_device_attribute sda_beep_ctrl[] = { SENSOR_ATTR(beep_enable, S_IRUGO | S_IWUSR, show_beep_enable, store_beep_enable, 0), SENSOR_ATTR(beep_mask, S_IRUGO | S_IWUSR, show_beep_mask, store_beep_mask, 1) }; /* cpu voltage regulation information */ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct w83791d_data *data = w83791d_update_device(dev); return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); } static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL); static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct w83791d_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", data->vrm); } static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83791d_data *data = dev_get_drvdata(dev); unsigned long val; int err; /* * No lock needed as vrm is internal to the driver * (not read from a chip register) and so is not * updated in w83791d_update_device() */ err = kstrtoul(buf, 10, &val); if (err) return err; data->vrm = val; return count; } static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg); #define IN_UNIT_ATTRS(X) \ &sda_in_input[X].dev_attr.attr, \ &sda_in_min[X].dev_attr.attr, \ &sda_in_max[X].dev_attr.attr, \ &sda_in_beep[X].dev_attr.attr, \ &sda_in_alarm[X].dev_attr.attr #define FAN_UNIT_ATTRS(X) \ &sda_fan_input[X].dev_attr.attr, \ &sda_fan_min[X].dev_attr.attr, \ &sda_fan_div[X].dev_attr.attr, \ &sda_fan_beep[X].dev_attr.attr, \ 
&sda_fan_alarm[X].dev_attr.attr #define TEMP_UNIT_ATTRS(X) \ &sda_temp_input[X].dev_attr.attr, \ &sda_temp_max[X].dev_attr.attr, \ &sda_temp_max_hyst[X].dev_attr.attr, \ &sda_temp_beep[X].dev_attr.attr, \ &sda_temp_alarm[X].dev_attr.attr static struct attribute *w83791d_attributes[] = { IN_UNIT_ATTRS(0), IN_UNIT_ATTRS(1), IN_UNIT_ATTRS(2), IN_UNIT_ATTRS(3), IN_UNIT_ATTRS(4), IN_UNIT_ATTRS(5), IN_UNIT_ATTRS(6), IN_UNIT_ATTRS(7), IN_UNIT_ATTRS(8), IN_UNIT_ATTRS(9), FAN_UNIT_ATTRS(0), FAN_UNIT_ATTRS(1), FAN_UNIT_ATTRS(2), TEMP_UNIT_ATTRS(0), TEMP_UNIT_ATTRS(1), TEMP_UNIT_ATTRS(2), &dev_attr_alarms.attr, &sda_beep_ctrl[0].dev_attr.attr, &sda_beep_ctrl[1].dev_attr.attr, &dev_attr_cpu0_vid.attr, &dev_attr_vrm.attr, &sda_pwm[0].dev_attr.attr, &sda_pwm[1].dev_attr.attr, &sda_pwm[2].dev_attr.attr, &sda_pwmenable[0].dev_attr.attr, &sda_pwmenable[1].dev_attr.attr, &sda_pwmenable[2].dev_attr.attr, &sda_temp_target[0].dev_attr.attr, &sda_temp_target[1].dev_attr.attr, &sda_temp_target[2].dev_attr.attr, &sda_temp_tolerance[0].dev_attr.attr, &sda_temp_tolerance[1].dev_attr.attr, &sda_temp_tolerance[2].dev_attr.attr, NULL }; static const struct attribute_group w83791d_group = { .attrs = w83791d_attributes, }; /* * Separate group of attributes for fan/pwm 4-5. 
Their pins can also be * in use for GPIO in which case their sysfs-interface should not be made * available */ static struct attribute *w83791d_attributes_fanpwm45[] = { FAN_UNIT_ATTRS(3), FAN_UNIT_ATTRS(4), &sda_pwm[3].dev_attr.attr, &sda_pwm[4].dev_attr.attr, NULL }; static const struct attribute_group w83791d_group_fanpwm45 = { .attrs = w83791d_attributes_fanpwm45, }; static int w83791d_detect_subclients(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct w83791d_data *data = i2c_get_clientdata(client); int address = client->addr; int i, id, err; u8 val; id = i2c_adapter_id(adapter); if (force_subclients[0] == id && force_subclients[1] == address) { for (i = 2; i <= 3; i++) { if (force_subclients[i] < 0x48 || force_subclients[i] > 0x4f) { dev_err(&client->dev, "invalid subclient " "address %d; must be 0x48-0x4f\n", force_subclients[i]); err = -ENODEV; goto error_sc_0; } } w83791d_write(client, W83791D_REG_I2C_SUBADDR, (force_subclients[2] & 0x07) | ((force_subclients[3] & 0x07) << 4)); } val = w83791d_read(client, W83791D_REG_I2C_SUBADDR); if (!(val & 0x08)) data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7)); if (!(val & 0x80)) { if ((data->lm75[0] != NULL) && ((val & 0x7) == ((val >> 4) & 0x7))) { dev_err(&client->dev, "duplicate addresses 0x%x, " "use force_subclient\n", data->lm75[0]->addr); err = -ENODEV; goto error_sc_1; } data->lm75[1] = i2c_new_dummy(adapter, 0x48 + ((val >> 4) & 0x7)); } return 0; /* Undo inits in case of errors */ error_sc_1: if (data->lm75[0] != NULL) i2c_unregister_device(data->lm75[0]); error_sc_0: return err; } /* Return 0 if detection is successful, -ENODEV otherwise */ static int w83791d_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int val1, val2; unsigned short address = client->addr; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; if (w83791d_read(client, W83791D_REG_CONFIG) & 0x80) return 
-ENODEV; val1 = w83791d_read(client, W83791D_REG_BANK); val2 = w83791d_read(client, W83791D_REG_CHIPMAN); /* Check for Winbond ID if in bank 0 */ if (!(val1 & 0x07)) { if ((!(val1 & 0x80) && val2 != 0xa3) || ((val1 & 0x80) && val2 != 0x5c)) { return -ENODEV; } } /* * If Winbond chip, address of chip and W83791D_REG_I2C_ADDR * should match */ if (w83791d_read(client, W83791D_REG_I2C_ADDR) != address) return -ENODEV; /* We want bank 0 and Vendor ID high byte */ val1 = w83791d_read(client, W83791D_REG_BANK) & 0x78; w83791d_write(client, W83791D_REG_BANK, val1 | 0x80); /* Verify it is a Winbond w83791d */ val1 = w83791d_read(client, W83791D_REG_WCHIPID); val2 = w83791d_read(client, W83791D_REG_CHIPMAN); if (val1 != 0x71 || val2 != 0x5c) return -ENODEV; strlcpy(info->type, "w83791d", I2C_NAME_SIZE); return 0; } static int w83791d_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct w83791d_data *data; struct device *dev = &client->dev; int i, err; u8 has_fanpwm45; #ifdef DEBUG int val1; val1 = w83791d_read(client, W83791D_REG_DID_VID4); dev_dbg(dev, "Device ID version: %d.%d (0x%02x)\n", (val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1); #endif data = kzalloc(sizeof(struct w83791d_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto error0; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); err = w83791d_detect_subclients(client); if (err) goto error1; /* Initialize the chip */ w83791d_init_client(client); /* * If the fan_div is changed, make sure there is a rational * fan_min in place */ for (i = 0; i < NUMBER_OF_FANIN; i++) data->fan_min[i] = w83791d_read(client, W83791D_REG_FAN_MIN[i]); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &w83791d_group); if (err) goto error3; /* Check if pins of fan/pwm 4-5 are in use as GPIO */ has_fanpwm45 = w83791d_read(client, W83791D_REG_GPIO) & 0x10; if (has_fanpwm45) { err = sysfs_create_group(&client->dev.kobj, &w83791d_group_fanpwm45); if (err) goto error4; } /* Everything 
is ready, now register the working device */ data->hwmon_dev = hwmon_device_register(dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto error5; } return 0; error5: if (has_fanpwm45) sysfs_remove_group(&client->dev.kobj, &w83791d_group_fanpwm45); error4: sysfs_remove_group(&client->dev.kobj, &w83791d_group); error3: if (data->lm75[0] != NULL) i2c_unregister_device(data->lm75[0]); if (data->lm75[1] != NULL) i2c_unregister_device(data->lm75[1]); error1: kfree(data); error0: return err; } static int w83791d_remove(struct i2c_client *client) { struct w83791d_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &w83791d_group); if (data->lm75[0] != NULL) i2c_unregister_device(data->lm75[0]); if (data->lm75[1] != NULL) i2c_unregister_device(data->lm75[1]); kfree(data); return 0; } static void w83791d_init_client(struct i2c_client *client) { struct w83791d_data *data = i2c_get_clientdata(client); u8 tmp; u8 old_beep; /* * The difference between reset and init is that reset * does a hard reset of the chip via index 0x40, bit 7, * but init simply forces certain registers to have "sane" * values. The hope is that the BIOS has done the right * thing (which is why the default is reset=0, init=0), * but if not, reset is the hard hammer and init * is the soft mallet both of which are trying to whack * things into place... * NOTE: The data sheet makes a distinction between * "power on defaults" and "reset by MR". As far as I can tell, * the hard reset puts everything into a power-on state so I'm * not sure what "reset by MR" means or how it can happen. */ if (reset || init) { /* keep some BIOS settings when we... */ old_beep = w83791d_read(client, W83791D_REG_BEEP_CONFIG); if (reset) { /* ... reset the chip and ... */ w83791d_write(client, W83791D_REG_CONFIG, 0x80); } /* ... 
disable power-on abnormal beep */ w83791d_write(client, W83791D_REG_BEEP_CONFIG, old_beep | 0x80); /* disable the global beep (not done by hard reset) */ tmp = w83791d_read(client, W83791D_REG_BEEP_CTRL[1]); w83791d_write(client, W83791D_REG_BEEP_CTRL[1], tmp & 0xef); if (init) { /* Make sure monitoring is turned on for add-ons */ tmp = w83791d_read(client, W83791D_REG_TEMP2_CONFIG); if (tmp & 1) { w83791d_write(client, W83791D_REG_TEMP2_CONFIG, tmp & 0xfe); } tmp = w83791d_read(client, W83791D_REG_TEMP3_CONFIG); if (tmp & 1) { w83791d_write(client, W83791D_REG_TEMP3_CONFIG, tmp & 0xfe); } /* Start monitoring */ tmp = w83791d_read(client, W83791D_REG_CONFIG) & 0xf7; w83791d_write(client, W83791D_REG_CONFIG, tmp | 0x01); } } data->vrm = vid_which_vrm(); } static struct w83791d_data *w83791d_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct w83791d_data *data = i2c_get_clientdata(client); int i, j; u8 reg_array_tmp[3]; u8 vbat_reg; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + (HZ * 3)) || !data->valid) { dev_dbg(dev, "Starting w83791d device update\n"); /* Update the voltages measured value and limits */ for (i = 0; i < NUMBER_OF_VIN; i++) { data->in[i] = w83791d_read(client, W83791D_REG_IN[i]); data->in_max[i] = w83791d_read(client, W83791D_REG_IN_MAX[i]); data->in_min[i] = w83791d_read(client, W83791D_REG_IN_MIN[i]); } /* Update the fan counts and limits */ for (i = 0; i < NUMBER_OF_FANIN; i++) { /* Update the Fan measured value and limits */ data->fan[i] = w83791d_read(client, W83791D_REG_FAN[i]); data->fan_min[i] = w83791d_read(client, W83791D_REG_FAN_MIN[i]); } /* Update the fan divisor */ for (i = 0; i < 3; i++) { reg_array_tmp[i] = w83791d_read(client, W83791D_REG_FAN_DIV[i]); } data->fan_div[0] = (reg_array_tmp[0] >> 4) & 0x03; data->fan_div[1] = (reg_array_tmp[0] >> 6) & 0x03; data->fan_div[2] = (reg_array_tmp[1] >> 6) & 0x03; data->fan_div[3] = reg_array_tmp[2] & 0x07; data->fan_div[4] 
= (reg_array_tmp[2] >> 4) & 0x07; /* * The fan divisor for fans 0-2 get bit 2 from * bits 5-7 respectively of vbat register */ vbat_reg = w83791d_read(client, W83791D_REG_VBAT); for (i = 0; i < 3; i++) data->fan_div[i] |= (vbat_reg >> (3 + i)) & 0x04; /* Update PWM duty cycle */ for (i = 0; i < NUMBER_OF_PWM; i++) { data->pwm[i] = w83791d_read(client, W83791D_REG_PWM[i]); } /* Update PWM enable status */ for (i = 0; i < 2; i++) { reg_array_tmp[i] = w83791d_read(client, W83791D_REG_FAN_CFG[i]); } data->pwm_enable[0] = (reg_array_tmp[0] >> 2) & 0x03; data->pwm_enable[1] = (reg_array_tmp[0] >> 4) & 0x03; data->pwm_enable[2] = (reg_array_tmp[1] >> 2) & 0x03; /* Update PWM target temperature */ for (i = 0; i < 3; i++) { data->temp_target[i] = w83791d_read(client, W83791D_REG_TEMP_TARGET[i]) & 0x7f; } /* Update PWM temperature tolerance */ for (i = 0; i < 2; i++) { reg_array_tmp[i] = w83791d_read(client, W83791D_REG_TEMP_TOL[i]); } data->temp_tolerance[0] = reg_array_tmp[0] & 0x0f; data->temp_tolerance[1] = (reg_array_tmp[0] >> 4) & 0x0f; data->temp_tolerance[2] = reg_array_tmp[1] & 0x0f; /* Update the first temperature sensor */ for (i = 0; i < 3; i++) { data->temp1[i] = w83791d_read(client, W83791D_REG_TEMP1[i]); } /* Update the rest of the temperature sensors */ for (i = 0; i < 2; i++) { for (j = 0; j < 3; j++) { data->temp_add[i][j] = (w83791d_read(client, W83791D_REG_TEMP_ADD[i][j * 2]) << 8) | w83791d_read(client, W83791D_REG_TEMP_ADD[i][j * 2 + 1]); } } /* Update the realtime status */ data->alarms = w83791d_read(client, W83791D_REG_ALARM1) + (w83791d_read(client, W83791D_REG_ALARM2) << 8) + (w83791d_read(client, W83791D_REG_ALARM3) << 16); /* Update the beep configuration information */ data->beep_mask = w83791d_read(client, W83791D_REG_BEEP_CTRL[0]) + (w83791d_read(client, W83791D_REG_BEEP_CTRL[1]) << 8) + (w83791d_read(client, W83791D_REG_BEEP_CTRL[2]) << 16); /* Extract global beep enable flag */ data->beep_enable = (data->beep_mask >> 
GLOBAL_BEEP_ENABLE_SHIFT) & 0x01; /* Update the cpu voltage information */ i = w83791d_read(client, W83791D_REG_VID_FANDIV); data->vid = i & 0x0f; data->vid |= (w83791d_read(client, W83791D_REG_DID_VID4) & 0x01) << 4; data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); #ifdef DEBUG w83791d_print_debug(data, dev); #endif return data; } #ifdef DEBUG static void w83791d_print_debug(struct w83791d_data *data, struct device *dev) { int i = 0, j = 0; dev_dbg(dev, "======Start of w83791d debug values======\n"); dev_dbg(dev, "%d set of Voltages: ===>\n", NUMBER_OF_VIN); for (i = 0; i < NUMBER_OF_VIN; i++) { dev_dbg(dev, "vin[%d] is: 0x%02x\n", i, data->in[i]); dev_dbg(dev, "vin[%d] min is: 0x%02x\n", i, data->in_min[i]); dev_dbg(dev, "vin[%d] max is: 0x%02x\n", i, data->in_max[i]); } dev_dbg(dev, "%d set of Fan Counts/Divisors: ===>\n", NUMBER_OF_FANIN); for (i = 0; i < NUMBER_OF_FANIN; i++) { dev_dbg(dev, "fan[%d] is: 0x%02x\n", i, data->fan[i]); dev_dbg(dev, "fan[%d] min is: 0x%02x\n", i, data->fan_min[i]); dev_dbg(dev, "fan_div[%d] is: 0x%02x\n", i, data->fan_div[i]); } /* * temperature math is signed, but only print out the * bits that matter */ dev_dbg(dev, "%d set of Temperatures: ===>\n", NUMBER_OF_TEMPIN); for (i = 0; i < 3; i++) dev_dbg(dev, "temp1[%d] is: 0x%02x\n", i, (u8) data->temp1[i]); for (i = 0; i < 2; i++) { for (j = 0; j < 3; j++) { dev_dbg(dev, "temp_add[%d][%d] is: 0x%04x\n", i, j, (u16) data->temp_add[i][j]); } } dev_dbg(dev, "Misc Information: ===>\n"); dev_dbg(dev, "alarm is: 0x%08x\n", data->alarms); dev_dbg(dev, "beep_mask is: 0x%08x\n", data->beep_mask); dev_dbg(dev, "beep_enable is: %d\n", data->beep_enable); dev_dbg(dev, "vid is: 0x%02x\n", data->vid); dev_dbg(dev, "vrm is: 0x%02x\n", data->vrm); dev_dbg(dev, "=======End of w83791d debug values========\n"); dev_dbg(dev, "\n"); } #endif module_i2c_driver(w83791d_driver); MODULE_AUTHOR("Charles Spirakis <bezaur@gmail.com>"); MODULE_DESCRIPTION("W83791D driver"); 
/* Module license tag (w83791d hwmon driver); "GPL" keeps the module taint-free */
MODULE_LICENSE("GPL");
gpl-2.0
zarboz/DLXPUL_RELOADED
drivers/watchdog/of_xilinx_wdt.c
4857
9904
/*
 * of_xilinx_wdt.c 1.01 A Watchdog Device Driver for Xilinx xps_timebase_wdt
 *
 * (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
 *
 * -----------------------
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * -----------------------
 * 30-May-2011 Alejandro Cabrera <aldaya@gmail.com>
 * - If "xlnx,wdt-enable-once" wasn't found on device tree the
 *   module will use CONFIG_WATCHDOG_NOWAYOUT
 * - If the device tree parameters ("clock-frequency" and
 *   "xlnx,wdt-interval") wasn't found the driver won't
 *   know the wdt reset interval
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>

/* Register offsets for the Wdt device */
#define XWT_TWCSR0_OFFSET   0x0 /* Control/Status Register0 */
#define XWT_TWCSR1_OFFSET   0x4 /* Control/Status Register1 */
#define XWT_TBR_OFFSET      0x8 /* Timebase Register Offset */

/* Control/Status Register Masks */
#define XWT_CSR0_WRS_MASK   0x00000008 /* Reset status */
#define XWT_CSR0_WDS_MASK   0x00000004 /* Timer state  */
#define XWT_CSR0_EWDT1_MASK 0x00000002 /* Enable bit 1 */

/* Control/Status Register 0/1 bits */
#define XWT_CSRX_EWDT2_MASK 0x00000001 /* Enable bit 2 */

/* SelfTest constants */
#define XWT_MAX_SELFTEST_LOOP_COUNT 0x00010000
#define XWT_TIMER_FAILED            0xFFFFFFFF

#define WATCHDOG_NAME     "Xilinx Watchdog"
#define PFX WATCHDOG_NAME ": "

/* Per-device state; this driver supports a single watchdog instance */
struct xwdt_device {
	struct resource  res;          /* MMIO region from the device tree */
	void __iomem     *base;        /* ioremap'd register base */
	u32              nowayout;     /* non-zero: watchdog can't be stopped */
	u32              wdt_interval; /* log2 of the timebase rollover count */
	u32              boot_status;  /* status captured at probe time */
};

static struct xwdt_device xdev;

/* Computed reset interval in seconds; only valid when !no_timeout */
static u32 timeout;
static u32 control_status_reg; static u8 expect_close; static u8 no_timeout; static unsigned long driver_open; static DEFINE_SPINLOCK(spinlock); static void xwdt_start(void) { spin_lock(&spinlock); /* Clean previous status and enable the watchdog timer */ control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET); control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK); iowrite32((control_status_reg | XWT_CSR0_EWDT1_MASK), xdev.base + XWT_TWCSR0_OFFSET); iowrite32(XWT_CSRX_EWDT2_MASK, xdev.base + XWT_TWCSR1_OFFSET); spin_unlock(&spinlock); } static void xwdt_stop(void) { spin_lock(&spinlock); control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET); iowrite32((control_status_reg & ~XWT_CSR0_EWDT1_MASK), xdev.base + XWT_TWCSR0_OFFSET); iowrite32(0, xdev.base + XWT_TWCSR1_OFFSET); spin_unlock(&spinlock); pr_info("Stopped!\n"); } static void xwdt_keepalive(void) { spin_lock(&spinlock); control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET); control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK); iowrite32(control_status_reg, xdev.base + XWT_TWCSR0_OFFSET); spin_unlock(&spinlock); } static void xwdt_get_status(int *status) { int new_status; spin_lock(&spinlock); control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET); new_status = ((control_status_reg & (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK)) != 0); spin_unlock(&spinlock); *status = 0; if (new_status & 1) *status |= WDIOF_CARDRESET; } static u32 xwdt_selftest(void) { int i; u32 timer_value1; u32 timer_value2; spin_lock(&spinlock); timer_value1 = ioread32(xdev.base + XWT_TBR_OFFSET); timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET); for (i = 0; ((i <= XWT_MAX_SELFTEST_LOOP_COUNT) && (timer_value2 == timer_value1)); i++) { timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET); } spin_unlock(&spinlock); if (timer_value2 != timer_value1) return ~XWT_TIMER_FAILED; else return XWT_TIMER_FAILED; } static int xwdt_open(struct inode *inode, struct file *file) { /* Only one process can handle the 
wdt at a time */ if (test_and_set_bit(0, &driver_open)) return -EBUSY; /* Make sure that the module are always loaded...*/ if (xdev.nowayout) __module_get(THIS_MODULE); xwdt_start(); pr_info("Started...\n"); return nonseekable_open(inode, file); } static int xwdt_release(struct inode *inode, struct file *file) { if (expect_close == 42) { xwdt_stop(); } else { pr_crit("Unexpected close, not stopping watchdog!\n"); xwdt_keepalive(); } clear_bit(0, &driver_open); expect_close = 0; return 0; } /* * xwdt_write: * @file: file handle to the watchdog * @buf: buffer to write (unused as data does not matter here * @count: count of bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we don't define content meaning. */ static ssize_t xwdt_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { if (len) { if (!xdev.nowayout) { size_t i; /* In case it was set long ago */ expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } xwdt_keepalive(); } return len; } static const struct watchdog_info ident = { .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .firmware_version = 1, .identity = WATCHDOG_NAME, }; /* * xwdt_ioctl: * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. */ static long xwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int status; union { struct watchdog_info __user *ident; int __user *i; } uarg; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETBOOTSTATUS: return put_user(xdev.boot_status, uarg.i); case WDIOC_GETSTATUS: xwdt_get_status(&status); return put_user(status, uarg.i); case WDIOC_KEEPALIVE: xwdt_keepalive(); return 0; case WDIOC_GETTIMEOUT: if (no_timeout) return -ENOTTY; else return put_user(timeout, uarg.i); default: return -ENOTTY; } } static const struct file_operations xwdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = xwdt_write, .open = xwdt_open, .release = xwdt_release, .unlocked_ioctl = xwdt_ioctl, }; static struct miscdevice xwdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &xwdt_fops, }; static int __devinit xwdt_probe(struct platform_device *pdev) { int rc; u32 *tmptr; u32 *pfreq; no_timeout = 0; pfreq = (u32 *)of_get_property(pdev->dev.of_node->parent, "clock-frequency", NULL); if (pfreq == NULL) { pr_warn("The watchdog clock frequency cannot be obtained!\n"); no_timeout = 1; } rc = of_address_to_resource(pdev->dev.of_node, 0, &xdev.res); if (rc) { pr_warn("invalid address!\n"); return rc; } tmptr = (u32 *)of_get_property(pdev->dev.of_node, "xlnx,wdt-interval", NULL); if (tmptr == NULL) { pr_warn("Parameter \"xlnx,wdt-interval\" not found in device tree!\n"); no_timeout = 1; } else { xdev.wdt_interval = *tmptr; } tmptr = (u32 *)of_get_property(pdev->dev.of_node, "xlnx,wdt-enable-once", NULL); if (tmptr == NULL) { pr_warn("Parameter \"xlnx,wdt-enable-once\" not found in device tree!\n"); xdev.nowayout = WATCHDOG_NOWAYOUT; } /* * Twice of the 2^wdt_interval / freq because the first wdt overflow is * ignored (interrupt), reset is only generated at second wdt overflow */ if (!no_timeout) timeout = 2 * ((1<<xdev.wdt_interval) / *pfreq); if (!request_mem_region(xdev.res.start, xdev.res.end - xdev.res.start + 1, WATCHDOG_NAME)) { rc = -ENXIO; pr_err("memory request failure!\n"); goto err_out; } xdev.base = ioremap(xdev.res.start, xdev.res.end - xdev.res.start + 1); if (xdev.base == NULL) { rc = -ENOMEM; pr_err("ioremap 
failure!\n"); goto release_mem; } rc = xwdt_selftest(); if (rc == XWT_TIMER_FAILED) { pr_err("SelfTest routine error!\n"); goto unmap_io; } xwdt_get_status(&xdev.boot_status); rc = misc_register(&xwdt_miscdev); if (rc) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", xwdt_miscdev.minor, rc); goto unmap_io; } if (no_timeout) pr_info("driver loaded (timeout=? sec, nowayout=%d)\n", xdev.nowayout); else pr_info("driver loaded (timeout=%d sec, nowayout=%d)\n", timeout, xdev.nowayout); expect_close = 0; clear_bit(0, &driver_open); return 0; unmap_io: iounmap(xdev.base); release_mem: release_mem_region(xdev.res.start, resource_size(&xdev.res)); err_out: return rc; } static int __devexit xwdt_remove(struct platform_device *dev) { misc_deregister(&xwdt_miscdev); iounmap(xdev.base); release_mem_region(xdev.res.start, resource_size(&xdev.res)); return 0; } /* Match table for of_platform binding */ static struct of_device_id __devinitdata xwdt_of_match[] = { { .compatible = "xlnx,xps-timebase-wdt-1.01.a", }, {}, }; MODULE_DEVICE_TABLE(of, xwdt_of_match); static struct platform_driver xwdt_driver = { .probe = xwdt_probe, .remove = __devexit_p(xwdt_remove), .driver = { .owner = THIS_MODULE, .name = WATCHDOG_NAME, .of_match_table = xwdt_of_match, }, }; module_platform_driver(xwdt_driver); MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>"); MODULE_DESCRIPTION("Xilinx Watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
GalaxyTab4/android_kernel_motorola_msm8226
drivers/hwmon/lm92.c
4857
13365
/* * lm92 - Hardware monitoring driver * Copyright (C) 2005-2008 Jean Delvare <khali@linux-fr.org> * * Based on the lm90 driver, with some ideas taken from the lm_sensors * lm92 driver as well. * * The LM92 is a sensor chip made by National Semiconductor. It reports * its own temperature with a 0.0625 deg resolution and a 0.33 deg * accuracy. Complete datasheet can be obtained from National's website * at: * http://www.national.com/pf/LM/LM92.html * * This driver also supports the MAX6635 sensor chip made by Maxim. * This chip is compatible with the LM92, but has a lesser accuracy * (1.0 deg). Complete datasheet can be obtained from Maxim's website * at: * http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3074 * * Since the LM92 was the first chipset supported by this driver, most * comments will refer to this chipset, but are actually general and * concern all supported chipsets, unless mentioned otherwise. * * Support could easily be added for the National Semiconductor LM76 * and Maxim MAX6633 and MAX6634 chips, which are mostly compatible * with the LM92. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> /* * The LM92 and MAX6635 have 2 two-state pins for address selection, * resulting in 4 possible addresses. */ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; /* The LM92 registers */ #define LM92_REG_CONFIG 0x01 /* 8-bit, RW */ #define LM92_REG_TEMP 0x00 /* 16-bit, RO */ #define LM92_REG_TEMP_HYST 0x02 /* 16-bit, RW */ #define LM92_REG_TEMP_CRIT 0x03 /* 16-bit, RW */ #define LM92_REG_TEMP_LOW 0x04 /* 16-bit, RW */ #define LM92_REG_TEMP_HIGH 0x05 /* 16-bit, RW */ #define LM92_REG_MAN_ID 0x07 /* 16-bit, RO, LM92 only */ /* * The LM92 uses signed 13-bit values with LSB = 0.0625 degree Celsius, * left-justified in 16-bit registers. No rounding is done, with such * a resolution it's just not worth it. Note that the MAX6635 doesn't * make use of the 4 lower bits for limits (i.e. effective resolution * for limits is 1 degree Celsius). 
*/ static inline int TEMP_FROM_REG(s16 reg) { return reg / 8 * 625 / 10; } static inline s16 TEMP_TO_REG(int val) { if (val <= -60000) return -60000 * 10 / 625 * 8; if (val >= 160000) return 160000 * 10 / 625 * 8; return val * 10 / 625 * 8; } /* Alarm flags are stored in the 3 LSB of the temperature register */ static inline u8 ALARMS_FROM_REG(s16 reg) { return reg & 0x0007; } /* Driver data (common to all clients) */ static struct i2c_driver lm92_driver; /* Client data (each client gets its own) */ struct lm92_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ /* registers values */ s16 temp1_input, temp1_crit, temp1_min, temp1_max, temp1_hyst; }; /* * Sysfs attributes and callback functions */ static struct lm92_data *lm92_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct lm92_data *data = i2c_get_clientdata(client); mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { dev_dbg(&client->dev, "Updating lm92 data\n"); data->temp1_input = i2c_smbus_read_word_swapped(client, LM92_REG_TEMP); data->temp1_hyst = i2c_smbus_read_word_swapped(client, LM92_REG_TEMP_HYST); data->temp1_crit = i2c_smbus_read_word_swapped(client, LM92_REG_TEMP_CRIT); data->temp1_min = i2c_smbus_read_word_swapped(client, LM92_REG_TEMP_LOW); data->temp1_max = i2c_smbus_read_word_swapped(client, LM92_REG_TEMP_HIGH); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } #define show_temp(value) \ static ssize_t show_##value(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct lm92_data *data = lm92_update_device(dev); \ return sprintf(buf, "%d\n", TEMP_FROM_REG(data->value)); \ } show_temp(temp1_input); show_temp(temp1_crit); show_temp(temp1_min); show_temp(temp1_max); #define set_temp(value, reg) \ static ssize_t set_##value(struct device 
*dev, struct device_attribute *attr, \ const char *buf, \ size_t count) \ { \ struct i2c_client *client = to_i2c_client(dev); \ struct lm92_data *data = i2c_get_clientdata(client); \ long val; \ int err = kstrtol(buf, 10, &val); \ if (err) \ return err; \ \ mutex_lock(&data->update_lock); \ data->value = TEMP_TO_REG(val); \ i2c_smbus_write_word_swapped(client, reg, data->value); \ mutex_unlock(&data->update_lock); \ return count; \ } set_temp(temp1_crit, LM92_REG_TEMP_CRIT); set_temp(temp1_min, LM92_REG_TEMP_LOW); set_temp(temp1_max, LM92_REG_TEMP_HIGH); static ssize_t show_temp1_crit_hyst(struct device *dev, struct device_attribute *attr, char *buf) { struct lm92_data *data = lm92_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_crit) - TEMP_FROM_REG(data->temp1_hyst)); } static ssize_t show_temp1_max_hyst(struct device *dev, struct device_attribute *attr, char *buf) { struct lm92_data *data = lm92_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_max) - TEMP_FROM_REG(data->temp1_hyst)); } static ssize_t show_temp1_min_hyst(struct device *dev, struct device_attribute *attr, char *buf) { struct lm92_data *data = lm92_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp1_min) + TEMP_FROM_REG(data->temp1_hyst)); } static ssize_t set_temp1_crit_hyst(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct lm92_data *data = i2c_get_clientdata(client); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp1_hyst = TEMP_FROM_REG(data->temp1_crit) - val; i2c_smbus_write_word_swapped(client, LM92_REG_TEMP_HYST, TEMP_TO_REG(data->temp1_hyst)); mutex_unlock(&data->update_lock); return count; } static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf) { struct lm92_data *data = lm92_update_device(dev); return sprintf(buf, "%d\n", 
ALARMS_FROM_REG(data->temp1_input)); } static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct lm92_data *data = lm92_update_device(dev); return sprintf(buf, "%d\n", (data->temp1_input >> bitnr) & 1); } static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp1_input, NULL); static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp1_crit, set_temp1_crit); static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp1_crit_hyst, set_temp1_crit_hyst); static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp1_min, set_temp1_min); static DEVICE_ATTR(temp1_min_hyst, S_IRUGO, show_temp1_min_hyst, NULL); static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp1_max, set_temp1_max); static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp1_max_hyst, NULL); static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1); /* * Detection and registration */ static void lm92_init_client(struct i2c_client *client) { u8 config; /* Start the conversions if needed */ config = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG); if (config & 0x01) i2c_smbus_write_byte_data(client, LM92_REG_CONFIG, config & 0xFE); } /* * The MAX6635 has no identification register, so we have to use tricks * to identify it reliably. This is somewhat slow. * Note that we do NOT rely on the 2 MSB of the configuration register * always reading 0, as suggested by the datasheet, because it was once * reported not to be true. */ static int max6635_check(struct i2c_client *client) { u16 temp_low, temp_high, temp_hyst, temp_crit; u8 conf; int i; /* * No manufacturer ID register, so a read from this address will * always return the last read value. 
*/ temp_low = i2c_smbus_read_word_data(client, LM92_REG_TEMP_LOW); if (i2c_smbus_read_word_data(client, LM92_REG_MAN_ID) != temp_low) return 0; temp_high = i2c_smbus_read_word_data(client, LM92_REG_TEMP_HIGH); if (i2c_smbus_read_word_data(client, LM92_REG_MAN_ID) != temp_high) return 0; /* Limits are stored as integer values (signed, 9-bit). */ if ((temp_low & 0x7f00) || (temp_high & 0x7f00)) return 0; temp_hyst = i2c_smbus_read_word_data(client, LM92_REG_TEMP_HYST); temp_crit = i2c_smbus_read_word_data(client, LM92_REG_TEMP_CRIT); if ((temp_hyst & 0x7f00) || (temp_crit & 0x7f00)) return 0; /* * Registers addresses were found to cycle over 16-byte boundaries. * We don't test all registers with all offsets so as to save some * reads and time, but this should still be sufficient to dismiss * non-MAX6635 chips. */ conf = i2c_smbus_read_byte_data(client, LM92_REG_CONFIG); for (i = 16; i < 96; i *= 2) { if (temp_hyst != i2c_smbus_read_word_data(client, LM92_REG_TEMP_HYST + i - 16) || temp_crit != i2c_smbus_read_word_data(client, LM92_REG_TEMP_CRIT + i) || temp_low != i2c_smbus_read_word_data(client, LM92_REG_TEMP_LOW + i + 16) || temp_high != i2c_smbus_read_word_data(client, LM92_REG_TEMP_HIGH + i + 32) || conf != i2c_smbus_read_byte_data(client, LM92_REG_CONFIG + i)) return 0; } return 1; } static struct attribute *lm92_attributes[] = { &dev_attr_temp1_input.attr, &dev_attr_temp1_crit.attr, &dev_attr_temp1_crit_hyst.attr, &dev_attr_temp1_min.attr, &dev_attr_temp1_min_hyst.attr, &dev_attr_temp1_max.attr, &dev_attr_temp1_max_hyst.attr, &dev_attr_alarms.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, NULL }; static const struct attribute_group lm92_group = { .attrs = lm92_attributes, }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int lm92_detect(struct i2c_client *new_client, struct i2c_board_info *info) { struct i2c_adapter *adapter = 
new_client->adapter; u8 config; u16 man_id; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; config = i2c_smbus_read_byte_data(new_client, LM92_REG_CONFIG); man_id = i2c_smbus_read_word_data(new_client, LM92_REG_MAN_ID); if ((config & 0xe0) == 0x00 && man_id == 0x0180) pr_info("lm92: Found National Semiconductor LM92 chip\n"); else if (max6635_check(new_client)) pr_info("lm92: Found Maxim MAX6635 chip\n"); else return -ENODEV; strlcpy(info->type, "lm92", I2C_NAME_SIZE); return 0; } static int lm92_probe(struct i2c_client *new_client, const struct i2c_device_id *id) { struct lm92_data *data; int err; data = kzalloc(sizeof(struct lm92_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(new_client, data); data->valid = 0; mutex_init(&data->update_lock); /* Initialize the chipset */ lm92_init_client(new_client); /* Register sysfs hooks */ err = sysfs_create_group(&new_client->dev.kobj, &lm92_group); if (err) goto exit_free; data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove; } return 0; exit_remove: sysfs_remove_group(&new_client->dev.kobj, &lm92_group); exit_free: kfree(data); exit: return err; } static int lm92_remove(struct i2c_client *client) { struct lm92_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm92_group); kfree(data); return 0; } /* * Module and driver stuff */ static const struct i2c_device_id lm92_id[] = { { "lm92", 0 }, /* max6635 could be added here */ { } }; MODULE_DEVICE_TABLE(i2c, lm92_id); static struct i2c_driver lm92_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "lm92", }, .probe = lm92_probe, .remove = lm92_remove, .id_table = lm92_id, .detect = lm92_detect, .address_list = normal_i2c, }; module_i2c_driver(lm92_driver); MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); 
MODULE_DESCRIPTION("LM92/MAX6635 driver"); MODULE_LICENSE("GPL");
gpl-2.0
garwynn/android_kernel_samsung_klte
drivers/hwmon/gl518sm.c
4857
23219
/* * gl518sm.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and * Kyosti Malkki <kmalkki@cc.hut.fi> * Copyright (C) 2004 Hong-Gunn Chew <hglinux@gunnet.org> and * Jean Delvare <khali@linux-fr.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Ported to Linux 2.6 by Hong-Gunn Chew with the help of Jean Delvare * and advice of Greg Kroah-Hartman. * * Notes about the port: * Release 0x00 of the GL518SM chipset doesn't support reading of in0, * in1 nor in2. The original driver had an ugly workaround to get them * anyway (changing limits and watching alarms trigger and wear off). * We did not keep that part of the original driver in the Linux 2.6 * version, since it was making the driver significantly more complex * with no real benefit. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; enum chips { gl518sm_r00, gl518sm_r80 }; /* Many GL518 constants specified below */ /* The GL518 registers */ #define GL518_REG_CHIP_ID 0x00 #define GL518_REG_REVISION 0x01 #define GL518_REG_VENDOR_ID 0x02 #define GL518_REG_CONF 0x03 #define GL518_REG_TEMP_IN 0x04 #define GL518_REG_TEMP_MAX 0x05 #define GL518_REG_TEMP_HYST 0x06 #define GL518_REG_FAN_COUNT 0x07 #define GL518_REG_FAN_LIMIT 0x08 #define GL518_REG_VIN1_LIMIT 0x09 #define GL518_REG_VIN2_LIMIT 0x0a #define GL518_REG_VIN3_LIMIT 0x0b #define GL518_REG_VDD_LIMIT 0x0c #define GL518_REG_VIN3 0x0d #define GL518_REG_MISC 0x0f #define GL518_REG_ALARM 0x10 #define GL518_REG_MASK 0x11 #define GL518_REG_INT 0x12 #define GL518_REG_VIN2 0x13 #define GL518_REG_VIN1 0x14 #define GL518_REG_VDD 0x15 /* * Conversions. Rounding and limit checking is only done on the TO_REG * variants. Note that you should be a bit careful with which arguments * these macros are called: arguments may be evaluated more than once. * Fixing this is just not worth it. */ #define RAW_FROM_REG(val) val #define BOOL_FROM_REG(val) ((val) ? 0 : 1) #define BOOL_TO_REG(val) ((val) ? 0 : 1) #define TEMP_TO_REG(val) SENSORS_LIMIT(((((val) < 0 ? \ (val) - 500 : \ (val) + 500) / 1000) + 119), 0, 255) #define TEMP_FROM_REG(val) (((val) - 119) * 1000) static inline u8 FAN_TO_REG(long rpm, int div) { long rpmdiv; if (rpm == 0) return 0; rpmdiv = SENSORS_LIMIT(rpm, 1, 960000) * div; return SENSORS_LIMIT((480000 + rpmdiv / 2) / rpmdiv, 1, 255); } #define FAN_FROM_REG(val, div) ((val) == 0 ? 
0 : (480000 / ((val) * (div)))) #define IN_TO_REG(val) SENSORS_LIMIT((((val) + 9) / 19), 0, 255) #define IN_FROM_REG(val) ((val) * 19) #define VDD_TO_REG(val) SENSORS_LIMIT((((val) * 4 + 47) / 95), 0, 255) #define VDD_FROM_REG(val) (((val) * 95 + 2) / 4) #define DIV_FROM_REG(val) (1 << (val)) #define BEEP_MASK_TO_REG(val) ((val) & 0x7f & data->alarm_mask) #define BEEP_MASK_FROM_REG(val) ((val) & 0x7f) /* Each client has this additional data */ struct gl518_data { struct device *hwmon_dev; enum chips type; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 voltage_in[4]; /* Register values; [0] = VDD */ u8 voltage_min[4]; /* Register values; [0] = VDD */ u8 voltage_max[4]; /* Register values; [0] = VDD */ u8 fan_in[2]; u8 fan_min[2]; u8 fan_div[2]; /* Register encoding, shifted right */ u8 fan_auto1; /* Boolean */ u8 temp_in; /* Register values */ u8 temp_max; /* Register values */ u8 temp_hyst; /* Register values */ u8 alarms; /* Register value */ u8 alarm_mask; u8 beep_mask; /* Register value */ u8 beep_enable; /* Boolean */ }; static int gl518_probe(struct i2c_client *client, const struct i2c_device_id *id); static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info); static void gl518_init_client(struct i2c_client *client); static int gl518_remove(struct i2c_client *client); static int gl518_read_value(struct i2c_client *client, u8 reg); static int gl518_write_value(struct i2c_client *client, u8 reg, u16 value); static struct gl518_data *gl518_update_device(struct device *dev); static const struct i2c_device_id gl518_id[] = { { "gl518sm", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, gl518_id); /* This is the driver that will be inserted */ static struct i2c_driver gl518_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "gl518sm", }, .probe = gl518_probe, .remove = gl518_remove, .id_table = gl518_id, .detect = gl518_detect, .address_list = normal_i2c, }; /* * Sysfs stuff 
*/ #define show(type, suffix, value) \ static ssize_t show_##suffix(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct gl518_data *data = gl518_update_device(dev); \ return sprintf(buf, "%d\n", type##_FROM_REG(data->value)); \ } show(TEMP, temp_input1, temp_in); show(TEMP, temp_max1, temp_max); show(TEMP, temp_hyst1, temp_hyst); show(BOOL, fan_auto1, fan_auto1); show(VDD, in_input0, voltage_in[0]); show(IN, in_input1, voltage_in[1]); show(IN, in_input2, voltage_in[2]); show(IN, in_input3, voltage_in[3]); show(VDD, in_min0, voltage_min[0]); show(IN, in_min1, voltage_min[1]); show(IN, in_min2, voltage_min[2]); show(IN, in_min3, voltage_min[3]); show(VDD, in_max0, voltage_max[0]); show(IN, in_max1, voltage_max[1]); show(IN, in_max2, voltage_max[2]); show(IN, in_max3, voltage_max[3]); show(RAW, alarms, alarms); show(BOOL, beep_enable, beep_enable); show(BEEP_MASK, beep_mask, beep_mask); static ssize_t show_fan_input(struct device *dev, struct device_attribute *attr, char *buf) { int nr = to_sensor_dev_attr(attr)->index; struct gl518_data *data = gl518_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_in[nr], DIV_FROM_REG(data->fan_div[nr]))); } static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr, char *buf) { int nr = to_sensor_dev_attr(attr)->index; struct gl518_data *data = gl518_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]))); } static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr, char *buf) { int nr = to_sensor_dev_attr(attr)->index; struct gl518_data *data = gl518_update_device(dev); return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr])); } #define set(type, suffix, value, reg) \ static ssize_t set_##suffix(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct i2c_client *client = to_i2c_client(dev); \ struct gl518_data *data = 
i2c_get_clientdata(client); \ long val; \ int err = kstrtol(buf, 10, &val); \ if (err) \ return err; \ \ mutex_lock(&data->update_lock); \ data->value = type##_TO_REG(val); \ gl518_write_value(client, reg, data->value); \ mutex_unlock(&data->update_lock); \ return count; \ } #define set_bits(type, suffix, value, reg, mask, shift) \ static ssize_t set_##suffix(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct i2c_client *client = to_i2c_client(dev); \ struct gl518_data *data = i2c_get_clientdata(client); \ int regvalue; \ unsigned long val; \ int err = kstrtoul(buf, 10, &val); \ if (err) \ return err; \ \ mutex_lock(&data->update_lock); \ regvalue = gl518_read_value(client, reg); \ data->value = type##_TO_REG(val); \ regvalue = (regvalue & ~mask) | (data->value << shift); \ gl518_write_value(client, reg, regvalue); \ mutex_unlock(&data->update_lock); \ return count; \ } #define set_low(type, suffix, value, reg) \ set_bits(type, suffix, value, reg, 0x00ff, 0) #define set_high(type, suffix, value, reg) \ set_bits(type, suffix, value, reg, 0xff00, 8) set(TEMP, temp_max1, temp_max, GL518_REG_TEMP_MAX); set(TEMP, temp_hyst1, temp_hyst, GL518_REG_TEMP_HYST); set_bits(BOOL, fan_auto1, fan_auto1, GL518_REG_MISC, 0x08, 3); set_low(VDD, in_min0, voltage_min[0], GL518_REG_VDD_LIMIT); set_low(IN, in_min1, voltage_min[1], GL518_REG_VIN1_LIMIT); set_low(IN, in_min2, voltage_min[2], GL518_REG_VIN2_LIMIT); set_low(IN, in_min3, voltage_min[3], GL518_REG_VIN3_LIMIT); set_high(VDD, in_max0, voltage_max[0], GL518_REG_VDD_LIMIT); set_high(IN, in_max1, voltage_max[1], GL518_REG_VIN1_LIMIT); set_high(IN, in_max2, voltage_max[2], GL518_REG_VIN2_LIMIT); set_high(IN, in_max3, voltage_max[3], GL518_REG_VIN3_LIMIT); set_bits(BOOL, beep_enable, beep_enable, GL518_REG_CONF, 0x04, 2); set(BEEP_MASK, beep_mask, beep_mask, GL518_REG_ALARM); static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { 
struct i2c_client *client = to_i2c_client(dev); struct gl518_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; int regvalue; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); regvalue = gl518_read_value(client, GL518_REG_FAN_LIMIT); data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr])); regvalue = (regvalue & (0xff << (8 * nr))) | (data->fan_min[nr] << (8 * (1 - nr))); gl518_write_value(client, GL518_REG_FAN_LIMIT, regvalue); data->beep_mask = gl518_read_value(client, GL518_REG_ALARM); if (data->fan_min[nr] == 0) data->alarm_mask &= ~(0x20 << nr); else data->alarm_mask |= (0x20 << nr); data->beep_mask &= data->alarm_mask; gl518_write_value(client, GL518_REG_ALARM, data->beep_mask); mutex_unlock(&data->update_lock); return count; } static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl518_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; int regvalue; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; switch (val) { case 1: val = 0; break; case 2: val = 1; break; case 4: val = 2; break; case 8: val = 3; break; default: dev_err(dev, "Invalid fan clock divider %lu, choose one " "of 1, 2, 4 or 8\n", val); return -EINVAL; } mutex_lock(&data->update_lock); regvalue = gl518_read_value(client, GL518_REG_MISC); data->fan_div[nr] = val; regvalue = (regvalue & ~(0xc0 >> (2 * nr))) | (data->fan_div[nr] << (6 - 2 * nr)); gl518_write_value(client, GL518_REG_MISC, regvalue); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL); static DEVICE_ATTR(temp1_max, S_IWUSR|S_IRUGO, show_temp_max1, set_temp_max1); static DEVICE_ATTR(temp1_max_hyst, S_IWUSR|S_IRUGO, show_temp_hyst1, set_temp_hyst1); static DEVICE_ATTR(fan1_auto, S_IWUSR|S_IRUGO, 
show_fan_auto1, set_fan_auto1); static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0); static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1); static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR|S_IRUGO, show_fan_min, set_fan_min, 0); static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR|S_IRUGO, show_fan_min, set_fan_min, 1); static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR|S_IRUGO, show_fan_div, set_fan_div, 0); static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR|S_IRUGO, show_fan_div, set_fan_div, 1); static DEVICE_ATTR(in0_input, S_IRUGO, show_in_input0, NULL); static DEVICE_ATTR(in1_input, S_IRUGO, show_in_input1, NULL); static DEVICE_ATTR(in2_input, S_IRUGO, show_in_input2, NULL); static DEVICE_ATTR(in3_input, S_IRUGO, show_in_input3, NULL); static DEVICE_ATTR(in0_min, S_IWUSR|S_IRUGO, show_in_min0, set_in_min0); static DEVICE_ATTR(in1_min, S_IWUSR|S_IRUGO, show_in_min1, set_in_min1); static DEVICE_ATTR(in2_min, S_IWUSR|S_IRUGO, show_in_min2, set_in_min2); static DEVICE_ATTR(in3_min, S_IWUSR|S_IRUGO, show_in_min3, set_in_min3); static DEVICE_ATTR(in0_max, S_IWUSR|S_IRUGO, show_in_max0, set_in_max0); static DEVICE_ATTR(in1_max, S_IWUSR|S_IRUGO, show_in_max1, set_in_max1); static DEVICE_ATTR(in2_max, S_IWUSR|S_IRUGO, show_in_max2, set_in_max2); static DEVICE_ATTR(in3_max, S_IWUSR|S_IRUGO, show_in_max3, set_in_max3); static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static DEVICE_ATTR(beep_enable, S_IWUSR|S_IRUGO, show_beep_enable, set_beep_enable); static DEVICE_ATTR(beep_mask, S_IWUSR|S_IRUGO, show_beep_mask, set_beep_mask); static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct gl518_data *data = gl518_update_device(dev); return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, 
show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 6); static ssize_t show_beep(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct gl518_data *data = gl518_update_device(dev); return sprintf(buf, "%u\n", (data->beep_mask >> bitnr) & 1); } static ssize_t set_beep(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct gl518_data *data = i2c_get_clientdata(client); int bitnr = to_sensor_dev_attr(attr)->index; unsigned long bit; int err; err = kstrtoul(buf, 10, &bit); if (err) return err; if (bit & ~1) return -EINVAL; mutex_lock(&data->update_lock); data->beep_mask = gl518_read_value(client, GL518_REG_ALARM); if (bit) data->beep_mask |= (1 << bitnr); else data->beep_mask &= ~(1 << bitnr); gl518_write_value(client, GL518_REG_ALARM, data->beep_mask); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 0); static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 1); static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 2); static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 3); static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 4); static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 5); static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO|S_IWUSR, show_beep, set_beep, 6); static struct attribute *gl518_attributes[] = { &dev_attr_in3_input.attr, &dev_attr_in0_min.attr, &dev_attr_in1_min.attr, &dev_attr_in2_min.attr, &dev_attr_in3_min.attr, &dev_attr_in0_max.attr, &dev_attr_in1_max.attr, &dev_attr_in2_max.attr, 
&dev_attr_in3_max.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in0_beep.dev_attr.attr, &sensor_dev_attr_in1_beep.dev_attr.attr, &sensor_dev_attr_in2_beep.dev_attr.attr, &sensor_dev_attr_in3_beep.dev_attr.attr, &dev_attr_fan1_auto.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_fan2_div.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &sensor_dev_attr_fan1_beep.dev_attr.attr, &sensor_dev_attr_fan2_beep.dev_attr.attr, &dev_attr_temp1_input.attr, &dev_attr_temp1_max.attr, &dev_attr_temp1_max_hyst.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp1_beep.dev_attr.attr, &dev_attr_alarms.attr, &dev_attr_beep_enable.attr, &dev_attr_beep_mask.attr, NULL }; static const struct attribute_group gl518_group = { .attrs = gl518_attributes, }; static struct attribute *gl518_attributes_r80[] = { &dev_attr_in0_input.attr, &dev_attr_in1_input.attr, &dev_attr_in2_input.attr, NULL }; static const struct attribute_group gl518_group_r80 = { .attrs = gl518_attributes_r80, }; /* * Real code */ /* Return 0 if detection is successful, -ENODEV otherwise */ static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int rev; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; /* Now, we do the remaining detection. */ if ((gl518_read_value(client, GL518_REG_CHIP_ID) != 0x80) || (gl518_read_value(client, GL518_REG_CONF) & 0x80)) return -ENODEV; /* Determine the chip type. 
*/ rev = gl518_read_value(client, GL518_REG_REVISION); if (rev != 0x00 && rev != 0x80) return -ENODEV; strlcpy(info->type, "gl518sm", I2C_NAME_SIZE); return 0; } static int gl518_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct gl518_data *data; int err, revision; data = kzalloc(sizeof(struct gl518_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); revision = gl518_read_value(client, GL518_REG_REVISION); data->type = revision == 0x80 ? gl518sm_r80 : gl518sm_r00; mutex_init(&data->update_lock); /* Initialize the GL518SM chip */ data->alarm_mask = 0xff; gl518_init_client(client); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &gl518_group); if (err) goto exit_free; if (data->type == gl518sm_r80) { err = sysfs_create_group(&client->dev.kobj, &gl518_group_r80); if (err) goto exit_remove_files; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: sysfs_remove_group(&client->dev.kobj, &gl518_group); if (data->type == gl518sm_r80) sysfs_remove_group(&client->dev.kobj, &gl518_group_r80); exit_free: kfree(data); exit: return err; } /* * Called when we have found a new GL518SM. * Note that we preserve D4:NoFan2 and D2:beep_enable. 
*/ static void gl518_init_client(struct i2c_client *client) { /* Make sure we leave D7:Reset untouched */ u8 regvalue = gl518_read_value(client, GL518_REG_CONF) & 0x7f; /* Comparator mode (D3=0), standby mode (D6=0) */ gl518_write_value(client, GL518_REG_CONF, (regvalue &= 0x37)); /* Never interrupts */ gl518_write_value(client, GL518_REG_MASK, 0x00); /* Clear status register (D5=1), start (D6=1) */ gl518_write_value(client, GL518_REG_CONF, 0x20 | regvalue); gl518_write_value(client, GL518_REG_CONF, 0x40 | regvalue); } static int gl518_remove(struct i2c_client *client) { struct gl518_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &gl518_group); if (data->type == gl518sm_r80) sysfs_remove_group(&client->dev.kobj, &gl518_group_r80); kfree(data); return 0; } /* * Registers 0x07 to 0x0c are word-sized, others are byte-sized * GL518 uses a high-byte first convention, which is exactly opposite to * the SMBus standard. */ static int gl518_read_value(struct i2c_client *client, u8 reg) { if ((reg >= 0x07) && (reg <= 0x0c)) return i2c_smbus_read_word_swapped(client, reg); else return i2c_smbus_read_byte_data(client, reg); } static int gl518_write_value(struct i2c_client *client, u8 reg, u16 value) { if ((reg >= 0x07) && (reg <= 0x0c)) return i2c_smbus_write_word_swapped(client, reg, value); else return i2c_smbus_write_byte_data(client, reg, value); } static struct gl518_data *gl518_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct gl518_data *data = i2c_get_clientdata(client); int val; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { dev_dbg(&client->dev, "Starting gl518 update\n"); data->alarms = gl518_read_value(client, GL518_REG_INT); data->beep_mask = gl518_read_value(client, GL518_REG_ALARM); val = gl518_read_value(client, GL518_REG_VDD_LIMIT); data->voltage_min[0] = val & 0xff; 
data->voltage_max[0] = (val >> 8) & 0xff; val = gl518_read_value(client, GL518_REG_VIN1_LIMIT); data->voltage_min[1] = val & 0xff; data->voltage_max[1] = (val >> 8) & 0xff; val = gl518_read_value(client, GL518_REG_VIN2_LIMIT); data->voltage_min[2] = val & 0xff; data->voltage_max[2] = (val >> 8) & 0xff; val = gl518_read_value(client, GL518_REG_VIN3_LIMIT); data->voltage_min[3] = val & 0xff; data->voltage_max[3] = (val >> 8) & 0xff; val = gl518_read_value(client, GL518_REG_FAN_COUNT); data->fan_in[0] = (val >> 8) & 0xff; data->fan_in[1] = val & 0xff; val = gl518_read_value(client, GL518_REG_FAN_LIMIT); data->fan_min[0] = (val >> 8) & 0xff; data->fan_min[1] = val & 0xff; data->temp_in = gl518_read_value(client, GL518_REG_TEMP_IN); data->temp_max = gl518_read_value(client, GL518_REG_TEMP_MAX); data->temp_hyst = gl518_read_value(client, GL518_REG_TEMP_HYST); val = gl518_read_value(client, GL518_REG_MISC); data->fan_div[0] = (val >> 6) & 0x03; data->fan_div[1] = (val >> 4) & 0x03; data->fan_auto1 = (val >> 3) & 0x01; data->alarms &= data->alarm_mask; val = gl518_read_value(client, GL518_REG_CONF); data->beep_enable = (val >> 2) & 1; if (data->type != gl518sm_r00) { data->voltage_in[0] = gl518_read_value(client, GL518_REG_VDD); data->voltage_in[1] = gl518_read_value(client, GL518_REG_VIN1); data->voltage_in[2] = gl518_read_value(client, GL518_REG_VIN2); } data->voltage_in[3] = gl518_read_value(client, GL518_REG_VIN3); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } module_i2c_driver(gl518_driver); MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, " "Kyosti Malkki <kmalkki@cc.hut.fi> and " "Hong-Gunn Chew <hglinux@gunnet.org>"); MODULE_DESCRIPTION("GL518SM driver"); MODULE_LICENSE("GPL");
gpl-2.0
MoKee/android_kernel_samsung_exynos5410
drivers/hwmon/w83792d.c
4857
57730
/*
 * w83792d.c - Part of lm_sensors, Linux kernel modules for hardware
 * monitoring
 * Copyright (C) 2004, 2005 Winbond Electronics Corp.
 * Chunhao Huang <DZShen@Winbond.com.tw>,
 * Rudolf Marek <r.marek@assembler.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Note:
 * 1. This driver is only for the 2.6 kernel; the 2.4 kernel needs a
 *    different driver.
 * 2. This driver is only for the Winbond W83792D C version device; there
 *    are also some motherboards with the B version of the W83792D device.
 *    The calculation method for in6-in7 (measured value, limits) is a
 *    little different between the C and B versions. The C or B version
 *    can be identified by CR[0x49h].
*/

/*
 * Supports following chips:
 *
 * Chip		#vin	#fanin	#pwm	#temp	wchipid	vendid	i2c	ISA
 * w83792d	9	7	7	3	0x7a	0x5ca3	yes	no
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>

/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
						I2C_CLIENT_END };

/* Insmod parameters */
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
			"{bus, clientaddr, subclientaddr1, subclientaddr2}");

static bool init;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to one to force chip initialization");

/* The W83792D registers */
static const u8 W83792D_REG_IN[9] = {
	0x20,	/* Vcore A in DataSheet */
	0x21,	/* Vcore B in DataSheet */
	0x22,	/* VIN0 in DataSheet */
	0x23,	/* VIN1 in DataSheet */
	0x24,	/* VIN2 in DataSheet */
	0x25,	/* VIN3 in DataSheet */
	0x26,	/* 5VCC in DataSheet */
	0xB0,	/* 5VSB in DataSheet */
	0xB1	/* VBAT in DataSheet */
};
#define W83792D_REG_LOW_BITS1	0x3E	/* Low Bits I in DataSheet */
#define W83792D_REG_LOW_BITS2	0x3F	/* Low Bits II in DataSheet */
static const u8 W83792D_REG_IN_MAX[9] = {
	0x2B,	/* Vcore A High Limit in DataSheet */
	0x2D,	/* Vcore B High Limit in DataSheet */
	0x2F,	/* VIN0 High Limit in DataSheet */
	0x31,	/* VIN1 High Limit in DataSheet */
	0x33,	/* VIN2 High Limit in DataSheet */
	0x35,	/* VIN3 High Limit in DataSheet */
	0x37,	/* 5VCC High Limit in DataSheet */
	0xB4,	/* 5VSB High Limit in DataSheet */
	0xB6	/* VBAT High Limit in DataSheet */
};
static const u8 W83792D_REG_IN_MIN[9] = {
	0x2C,	/* Vcore A Low Limit in DataSheet */
	0x2E,	/* Vcore B Low Limit in DataSheet */
	0x30,	/* VIN0 Low Limit in DataSheet */
	0x32,	/* VIN1 Low Limit in DataSheet */
	0x34,	/* VIN2 Low Limit in DataSheet */
	0x36,	/* VIN3 Low Limit in DataSheet */
	0x38,	/* 5VCC Low Limit in DataSheet */
	0xB5,	/* 5VSB Low Limit in DataSheet */
	0xB7	/* VBAT Low Limit in DataSheet */
};
static const u8 W83792D_REG_FAN[7] = {
	0x28,	/* FAN 1 Count in DataSheet */
	0x29,	/* FAN 2 Count in DataSheet */
	0x2A,	/* FAN 3 Count in DataSheet */
	0xB8,	/* FAN 4 Count in DataSheet */
	0xB9,	/* FAN 5 Count in DataSheet */
	0xBA,	/* FAN 6 Count in DataSheet */
	0xBE	/* FAN 7 Count in DataSheet */
};
static const u8 W83792D_REG_FAN_MIN[7] = {
	0x3B,	/* FAN 1 Count Low Limit in DataSheet */
	0x3C,	/* FAN 2 Count Low Limit in DataSheet */
	0x3D,	/* FAN 3 Count Low Limit in DataSheet */
	0xBB,	/* FAN 4 Count Low Limit in DataSheet */
	0xBC,	/* FAN 5 Count Low Limit in DataSheet */
	0xBD,	/* FAN 6 Count Low Limit in DataSheet */
	0xBF	/* FAN 7 Count Low Limit in DataSheet */
};
#define W83792D_REG_FAN_CFG	0x84	/* FAN Configuration in DataSheet */
static const u8 W83792D_REG_FAN_DIV[4] = {
	0x47,	/* contains FAN2 and FAN1 Divisor */
	0x5B,	/* contains FAN4 and FAN3 Divisor */
	0x5C,	/* contains FAN6 and FAN5 Divisor */
	0x9E	/* contains FAN7 Divisor. */
};
static const u8 W83792D_REG_PWM[7] = {
	0x81,	/* FAN 1 Duty Cycle, be used to control */
	0x83,	/* FAN 2 Duty Cycle, be used to control */
	0x94,	/* FAN 3 Duty Cycle, be used to control */
	0xA3,	/* FAN 4 Duty Cycle, be used to control */
	0xA4,	/* FAN 5 Duty Cycle, be used to control */
	0xA5,	/* FAN 6 Duty Cycle, be used to control */
	0xA6	/* FAN 7 Duty Cycle, be used to control */
};
#define W83792D_REG_BANK		0x4E
#define W83792D_REG_TEMP2_CONFIG	0xC2
#define W83792D_REG_TEMP3_CONFIG	0xCA

static const u8 W83792D_REG_TEMP1[3] = {
	0x27,	/* TEMP 1 in DataSheet */
	0x39,	/* TEMP 1 Over in DataSheet */
	0x3A,	/* TEMP 1 Hyst in DataSheet */
};

static const u8 W83792D_REG_TEMP_ADD[2][6] = {
	{ 0xC0,		/* TEMP 2 in DataSheet */
	  0xC1,		/* TEMP 2(0.5 deg) in DataSheet */
	  0xC5,		/* TEMP 2 Over High part in DataSheet */
	  0xC6,		/* TEMP 2 Over Low part in DataSheet */
	  0xC3,		/* TEMP 2 Thyst High part in DataSheet */
	  0xC4 },	/* TEMP 2 Thyst Low part in DataSheet */
	{ 0xC8,		/* TEMP 3 in DataSheet */
	  0xC9,		/* TEMP 3(0.5 deg) in DataSheet */
	  0xCD,		/* TEMP 3 Over High part in DataSheet */
	  0xCE,		/* TEMP 3 Over Low part in DataSheet */
	  0xCB,		/* TEMP 3 Thyst High part in DataSheet */
	  0xCC }	/* TEMP 3 Thyst Low part in DataSheet */
};

static const u8 W83792D_REG_THERMAL[3] = {
	0x85,	/* SmartFanI: Fan1 target value */
	0x86,	/* SmartFanI: Fan2 target value */
	0x96	/* SmartFanI: Fan3 target value */
};

static const u8 W83792D_REG_TOLERANCE[3] = {
	0x87,	/* (bit3-0)SmartFan Fan1 tolerance */
	0x87,	/* (bit7-4)SmartFan Fan2 tolerance */
	0x97	/* (bit3-0)SmartFan Fan3 tolerance */
};

static const u8 W83792D_REG_POINTS[3][4] = {
	{ 0x85,		/* SmartFanII: Fan1 temp point 1 */
	  0xE3,		/* SmartFanII: Fan1 temp point 2 */
	  0xE4,		/* SmartFanII: Fan1 temp point 3 */
	  0xE5 },	/* SmartFanII: Fan1 temp point 4 */
	{ 0x86,		/* SmartFanII: Fan2 temp point 1 */
	  0xE6,		/* SmartFanII: Fan2 temp point 2 */
	  0xE7,		/* SmartFanII: Fan2 temp point 3 */
	  0xE8 },	/* SmartFanII: Fan2 temp point 4 */
	{ 0x96,		/* SmartFanII: Fan3 temp point 1 */
	  0xE9,		/* SmartFanII: Fan3 temp point 2 */
	  0xEA,		/* SmartFanII: Fan3 temp point 3 */
	  0xEB }	/* SmartFanII: Fan3 temp point 4 */
};

static const u8 W83792D_REG_LEVELS[3][4] = {
	{ 0x88,		/* (bit3-0) SmartFanII: Fan1 Non-Stop */
	  0x88,		/* (bit7-4) SmartFanII: Fan1 Level 1 */
	  0xE0,		/* (bit7-4) SmartFanII: Fan1 Level 2 */
	  0xE0 },	/* (bit3-0) SmartFanII: Fan1 Level 3 */
	{ 0x89,		/* (bit3-0) SmartFanII: Fan2 Non-Stop */
	  0x89,		/* (bit7-4) SmartFanII: Fan2 Level 1 */
	  0xE1,		/* (bit7-4) SmartFanII: Fan2 Level 2 */
	  0xE1 },	/* (bit3-0) SmartFanII: Fan2 Level 3 */
	{ 0x98,		/* (bit3-0) SmartFanII: Fan3 Non-Stop */
	  0x98,		/* (bit7-4) SmartFanII: Fan3 Level 1 */
	  0xE2,		/* (bit7-4) SmartFanII: Fan3 Level 2 */
	  0xE2 }	/* (bit3-0) SmartFanII: Fan3 Level 3 */
};

#define W83792D_REG_GPIO_EN		0x1A
#define W83792D_REG_CONFIG		0x40
#define W83792D_REG_VID_FANDIV		0x47
#define W83792D_REG_CHIPID		0x49
#define W83792D_REG_WCHIPID		0x58
#define W83792D_REG_CHIPMAN		0x4F
#define W83792D_REG_PIN			0x4B
#define W83792D_REG_I2C_SUBADDR		0x4A

#define W83792D_REG_ALARM1 0xA9		/* realtime status register1 */
#define W83792D_REG_ALARM2 0xAA		/* realtime status register2 */
#define W83792D_REG_ALARM3 0xAB		/* realtime status register3 */
#define W83792D_REG_CHASSIS 0x42	/* Bit 5: Case Open status bit */
#define W83792D_REG_CHASSIS_CLR 0x44	/* Bit 7: Case Open CLR_CHS/Reset bit */

/* control in0/in1 's limit modifiability */
#define W83792D_REG_VID_IN_B		0x17

#define W83792D_REG_VBAT		0x5D
#define W83792D_REG_I2C_ADDR		0x48

/*
 * Conversions. Rounding and limit checking is only done on the TO_REG
 * variants. Note that you should be a bit careful with which arguments
 * these macros are called: arguments may be evaluated more than once.
 * Fixing this is just not worth it.
 */
#define IN_FROM_REG(nr, val) (((nr) <= 1) ? ((val) * 2) : \
		((((nr) == 6) || ((nr) == 7)) ? ((val) * 6) : ((val) * 4)))
#define IN_TO_REG(nr, val) (((nr) <= 1) ? ((val) / 2) : \
		((((nr) == 6) || ((nr) == 7)) ? ((val) / 6) : ((val) / 4)))

/* 0 rpm maps to the "fan stopped" register value 255 */
static inline u8
FAN_TO_REG(long rpm, int div)
{
	if (rpm == 0)
		return 255;
	rpm = SENSORS_LIMIT(rpm, 1, 1000000);
	return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}

#define FAN_FROM_REG(val, div)	((val) == 0 ? -1 : \
				((val) == 255 ? 0 : \
						1350000 / ((val) * (div))))

/* for temp1 */
#define TEMP1_TO_REG(val)	(SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
					: (val)) / 1000, 0, 0xff))
#define TEMP1_FROM_REG(val)	(((val) & 0x80 ? (val)-0x100 : (val)) * 1000)
/* for temp2 and temp3, because they need additional resolution */
#define TEMP_ADD_FROM_REG(val1, val2) \
	((((val1) & 0x80 ? (val1)-0x100 \
		: (val1)) * 1000) + ((val2 & 0x80) ? 500 : 0))
#define TEMP_ADD_TO_REG_HIGH(val) \
	(SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
			: (val)) / 1000, 0, 0xff))
#define TEMP_ADD_TO_REG_LOW(val)	((val%1000) ? 0x80 : 0x00)

#define DIV_FROM_REG(val)		(1 << (val))

/* encode a fan divisor (1..128, rounded down to a power of two) into
   the 3-bit register field */
static inline u8
DIV_TO_REG(long val)
{
	int i;
	val = SENSORS_LIMIT(val, 1, 128) >> 1;
	for (i = 0; i < 7; i++) {
		if (val == 0)
			break;
		val >>= 1;
	}
	return (u8)i;
}

/* per-client state, cached register values refreshed by
   w83792d_update_device() under update_lock */
struct w83792d_data {
	struct device *hwmon_dev;

	struct mutex update_lock;
	char valid;		/* !=0 if following fields are valid */
	unsigned long last_updated;	/* In jiffies */

	/* array of 2 pointers to subclients */
	struct i2c_client *lm75[2];

	u8 in[9];		/* Register value */
	u8 in_max[9];		/* Register value */
	u8 in_min[9];		/* Register value */
	u16 low_bits;		/* Additional resolution to voltage in6-0 */
	u8 fan[7];		/* Register value */
	u8 fan_min[7];		/* Register value */
	u8 temp1[3];		/* current, over, thyst */
	u8 temp_add[2][6];	/* Register value */
	u8 fan_div[7];		/* Register encoding, shifted right */
	u8 pwm[7];		/*
				 * We only consider the first 3 set of pwm,
				 * although 792 chip has 7 set of pwm.
				 */
	u8 pwmenable[3];
	u32 alarms;		/* realtime status register encoding,combined */
	u8 chassis;		/* Chassis status */
	u8 chassis_clear;	/* CLR_CHS, clear chassis intrusion detection */
	u8 thermal_cruise[3];	/* Smart FanI: Fan1,2,3 target value */
	u8 tolerance[3];	/* Fan1,2,3 tolerance(Smart Fan I/II) */
	u8 sf2_points[3][4];	/* Smart FanII: Fan1,2,3 temperature points */
	u8 sf2_levels[3][4];	/* Smart FanII: Fan1,2,3 duty cycle levels */
};

static int w83792d_probe(struct i2c_client *client,
			 const struct i2c_device_id *id);
static int w83792d_detect(struct i2c_client *client,
			  struct i2c_board_info *info);
static int w83792d_remove(struct i2c_client *client);
static struct w83792d_data *w83792d_update_device(struct device *dev);

#ifdef DEBUG
static void w83792d_print_debug(struct w83792d_data *data, struct device *dev);
#endif

static void w83792d_init_client(struct i2c_client *client);

static const struct i2c_device_id w83792d_id[] = {
	{ "w83792d", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, w83792d_id);

static struct i2c_driver w83792d_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name = "w83792d",
	},
	.probe		= w83792d_probe,
	.remove		= w83792d_remove,
	.id_table	= w83792d_id,
	.detect		= w83792d_detect,
	.address_list	= normal_i2c,
};

/* combine the 8-bit reading with its 2 extra low bits into a 10-bit count */
static inline long in_count_from_reg(int nr, struct w83792d_data *data)
{
	/* in7 and in8 do not have low bits, but the formula still works */
	return (data->in[nr] << 2) | ((data->low_bits >> (2 * nr)) & 0x03);
}

/*
 * The SMBus locks itself. The Winbond W83792D chip has a bank register,
 * but the driver only accesses registers in bank 0, so we don't have
 * to switch banks and lock access between switches.
 */

/* read one byte-sized register; bank switching is never needed (bank 0) */
static inline int w83792d_read_value(struct i2c_client *client, u8 reg)
{
	return i2c_smbus_read_byte_data(client, reg);
}

/* write one byte-sized register */
static inline int
w83792d_write_value(struct i2c_client *client, u8 reg, u8 value)
{
	return i2c_smbus_write_byte_data(client, reg, value);
}

/* following are the sysfs callback functions */

/* show a voltage input, scaled with the extra low bits (see
   in_count_from_reg) and the per-channel IN_FROM_REG factor */
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%ld\n",
		       IN_FROM_REG(nr, in_count_from_reg(nr, data)));
}

/* generate show_in_min/show_in_max; limits have no low bits, hence * 4 */
#define show_in_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
			char *buf) \
{ \
	struct sensor_device_attribute *sensor_attr \
			= to_sensor_dev_attr(attr); \
	int nr = sensor_attr->index; \
	struct w83792d_data *data = w83792d_update_device(dev); \
	return sprintf(buf, "%ld\n", \
		       (long)(IN_FROM_REG(nr, data->reg[nr]) * 4)); \
}

show_in_reg(in_min);
show_in_reg(in_max);

/* generate store_in_min/store_in_max */
#define store_in_reg(REG, reg) \
static ssize_t store_in_##reg(struct device *dev, \
				struct device_attribute *attr, \
				const char *buf, size_t count) \
{ \
	struct sensor_device_attribute *sensor_attr \
			= to_sensor_dev_attr(attr); \
	int nr = sensor_attr->index; \
	struct i2c_client *client = to_i2c_client(dev); \
	struct w83792d_data *data = i2c_get_clientdata(client); \
	unsigned long val; \
	int err = kstrtoul(buf, 10, &val); \
	if (err) \
		return err; \
	mutex_lock(&data->update_lock); \
	data->in_##reg[nr] = SENSORS_LIMIT(IN_TO_REG(nr, val) / 4, 0, 255); \
	w83792d_write_value(client, W83792D_REG_IN_##REG[nr], \
			    data->in_##reg[nr]); \
	mutex_unlock(&data->update_lock); \
	 \
	return count; \
}

store_in_reg(MIN, min);
store_in_reg(MAX, max);

/* generate show_fan/show_fan_min; sysfs index is 1-based, arrays 0-based */
#define show_fan_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
			char *buf) \
{ \
	struct sensor_device_attribute *sensor_attr \
			= to_sensor_dev_attr(attr); \
	int nr = sensor_attr->index - 1; \
	struct w83792d_data *data = w83792d_update_device(dev); \
	return sprintf(buf, "%d\n", \
		FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
}

show_fan_reg(fan);
show_fan_reg(fan_min);

/* set a fan minimum in rpm, encoded with the current divisor */
static ssize_t
store_fan_min(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index - 1;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
	w83792d_write_value(client, W83792D_REG_FAN_MIN[nr],
				data->fan_min[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* show a fan clock divisor (1, 2, 4, ... 128) */
static ssize_t
show_fan_div(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%u\n", DIV_FROM_REG(data->fan_div[nr - 1]));
}

/*
 * Note: we save and restore the fan minimum here, because its value is
 * determined in part by the fan divisor. This follows the principle of
 * least surprise; the user doesn't expect the fan minimum to change just
 * because the divisor changed.
 */
static ssize_t
store_fan_div(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index - 1;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	unsigned long min;
	/*u8 reg;*/
	u8 fan_div_reg = 0;
	u8 tmp_fan_div;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	/* Save fan_min */
	mutex_lock(&data->update_lock);
	min = FAN_FROM_REG(data->fan_min[nr],
			   DIV_FROM_REG(data->fan_div[nr]));

	data->fan_div[nr] = DIV_TO_REG(val);

	/* two divisors share one register; mask out only our nibble */
	fan_div_reg = w83792d_read_value(client, W83792D_REG_FAN_DIV[nr >> 1]);
	fan_div_reg &= (nr & 0x01) ? 0x8f : 0xf8;
	tmp_fan_div = (nr & 0x01) ? (((data->fan_div[nr]) << 4) & 0x70)
					: ((data->fan_div[nr]) & 0x07);
	w83792d_write_value(client, W83792D_REG_FAN_DIV[nr >> 1],
				fan_div_reg | tmp_fan_div);

	/* Restore fan_min */
	data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
	w83792d_write_value(client, W83792D_REG_FAN_MIN[nr],
				data->fan_min[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* read/write the temperature1, includes measured value and limits */
static ssize_t show_temp1(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp1[nr]));
}

static ssize_t store_temp1(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp1[nr] = TEMP1_TO_REG(val);
	w83792d_write_value(client, W83792D_REG_TEMP1[nr],
				data->temp1[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* read/write the temperature2-3, includes measured value and limits */
static ssize_t show_temp23(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct sensor_device_attribute_2 *sensor_attr =
						to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr;
	int index = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%ld\n",
		(long)TEMP_ADD_FROM_REG(data->temp_add[nr][index],
					data->temp_add[nr][index+1]));
}

static ssize_t store_temp23(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sensor_attr =
						to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr;
	int index = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	/* the value is split over a high register and a 0.5 degree bit */
	data->temp_add[nr][index] = TEMP_ADD_TO_REG_HIGH(val);
	data->temp_add[nr][index+1] = TEMP_ADD_TO_REG_LOW(val);
	w83792d_write_value(client, W83792D_REG_TEMP_ADD[nr][index],
				data->temp_add[nr][index]);
	w83792d_write_value(client, W83792D_REG_TEMP_ADD[nr][index+1],
				data->temp_add[nr][index+1]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* get reatime status of all sensors items: voltage, temp, fan */
static ssize_t
show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%d\n", data->alarms);
}

/* show one bit of the combined alarm word */
static ssize_t show_alarm(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%d\n", (data->alarms >> nr) & 1);
}

/* the chip stores duty cycle in the low nibble; scale to 0..240 */
static ssize_t
show_pwm(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%d\n", (data->pwm[nr] & 0x0f) << 4);
}

/* map register encoding to the standard sysfs pwm_enable values */
static ssize_t
show_pwmenable(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index - 1;
	struct w83792d_data *data = w83792d_update_device(dev);
	long pwm_enable_tmp = 1;

	switch (data->pwmenable[nr]) {
	case 0:
		pwm_enable_tmp = 1; /* manual mode */
		break;
	case 1:
		pwm_enable_tmp = 3; /*thermal cruise/Smart Fan I */
		break;
	case 2:
		pwm_enable_tmp = 2; /* Smart Fan II */
		break;
	}

	return sprintf(buf, "%ld\n", pwm_enable_tmp);
}

static ssize_t
store_pwm(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;
	val = SENSORS_LIMIT(val, 0, 255) >> 4;

	mutex_lock(&data->update_lock);
	/* keep the mode bits in the high nibble unchanged */
	val |= w83792d_read_value(client, W83792D_REG_PWM[nr]) & 0xf0;
	data->pwm[nr] = val;
	w83792d_write_value(client, W83792D_REG_PWM[nr], data->pwm[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

static ssize_t
store_pwmenable(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index - 1;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	u8 fan_cfg_tmp, cfg1_tmp, cfg2_tmp, cfg3_tmp, cfg4_tmp;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;
	if (val < 1 || val > 3)
		return -EINVAL;

	mutex_lock(&data->update_lock);
	switch (val) {
	case 1:
		data->pwmenable[nr] = 0; /* manual mode */
		break;
	case 2:
		data->pwmenable[nr] = 2; /* Smart Fan II */
		break;
	case 3:
		data->pwmenable[nr] = 1; /* thermal cruise/Smart Fan I */
		break;
	}
	/* all three 2-bit mode fields live in one configuration register */
	cfg1_tmp = data->pwmenable[0];
	cfg2_tmp = (data->pwmenable[1]) << 2;
	cfg3_tmp = (data->pwmenable[2]) << 4;
	cfg4_tmp = w83792d_read_value(client, W83792D_REG_FAN_CFG) & 0xc0;
	fan_cfg_tmp = ((cfg4_tmp | cfg3_tmp) | cfg2_tmp) | cfg1_tmp;
	w83792d_write_value(client, W83792D_REG_FAN_CFG, fan_cfg_tmp);
	mutex_unlock(&data->update_lock);

	return count;
}

/* bit 7 of the pwm register selects PWM (1) or DC (0) output */
static ssize_t
show_pwm_mode(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%d\n", data->pwm[nr] >> 7);
}

static ssize_t
store_pwm_mode(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	mutex_lock(&data->update_lock);
	data->pwm[nr] = w83792d_read_value(client, W83792D_REG_PWM[nr]);
	if (val) {			/* PWM mode */
		data->pwm[nr] |= 0x80;
	} else {			/* DC mode */
		data->pwm[nr] &= 0x7f;
	}
	w83792d_write_value(client, W83792D_REG_PWM[nr], data->pwm[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* chassis intrusion status */
static ssize_t
show_chassis(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%d\n", data->chassis);
}

/* legacy "chassis" attribute, kept for backward compatibility */
static ssize_t
show_regs_chassis(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	dev_warn(dev,
		 "Attribute %s is deprecated, use intrusion0_alarm instead\n",
		 "chassis");
	return show_chassis(dev, attr, buf);
}
/* current CLR_CHS (chassis intrusion clear) setting */
static ssize_t
show_chassis_clear(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%d\n", data->chassis_clear);
}

/* legacy chassis_clear attribute, kept for backward compatibility */
static ssize_t
store_chassis_clear_legacy(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;
	u8 temp1 = 0, temp2 = 0;

	dev_warn(dev,
		 "Attribute %s is deprecated, use intrusion0_alarm instead\n",
		 "chassis_clear");

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->chassis_clear = SENSORS_LIMIT(val, 0, 1);
	/* CLR_CHS is bit 7; keep the remaining bits unchanged */
	temp1 = ((data->chassis_clear) << 7) & 0x80;
	temp2 = w83792d_read_value(client,
		W83792D_REG_CHASSIS_CLR) & 0x7f;
	w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, temp1 | temp2);
	mutex_unlock(&data->update_lock);

	return count;
}

/* standard intrusion0_alarm clear: only writing 0 is accepted */
static ssize_t
store_chassis_clear(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	unsigned long val;
	u8 reg;

	if (kstrtoul(buf, 10, &val) || val != 0)
		return -EINVAL;

	mutex_lock(&data->update_lock);
	reg = w83792d_read_value(client, W83792D_REG_CHASSIS_CLR);
	w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, reg | 0x80);
	data->valid = 0;		/* Force cache refresh */
	mutex_unlock(&data->update_lock);

	return count;
}

/* For Smart Fan I / Thermal Cruise */
static ssize_t
show_thermal_cruise(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%ld\n", (long)data->thermal_cruise[nr-1]);
}

static ssize_t
store_thermal_cruise(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index - 1;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	u8 target_tmp = 0, target_mask = 0;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	/* target value uses bits 6-0; bit 7 is preserved */
	target_tmp = val;
	target_tmp = target_tmp & 0x7f;
	mutex_lock(&data->update_lock);
	target_mask = w83792d_read_value(client,
					 W83792D_REG_THERMAL[nr]) & 0x80;
	data->thermal_cruise[nr] = SENSORS_LIMIT(target_tmp, 0, 255);
	w83792d_write_value(client, W83792D_REG_THERMAL[nr],
		(data->thermal_cruise[nr]) | target_mask);
	mutex_unlock(&data->update_lock);

	return count;
}

/* For Smart Fan I/Thermal Cruise and Smart Fan II */
static ssize_t
show_tolerance(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%ld\n", (long)data->tolerance[nr-1]);
}

static ssize_t
store_tolerance(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index - 1;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	u8 tol_tmp, tol_mask;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	/* fan2's tolerance lives in the high nibble of a shared register */
	tol_mask = w83792d_read_value(client,
		W83792D_REG_TOLERANCE[nr]) & ((nr == 1) ? 0x0f : 0xf0);
	tol_tmp = SENSORS_LIMIT(val, 0, 15);
	tol_tmp &= 0x0f;
	data->tolerance[nr] = tol_tmp;
	if (nr == 1)
		tol_tmp <<= 4;
	w83792d_write_value(client, W83792D_REG_TOLERANCE[nr],
		tol_mask | tol_tmp);
	mutex_unlock(&data->update_lock);

	return count;
}

/* For Smart Fan II */
static ssize_t
show_sf2_point(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct sensor_device_attribute_2 *sensor_attr =
						to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr;
	int index = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%ld\n", (long)data->sf2_points[index-1][nr-1]);
}

static ssize_t
store_sf2_point(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sensor_attr =
						to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr - 1;
	int index = sensor_attr->index - 1;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	u8 mask_tmp = 0;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->sf2_points[index][nr] = SENSORS_LIMIT(val, 0, 127);
	/* bit 7 of the point register is preserved */
	mask_tmp = w83792d_read_value(client,
					W83792D_REG_POINTS[index][nr]) & 0x80;
	w83792d_write_value(client, W83792D_REG_POINTS[index][nr],
		mask_tmp|data->sf2_points[index][nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* duty-cycle levels are 4-bit values, reported as a percentage */
static ssize_t
show_sf2_level(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct sensor_device_attribute_2 *sensor_attr =
						to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr;
	int index = sensor_attr->index;
	struct w83792d_data *data = w83792d_update_device(dev);
	return sprintf(buf, "%d\n",
			(((data->sf2_levels[index-1][nr]) * 100) / 15));
}

static ssize_t
store_sf2_level(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *sensor_attr =
						to_sensor_dev_attr_2(attr);
	int nr = sensor_attr->nr;
	int index = sensor_attr->index - 1;
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	u8 mask_tmp = 0, level_tmp = 0;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->sf2_levels[index][nr] = SENSORS_LIMIT((val * 15) / 100, 0, 15);
	/* level 3 uses the low nibble, the others the high nibble */
	mask_tmp = w83792d_read_value(client, W83792D_REG_LEVELS[index][nr])
		& ((nr == 3) ? 0xf0 : 0x0f);
	if (nr == 3)
		level_tmp = data->sf2_levels[index][nr];
	else
		level_tmp = data->sf2_levels[index][nr] << 4;
	w83792d_write_value(client, W83792D_REG_LEVELS[index][nr],
			    level_tmp | mask_tmp);
	mutex_unlock(&data->update_lock);

	return count;
}

/*
 * Detect (or force via module parameters) the two LM75-compatible
 * subclients that mirror the extra temperature sensors, and register
 * dummy i2c clients for them so the addresses are reserved.
 */
static int
w83792d_detect_subclients(struct i2c_client *new_client)
{
	int i, id, err;
	int address = new_client->addr;
	u8 val;
	struct i2c_adapter *adapter = new_client->adapter;
	struct w83792d_data *data = i2c_get_clientdata(new_client);

	id = i2c_adapter_id(adapter);
	if (force_subclients[0] == id && force_subclients[1] == address) {
		for (i = 2; i <= 3; i++) {
			if (force_subclients[i] < 0x48 ||
			    force_subclients[i] > 0x4f) {
				dev_err(&new_client->dev, "invalid subclient "
					"address %d; must be 0x48-0x4f\n",
					force_subclients[i]);
				err = -ENODEV;
				goto ERROR_SC_0;
			}
		}
		w83792d_write_value(new_client, W83792D_REG_I2C_SUBADDR,
					(force_subclients[2] & 0x07) |
					((force_subclients[3] & 0x07) << 4));
	}

	val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
	if (!(val & 0x08))
		data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
	if (!(val & 0x80)) {
		if ((data->lm75[0] != NULL) &&
			((val & 0x7) == ((val >> 4) & 0x7))) {
			dev_err(&new_client->dev, "duplicate addresses 0x%x, "
				"use force_subclient\n", data->lm75[0]->addr);
			err = -ENODEV;
			goto ERROR_SC_1;
		}
		data->lm75[1] = i2c_new_dummy(adapter,
					      0x48 + ((val >> 4) & 0x7));
	}

	return 0;

/* Undo inits in case of errors */

ERROR_SC_1:
	if (data->lm75[0] != NULL)
		i2c_unregister_device(data->lm75[0]);
ERROR_SC_0:
	return err;
}

/* voltage inputs in0..in8 */
static SENSOR_DEVICE_ATTR(in0_input,
S_IRUGO, show_in, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in, NULL, 4);
static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in, NULL, 5);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in, NULL, 6);
static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_in, NULL, 7);
static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_in, NULL, 8);
/* voltage low limits in0..in8 */
static SENSOR_DEVICE_ATTR(in0_min, S_IWUSR | S_IRUGO,
			show_in_min, store_in_min, 0);
static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO,
			show_in_min, store_in_min, 1);
static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO,
			show_in_min, store_in_min, 2);
static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO,
			show_in_min, store_in_min, 3);
static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO,
			show_in_min, store_in_min, 4);
static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO,
			show_in_min, store_in_min, 5);
static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO,
			show_in_min, store_in_min, 6);
static SENSOR_DEVICE_ATTR(in7_min, S_IWUSR | S_IRUGO,
			show_in_min, store_in_min, 7);
static SENSOR_DEVICE_ATTR(in8_min, S_IWUSR | S_IRUGO,
			show_in_min, store_in_min, 8);
/* voltage high limits in0..in8 */
static SENSOR_DEVICE_ATTR(in0_max, S_IWUSR | S_IRUGO,
			show_in_max, store_in_max, 0);
static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO,
			show_in_max, store_in_max, 1);
static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO,
			show_in_max, store_in_max, 2);
static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO,
			show_in_max, store_in_max, 3);
static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO,
			show_in_max, store_in_max, 4);
static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO,
			show_in_max, store_in_max, 5);
static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO,
			show_in_max, store_in_max, 6);
static SENSOR_DEVICE_ATTR(in7_max, S_IWUSR | S_IRUGO,
			show_in_max, store_in_max, 7);
static SENSOR_DEVICE_ATTR(in8_max, S_IWUSR | S_IRUGO,
			show_in_max, store_in_max, 8);
/* temperature readings and limits; the _2 attributes carry
   (nr, index) into the show/store callbacks */
static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp1, NULL, 0, 0);
static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp23, NULL, 0, 0);
static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp23, NULL, 1, 0);
static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR,
				show_temp1, store_temp1, 0, 1);
static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR,
				show_temp23, store_temp23, 0, 2);
static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR,
				show_temp23, store_temp23, 1, 2);
static SENSOR_DEVICE_ATTR_2(temp1_max_hyst, S_IRUGO | S_IWUSR,
				show_temp1, store_temp1, 0, 2);
static SENSOR_DEVICE_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR,
				show_temp23, store_temp23, 0, 4);
static SENSOR_DEVICE_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR,
				show_temp23, store_temp23, 1, 4);
/* combined alarm word and individual alarm bits */
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 10);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 11);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_alarm, NULL, 15);
static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 19);
static SENSOR_DEVICE_ATTR(in8_alarm,
S_IRUGO, show_alarm, NULL, 20); static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 21); static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 22); static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 23); static DEVICE_ATTR(chassis, S_IRUGO, show_regs_chassis, NULL); static DEVICE_ATTR(chassis_clear, S_IRUGO | S_IWUSR, show_chassis_clear, store_chassis_clear_legacy); static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, show_chassis, store_chassis_clear); static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0); static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1); static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2); static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwmenable, store_pwmenable, 1); static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwmenable, store_pwmenable, 2); static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwmenable, store_pwmenable, 3); static SENSOR_DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode, store_pwm_mode, 0); static SENSOR_DEVICE_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode, store_pwm_mode, 1); static SENSOR_DEVICE_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode, store_pwm_mode, 2); static SENSOR_DEVICE_ATTR(tolerance1, S_IWUSR | S_IRUGO, show_tolerance, store_tolerance, 1); static SENSOR_DEVICE_ATTR(tolerance2, S_IWUSR | S_IRUGO, show_tolerance, store_tolerance, 2); static SENSOR_DEVICE_ATTR(tolerance3, S_IWUSR | S_IRUGO, show_tolerance, store_tolerance, 3); static SENSOR_DEVICE_ATTR(thermal_cruise1, S_IWUSR | S_IRUGO, show_thermal_cruise, store_thermal_cruise, 1); static SENSOR_DEVICE_ATTR(thermal_cruise2, S_IWUSR | S_IRUGO, show_thermal_cruise, store_thermal_cruise, 2); static SENSOR_DEVICE_ATTR(thermal_cruise3, S_IWUSR | S_IRUGO, show_thermal_cruise, store_thermal_cruise, 3); static SENSOR_DEVICE_ATTR_2(sf2_point1_fan1, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 1, 1); 
/* Smart Fan II temperature points 2..4 per fan (args = point, fan). */
static SENSOR_DEVICE_ATTR_2(sf2_point2_fan1, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 2, 1);
static SENSOR_DEVICE_ATTR_2(sf2_point3_fan1, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 3, 1);
static SENSOR_DEVICE_ATTR_2(sf2_point4_fan1, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 4, 1);
static SENSOR_DEVICE_ATTR_2(sf2_point1_fan2, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 1, 2);
static SENSOR_DEVICE_ATTR_2(sf2_point2_fan2, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 2, 2);
static SENSOR_DEVICE_ATTR_2(sf2_point3_fan2, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 3, 2);
static SENSOR_DEVICE_ATTR_2(sf2_point4_fan2, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 4, 2);
static SENSOR_DEVICE_ATTR_2(sf2_point1_fan3, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 1, 3);
static SENSOR_DEVICE_ATTR_2(sf2_point2_fan3, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 2, 3);
static SENSOR_DEVICE_ATTR_2(sf2_point3_fan3, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 3, 3);
static SENSOR_DEVICE_ATTR_2(sf2_point4_fan3, S_IRUGO | S_IWUSR, show_sf2_point, store_sf2_point, 4, 3);
/* Smart Fan II duty-cycle levels 1..3 per fan (args = level, fan). */
static SENSOR_DEVICE_ATTR_2(sf2_level1_fan1, S_IRUGO | S_IWUSR, show_sf2_level, store_sf2_level, 1, 1);
static SENSOR_DEVICE_ATTR_2(sf2_level2_fan1, S_IRUGO | S_IWUSR, show_sf2_level, store_sf2_level, 2, 1);
static SENSOR_DEVICE_ATTR_2(sf2_level3_fan1, S_IRUGO | S_IWUSR, show_sf2_level, store_sf2_level, 3, 1);
static SENSOR_DEVICE_ATTR_2(sf2_level1_fan2, S_IRUGO | S_IWUSR, show_sf2_level, store_sf2_level, 1, 2);
static SENSOR_DEVICE_ATTR_2(sf2_level2_fan2, S_IRUGO | S_IWUSR, show_sf2_level, store_sf2_level, 2, 2);
static SENSOR_DEVICE_ATTR_2(sf2_level3_fan2, S_IRUGO | S_IWUSR, show_sf2_level, store_sf2_level, 3, 2);
static SENSOR_DEVICE_ATTR_2(sf2_level1_fan3, S_IRUGO | S_IWUSR, show_sf2_level, store_sf2_level, 1, 3);
static SENSOR_DEVICE_ATTR_2(sf2_level2_fan3, S_IRUGO | S_IWUSR, show_sf2_level, store_sf2_level, 2, 3);
static SENSOR_DEVICE_ATTR_2(sf2_level3_fan3, S_IRUGO | S_IWUSR, show_sf2_level, store_sf2_level, 3, 3);
/* Fan tachometer readings, minimum limits and clock divisors (1-based). */
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 3);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 4);
static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 5);
static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan, NULL, 6);
static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan, NULL, 7);
static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 1);
static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 2);
static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 3);
static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 4);
static SENSOR_DEVICE_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 5);
static SENSOR_DEVICE_ATTR(fan6_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 6);
static SENSOR_DEVICE_ATTR(fan7_min, S_IWUSR | S_IRUGO, show_fan_min, store_fan_min, 7);
static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 1);
static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 2);
static SENSOR_DEVICE_ATTR(fan3_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 3);
static SENSOR_DEVICE_ATTR(fan4_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 4);
static SENSOR_DEVICE_ATTR(fan5_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 5);
static SENSOR_DEVICE_ATTR(fan6_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 6);
static SENSOR_DEVICE_ATTR(fan7_div, S_IWUSR | S_IRUGO, show_fan_div, store_fan_div, 7);

/*
 * Attributes for fans 4..7, registered separately in probe() because the
 * corresponding pins may be configured as GPIO instead (see the
 * W83792D_REG_GPIO_EN / W83792D_REG_PIN checks below).
 */
static struct attribute *w83792d_attributes_fan[4][5] = {
	{
		&sensor_dev_attr_fan4_input.dev_attr.attr,
		&sensor_dev_attr_fan4_min.dev_attr.attr,
		&sensor_dev_attr_fan4_div.dev_attr.attr,
		&sensor_dev_attr_fan4_alarm.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_fan5_input.dev_attr.attr,
		&sensor_dev_attr_fan5_min.dev_attr.attr,
		&sensor_dev_attr_fan5_div.dev_attr.attr,
		&sensor_dev_attr_fan5_alarm.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_fan6_input.dev_attr.attr,
		&sensor_dev_attr_fan6_min.dev_attr.attr,
		&sensor_dev_attr_fan6_div.dev_attr.attr,
		&sensor_dev_attr_fan6_alarm.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_fan7_input.dev_attr.attr,
		&sensor_dev_attr_fan7_min.dev_attr.attr,
		&sensor_dev_attr_fan7_div.dev_attr.attr,
		&sensor_dev_attr_fan7_alarm.dev_attr.attr,
		NULL
	}
};

static const struct attribute_group w83792d_group_fan[4] = {
	{ .attrs = w83792d_attributes_fan[0] },
	{ .attrs = w83792d_attributes_fan[1] },
	{ .attrs = w83792d_attributes_fan[2] },
	{ .attrs = w83792d_attributes_fan[3] },
};

/* Attributes that are always present, regardless of pin configuration. */
static struct attribute *w83792d_attributes[] = {
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_max.dev_attr.attr,
	&sensor_dev_attr_in0_min.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_max.dev_attr.attr,
	&sensor_dev_attr_in1_min.dev_attr.attr,
	&sensor_dev_attr_in2_input.dev_attr.attr,
	&sensor_dev_attr_in2_max.dev_attr.attr,
	&sensor_dev_attr_in2_min.dev_attr.attr,
	&sensor_dev_attr_in3_input.dev_attr.attr,
	&sensor_dev_attr_in3_max.dev_attr.attr,
	&sensor_dev_attr_in3_min.dev_attr.attr,
	&sensor_dev_attr_in4_input.dev_attr.attr,
	&sensor_dev_attr_in4_max.dev_attr.attr,
	&sensor_dev_attr_in4_min.dev_attr.attr,
	&sensor_dev_attr_in5_input.dev_attr.attr,
	&sensor_dev_attr_in5_max.dev_attr.attr,
	&sensor_dev_attr_in5_min.dev_attr.attr,
	&sensor_dev_attr_in6_input.dev_attr.attr,
	&sensor_dev_attr_in6_max.dev_attr.attr,
	&sensor_dev_attr_in6_min.dev_attr.attr,
	&sensor_dev_attr_in7_input.dev_attr.attr,
	&sensor_dev_attr_in7_max.dev_attr.attr,
	&sensor_dev_attr_in7_min.dev_attr.attr,
	&sensor_dev_attr_in8_input.dev_attr.attr,
	&sensor_dev_attr_in8_max.dev_attr.attr,
	&sensor_dev_attr_in8_min.dev_attr.attr,
	&sensor_dev_attr_in0_alarm.dev_attr.attr,
	&sensor_dev_attr_in1_alarm.dev_attr.attr,
	&sensor_dev_attr_in2_alarm.dev_attr.attr,
	&sensor_dev_attr_in3_alarm.dev_attr.attr,
	&sensor_dev_attr_in4_alarm.dev_attr.attr,
	&sensor_dev_attr_in5_alarm.dev_attr.attr,
	&sensor_dev_attr_in6_alarm.dev_attr.attr,
	&sensor_dev_attr_in7_alarm.dev_attr.attr,
	&sensor_dev_attr_in8_alarm.dev_attr.attr,
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_max.dev_attr.attr,
	&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_max.dev_attr.attr,
	&sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_alarm.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_mode.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm2.dev_attr.attr,
	&sensor_dev_attr_pwm2_mode.dev_attr.attr,
	&sensor_dev_attr_pwm2_enable.dev_attr.attr,
	&sensor_dev_attr_pwm3.dev_attr.attr,
	&sensor_dev_attr_pwm3_mode.dev_attr.attr,
	&sensor_dev_attr_pwm3_enable.dev_attr.attr,
	&dev_attr_alarms.attr,
	&dev_attr_chassis.attr,
	&dev_attr_chassis_clear.attr,
	&dev_attr_intrusion0_alarm.attr,
	&sensor_dev_attr_tolerance1.dev_attr.attr,
	&sensor_dev_attr_thermal_cruise1.dev_attr.attr,
	&sensor_dev_attr_tolerance2.dev_attr.attr,
	&sensor_dev_attr_thermal_cruise2.dev_attr.attr,
	&sensor_dev_attr_tolerance3.dev_attr.attr,
	&sensor_dev_attr_thermal_cruise3.dev_attr.attr,
	&sensor_dev_attr_sf2_point1_fan1.dev_attr.attr,
	&sensor_dev_attr_sf2_point2_fan1.dev_attr.attr,
	&sensor_dev_attr_sf2_point3_fan1.dev_attr.attr,
	&sensor_dev_attr_sf2_point4_fan1.dev_attr.attr,
	&sensor_dev_attr_sf2_point1_fan2.dev_attr.attr,
	&sensor_dev_attr_sf2_point2_fan2.dev_attr.attr,
	&sensor_dev_attr_sf2_point3_fan2.dev_attr.attr,
	&sensor_dev_attr_sf2_point4_fan2.dev_attr.attr,
	&sensor_dev_attr_sf2_point1_fan3.dev_attr.attr,
	&sensor_dev_attr_sf2_point2_fan3.dev_attr.attr,
	&sensor_dev_attr_sf2_point3_fan3.dev_attr.attr,
	&sensor_dev_attr_sf2_point4_fan3.dev_attr.attr,
	&sensor_dev_attr_sf2_level1_fan1.dev_attr.attr,
	&sensor_dev_attr_sf2_level2_fan1.dev_attr.attr,
	&sensor_dev_attr_sf2_level3_fan1.dev_attr.attr,
	&sensor_dev_attr_sf2_level1_fan2.dev_attr.attr,
	&sensor_dev_attr_sf2_level2_fan2.dev_attr.attr,
	&sensor_dev_attr_sf2_level3_fan2.dev_attr.attr,
	&sensor_dev_attr_sf2_level1_fan3.dev_attr.attr,
	&sensor_dev_attr_sf2_level2_fan3.dev_attr.attr,
	&sensor_dev_attr_sf2_level3_fan3.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_div.dev_attr.attr,
	&sensor_dev_attr_fan1_alarm.dev_attr.attr,
	&sensor_dev_attr_fan2_input.dev_attr.attr,
	&sensor_dev_attr_fan2_min.dev_attr.attr,
	&sensor_dev_attr_fan2_div.dev_attr.attr,
	&sensor_dev_attr_fan2_alarm.dev_attr.attr,
	&sensor_dev_attr_fan3_input.dev_attr.attr,
	&sensor_dev_attr_fan3_min.dev_attr.attr,
	&sensor_dev_attr_fan3_div.dev_attr.attr,
	&sensor_dev_attr_fan3_alarm.dev_attr.attr,
	NULL
};

static const struct attribute_group w83792d_group = {
	.attrs = w83792d_attributes,
};

/* Return 0 if detection is successful, -ENODEV otherwise */
static int
w83792d_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	int val1, val2;
	unsigned short address = client->addr;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/* Bit 7 of the config register set means the chip is not ready. */
	if (w83792d_read_value(client, W83792D_REG_CONFIG) & 0x80)
		return -ENODEV;

	val1 = w83792d_read_value(client, W83792D_REG_BANK);
	val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN);
	/* Check for Winbond ID if in bank 0 */
	if (!(val1 & 0x07)) {	/* is Bank0 */
		if ((!(val1 & 0x80) && val2 != 0xa3) ||
		    ((val1 & 0x80) && val2 != 0x5c))
			return -ENODEV;
	}
	/*
	 * If Winbond chip, address of chip and W83792D_REG_I2C_ADDR
	 * should match
	 */
	if (w83792d_read_value(client, W83792D_REG_I2C_ADDR) != address)
		return -ENODEV;

	/* Put it now into bank 0 and Vendor ID High Byte */
	w83792d_write_value(client,
			    W83792D_REG_BANK,
			    (w83792d_read_value(client,
						W83792D_REG_BANK) & 0x78) | 0x80);

	/* Determine the chip type. */
	val1 = w83792d_read_value(client, W83792D_REG_WCHIPID);
	val2 = w83792d_read_value(client, W83792D_REG_CHIPMAN);
	if (val1 != 0x7a || val2 != 0x5c)
		return -ENODEV;

	strlcpy(info->type, "w83792d", I2C_NAME_SIZE);

	return 0;
}

/*
 * Bind the driver to a detected chip: allocate per-device state, probe the
 * LM75 subclients, initialize the chip, and register all sysfs groups
 * (fan4..7 groups only when the pins are not configured as GPIO).
 */
static int
w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct w83792d_data *data;
	struct device *dev = &client->dev;
	int i, val1, err;

	data = kzalloc(sizeof(struct w83792d_data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		goto ERROR0;
	}

	i2c_set_clientdata(client, data);
	data->valid = 0;
	mutex_init(&data->update_lock);

	err = w83792d_detect_subclients(client);
	if (err)
		goto ERROR1;

	/* Initialize the chip */
	w83792d_init_client(client);

	/* A few vars need to be filled upon startup */
	for (i = 0; i < 7; i++) {
		data->fan_min[i] = w83792d_read_value(client,
					W83792D_REG_FAN_MIN[i]);
	}

	/* Register sysfs hooks */
	err = sysfs_create_group(&dev->kobj, &w83792d_group);
	if (err)
		goto ERROR3;

	/*
	 * Read GPIO enable register to check if pins for fan 4,5 are used as
	 * GPIO
	 */
	val1 = w83792d_read_value(client, W83792D_REG_GPIO_EN);

	if (!(val1 & 0x40)) {
		err = sysfs_create_group(&dev->kobj, &w83792d_group_fan[0]);
		if (err)
			goto exit_remove_files;
	}

	if (!(val1 & 0x20)) {
		err = sysfs_create_group(&dev->kobj, &w83792d_group_fan[1]);
		if (err)
			goto exit_remove_files;
	}

	val1 = w83792d_read_value(client, W83792D_REG_PIN);
	if (val1 & 0x40) {
		err = sysfs_create_group(&dev->kobj, &w83792d_group_fan[2]);
		if (err)
			goto exit_remove_files;
	}

	if (val1 & 0x04) {
		err = sysfs_create_group(&dev->kobj, &w83792d_group_fan[3]);
		if (err)
			goto exit_remove_files;
	}

	data->hwmon_dev = hwmon_device_register(dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto exit_remove_files;
	}

	return 0;

	/* Unwind in reverse order of acquisition. */
exit_remove_files:
	sysfs_remove_group(&dev->kobj, &w83792d_group);
	for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
		sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
ERROR3:
	if (data->lm75[0] != NULL)
		i2c_unregister_device(data->lm75[0]);
	if (data->lm75[1] != NULL)
		i2c_unregister_device(data->lm75[1]);
ERROR1:
	kfree(data);
ERROR0:
	return err;
}

/* Tear down everything probe() set up, in reverse order. */
static int
w83792d_remove(struct i2c_client *client)
{
	struct w83792d_data *data = i2c_get_clientdata(client);
	int i;

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &w83792d_group);
	for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
		sysfs_remove_group(&client->dev.kobj,
				   &w83792d_group_fan[i]);

	if (data->lm75[0] != NULL)
		i2c_unregister_device(data->lm75[0]);
	if (data->lm75[1] != NULL)
		i2c_unregister_device(data->lm75[1]);

	kfree(data);
	return 0;
}

/*
 * One-time chip setup: optional reset (module parameter "init" --
 * presumably a file-scope flag declared above this chunk; confirm),
 * unlock the vin0/vin1 limits, mask the temp2/temp3 config registers,
 * and start monitoring.
 */
static void
w83792d_init_client(struct i2c_client *client)
{
	u8 temp2_cfg, temp3_cfg, vid_in_b;

	if (init)
		w83792d_write_value(client, W83792D_REG_CONFIG, 0x80);
	/*
	 * Clear the bit6 of W83792D_REG_VID_IN_B(set it into 0):
	 * W83792D_REG_VID_IN_B bit6 = 0: the high/low limit of
	 * vin0/vin1 can be modified by user;
	 * W83792D_REG_VID_IN_B bit6 = 1: the high/low limit of
	 * vin0/vin1 auto-updated, can NOT be modified by user.
	 */
	vid_in_b = w83792d_read_value(client, W83792D_REG_VID_IN_B);
	w83792d_write_value(client, W83792D_REG_VID_IN_B,
			    vid_in_b & 0xbf);

	temp2_cfg = w83792d_read_value(client, W83792D_REG_TEMP2_CONFIG);
	temp3_cfg = w83792d_read_value(client, W83792D_REG_TEMP3_CONFIG);
	w83792d_write_value(client, W83792D_REG_TEMP2_CONFIG,
			    temp2_cfg & 0xe6);
	w83792d_write_value(client, W83792D_REG_TEMP3_CONFIG,
			    temp3_cfg & 0xe6);

	/* Start monitoring */
	w83792d_write_value(client, W83792D_REG_CONFIG,
			    (w83792d_read_value(client,
						W83792D_REG_CONFIG) & 0xf7) | 0x01);
}

/*
 * Refresh the cached register snapshot under update_lock.  A re-read is
 * done when the cache is older than 3*HZ jiffies, when jiffies wrapped,
 * or when the cache was never filled (!data->valid).
 */
static struct w83792d_data *w83792d_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83792d_data *data = i2c_get_clientdata(client);
	int i, j;
	u8 reg_array_tmp[4], reg_tmp;

	mutex_lock(&data->update_lock);

	if (time_after
	    (jiffies - data->last_updated, (unsigned long) (HZ * 3))
	    || time_before(jiffies, data->last_updated) || !data->valid) {
		dev_dbg(dev, "Starting device update\n");

		/* Update the voltages measured value and limits */
		for (i = 0; i < 9; i++) {
			data->in[i] = w83792d_read_value(client,
						W83792D_REG_IN[i]);
			data->in_max[i] = w83792d_read_value(client,
						W83792D_REG_IN_MAX[i]);
			data->in_min[i] = w83792d_read_value(client,
						W83792D_REG_IN_MIN[i]);
		}
		data->low_bits = w83792d_read_value(client,
						W83792D_REG_LOW_BITS1) +
			(w83792d_read_value(client,
						W83792D_REG_LOW_BITS2) << 8);
		for (i = 0; i < 7; i++) {
			/* Update the Fan measured value and limits */
			data->fan[i] = w83792d_read_value(client,
						W83792D_REG_FAN[i]);
			data->fan_min[i] = w83792d_read_value(client,
						W83792D_REG_FAN_MIN[i]);
			/* Update the PWM/DC Value and PWM/DC flag */
			data->pwm[i] = w83792d_read_value(client,
						W83792D_REG_PWM[i]);
		}

		/* Fan config packs three 2-bit enable fields into one byte. */
		reg_tmp = w83792d_read_value(client, W83792D_REG_FAN_CFG);
		data->pwmenable[0] = reg_tmp & 0x03;
		data->pwmenable[1] = (reg_tmp>>2) & 0x03;
		data->pwmenable[2] = (reg_tmp>>4) & 0x03;

		for (i = 0; i < 3; i++) {
			data->temp1[i] = w83792d_read_value(client,
						W83792D_REG_TEMP1[i]);
		}
		for (i = 0; i < 2; i++) {
			for (j = 0; j < 6; j++) {
				data->temp_add[i][j] = w83792d_read_value(
					client, W83792D_REG_TEMP_ADD[i][j]);
			}
		}

		/* Update the Fan Divisor */
		for (i = 0; i < 4; i++) {
			reg_array_tmp[i] = w83792d_read_value(client,
						W83792D_REG_FAN_DIV[i]);
		}
		/* Each divisor register holds two 3-bit divisor fields. */
		data->fan_div[0] = reg_array_tmp[0] & 0x07;
		data->fan_div[1] = (reg_array_tmp[0] >> 4) & 0x07;
		data->fan_div[2] = reg_array_tmp[1] & 0x07;
		data->fan_div[3] = (reg_array_tmp[1] >> 4) & 0x07;
		data->fan_div[4] = reg_array_tmp[2] & 0x07;
		data->fan_div[5] = (reg_array_tmp[2] >> 4) & 0x07;
		data->fan_div[6] = reg_array_tmp[3] & 0x07;

		/* Update the realtime status */
		data->alarms = w83792d_read_value(client, W83792D_REG_ALARM1) +
			(w83792d_read_value(client, W83792D_REG_ALARM2) << 8) +
			(w83792d_read_value(client, W83792D_REG_ALARM3) << 16);

		/* Update CaseOpen status and it's CLR_CHS. */
		data->chassis = (w83792d_read_value(client,
						W83792D_REG_CHASSIS) >> 5) & 0x01;
		data->chassis_clear = (w83792d_read_value(client,
						W83792D_REG_CHASSIS_CLR) >> 7) & 0x01;

		/* Update Thermal Cruise/Smart Fan I target value */
		for (i = 0; i < 3; i++) {
			data->thermal_cruise[i] =
				w83792d_read_value(client,
						W83792D_REG_THERMAL[i]) & 0x7f;
		}

		/* Update Smart Fan I/II tolerance */
		reg_tmp = w83792d_read_value(client, W83792D_REG_TOLERANCE[0]);
		data->tolerance[0] = reg_tmp & 0x0f;
		data->tolerance[1] = (reg_tmp >> 4) & 0x0f;
		data->tolerance[2] = w83792d_read_value(client,
						W83792D_REG_TOLERANCE[2]) & 0x0f;

		/* Update Smart Fan II temperature points */
		for (i = 0; i < 3; i++) {
			for (j = 0; j < 4; j++) {
				data->sf2_points[i][j] =
					w83792d_read_value(client,
						W83792D_REG_POINTS[i][j]) & 0x7f;
			}
		}

		/* Update Smart Fan II duty cycle levels */
		for (i = 0; i < 3; i++) {
			reg_tmp = w83792d_read_value(client,
						W83792D_REG_LEVELS[i][0]);
			data->sf2_levels[i][0] = reg_tmp & 0x0f;
			data->sf2_levels[i][1] = (reg_tmp >> 4) & 0x0f;
			reg_tmp = w83792d_read_value(client,
						W83792D_REG_LEVELS[i][2]);
			/* Note the swapped nibble order for levels 2 and 3. */
			data->sf2_levels[i][2] = (reg_tmp >> 4) & 0x0f;
			data->sf2_levels[i][3] = reg_tmp & 0x0f;
		}

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

#ifdef DEBUG
	w83792d_print_debug(data, dev);
#endif

	return data;
}

#ifdef DEBUG
/* Dump the whole cached register snapshot via dev_dbg (DEBUG builds only). */
static void w83792d_print_debug(struct w83792d_data *data, struct device *dev)
{
	int i = 0, j = 0;

	dev_dbg(dev, "==========The following is the debug message...========\n");
	dev_dbg(dev, "9 set of Voltages: =====>\n");
	for (i = 0; i < 9; i++) {
		dev_dbg(dev, "vin[%d] is: 0x%x\n", i, data->in[i]);
		dev_dbg(dev, "vin[%d] max is: 0x%x\n", i, data->in_max[i]);
		dev_dbg(dev, "vin[%d] min is: 0x%x\n", i, data->in_min[i]);
	}
	dev_dbg(dev, "Low Bit1 is: 0x%x\n", data->low_bits & 0xff);
	dev_dbg(dev, "Low Bit2 is: 0x%x\n", data->low_bits >> 8);
	dev_dbg(dev, "7 set of Fan Counts and Duty Cycles: =====>\n");
	for (i = 0; i < 7; i++) {
		dev_dbg(dev, "fan[%d] is: 0x%x\n", i, data->fan[i]);
		dev_dbg(dev, "fan[%d] min is: 0x%x\n", i, data->fan_min[i]);
		dev_dbg(dev, "pwm[%d] is: 0x%x\n", i, data->pwm[i]);
	}
	dev_dbg(dev, "3 set of Temperatures: =====>\n");
	for (i = 0; i < 3; i++)
		dev_dbg(dev, "temp1[%d] is: 0x%x\n", i, data->temp1[i]);
	for (i = 0; i < 2; i++) {
		for (j = 0; j < 6; j++) {
			dev_dbg(dev, "temp_add[%d][%d] is: 0x%x\n", i, j,
							data->temp_add[i][j]);
		}
	}

	for (i = 0; i < 7; i++)
		dev_dbg(dev, "fan_div[%d] is: 0x%x\n", i, data->fan_div[i]);
	dev_dbg(dev, "==========End of the debug message...================\n");
	dev_dbg(dev, "\n");
}
#endif

module_i2c_driver(w83792d_driver);

MODULE_AUTHOR("Chunhao Huang @ Winbond <DZShen@Winbond.com.tw>");
MODULE_DESCRIPTION("W83792AD/D driver for linux-2.6");
MODULE_LICENSE("GPL");
gpl-2.0
TeamRegular/android_kernel_lge_msm8916
arch/ia64/kernel/irq_ia64.c
5113
16110
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *                      PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *						Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

/* Per-IRQ allocation states kept in irq_status[]. */
#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

/* Protects all vector allocation state below. */
DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

/* Per-CPU vector -> irq mapping; -1 marks an unused vector. */
DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

/* For each vector, the set of CPUs on which it is currently bound. */
static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS -1] = IRQ_UNUSED
};

/* Returns 1 if @irq is allocated, -1 otherwise. */
int check_irq_used(int irq)
{
	if (irq_status[irq] == IRQ_USED)
		return 1;

	return -1;
}

/* Find the lowest unallocated irq number, or -ENOSPC. */
static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

/*
 * Find a device vector that is free on every CPU of @domain
 * (intersected with the online mask), or a negative errno.
 */
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

/* Bind @irq to @vector on all online CPUs of @domain; caller holds vector_lock. */
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;
	/* Rebinding to the identical mapping is a no-op. */
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}

/* Locked wrapper around __bind_irq_vector(). */
int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

/* Undo __bind_irq_vector(); caller holds vector_lock. */
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpumask_and(&mask, &cfg->domain, cpu_online_mask);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}

/* Locked wrapper around __clear_irq_vector(). */
static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Allocate a free device vector for @irq (or, with AUTO_ASSIGN, use the
 * vector number itself as the irq).  Returns the vector or -ENOSPC.
 */
int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

/* Release a vector previously handed out by ia64_native_assign_irq_vector(). */
void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

/* Reserve @vector for itself across all CPUs; 0 on success, non-zero on error. */
int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

/* Allocation domain for @cpu: single-CPU in percpu mode, else all CPUs. */
static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}

/*
 * Begin migrating @irq to @cpu: allocate a vector in the new domain and
 * remember the old one for later cleanup.  Caller holds vector_lock.
 */
static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

/* Locked wrapper around __irq_prepare_move(). */
int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

/*
 * Finish a migration started by irq_prepare_move(): once the irq fires on
 * the new domain, IPI the CPUs of the old domain so they release the old
 * vector (see smp_irq_move_cleanup_interrupt).
 */
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	/* Still running on the old domain: not yet safe to clean up. */
	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

/* IPI handler: release this CPU's stale vector bindings after a move. */
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpu_isset(me, cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__get_cpu_var(vector_irq)[vector] = -1;
		cpu_clear(me, vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

static struct irqaction irq_move_irqaction = {
	.handler =	smp_irq_move_cleanup_interrupt,
	.flags =	IRQF_DISABLED,
	.name =		"irq_move"
};

/* Parse the "vector=" boot parameter; "percpu" enables per-CPU domains. */
static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


/* Free @irq's vector but keep the irq number reserved (IRQ_RSVD). */
void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic irq allocate and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		dynamic_irq_init(irq);
	return irq;
}

/* Counterpart of create_irq(). */
void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	clear_irq_vector(irq);
}

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * That's where the IVT branches when we get an external
 * interrupt. This branches to the correct hardware IRQ handler via
 * function ptr.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates a interrupt processing when a cpu is about to be
 * brought down.
 *
 * NOTE(review): this function is cut off at the end of this chunk; the
 * remainder (and the closing of the comment below) lies past the last
 * visible line and is left as-is.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Now try calling normal ia64_handle_irq as it would have got called
			 * from a real intr handler.
Try passing null for pt_regs, hopefully * it will work. I hope it works!. * Probably could shared code. */ if (unlikely(irq < 0)) { printk(KERN_ERR "%s: Unexpected interrupt " "vector %d on CPU %d not being mapped " "to any IRQ!!\n", __func__, vector, smp_processor_id()); } else { vectors_in_migration[irq]=0; generic_handle_irq(irq); } set_irq_regs(old_regs); /* * Disable interrupts and send EOI */ local_irq_disable(); ia64_setreg(_IA64_REG_CR_TPR, saved_tpr); } ia64_eoi(); vector = ia64_get_ivr(); } irq_exit(); } #endif #ifdef CONFIG_SMP static irqreturn_t dummy_handler (int irq, void *dev_id) { BUG(); } static struct irqaction ipi_irqaction = { .handler = handle_IPI, .flags = IRQF_DISABLED, .name = "IPI" }; /* * KVM uses this interrupt to force a cpu out of guest mode */ static struct irqaction resched_irqaction = { .handler = dummy_handler, .flags = IRQF_DISABLED, .name = "resched" }; static struct irqaction tlb_irqaction = { .handler = dummy_handler, .flags = IRQF_DISABLED, .name = "tlb_flush" }; #endif void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action) { unsigned int irq; irq = vec; BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL)); irq_set_status_flags(irq, IRQ_PER_CPU); irq_set_chip(irq, &irq_type_ia64_lsapic); if (action) setup_irq(irq, action); irq_set_handler(irq, handle_percpu_irq); } void __init ia64_native_register_ipi(void) { #ifdef CONFIG_SMP register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction); register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction); register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction); #endif } void __init init_IRQ (void) { #ifdef CONFIG_ACPI acpi_boot_init(); #endif ia64_register_ipi(); register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); #ifdef CONFIG_SMP #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) if (vector_domain_type != VECTOR_DOMAIN_NONE) register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); #endif #endif #ifdef CONFIG_PERFMON pfm_init_percpu(); #endif 
platform_irq_init(); } void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect) { void __iomem *ipi_addr; unsigned long ipi_data; unsigned long phys_cpu_id; phys_cpu_id = cpu_physical_id(cpu); /* * cpu number is in 8bit ID and 8bit EID */ ipi_data = (delivery_mode << 8) | (vector & 0xff); ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3)); writeq(ipi_data, ipi_addr); }
gpl-2.0
spock1104/android_kernel_zte_nex
arch/mips/bcm63xx/cs.c
13561
3250
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/log2.h> #include <bcm63xx_cpu.h> #include <bcm63xx_io.h> #include <bcm63xx_regs.h> #include <bcm63xx_cs.h> static DEFINE_SPINLOCK(bcm63xx_cs_lock); /* * check if given chip select exists */ static int is_valid_cs(unsigned int cs) { if (cs > 6) return 0; return 1; } /* * Configure chipselect base address and size (bytes). * Size must be a power of two between 8k and 256M. */ int bcm63xx_set_cs_base(unsigned int cs, u32 base, unsigned int size) { unsigned long flags; u32 val; if (!is_valid_cs(cs)) return -EINVAL; /* sanity check on size */ if (size != roundup_pow_of_two(size)) return -EINVAL; if (size < 8 * 1024 || size > 256 * 1024 * 1024) return -EINVAL; val = (base & MPI_CSBASE_BASE_MASK); /* 8k => 0 - 256M => 15 */ val |= (ilog2(size) - ilog2(8 * 1024)) << MPI_CSBASE_SIZE_SHIFT; spin_lock_irqsave(&bcm63xx_cs_lock, flags); bcm_mpi_writel(val, MPI_CSBASE_REG(cs)); spin_unlock_irqrestore(&bcm63xx_cs_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_set_cs_base); /* * configure chipselect timing (ns) */ int bcm63xx_set_cs_timing(unsigned int cs, unsigned int wait, unsigned int setup, unsigned int hold) { unsigned long flags; u32 val; if (!is_valid_cs(cs)) return -EINVAL; spin_lock_irqsave(&bcm63xx_cs_lock, flags); val = bcm_mpi_readl(MPI_CSCTL_REG(cs)); val &= ~(MPI_CSCTL_WAIT_MASK); val &= ~(MPI_CSCTL_SETUP_MASK); val &= ~(MPI_CSCTL_HOLD_MASK); val |= wait << MPI_CSCTL_WAIT_SHIFT; val |= setup << MPI_CSCTL_SETUP_SHIFT; val |= hold << MPI_CSCTL_HOLD_SHIFT; bcm_mpi_writel(val, MPI_CSCTL_REG(cs)); spin_unlock_irqrestore(&bcm63xx_cs_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_set_cs_timing); /* * configure other chipselect parameter (data bus 
size, ...) */ int bcm63xx_set_cs_param(unsigned int cs, u32 params) { unsigned long flags; u32 val; if (!is_valid_cs(cs)) return -EINVAL; /* none of this fields apply to pcmcia */ if (cs == MPI_CS_PCMCIA_COMMON || cs == MPI_CS_PCMCIA_ATTR || cs == MPI_CS_PCMCIA_IO) return -EINVAL; spin_lock_irqsave(&bcm63xx_cs_lock, flags); val = bcm_mpi_readl(MPI_CSCTL_REG(cs)); val &= ~(MPI_CSCTL_DATA16_MASK); val &= ~(MPI_CSCTL_SYNCMODE_MASK); val &= ~(MPI_CSCTL_TSIZE_MASK); val &= ~(MPI_CSCTL_ENDIANSWAP_MASK); val |= params; bcm_mpi_writel(val, MPI_CSCTL_REG(cs)); spin_unlock_irqrestore(&bcm63xx_cs_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_set_cs_param); /* * set cs status (enable/disable) */ int bcm63xx_set_cs_status(unsigned int cs, int enable) { unsigned long flags; u32 val; if (!is_valid_cs(cs)) return -EINVAL; spin_lock_irqsave(&bcm63xx_cs_lock, flags); val = bcm_mpi_readl(MPI_CSCTL_REG(cs)); if (enable) val |= MPI_CSCTL_ENABLE_MASK; else val &= ~MPI_CSCTL_ENABLE_MASK; bcm_mpi_writel(val, MPI_CSCTL_REG(cs)); spin_unlock_irqrestore(&bcm63xx_cs_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_set_cs_status);
gpl-2.0
yetu/linux-pfla02
fs/f2fs/file.c
250
23447
/* * fs/f2fs/file.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/stat.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/falloc.h> #include <linux/types.h> #include <linux/compat.h> #include <linux/uaccess.h> #include <linux/mount.h> #include <linux/pagevec.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include "xattr.h" #include "acl.h" #include <trace/events/f2fs.h> static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; struct inode *inode = file_inode(vma->vm_file); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; int err; f2fs_balance_fs(sbi); sb_start_pagefault(inode->i_sb); /* force to convert with normal data indices */ err = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, page); if (err) goto out; /* block allocation */ f2fs_lock_op(sbi); set_new_dnode(&dn, inode, NULL, NULL, 0); err = f2fs_reserve_block(&dn, page->index); f2fs_unlock_op(sbi); if (err) goto out; file_update_time(vma->vm_file); lock_page(page); if (unlikely(page->mapping != inode->i_mapping || page_offset(page) > i_size_read(inode) || !PageUptodate(page))) { unlock_page(page); err = -EFAULT; goto out; } /* * check to see if the page is mapped already (no holes) */ if (PageMappedToDisk(page)) goto mapped; /* page is wholly or partially inside EOF */ if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) { unsigned offset; offset = i_size_read(inode) & ~PAGE_CACHE_MASK; zero_user_segment(page, offset, PAGE_CACHE_SIZE); } set_page_dirty(page); SetPageUptodate(page); trace_f2fs_vm_page_mkwrite(page, DATA); mapped: /* fill the page */ 
f2fs_wait_on_page_writeback(page, DATA); out: sb_end_pagefault(inode->i_sb); return block_page_mkwrite_return(err); } static const struct vm_operations_struct f2fs_file_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = f2fs_vm_page_mkwrite, .remap_pages = generic_file_remap_pages, }; static int get_parent_ino(struct inode *inode, nid_t *pino) { struct dentry *dentry; inode = igrab(inode); dentry = d_find_any_alias(inode); iput(inode); if (!dentry) return 0; if (update_dent_inode(inode, &dentry->d_name)) { dput(dentry); return 0; } *pino = parent_ino(dentry); dput(dentry); return 1; } static inline bool need_do_checkpoint(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); bool need_cp = false; if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1) need_cp = true; else if (file_wrong_pino(inode)) need_cp = true; else if (!space_for_roll_forward(sbi)) need_cp = true; else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) need_cp = true; else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi))) need_cp = true; return need_cp; } int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t ino = inode->i_ino; int ret = 0; bool need_cp = false; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, .for_reclaim = 0, }; if (unlikely(f2fs_readonly(inode->i_sb))) return 0; trace_f2fs_sync_file_enter(inode); /* if fdatasync is triggered, let's do in-place-update */ if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks) set_inode_flag(fi, FI_NEED_IPU); ret = filemap_write_and_wait_range(inode->i_mapping, start, end); clear_inode_flag(fi, FI_NEED_IPU); if (ret) { trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret); return ret; } /* * if there is no written data, don't waste time to write recovery info. 
*/ if (!is_inode_flag_set(fi, FI_APPEND_WRITE) && !exist_written_data(sbi, ino, APPEND_INO)) { struct page *i = find_get_page(NODE_MAPPING(sbi), ino); /* But we need to avoid that there are some inode updates */ if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino)) { f2fs_put_page(i, 0); goto go_write; } f2fs_put_page(i, 0); if (is_inode_flag_set(fi, FI_UPDATE_WRITE) || exist_written_data(sbi, ino, UPDATE_INO)) goto flush_out; goto out; } go_write: /* guarantee free sections for fsync */ f2fs_balance_fs(sbi); /* * Both of fdatasync() and fsync() are able to be recovered from * sudden-power-off. */ down_read(&fi->i_sem); need_cp = need_do_checkpoint(inode); up_read(&fi->i_sem); if (need_cp) { nid_t pino; /* all the dirty node pages should be flushed for POR */ ret = f2fs_sync_fs(inode->i_sb, 1); down_write(&fi->i_sem); F2FS_I(inode)->xattr_ver = 0; if (file_wrong_pino(inode) && inode->i_nlink == 1 && get_parent_ino(inode, &pino)) { F2FS_I(inode)->i_pino = pino; file_got_pino(inode); up_write(&fi->i_sem); mark_inode_dirty_sync(inode); ret = f2fs_write_inode(inode, NULL); if (ret) goto out; } else { up_write(&fi->i_sem); } } else { sync_nodes: sync_node_pages(sbi, ino, &wbc); if (need_inode_block_update(sbi, ino)) { mark_inode_dirty_sync(inode); ret = f2fs_write_inode(inode, NULL); if (ret) goto out; goto sync_nodes; } ret = wait_on_node_pages_writeback(sbi, ino); if (ret) goto out; /* once recovery info is written, don't need to tack this */ remove_dirty_inode(sbi, ino, APPEND_INO); clear_inode_flag(fi, FI_APPEND_WRITE); flush_out: remove_dirty_inode(sbi, ino, UPDATE_INO); clear_inode_flag(fi, FI_UPDATE_WRITE); ret = f2fs_issue_flush(F2FS_I_SB(inode)); } out: trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret); return ret; } static pgoff_t __get_first_dirty_index(struct address_space *mapping, pgoff_t pgofs, int whence) { struct pagevec pvec; int nr_pages; if (whence != SEEK_DATA) return 0; /* find first dirty page index */ pagevec_init(&pvec, 0); 
nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs, PAGECACHE_TAG_DIRTY, 1); pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX; pagevec_release(&pvec); return pgofs; } static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs, int whence) { switch (whence) { case SEEK_DATA: if ((blkaddr == NEW_ADDR && dirty == pgofs) || (blkaddr != NEW_ADDR && blkaddr != NULL_ADDR)) return true; break; case SEEK_HOLE: if (blkaddr == NULL_ADDR) return true; break; } return false; } static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; loff_t maxbytes = inode->i_sb->s_maxbytes; struct dnode_of_data dn; pgoff_t pgofs, end_offset, dirty; loff_t data_ofs = offset; loff_t isize; int err = 0; mutex_lock(&inode->i_mutex); isize = i_size_read(inode); if (offset >= isize) goto fail; /* handle inline data case */ if (f2fs_has_inline_data(inode)) { if (whence == SEEK_HOLE) data_ofs = isize; goto found; } pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT); dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence); for (; data_ofs < isize; data_ofs = pgofs << PAGE_CACHE_SHIFT) { set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA); if (err && err != -ENOENT) { goto fail; } else if (err == -ENOENT) { /* direct node does not exists */ if (whence == SEEK_DATA) { pgofs = PGOFS_OF_NEXT_DNODE(pgofs, F2FS_I(inode)); continue; } else { goto found; } } end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); /* find data/hole in dnode block */ for (; dn.ofs_in_node < end_offset; dn.ofs_in_node++, pgofs++, data_ofs = pgofs << PAGE_CACHE_SHIFT) { block_t blkaddr; blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); if (__found_offset(blkaddr, dirty, pgofs, whence)) { f2fs_put_dnode(&dn); goto found; } } f2fs_put_dnode(&dn); } if (whence == SEEK_DATA) goto fail; found: if (whence == SEEK_HOLE && data_ofs > isize) data_ofs = isize; mutex_unlock(&inode->i_mutex); return 
vfs_setpos(file, data_ofs, maxbytes); fail: mutex_unlock(&inode->i_mutex); return -ENXIO; } static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; loff_t maxbytes = inode->i_sb->s_maxbytes; switch (whence) { case SEEK_SET: case SEEK_CUR: case SEEK_END: return generic_file_llseek_size(file, offset, whence, maxbytes, i_size_read(inode)); case SEEK_DATA: case SEEK_HOLE: if (offset < 0) return -ENXIO; return f2fs_seek_block(file, offset, whence); } return -EINVAL; } static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); vma->vm_ops = &f2fs_file_vm_ops; return 0; } int truncate_data_blocks_range(struct dnode_of_data *dn, int count) { int nr_free = 0, ofs = dn->ofs_in_node; struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct f2fs_node *raw_node; __le32 *addr; raw_node = F2FS_NODE(dn->node_page); addr = blkaddr_in_node(raw_node) + ofs; for (; count > 0; count--, addr++, dn->ofs_in_node++) { block_t blkaddr = le32_to_cpu(*addr); if (blkaddr == NULL_ADDR) continue; update_extent_cache(NULL_ADDR, dn); invalidate_blocks(sbi, blkaddr); nr_free++; } if (nr_free) { dec_valid_block_count(sbi, dn->inode, nr_free); set_page_dirty(dn->node_page); sync_inode_page(dn); } dn->ofs_in_node = ofs; trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid, dn->ofs_in_node, nr_free); return nr_free; } void truncate_data_blocks(struct dnode_of_data *dn) { truncate_data_blocks_range(dn, ADDRS_PER_BLOCK); } static void truncate_partial_data_page(struct inode *inode, u64 from) { unsigned offset = from & (PAGE_CACHE_SIZE - 1); struct page *page; if (f2fs_has_inline_data(inode)) return truncate_inline_data(inode, from); if (!offset) return; page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false); if (IS_ERR(page)) return; lock_page(page); if (unlikely(!PageUptodate(page) || page->mapping != inode->i_mapping)) goto out; f2fs_wait_on_page_writeback(page, DATA); zero_user(page, offset, 
PAGE_CACHE_SIZE - offset); set_page_dirty(page); out: f2fs_put_page(page, 1); } int truncate_blocks(struct inode *inode, u64 from, bool lock) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int blocksize = inode->i_sb->s_blocksize; struct dnode_of_data dn; pgoff_t free_from; int count = 0, err = 0; trace_f2fs_truncate_blocks_enter(inode, from); if (f2fs_has_inline_data(inode)) goto done; free_from = (pgoff_t) ((from + blocksize - 1) >> (sbi->log_blocksize)); if (lock) f2fs_lock_op(sbi); set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE); if (err) { if (err == -ENOENT) goto free_next; if (lock) f2fs_unlock_op(sbi); trace_f2fs_truncate_blocks_exit(inode, err); return err; } count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); count -= dn.ofs_in_node; f2fs_bug_on(sbi, count < 0); if (dn.ofs_in_node || IS_INODE(dn.node_page)) { truncate_data_blocks_range(&dn, count); free_from += count; } f2fs_put_dnode(&dn); free_next: err = truncate_inode_blocks(inode, free_from); if (lock) f2fs_unlock_op(sbi); done: /* lastly zero out the first data page */ truncate_partial_data_page(inode, from); trace_f2fs_truncate_blocks_exit(inode, err); return err; } void f2fs_truncate(struct inode *inode) { if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return; trace_f2fs_truncate(inode); if (!truncate_blocks(inode, i_size_read(inode), true)) { inode->i_mtime = inode->i_ctime = CURRENT_TIME; mark_inode_dirty(inode); } } int f2fs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; generic_fillattr(inode, stat); stat->blocks <<= 3; return 0; } #ifdef CONFIG_F2FS_FS_POSIX_ACL static void __setattr_copy(struct inode *inode, const struct iattr *attr) { struct f2fs_inode_info *fi = F2FS_I(inode); unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if 
(ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MTIME) inode->i_mtime = timespec_trunc(attr->ia_mtime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_CTIME) inode->i_ctime = timespec_trunc(attr->ia_ctime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; set_acl_inode(fi, mode); } } #else #define __setattr_copy setattr_copy #endif int f2fs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; struct f2fs_inode_info *fi = F2FS_I(inode); int err; err = inode_change_ok(inode, attr); if (err) return err; if (attr->ia_valid & ATTR_SIZE) { err = f2fs_convert_inline_data(inode, attr->ia_size, NULL); if (err) return err; if (attr->ia_size != i_size_read(inode)) { truncate_setsize(inode, attr->ia_size); f2fs_truncate(inode); f2fs_balance_fs(F2FS_I_SB(inode)); } else { /* * giving a chance to truncate blocks past EOF which * are fallocated with FALLOC_FL_KEEP_SIZE. 
*/ f2fs_truncate(inode); } } __setattr_copy(inode, attr); if (attr->ia_valid & ATTR_MODE) { err = posix_acl_chmod(inode, get_inode_mode(inode)); if (err || is_inode_flag_set(fi, FI_ACL_MODE)) { inode->i_mode = fi->i_acl_mode; clear_inode_flag(fi, FI_ACL_MODE); } } mark_inode_dirty(inode); return err; } const struct inode_operations f2fs_file_inode_operations = { .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = f2fs_listxattr, .removexattr = generic_removexattr, #endif .fiemap = f2fs_fiemap, }; static void fill_zero(struct inode *inode, pgoff_t index, loff_t start, loff_t len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct page *page; if (!len) return; f2fs_balance_fs(sbi); f2fs_lock_op(sbi); page = get_new_data_page(inode, NULL, index, false); f2fs_unlock_op(sbi); if (!IS_ERR(page)) { f2fs_wait_on_page_writeback(page, DATA); zero_user(page, start, len); set_page_dirty(page); f2fs_put_page(page, 1); } } int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) { pgoff_t index; int err; for (index = pg_start; index < pg_end; index++) { struct dnode_of_data dn; set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, index, LOOKUP_NODE); if (err) { if (err == -ENOENT) continue; return err; } if (dn.data_blkaddr != NULL_ADDR) truncate_data_blocks_range(&dn, 1); f2fs_put_dnode(&dn); } return 0; } static int punch_hole(struct inode *inode, loff_t offset, loff_t len) { pgoff_t pg_start, pg_end; loff_t off_start, off_end; int ret = 0; if (!S_ISREG(inode->i_mode)) return -EOPNOTSUPP; /* skip punching hole beyond i_size */ if (offset >= inode->i_size) return ret; ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, NULL); if (ret) return ret; pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; off_start = 
offset & (PAGE_CACHE_SIZE - 1); off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); if (pg_start == pg_end) { fill_zero(inode, pg_start, off_start, off_end - off_start); } else { if (off_start) fill_zero(inode, pg_start++, off_start, PAGE_CACHE_SIZE - off_start); if (off_end) fill_zero(inode, pg_end, 0, off_end); if (pg_start < pg_end) { struct address_space *mapping = inode->i_mapping; loff_t blk_start, blk_end; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); f2fs_balance_fs(sbi); blk_start = pg_start << PAGE_CACHE_SHIFT; blk_end = pg_end << PAGE_CACHE_SHIFT; truncate_inode_pages_range(mapping, blk_start, blk_end - 1); f2fs_lock_op(sbi); ret = truncate_hole(inode, pg_start, pg_end); f2fs_unlock_op(sbi); } } return ret; } static int expand_inode_data(struct inode *inode, loff_t offset, loff_t len, int mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); pgoff_t index, pg_start, pg_end; loff_t new_size = i_size_read(inode); loff_t off_start, off_end; int ret = 0; f2fs_balance_fs(sbi); ret = inode_newsize_ok(inode, (len + offset)); if (ret) return ret; ret = f2fs_convert_inline_data(inode, offset + len, NULL); if (ret) return ret; pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; off_start = offset & (PAGE_CACHE_SIZE - 1); off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); f2fs_lock_op(sbi); for (index = pg_start; index <= pg_end; index++) { struct dnode_of_data dn; if (index == pg_end && !off_end) goto noalloc; set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_reserve_block(&dn, index); if (ret) break; noalloc: if (pg_start == pg_end) new_size = offset + len; else if (index == pg_start && off_start) new_size = (index + 1) << PAGE_CACHE_SHIFT; else if (index == pg_end) new_size = (index << PAGE_CACHE_SHIFT) + off_end; else new_size += PAGE_CACHE_SIZE; } if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) { i_size_write(inode, new_size); mark_inode_dirty(inode); 
update_inode_page(inode); } f2fs_unlock_op(sbi); return ret; } static long f2fs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); long ret; if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; mutex_lock(&inode->i_mutex); if (mode & FALLOC_FL_PUNCH_HOLE) ret = punch_hole(inode, offset, len); else ret = expand_inode_data(inode, offset, len, mode); if (!ret) { inode->i_mtime = inode->i_ctime = CURRENT_TIME; mark_inode_dirty(inode); } mutex_unlock(&inode->i_mutex); trace_f2fs_fallocate(inode, mode, offset, len, ret); return ret; } #define F2FS_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL)) #define F2FS_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL) static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) { if (S_ISDIR(mode)) return flags; else if (S_ISREG(mode)) return flags & F2FS_REG_FLMASK; else return flags & F2FS_OTHER_FLMASK; } static int f2fs_ioc_getflags(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE; return put_user(flags, (int __user *)arg); } static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE; unsigned int oldflags; int ret; ret = mnt_want_write_file(filp); if (ret) return ret; if (!inode_owner_or_capable(inode)) { ret = -EACCES; goto out; } if (get_user(flags, (int __user *)arg)) { ret = -EFAULT; goto out; } flags = f2fs_mask_flags(inode->i_mode, flags); mutex_lock(&inode->i_mutex); oldflags = fi->i_flags; if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) { if (!capable(CAP_LINUX_IMMUTABLE)) { mutex_unlock(&inode->i_mutex); ret = -EPERM; goto out; } } flags = flags & FS_FL_USER_MODIFIABLE; flags |= oldflags & ~FS_FL_USER_MODIFIABLE; fi->i_flags = flags; mutex_unlock(&inode->i_mutex); 
f2fs_set_inode_flags(inode); inode->i_ctime = CURRENT_TIME; mark_inode_dirty(inode); out: mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_start_atomic_write(struct file *filp) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); if (!inode_owner_or_capable(inode)) return -EACCES; f2fs_balance_fs(sbi); set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); return f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, NULL); } static int f2fs_ioc_commit_atomic_write(struct file *filp) { struct inode *inode = file_inode(filp); int ret; if (!inode_owner_or_capable(inode)) return -EACCES; if (f2fs_is_volatile_file(inode)) return 0; ret = mnt_want_write_file(filp); if (ret) return ret; if (f2fs_is_atomic_file(inode)) commit_inmem_pages(inode, false); ret = f2fs_sync_file(filp, 0, LONG_MAX, 0); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_start_volatile_write(struct file *filp) { struct inode *inode = file_inode(filp); if (!inode_owner_or_capable(inode)) return -EACCES; set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); return 0; } static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; struct request_queue *q = bdev_get_queue(sb->s_bdev); struct fstrim_range range; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!blk_queue_discard(q)) return -EOPNOTSUPP; if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range))) return -EFAULT; range.minlen = max((unsigned int)range.minlen, q->limits.discard_granularity); ret = f2fs_trim_fs(F2FS_SB(sb), &range); if (ret < 0) return ret; if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range))) return -EFAULT; return 0; } long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch (cmd) { case F2FS_IOC_GETFLAGS: return f2fs_ioc_getflags(filp, arg); case F2FS_IOC_SETFLAGS: return f2fs_ioc_setflags(filp, arg); case 
F2FS_IOC_START_ATOMIC_WRITE: return f2fs_ioc_start_atomic_write(filp); case F2FS_IOC_COMMIT_ATOMIC_WRITE: return f2fs_ioc_commit_atomic_write(filp); case F2FS_IOC_START_VOLATILE_WRITE: return f2fs_ioc_start_volatile_write(filp); case FITRIM: return f2fs_ioc_fitrim(filp, arg); default: return -ENOTTY; } } #ifdef CONFIG_COMPAT long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case F2FS_IOC32_GETFLAGS: cmd = F2FS_IOC_GETFLAGS; break; case F2FS_IOC32_SETFLAGS: cmd = F2FS_IOC_SETFLAGS; break; default: return -ENOIOCTLCMD; } return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); } #endif const struct file_operations f2fs_file_operations = { .llseek = f2fs_llseek, .read = new_sync_read, .write = new_sync_write, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .open = generic_file_open, .mmap = f2fs_file_mmap, .fsync = f2fs_sync_file, .fallocate = f2fs_fallocate, .unlocked_ioctl = f2fs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = f2fs_compat_ioctl, #endif .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, };
gpl-2.0
viaembedded/vab820-kernel-bsp
arch/microblaze/mm/init.c
250
9573
/* * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu> * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/bootmem.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/mm.h> /* mem_init */ #include <linux/initrd.h> #include <linux/pagemap.h> #include <linux/pfn.h> #include <linux/slab.h> #include <linux/swap.h> #include <linux/export.h> #include <asm/page.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/tlb.h> /* Use for MMU and noMMU because of PCI generic code */ int mem_init_done; #ifndef CONFIG_MMU unsigned int __page_offset; EXPORT_SYMBOL(__page_offset); #else static int init_bootmem_done; #endif /* CONFIG_MMU */ char *klimit = _end; /* * Initialize the bootmem system and give it all the memory we * have available. */ unsigned long memory_start; EXPORT_SYMBOL(memory_start); unsigned long memory_end; /* due to mm/nommu.c */ unsigned long memory_size; EXPORT_SYMBOL(memory_size); /* * paging_init() sets up the page tables - in fact we've already done this. 
*/ static void __init paging_init(void) { unsigned long zones_size[MAX_NR_ZONES]; /* Clean every zones */ memset(zones_size, 0, sizeof(zones_size)); /* * old: we can DMA to/from any address.put all page into ZONE_DMA * We use only ZONE_NORMAL */ zones_size[ZONE_NORMAL] = max_mapnr; free_area_init(zones_size); } void __init setup_memory(void) { unsigned long map_size; struct memblock_region *reg; #ifndef CONFIG_MMU u32 kernel_align_start, kernel_align_size; /* Find main memory where is the kernel */ for_each_memblock(memory, reg) { memory_start = (u32)reg->base; memory_end = (u32) reg->base + reg->size; if ((memory_start <= (u32)_text) && ((u32)_text <= memory_end)) { memory_size = memory_end - memory_start; PAGE_OFFSET = memory_start; printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, " "size 0x%08x\n", __func__, (u32) memory_start, (u32) memory_end, (u32) memory_size); break; } } if (!memory_start || !memory_end) { panic("%s: Missing memory setting 0x%08x-0x%08x\n", __func__, (u32) memory_start, (u32) memory_end); } /* reservation of region where is the kernel */ kernel_align_start = PAGE_DOWN((u32)_text); /* ALIGN can be remove because _end in vmlinux.lds.S is align */ kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; memblock_reserve(kernel_align_start, kernel_align_size); printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n", __func__, kernel_align_start, kernel_align_start + kernel_align_size, kernel_align_size); #endif /* * Kernel: * start: base phys address of kernel - page align * end: base phys address of kernel - page align * * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) * max_low_pfn * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) * num_physpages - number of all pages */ /* memory start is from the kernel end (aligned) to higher addr */ min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ /* RAM is assumed contiguous */ num_physpages = max_mapnr = memory_size >> PAGE_SHIFT; max_pfn = 
max_low_pfn = memory_end >> PAGE_SHIFT; printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr); printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); /* * Find an area to use for the bootmem bitmap. * We look for the first area which is at least * 128kB in length (128kB is enough for a bitmap * for 4GB of memory, using 4kB pages), plus 1 page * (in case the address isn't page-aligned). */ map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); /* free bootmem is whole main memory */ free_bootmem(memory_start, memory_size); /* reserve allocate blocks */ for_each_memblock(reserved, reg) { pr_debug("reserved - 0x%08x-0x%08x\n", (u32) reg->base, (u32) reg->size); reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); } #ifdef CONFIG_MMU init_bootmem_done = 1; #endif paging_init(); } void free_init_pages(char *what, unsigned long begin, unsigned long end) { unsigned long addr; for (addr = begin; addr < end; addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); free_page(addr); totalram_pages++; } printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { int pages = 0; for (; start < end; start += PAGE_SIZE) { ClearPageReserved(virt_to_page(start)); init_page_count(virt_to_page(start)); free_page(start); totalram_pages++; pages++; } printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n", (int)(pages * (PAGE_SIZE / 1024))); } #endif void free_initmem(void) { free_init_pages("unused kernel memory", (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); } void __init mem_init(void) { high_memory = (void *)__va(memory_end); /* this will put all memory onto the freelists */ totalram_pages += free_all_bootmem(); 
printk(KERN_INFO "Memory: %luk/%luk available\n", nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10)); mem_init_done = 1; } #ifndef CONFIG_MMU int page_is_ram(unsigned long pfn) { return __range_ok(pfn, 0); } #else int page_is_ram(unsigned long pfn) { return pfn < max_low_pfn; } /* * Check for command-line options that affect what MMU_init will do. */ static void mm_cmdline_setup(void) { unsigned long maxmem = 0; char *p = cmd_line; /* Look for mem= option on command line */ p = strstr(cmd_line, "mem="); if (p) { p += 4; maxmem = memparse(p, &p); if (maxmem && memory_size > maxmem) { memory_size = maxmem; memory_end = memory_start + memory_size; memblock.memory.regions[0].size = memory_size; } } } /* * MMU_init_hw does the chip-specific initialization of the MMU hardware. */ static void __init mmu_init_hw(void) { /* * The Zone Protection Register (ZPR) defines how protection will * be applied to every page which is a member of a given zone. At * present, we utilize only two of the zones. * The zone index bits (of ZSEL) in the PTE are used for software * indicators, except the LSB. For user access, zone 1 is used, * for kernel access, zone 0 is used. We set all but zone 1 * to zero, allowing only kernel access as indicated in the PTE. * For zone 1, we set a 01 binary (a value of 10 will not work) * to allow user access as indicated in the PTE. This also allows * kernel access as indicated in the PTE. */ __asm__ __volatile__ ("ori r11, r0, 0x10000000;" \ "mts rzpr, r11;" : : : "r11"); } /* * MMU_init sets up the basic memory mappings for the kernel, * including both RAM and possibly some I/O regions, * and sets up the page tables and the MMU hardware ready to go. 
*/ /* called from head.S */ asmlinkage void __init mmu_init(void) { unsigned int kstart, ksize; if (!memblock.reserved.cnt) { printk(KERN_EMERG "Error memory count\n"); machine_restart(NULL); } if ((u32) memblock.memory.regions[0].size < 0x1000000) { printk(KERN_EMERG "Memory must be greater than 16MB\n"); machine_restart(NULL); } /* Find main memory where the kernel is */ memory_start = (u32) memblock.memory.regions[0].base; memory_end = (u32) memblock.memory.regions[0].base + (u32) memblock.memory.regions[0].size; memory_size = memory_end - memory_start; mm_cmdline_setup(); /* FIXME parse args from command line - not used */ /* * Map out the kernel text/data/bss from the available physical * memory. */ kstart = __pa(CONFIG_KERNEL_START); /* kernel start */ /* kernel size */ ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START)); memblock_reserve(kstart, ksize); #if defined(CONFIG_BLK_DEV_INITRD) /* Remove the init RAM disk from the available memory. */ /* if (initrd_start) { mem_pieces_remove(&phys_avail, __pa(initrd_start), initrd_end - initrd_start, 1); }*/ #endif /* CONFIG_BLK_DEV_INITRD */ /* Initialize the MMU hardware */ mmu_init_hw(); /* Map in all of RAM starting at CONFIG_KERNEL_START */ mapin_ram(); #ifdef CONFIG_HIGHMEM_START_BOOL ioremap_base = CONFIG_HIGHMEM_START; #else ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */ #endif /* CONFIG_HIGHMEM_START_BOOL */ ioremap_bot = ioremap_base; /* Initialize the context management stuff */ mmu_context_init(); } /* This is only called until mem_init is done. 
*/ void __init *early_get_page(void) { void *p; if (init_bootmem_done) { p = alloc_bootmem_pages(PAGE_SIZE); } else { /* * Mem start + 32MB -> here is limit * because of mem mapping from head.S */ p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, memory_start + 0x2000000)); } return p; } #endif /* CONFIG_MMU */ void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask) { if (mem_init_done) return kmalloc(size, mask); else return alloc_bootmem(size); } void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) { void *p; if (mem_init_done) p = kzalloc(size, mask); else { p = alloc_bootmem(size); if (p) memset(p, 0, size); } return p; }
gpl-2.0
marsleezm/qemu
linux-user/arm/nwfpe/extended_cpdo.c
250
6199
/* NetWinder Floating Point Emulator (c) Rebel.COM, 1998,1999 Direct questions, comments to Scott Bambrough <scottb@netwinder.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "fpa11.h" #include "softfloat.h" #include "fpopcode.h" floatx80 floatx80_exp(floatx80 Fm); floatx80 floatx80_ln(floatx80 Fm); floatx80 floatx80_sin(floatx80 rFm); floatx80 floatx80_cos(floatx80 rFm); floatx80 floatx80_arcsin(floatx80 rFm); floatx80 floatx80_arctan(floatx80 rFm); floatx80 floatx80_log(floatx80 rFm); floatx80 floatx80_tan(floatx80 rFm); floatx80 floatx80_arccos(floatx80 rFm); floatx80 floatx80_pow(floatx80 rFn,floatx80 rFm); floatx80 floatx80_pol(floatx80 rFn,floatx80 rFm); unsigned int ExtendedCPDO(const unsigned int opcode) { FPA11 *fpa11 = GET_FPA11(); floatx80 rFm, rFn; unsigned int Fd, Fm, Fn, nRc = 1; //printk("ExtendedCPDO(0x%08x)\n",opcode); Fm = getFm(opcode); if (CONSTANT_FM(opcode)) { rFm = getExtendedConstant(Fm); } else { switch (fpa11->fType[Fm]) { case typeSingle: rFm = float32_to_floatx80(fpa11->fpreg[Fm].fSingle, &fpa11->fp_status); break; case typeDouble: rFm = float64_to_floatx80(fpa11->fpreg[Fm].fDouble, &fpa11->fp_status); break; case typeExtended: rFm = fpa11->fpreg[Fm].fExtended; break; default: return 0; } } if (!MONADIC_INSTRUCTION(opcode)) { Fn = getFn(opcode); switch (fpa11->fType[Fn]) { case typeSingle: rFn = float32_to_floatx80(fpa11->fpreg[Fn].fSingle, &fpa11->fp_status); break; case 
typeDouble: rFn = float64_to_floatx80(fpa11->fpreg[Fn].fDouble, &fpa11->fp_status); break; case typeExtended: rFn = fpa11->fpreg[Fn].fExtended; break; default: return 0; } } Fd = getFd(opcode); switch (opcode & MASK_ARITHMETIC_OPCODE) { /* dyadic opcodes */ case ADF_CODE: fpa11->fpreg[Fd].fExtended = floatx80_add(rFn,rFm, &fpa11->fp_status); break; case MUF_CODE: case FML_CODE: fpa11->fpreg[Fd].fExtended = floatx80_mul(rFn,rFm, &fpa11->fp_status); break; case SUF_CODE: fpa11->fpreg[Fd].fExtended = floatx80_sub(rFn,rFm, &fpa11->fp_status); break; case RSF_CODE: fpa11->fpreg[Fd].fExtended = floatx80_sub(rFm,rFn, &fpa11->fp_status); break; case DVF_CODE: case FDV_CODE: fpa11->fpreg[Fd].fExtended = floatx80_div(rFn,rFm, &fpa11->fp_status); break; case RDF_CODE: case FRD_CODE: fpa11->fpreg[Fd].fExtended = floatx80_div(rFm,rFn, &fpa11->fp_status); break; #if 0 case POW_CODE: fpa11->fpreg[Fd].fExtended = floatx80_pow(rFn,rFm); break; case RPW_CODE: fpa11->fpreg[Fd].fExtended = floatx80_pow(rFm,rFn); break; #endif case RMF_CODE: fpa11->fpreg[Fd].fExtended = floatx80_rem(rFn,rFm, &fpa11->fp_status); break; #if 0 case POL_CODE: fpa11->fpreg[Fd].fExtended = floatx80_pol(rFn,rFm); break; #endif /* monadic opcodes */ case MVF_CODE: fpa11->fpreg[Fd].fExtended = rFm; break; case MNF_CODE: rFm.high ^= 0x8000; fpa11->fpreg[Fd].fExtended = rFm; break; case ABS_CODE: rFm.high &= 0x7fff; fpa11->fpreg[Fd].fExtended = rFm; break; case RND_CODE: case URD_CODE: fpa11->fpreg[Fd].fExtended = floatx80_round_to_int(rFm, &fpa11->fp_status); break; case SQT_CODE: fpa11->fpreg[Fd].fExtended = floatx80_sqrt(rFm, &fpa11->fp_status); break; #if 0 case LOG_CODE: fpa11->fpreg[Fd].fExtended = floatx80_log(rFm); break; case LGN_CODE: fpa11->fpreg[Fd].fExtended = floatx80_ln(rFm); break; case EXP_CODE: fpa11->fpreg[Fd].fExtended = floatx80_exp(rFm); break; case SIN_CODE: fpa11->fpreg[Fd].fExtended = floatx80_sin(rFm); break; case COS_CODE: fpa11->fpreg[Fd].fExtended = floatx80_cos(rFm); break; case 
TAN_CODE: fpa11->fpreg[Fd].fExtended = floatx80_tan(rFm); break; case ASN_CODE: fpa11->fpreg[Fd].fExtended = floatx80_arcsin(rFm); break; case ACS_CODE: fpa11->fpreg[Fd].fExtended = floatx80_arccos(rFm); break; case ATN_CODE: fpa11->fpreg[Fd].fExtended = floatx80_arctan(rFm); break; #endif case NRM_CODE: break; default: { nRc = 0; } } if (0 != nRc) fpa11->fType[Fd] = typeExtended; return nRc; } #if 0 floatx80 floatx80_exp(floatx80 Fm) { //series } floatx80 floatx80_ln(floatx80 Fm) { //series } floatx80 floatx80_sin(floatx80 rFm) { //series } floatx80 floatx80_cos(floatx80 rFm) { //series } floatx80 floatx80_arcsin(floatx80 rFm) { //series } floatx80 floatx80_arctan(floatx80 rFm) { //series } floatx80 floatx80_log(floatx80 rFm) { return floatx80_div(floatx80_ln(rFm),getExtendedConstant(7)); } floatx80 floatx80_tan(floatx80 rFm) { return floatx80_div(floatx80_sin(rFm),floatx80_cos(rFm)); } floatx80 floatx80_arccos(floatx80 rFm) { //return floatx80_sub(halfPi,floatx80_arcsin(rFm)); } floatx80 floatx80_pow(floatx80 rFn,floatx80 rFm) { return floatx80_exp(floatx80_mul(rFm,floatx80_ln(rFn))); } floatx80 floatx80_pol(floatx80 rFn,floatx80 rFm) { return floatx80_arctan(floatx80_div(rFn,rFm)); } #endif
gpl-2.0
FeyoMx/MDSdevKernel_a7010
sound/firewire/fireworks/fireworks_hwdep.c
250
7535
/* * fireworks_hwdep.c - a part of driver for Fireworks based devices * * Copyright (c) 2013-2014 Takashi Sakamoto * * Licensed under the terms of the GNU General Public License, version 2. */ /* * This codes have five functionalities. * * 1.get information about firewire node * 2.get notification about starting/stopping stream * 3.lock/unlock streaming * 4.transmit command of EFW transaction * 5.receive response of EFW transaction * */ #include "fireworks.h" static long hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained, loff_t *offset) { unsigned int length, till_end, type; struct snd_efw_transaction *t; u8 *pull_ptr; long count = 0; if (remained < sizeof(type) + sizeof(struct snd_efw_transaction)) return -ENOSPC; /* data type is SNDRV_FIREWIRE_EVENT_EFW_RESPONSE */ type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE; if (copy_to_user(buf, &type, sizeof(type))) return -EFAULT; remained -= sizeof(type); buf += sizeof(type); /* write into buffer as many responses as possible */ spin_lock_irq(&efw->lock); /* * When another task reaches here during this task's access to user * space, it picks up current position in buffer and can read the same * series of responses. 
*/ pull_ptr = efw->pull_ptr; while (efw->push_ptr != pull_ptr) { t = (struct snd_efw_transaction *)(pull_ptr); length = be32_to_cpu(t->length) * sizeof(__be32); /* confirm enough space for this response */ if (remained < length) break; /* copy from ring buffer to user buffer */ while (length > 0) { till_end = snd_efw_resp_buf_size - (unsigned int)(pull_ptr - efw->resp_buf); till_end = min_t(unsigned int, length, till_end); spin_unlock_irq(&efw->lock); if (copy_to_user(buf, pull_ptr, till_end)) return -EFAULT; spin_lock_irq(&efw->lock); pull_ptr += till_end; if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size) pull_ptr -= snd_efw_resp_buf_size; length -= till_end; buf += till_end; count += till_end; remained -= till_end; } } /* * All of tasks can read from the buffer nearly simultaneously, but the * last position for each task is different depending on the length of * given buffer. Here, for simplicity, a position of buffer is set by * the latest task. It's better for a listening application to allow one * thread to read from the buffer. Unless, each task can read different * sequence of responses depending on variation of buffer length. 
*/ efw->pull_ptr = pull_ptr; spin_unlock_irq(&efw->lock); return count; } static long hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count, loff_t *offset) { union snd_firewire_event event = { .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS, }; spin_lock_irq(&efw->lock); event.lock_status.status = (efw->dev_lock_count > 0); efw->dev_lock_changed = false; spin_unlock_irq(&efw->lock); count = min_t(long, count, sizeof(event.lock_status)); if (copy_to_user(buf, &event, count)) return -EFAULT; return count; } static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count, loff_t *offset) { struct snd_efw *efw = hwdep->private_data; DEFINE_WAIT(wait); bool dev_lock_changed; bool queued; spin_lock_irq(&efw->lock); dev_lock_changed = efw->dev_lock_changed; queued = efw->push_ptr != efw->pull_ptr; while (!dev_lock_changed && !queued) { prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE); spin_unlock_irq(&efw->lock); schedule(); finish_wait(&efw->hwdep_wait, &wait); if (signal_pending(current)) return -ERESTARTSYS; spin_lock_irq(&efw->lock); dev_lock_changed = efw->dev_lock_changed; queued = efw->push_ptr != efw->pull_ptr; } spin_unlock_irq(&efw->lock); if (dev_lock_changed) count = hwdep_read_locked(efw, buf, count, offset); else if (queued) count = hwdep_read_resp_buf(efw, buf, count, offset); return count; } static long hwdep_write(struct snd_hwdep *hwdep, const char __user *data, long count, loff_t *offset) { struct snd_efw *efw = hwdep->private_data; u32 seqnum; u8 *buf; if (count < sizeof(struct snd_efw_transaction) || SND_EFW_RESPONSE_MAXIMUM_BYTES < count) return -EINVAL; buf = memdup_user(data, count); if (IS_ERR(buf)) return PTR_ERR(buf); /* check seqnum is not for kernel-land */ seqnum = be32_to_cpu(((struct snd_efw_transaction *)buf)->seqnum); if (seqnum > SND_EFW_TRANSACTION_USER_SEQNUM_MAX) { count = -EINVAL; goto end; } if (snd_efw_transaction_cmd(efw->unit, buf, count) < 0) count = -EIO; end: kfree(buf); return 
count; } static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait) { struct snd_efw *efw = hwdep->private_data; unsigned int events; poll_wait(file, &efw->hwdep_wait, wait); spin_lock_irq(&efw->lock); if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr) events = POLLIN | POLLRDNORM; else events = 0; spin_unlock_irq(&efw->lock); return events | POLLOUT; } static int hwdep_get_info(struct snd_efw *efw, void __user *arg) { struct fw_device *dev = fw_parent_device(efw->unit); struct snd_firewire_get_info info; memset(&info, 0, sizeof(info)); info.type = SNDRV_FIREWIRE_TYPE_FIREWORKS; info.card = dev->card->index; *(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]); *(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]); strlcpy(info.device_name, dev_name(&dev->device), sizeof(info.device_name)); if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; return 0; } static int hwdep_lock(struct snd_efw *efw) { int err; spin_lock_irq(&efw->lock); if (efw->dev_lock_count == 0) { efw->dev_lock_count = -1; err = 0; } else { err = -EBUSY; } spin_unlock_irq(&efw->lock); return err; } static int hwdep_unlock(struct snd_efw *efw) { int err; spin_lock_irq(&efw->lock); if (efw->dev_lock_count == -1) { efw->dev_lock_count = 0; err = 0; } else { err = -EBADFD; } spin_unlock_irq(&efw->lock); return err; } static int hwdep_release(struct snd_hwdep *hwdep, struct file *file) { struct snd_efw *efw = hwdep->private_data; spin_lock_irq(&efw->lock); if (efw->dev_lock_count == -1) efw->dev_lock_count = 0; spin_unlock_irq(&efw->lock); return 0; } static int hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file, unsigned int cmd, unsigned long arg) { struct snd_efw *efw = hwdep->private_data; switch (cmd) { case SNDRV_FIREWIRE_IOCTL_GET_INFO: return hwdep_get_info(efw, (void __user *)arg); case SNDRV_FIREWIRE_IOCTL_LOCK: return hwdep_lock(efw); case SNDRV_FIREWIRE_IOCTL_UNLOCK: return hwdep_unlock(efw); default: return -ENOIOCTLCMD; } } 
#ifdef CONFIG_COMPAT static int hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file, unsigned int cmd, unsigned long arg) { return hwdep_ioctl(hwdep, file, cmd, (unsigned long)compat_ptr(arg)); } #else #define hwdep_compat_ioctl NULL #endif static const struct snd_hwdep_ops hwdep_ops = { .read = hwdep_read, .write = hwdep_write, .release = hwdep_release, .poll = hwdep_poll, .ioctl = hwdep_ioctl, .ioctl_compat = hwdep_compat_ioctl, }; int snd_efw_create_hwdep_device(struct snd_efw *efw) { struct snd_hwdep *hwdep; int err; err = snd_hwdep_new(efw->card, "Fireworks", 0, &hwdep); if (err < 0) goto end; strcpy(hwdep->name, "Fireworks"); hwdep->iface = SNDRV_HWDEP_IFACE_FW_FIREWORKS; hwdep->ops = hwdep_ops; hwdep->private_data = efw; hwdep->exclusive = true; end: return err; }
gpl-2.0
boa19861105/android_442_KitKat_kernel_htc_dlxub1
drivers/mtd/mtdpart.c
506
20988
/* * Simple MTD partitioning layer * * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net> * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de> * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/kmod.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/err.h> #include "mtdcore.h" /* Our partition linked list */ static LIST_HEAD(mtd_partitions); static DEFINE_MUTEX(mtd_partitions_mutex); /* Our partition node structure */ struct mtd_part { struct mtd_info mtd; struct mtd_info *master; uint64_t offset; struct list_head list; }; /* * Given a pointer to the MTD object in the mtd_part structure, we can retrieve * the pointer to that structure with this macro. */ #define PART(x) ((struct mtd_part *)(x)) /* * MTD methods which simply translate the effective address and pass through * to the _real_ device. 
*/ static int part_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_part *part = PART(mtd); struct mtd_ecc_stats stats; int res; stats = part->master->ecc_stats; res = part->master->_read(part->master, from + part->offset, len, retlen, buf); if (unlikely(res)) { if (mtd_is_bitflip(res)) mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; if (mtd_is_eccerr(res)) mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed; } return res; } static int part_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys) { struct mtd_part *part = PART(mtd); return part->master->_point(part->master, from + part->offset, len, retlen, virt, phys); } static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) { struct mtd_part *part = PART(mtd); return part->master->_unpoint(part->master, from + part->offset, len); } static unsigned long part_get_unmapped_area(struct mtd_info *mtd, unsigned long len, unsigned long offset, unsigned long flags) { struct mtd_part *part = PART(mtd); offset += part->offset; return part->master->_get_unmapped_area(part->master, len, offset, flags); } static int part_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { struct mtd_part *part = PART(mtd); int res; if (from >= mtd->size) return -EINVAL; if (ops->datbuf && from + ops->len > mtd->size) return -EINVAL; /* * If OOB is also requested, make sure that we do not read past the end * of this partition. 
*/ if (ops->oobbuf) { size_t len, pages; if (ops->mode == MTD_OPS_AUTO_OOB) len = mtd->oobavail; else len = mtd->oobsize; pages = mtd_div_by_ws(mtd->size, mtd); pages -= mtd_div_by_ws(from, mtd); if (ops->ooboffs + ops->ooblen > pages * len) return -EINVAL; } res = part->master->_read_oob(part->master, from + part->offset, ops); if (unlikely(res)) { if (mtd_is_bitflip(res)) mtd->ecc_stats.corrected++; if (mtd_is_eccerr(res)) mtd->ecc_stats.failed++; } return res; } static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_part *part = PART(mtd); return part->master->_read_user_prot_reg(part->master, from, len, retlen, buf); } static int part_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf, size_t len) { struct mtd_part *part = PART(mtd); return part->master->_get_user_prot_info(part->master, buf, len); } static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_part *part = PART(mtd); return part->master->_read_fact_prot_reg(part->master, from, len, retlen, buf); } static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, size_t len) { struct mtd_part *part = PART(mtd); return part->master->_get_fact_prot_info(part->master, buf, len); } static int part_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct mtd_part *part = PART(mtd); return part->master->_write(part->master, to + part->offset, len, retlen, buf); } static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct mtd_part *part = PART(mtd); return part->master->_panic_write(part->master, to + part->offset, len, retlen, buf); } static int part_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { struct mtd_part *part = PART(mtd); if (to >= mtd->size) return -EINVAL; if (ops->datbuf && to + ops->len > mtd->size) return -EINVAL; 
return part->master->_write_oob(part->master, to + part->offset, ops); } static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_part *part = PART(mtd); return part->master->_write_user_prot_reg(part->master, from, len, retlen, buf); } static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) { struct mtd_part *part = PART(mtd); return part->master->_lock_user_prot_reg(part->master, from, len); } static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen) { struct mtd_part *part = PART(mtd); return part->master->_writev(part->master, vecs, count, to + part->offset, retlen); } static int part_erase(struct mtd_info *mtd, struct erase_info *instr) { struct mtd_part *part = PART(mtd); int ret; instr->addr += part->offset; ret = part->master->_erase(part->master, instr); if (ret) { if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) instr->fail_addr -= part->offset; instr->addr -= part->offset; } return ret; } void mtd_erase_callback(struct erase_info *instr) { if (instr->mtd->_erase == part_erase) { struct mtd_part *part = PART(instr->mtd); if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) instr->fail_addr -= part->offset; instr->addr -= part->offset; } if (instr->callback) instr->callback(instr); } EXPORT_SYMBOL_GPL(mtd_erase_callback); static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct mtd_part *part = PART(mtd); return part->master->_lock(part->master, ofs + part->offset, len); } static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct mtd_part *part = PART(mtd); return part->master->_unlock(part->master, ofs + part->offset, len); } static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct mtd_part *part = PART(mtd); return part->master->_is_locked(part->master, ofs + part->offset, len); } static void part_sync(struct mtd_info *mtd) { struct mtd_part 
*part = PART(mtd); part->master->_sync(part->master); } static int part_suspend(struct mtd_info *mtd) { struct mtd_part *part = PART(mtd); return part->master->_suspend(part->master); } static void part_resume(struct mtd_info *mtd) { struct mtd_part *part = PART(mtd); part->master->_resume(part->master); } static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) { struct mtd_part *part = PART(mtd); ofs += part->offset; return part->master->_block_isbad(part->master, ofs); } static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) { struct mtd_part *part = PART(mtd); int res; ofs += part->offset; res = part->master->_block_markbad(part->master, ofs); if (!res) mtd->ecc_stats.badblocks++; return res; } static inline void free_partition(struct mtd_part *p) { kfree(p->mtd.name); kfree(p); } /* * This function unregisters and destroy all slave MTD objects which are * attached to the given master MTD object. */ int del_mtd_partitions(struct mtd_info *master) { struct mtd_part *slave, *next; int ret, err = 0; mutex_lock(&mtd_partitions_mutex); list_for_each_entry_safe(slave, next, &mtd_partitions, list) if (slave->master == master) { ret = del_mtd_device(&slave->mtd); if (ret < 0) { err = ret; continue; } list_del(&slave->list); free_partition(slave); } mutex_unlock(&mtd_partitions_mutex); return err; } static struct mtd_part *allocate_partition(struct mtd_info *master, const struct mtd_partition *part, int partno, uint64_t cur_offset) { struct mtd_part *slave; char *name; /* allocate the partition structure */ slave = kzalloc(sizeof(*slave), GFP_KERNEL); name = kstrdup(part->name, GFP_KERNEL); if (!name || !slave) { printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n", master->name); kfree(name); kfree(slave); return ERR_PTR(-ENOMEM); } /* set up the MTD object for this partition */ slave->mtd.type = master->type; slave->mtd.flags = master->flags & ~part->mask_flags; slave->mtd.size = part->size; slave->mtd.writesize = 
master->writesize; slave->mtd.writebufsize = master->writebufsize; slave->mtd.oobsize = master->oobsize; slave->mtd.oobavail = master->oobavail; slave->mtd.subpage_sft = master->subpage_sft; slave->mtd.name = name; slave->mtd.owner = master->owner; slave->mtd.backing_dev_info = master->backing_dev_info; /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone * to have the same data be in two different partitions. */ slave->mtd.dev.parent = master->dev.parent; slave->mtd._read = part_read; slave->mtd._write = part_write; if (master->_panic_write) slave->mtd._panic_write = part_panic_write; if (master->_point && master->_unpoint) { slave->mtd._point = part_point; slave->mtd._unpoint = part_unpoint; } if (master->_get_unmapped_area) slave->mtd._get_unmapped_area = part_get_unmapped_area; if (master->_read_oob) slave->mtd._read_oob = part_read_oob; if (master->_write_oob) slave->mtd._write_oob = part_write_oob; if (master->_read_user_prot_reg) slave->mtd._read_user_prot_reg = part_read_user_prot_reg; if (master->_read_fact_prot_reg) slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg; if (master->_write_user_prot_reg) slave->mtd._write_user_prot_reg = part_write_user_prot_reg; if (master->_lock_user_prot_reg) slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg; if (master->_get_user_prot_info) slave->mtd._get_user_prot_info = part_get_user_prot_info; if (master->_get_fact_prot_info) slave->mtd._get_fact_prot_info = part_get_fact_prot_info; if (master->_sync) slave->mtd._sync = part_sync; if (!partno && !master->dev.class && master->_suspend && master->_resume) { slave->mtd._suspend = part_suspend; slave->mtd._resume = part_resume; } if (master->_writev) slave->mtd._writev = part_writev; if (master->_lock) slave->mtd._lock = part_lock; if (master->_unlock) slave->mtd._unlock = part_unlock; if (master->_is_locked) slave->mtd._is_locked = part_is_locked; if (master->_block_isbad) slave->mtd._block_isbad = part_block_isbad; if (master->_block_markbad) 
slave->mtd._block_markbad = part_block_markbad; slave->mtd._erase = part_erase; slave->master = master; slave->offset = part->offset; if (slave->offset == MTDPART_OFS_APPEND) slave->offset = cur_offset; if (slave->offset == MTDPART_OFS_NXTBLK) { slave->offset = cur_offset; if (mtd_mod_by_eb(cur_offset, master) != 0) { /* Round up to next erasesize */ slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize; printk(KERN_NOTICE "Moving partition %d: " "0x%012llx -> 0x%012llx\n", partno, (unsigned long long)cur_offset, (unsigned long long)slave->offset); } } if (slave->offset == MTDPART_OFS_RETAIN) { slave->offset = cur_offset; if (master->size - slave->offset >= slave->mtd.size) { slave->mtd.size = master->size - slave->offset - slave->mtd.size; } else { printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n", part->name, master->size - slave->offset, slave->mtd.size); /* register to preserve ordering */ goto out_register; } } if (slave->mtd.size == MTDPART_SIZ_FULL) slave->mtd.size = master->size - slave->offset; printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset, (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name); /* let's do some sanity checks */ if (slave->offset >= master->size) { /* let's register it anyway to preserve ordering */ slave->offset = 0; slave->mtd.size = 0; printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", part->name); goto out_register; } if (slave->offset + slave->mtd.size > master->size) { slave->mtd.size = master->size - slave->offset; printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n", part->name, master->name, (unsigned long long)slave->mtd.size); } if (master->numeraseregions > 1) { /* Deal with variable erase size stuff */ int i, max = master->numeraseregions; u64 end = slave->offset + slave->mtd.size; struct mtd_erase_region_info *regions = 
master->eraseregions; /* Find the first erase regions which is part of this * partition. */ for (i = 0; i < max && regions[i].offset <= slave->offset; i++) ; /* The loop searched for the region _behind_ the first one */ if (i > 0) i--; /* Pick biggest erasesize */ for (; i < max && regions[i].offset < end; i++) { if (slave->mtd.erasesize < regions[i].erasesize) { slave->mtd.erasesize = regions[i].erasesize; } } BUG_ON(slave->mtd.erasesize == 0); } else { /* Single erase size */ slave->mtd.erasesize = master->erasesize; } if ((slave->mtd.flags & MTD_WRITEABLE) && mtd_mod_by_eb(slave->offset, &slave->mtd)) { /* Doesn't start on a boundary of major erase size */ /* FIXME: Let it be writable if it is on a boundary of * _minor_ erase size though */ slave->mtd.flags &= ~MTD_WRITEABLE; printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n", part->name); } if ((slave->mtd.flags & MTD_WRITEABLE) && mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) { slave->mtd.flags &= ~MTD_WRITEABLE; printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n", part->name); } slave->mtd.ecclayout = master->ecclayout; slave->mtd.ecc_strength = master->ecc_strength; if (master->_block_isbad) { uint64_t offs = 0; while (offs < slave->mtd.size) { if (mtd_block_isbad(master, offs + slave->offset)) slave->mtd.ecc_stats.badblocks++; offs += slave->mtd.erasesize; } } out_register: return slave; } int mtd_add_partition(struct mtd_info *master, char *name, long long offset, long long length) { struct mtd_partition part; struct mtd_part *p, *new; uint64_t start, end; int ret = 0; /* the direct offset is expected */ if (offset == MTDPART_OFS_APPEND || offset == MTDPART_OFS_NXTBLK) return -EINVAL; if (length == MTDPART_SIZ_FULL) length = master->size - offset; if (length <= 0) return -EINVAL; part.name = name; part.size = length; part.offset = offset; part.mask_flags = 0; part.ecclayout = NULL; new = 
allocate_partition(master, &part, -1, offset); if (IS_ERR(new)) return PTR_ERR(new); start = offset; end = offset + length; mutex_lock(&mtd_partitions_mutex); list_for_each_entry(p, &mtd_partitions, list) if (p->master == master) { if ((start >= p->offset) && (start < (p->offset + p->mtd.size))) goto err_inv; if ((end >= p->offset) && (end < (p->offset + p->mtd.size))) goto err_inv; } list_add(&new->list, &mtd_partitions); mutex_unlock(&mtd_partitions_mutex); add_mtd_device(&new->mtd); return ret; err_inv: mutex_unlock(&mtd_partitions_mutex); free_partition(new); return -EINVAL; } EXPORT_SYMBOL_GPL(mtd_add_partition); int mtd_del_partition(struct mtd_info *master, int partno) { struct mtd_part *slave, *next; int ret = -EINVAL; mutex_lock(&mtd_partitions_mutex); list_for_each_entry_safe(slave, next, &mtd_partitions, list) if ((slave->master == master) && (slave->mtd.index == partno)) { ret = del_mtd_device(&slave->mtd); if (ret < 0) break; list_del(&slave->list); free_partition(slave); break; } mutex_unlock(&mtd_partitions_mutex); return ret; } EXPORT_SYMBOL_GPL(mtd_del_partition); /* * This function, given a master MTD object and a partition table, creates * and registers slave MTD objects which are bound to the master according to * the partition definitions. * * We don't register the master, or expect the caller to have done so, * for reasons of data integrity. 
*/ int add_mtd_partitions(struct mtd_info *master, const struct mtd_partition *parts, int nbparts) { struct mtd_part *slave; uint64_t cur_offset = 0; int i; printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); for (i = 0; i < nbparts; i++) { slave = allocate_partition(master, parts + i, i, cur_offset); if (IS_ERR(slave)) return PTR_ERR(slave); mutex_lock(&mtd_partitions_mutex); list_add(&slave->list, &mtd_partitions); mutex_unlock(&mtd_partitions_mutex); add_mtd_device(&slave->mtd); cur_offset = slave->offset + slave->mtd.size; } return 0; } static DEFINE_SPINLOCK(part_parser_lock); static LIST_HEAD(part_parsers); static struct mtd_part_parser *get_partition_parser(const char *name) { struct mtd_part_parser *p, *ret = NULL; spin_lock(&part_parser_lock); list_for_each_entry(p, &part_parsers, list) if (!strcmp(p->name, name) && try_module_get(p->owner)) { ret = p; break; } spin_unlock(&part_parser_lock); return ret; } #define put_partition_parser(p) do { module_put((p)->owner); } while (0) int register_mtd_parser(struct mtd_part_parser *p) { spin_lock(&part_parser_lock); list_add(&p->list, &part_parsers); spin_unlock(&part_parser_lock); return 0; } EXPORT_SYMBOL_GPL(register_mtd_parser); int deregister_mtd_parser(struct mtd_part_parser *p) { spin_lock(&part_parser_lock); list_del(&p->list); spin_unlock(&part_parser_lock); return 0; } EXPORT_SYMBOL_GPL(deregister_mtd_parser); /* * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you * are changing this array! */ static const char *default_mtd_part_types[] = { "cmdlinepart", "ofpart", NULL }; /** * parse_mtd_partitions - parse MTD partitions * @master: the master partition (describes whole MTD device) * @types: names of partition parsers to try or %NULL * @pparts: array of partitions found is returned here * @data: MTD partition parser-specific data * * This function tries to find partition on MTD device @master. It uses MTD * partition parsers, specified in @types. 
However, if @types is %NULL, then * the default list of parsers is used. The default list contains only the * "cmdlinepart" and "ofpart" parsers ATM. * * This function may return: * o a negative error code in case of failure * o zero if no partitions were found * o a positive number of found partitions, in which case on exit @pparts will * point to an array containing this number of &struct mtd_info objects. */ int parse_mtd_partitions(struct mtd_info *master, const char **types, struct mtd_partition **pparts, struct mtd_part_parser_data *data) { struct mtd_part_parser *parser; int ret = 0; if (!types) types = default_mtd_part_types; for ( ; ret <= 0 && *types; types++) { parser = get_partition_parser(*types); if (!parser && !request_module("%s", *types)) parser = get_partition_parser(*types); if (!parser) continue; ret = (*parser->parse_fn)(master, pparts, data); if (ret > 0) { printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", ret, parser->name, master->name); } put_partition_parser(parser); } return ret; } int mtd_is_partition(struct mtd_info *mtd) { struct mtd_part *part; int ispart = 0; mutex_lock(&mtd_partitions_mutex); list_for_each_entry(part, &mtd_partitions, list) if (&part->mtd == mtd) { ispart = 1; break; } mutex_unlock(&mtd_partitions_mutex); return ispart; } EXPORT_SYMBOL_GPL(mtd_is_partition);
gpl-2.0
Acidburn0zzz/linux-tux3
drivers/spi/spi-bcm2835.c
506
11024
/* * Driver for Broadcom BCM2835 SPI Controllers * * Copyright (C) 2012 Chris Boot * Copyright (C) 2013 Stephen Warren * * This driver is inspired by: * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> * spi-atmel.c, Copyright (C) 2006 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_device.h> #include <linux/spi/spi.h> /* SPI register offsets */ #define BCM2835_SPI_CS 0x00 #define BCM2835_SPI_FIFO 0x04 #define BCM2835_SPI_CLK 0x08 #define BCM2835_SPI_DLEN 0x0c #define BCM2835_SPI_LTOH 0x10 #define BCM2835_SPI_DC 0x14 /* Bitfields in CS */ #define BCM2835_SPI_CS_LEN_LONG 0x02000000 #define BCM2835_SPI_CS_DMA_LEN 0x01000000 #define BCM2835_SPI_CS_CSPOL2 0x00800000 #define BCM2835_SPI_CS_CSPOL1 0x00400000 #define BCM2835_SPI_CS_CSPOL0 0x00200000 #define BCM2835_SPI_CS_RXF 0x00100000 #define BCM2835_SPI_CS_RXR 0x00080000 #define BCM2835_SPI_CS_TXD 0x00040000 #define BCM2835_SPI_CS_RXD 0x00020000 #define BCM2835_SPI_CS_DONE 0x00010000 #define BCM2835_SPI_CS_LEN 0x00002000 #define BCM2835_SPI_CS_REN 0x00001000 #define 
BCM2835_SPI_CS_ADCS 0x00000800 #define BCM2835_SPI_CS_INTR 0x00000400 #define BCM2835_SPI_CS_INTD 0x00000200 #define BCM2835_SPI_CS_DMAEN 0x00000100 #define BCM2835_SPI_CS_TA 0x00000080 #define BCM2835_SPI_CS_CSPOL 0x00000040 #define BCM2835_SPI_CS_CLEAR_RX 0x00000020 #define BCM2835_SPI_CS_CLEAR_TX 0x00000010 #define BCM2835_SPI_CS_CPOL 0x00000008 #define BCM2835_SPI_CS_CPHA 0x00000004 #define BCM2835_SPI_CS_CS_10 0x00000002 #define BCM2835_SPI_CS_CS_01 0x00000001 #define BCM2835_SPI_TIMEOUT_MS 30000 #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS) #define DRV_NAME "spi-bcm2835" struct bcm2835_spi { void __iomem *regs; struct clk *clk; int irq; struct completion done; const u8 *tx_buf; u8 *rx_buf; int len; }; static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) { return readl(bs->regs + reg); } static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val) { writel(val, bs->regs + reg); } static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs, int len) { u8 byte; while (len--) { byte = bcm2835_rd(bs, BCM2835_SPI_FIFO); if (bs->rx_buf) *bs->rx_buf++ = byte; } } static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs, int len) { u8 byte; if (len > bs->len) len = bs->len; while (len--) { byte = bs->tx_buf ? *bs->tx_buf++ : 0; bcm2835_wr(bs, BCM2835_SPI_FIFO, byte); bs->len--; } } static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) { struct spi_master *master = dev_id; struct bcm2835_spi *bs = spi_master_get_devdata(master); u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); /* * RXR - RX needs Reading. This means 12 (or more) bytes have been * transmitted and hence 12 (or more) bytes have been received. * * The FIFO is 16-bytes deep. We check for this interrupt to keep the * FIFO full; we have a 4-byte-time buffer for IRQ latency. We check * this before DONE (TX empty) just in case we delayed processing this * interrupt for some reason. 
* * We only check for this case if we have more bytes to TX; at the end * of the transfer, we ignore this pipelining optimization, and let * bcm2835_spi_finish_transfer() drain the RX FIFO. */ if (bs->len && (cs & BCM2835_SPI_CS_RXR)) { /* Read 12 bytes of data */ bcm2835_rd_fifo(bs, 12); /* Write up to 12 bytes */ bcm2835_wr_fifo(bs, 12); /* * We must have written something to the TX FIFO due to the * bs->len check above, so cannot be DONE. Hence, return * early. Note that DONE could also be set if we serviced an * RXR interrupt really late. */ return IRQ_HANDLED; } /* * DONE - TX empty. This occurs when we first enable the transfer * since we do not pre-fill the TX FIFO. At any other time, given that * we refill the TX FIFO above based on RXR, and hence ignore DONE if * RXR is set, DONE really does mean end-of-transfer. */ if (cs & BCM2835_SPI_CS_DONE) { if (bs->len) { /* First interrupt in a transfer */ bcm2835_wr_fifo(bs, 16); } else { /* Transfer complete */ /* Disable SPI interrupts */ cs &= ~(BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD); bcm2835_wr(bs, BCM2835_SPI_CS, cs); /* * Wake up bcm2835_spi_transfer_one(), which will call * bcm2835_spi_finish_transfer(), to drain the RX FIFO. 
*/ complete(&bs->done); } return IRQ_HANDLED; } return IRQ_NONE; } static int bcm2835_spi_start_transfer(struct spi_device *spi, struct spi_transfer *tfr) { struct bcm2835_spi *bs = spi_master_get_devdata(spi->master); unsigned long spi_hz, clk_hz, cdiv; u32 cs = BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA; spi_hz = tfr->speed_hz; clk_hz = clk_get_rate(bs->clk); if (spi_hz >= clk_hz / 2) { cdiv = 2; /* clk_hz/2 is the fastest we can go */ } else if (spi_hz) { /* CDIV must be a power of two */ cdiv = roundup_pow_of_two(DIV_ROUND_UP(clk_hz, spi_hz)); if (cdiv >= 65536) cdiv = 0; /* 0 is the slowest we can go */ } else cdiv = 0; /* 0 is the slowest we can go */ if (spi->mode & SPI_CPOL) cs |= BCM2835_SPI_CS_CPOL; if (spi->mode & SPI_CPHA) cs |= BCM2835_SPI_CS_CPHA; if (!(spi->mode & SPI_NO_CS)) { if (spi->mode & SPI_CS_HIGH) { cs |= BCM2835_SPI_CS_CSPOL; cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select; } cs |= spi->chip_select; } reinit_completion(&bs->done); bs->tx_buf = tfr->tx_buf; bs->rx_buf = tfr->rx_buf; bs->len = tfr->len; bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); /* * Enable the HW block. This will immediately trigger a DONE (TX * empty) interrupt, upon which we will fill the TX FIFO with the * first TX bytes. 
Pre-filling the TX FIFO here to avoid the * interrupt doesn't work:-( */ bcm2835_wr(bs, BCM2835_SPI_CS, cs); return 0; } static int bcm2835_spi_finish_transfer(struct spi_device *spi, struct spi_transfer *tfr, bool cs_change) { struct bcm2835_spi *bs = spi_master_get_devdata(spi->master); u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); /* Drain RX FIFO */ while (cs & BCM2835_SPI_CS_RXD) { bcm2835_rd_fifo(bs, 1); cs = bcm2835_rd(bs, BCM2835_SPI_CS); } if (tfr->delay_usecs) udelay(tfr->delay_usecs); if (cs_change) /* Clear TA flag */ bcm2835_wr(bs, BCM2835_SPI_CS, cs & ~BCM2835_SPI_CS_TA); return 0; } static int bcm2835_spi_transfer_one(struct spi_master *master, struct spi_message *mesg) { struct bcm2835_spi *bs = spi_master_get_devdata(master); struct spi_transfer *tfr; struct spi_device *spi = mesg->spi; int err = 0; unsigned int timeout; bool cs_change; list_for_each_entry(tfr, &mesg->transfers, transfer_list) { err = bcm2835_spi_start_transfer(spi, tfr); if (err) goto out; timeout = wait_for_completion_timeout(&bs->done, msecs_to_jiffies(BCM2835_SPI_TIMEOUT_MS)); if (!timeout) { err = -ETIMEDOUT; goto out; } cs_change = tfr->cs_change || list_is_last(&tfr->transfer_list, &mesg->transfers); err = bcm2835_spi_finish_transfer(spi, tfr, cs_change); if (err) goto out; mesg->actual_length += (tfr->len - bs->len); } out: /* Clear FIFOs, and disable the HW block */ bcm2835_wr(bs, BCM2835_SPI_CS, BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); mesg->status = err; spi_finalize_current_message(master); return 0; } static int bcm2835_spi_probe(struct platform_device *pdev) { struct spi_master *master; struct bcm2835_spi *bs; struct resource *res; int err; master = spi_alloc_master(&pdev->dev, sizeof(*bs)); if (!master) { dev_err(&pdev->dev, "spi_alloc_master() failed\n"); return -ENOMEM; } platform_set_drvdata(pdev, master); master->mode_bits = BCM2835_SPI_MODE_BITS; master->bits_per_word_mask = SPI_BPW_MASK(8); master->num_chipselect = 3; master->transfer_one_message = 
bcm2835_spi_transfer_one; master->dev.of_node = pdev->dev.of_node; bs = spi_master_get_devdata(master); init_completion(&bs->done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); bs->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(bs->regs)) { err = PTR_ERR(bs->regs); goto out_master_put; } bs->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); dev_err(&pdev->dev, "could not get clk: %d\n", err); goto out_master_put; } bs->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (bs->irq <= 0) { dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq); err = bs->irq ? bs->irq : -ENODEV; goto out_master_put; } clk_prepare_enable(bs->clk); err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, dev_name(&pdev->dev), master); if (err) { dev_err(&pdev->dev, "could not request IRQ: %d\n", err); goto out_clk_disable; } /* initialise the hardware */ bcm2835_wr(bs, BCM2835_SPI_CS, BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); err = devm_spi_register_master(&pdev->dev, master); if (err) { dev_err(&pdev->dev, "could not register SPI master: %d\n", err); goto out_clk_disable; } return 0; out_clk_disable: clk_disable_unprepare(bs->clk); out_master_put: spi_master_put(master); return err; } static int bcm2835_spi_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct bcm2835_spi *bs = spi_master_get_devdata(master); /* Clear FIFOs, and disable the HW block */ bcm2835_wr(bs, BCM2835_SPI_CS, BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); clk_disable_unprepare(bs->clk); return 0; } static const struct of_device_id bcm2835_spi_match[] = { { .compatible = "brcm,bcm2835-spi", }, {} }; MODULE_DEVICE_TABLE(of, bcm2835_spi_match); static struct platform_driver bcm2835_spi_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .of_match_table = bcm2835_spi_match, }, .probe = bcm2835_spi_probe, .remove = bcm2835_spi_remove, }; 
module_platform_driver(bcm2835_spi_driver); MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835"); MODULE_AUTHOR("Chris Boot <bootc@bootc.net>"); MODULE_LICENSE("GPL v2");
gpl-2.0
phiexz/kernel-cyanogen-gio
fs/xfs/xfs_log_cil.c
762
21916
/* * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_trans_priv.h" #include "xfs_log_priv.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_dir2.h" #include "xfs_dmapi.h" #include "xfs_mount.h" #include "xfs_error.h" #include "xfs_alloc.h" /* * Perform initial CIL structure initialisation. If the CIL is not * enabled in this filesystem, ensure the log->l_cilp is null so * we can check this conditional to determine if we are doing delayed * logging or not. 
*/ int xlog_cil_init( struct log *log) { struct xfs_cil *cil; struct xfs_cil_ctx *ctx; log->l_cilp = NULL; if (!(log->l_mp->m_flags & XFS_MOUNT_DELAYLOG)) return 0; cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL); if (!cil) return ENOMEM; ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL); if (!ctx) { kmem_free(cil); return ENOMEM; } INIT_LIST_HEAD(&cil->xc_cil); INIT_LIST_HEAD(&cil->xc_committing); spin_lock_init(&cil->xc_cil_lock); init_rwsem(&cil->xc_ctx_lock); sv_init(&cil->xc_commit_wait, SV_DEFAULT, "cilwait"); INIT_LIST_HEAD(&ctx->committing); INIT_LIST_HEAD(&ctx->busy_extents); ctx->sequence = 1; ctx->cil = cil; cil->xc_ctx = ctx; cil->xc_log = log; log->l_cilp = cil; return 0; } void xlog_cil_destroy( struct log *log) { if (!log->l_cilp) return; if (log->l_cilp->xc_ctx) { if (log->l_cilp->xc_ctx->ticket) xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket); kmem_free(log->l_cilp->xc_ctx); } ASSERT(list_empty(&log->l_cilp->xc_cil)); kmem_free(log->l_cilp); } /* * Allocate a new ticket. Failing to get a new ticket makes it really hard to * recover, so we don't allow failure here. Also, we allocate in a context that * we don't want to be issuing transactions from, so we need to tell the * allocation code this as well. * * We don't reserve any space for the ticket - we are going to steal whatever * space we require from transactions as they commit. To ensure we reserve all * the space required, we need to set the current reservation of the ticket to * zero so that we know to steal the initial transaction overhead from the * first transaction commit. */ static struct xlog_ticket * xlog_cil_ticket_alloc( struct log *log) { struct xlog_ticket *tic; tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0, KM_SLEEP|KM_NOFS); tic->t_trans_type = XFS_TRANS_CHECKPOINT; /* * set the current reservation to zero so we know to steal the basic * transaction overhead reservation from the first transaction commit. 
*/ tic->t_curr_res = 0; return tic; } /* * After the first stage of log recovery is done, we know where the head and * tail of the log are. We need this log initialisation done before we can * initialise the first CIL checkpoint context. * * Here we allocate a log ticket to track space usage during a CIL push. This * ticket is passed to xlog_write() directly so that we don't slowly leak log * space by failing to account for space used by log headers and additional * region headers for split regions. */ void xlog_cil_init_post_recovery( struct log *log) { if (!log->l_cilp) return; log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); log->l_cilp->xc_ctx->sequence = 1; log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block); } /* * Insert the log item into the CIL and calculate the difference in space * consumed by the item. Add the space to the checkpoint ticket and calculate * if the change requires additional log metadata. If it does, take that space * as well. Remove the amount of space we addded to the checkpoint ticket from * the current transaction ticket so that the accounting works out correctly. * * If this is the first time the item is being placed into the CIL in this * context, pin it so it can't be written to disk until the CIL is flushed to * the iclog and the iclog written to disk. 
*/ static void xlog_cil_insert( struct log *log, struct xlog_ticket *ticket, struct xfs_log_item *item, struct xfs_log_vec *lv) { struct xfs_cil *cil = log->l_cilp; struct xfs_log_vec *old = lv->lv_item->li_lv; struct xfs_cil_ctx *ctx = cil->xc_ctx; int len; int diff_iovecs; int iclog_space; if (old) { /* existing lv on log item, space used is a delta */ ASSERT(!list_empty(&item->li_cil)); ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs); len = lv->lv_buf_len - old->lv_buf_len; diff_iovecs = lv->lv_niovecs - old->lv_niovecs; kmem_free(old->lv_buf); kmem_free(old); } else { /* new lv, must pin the log item */ ASSERT(!lv->lv_item->li_lv); ASSERT(list_empty(&item->li_cil)); len = lv->lv_buf_len; diff_iovecs = lv->lv_niovecs; IOP_PIN(lv->lv_item); } len += diff_iovecs * sizeof(xlog_op_header_t); /* attach new log vector to log item */ lv->lv_item->li_lv = lv; spin_lock(&cil->xc_cil_lock); list_move_tail(&item->li_cil, &cil->xc_cil); ctx->nvecs += diff_iovecs; /* * If this is the first time the item is being committed to the CIL, * store the sequence number on the log item so we can tell * in future commits whether this is the first checkpoint the item is * being committed into. */ if (!item->li_seq) item->li_seq = ctx->sequence; /* * Now transfer enough transaction reservation to the context ticket * for the checkpoint. The context ticket is special - the unit * reservation has to grow as well as the current reservation as we * steal from tickets so we can correctly determine the space used * during the transaction commit. */ if (ctx->ticket->t_curr_res == 0) { /* first commit in checkpoint, steal the header reservation */ ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len); ctx->ticket->t_curr_res = ctx->ticket->t_unit_res; ticket->t_curr_res -= ctx->ticket->t_unit_res; } /* do we need space for more log record headers? 
*/ iclog_space = log->l_iclog_size - log->l_iclog_hsize; if (len > 0 && (ctx->space_used / iclog_space != (ctx->space_used + len) / iclog_space)) { int hdrs; hdrs = (len + iclog_space - 1) / iclog_space; /* need to take into account split region headers, too */ hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header); ctx->ticket->t_unit_res += hdrs; ctx->ticket->t_curr_res += hdrs; ticket->t_curr_res -= hdrs; ASSERT(ticket->t_curr_res >= len); } ticket->t_curr_res -= len; ctx->space_used += len; spin_unlock(&cil->xc_cil_lock); } /* * Format log item into a flat buffers * * For delayed logging, we need to hold a formatted buffer containing all the * changes on the log item. This enables us to relog the item in memory and * write it out asynchronously without needing to relock the object that was * modified at the time it gets written into the iclog. * * This function builds a vector for the changes in each log item in the * transaction. It then works out the length of the buffer needed for each log * item, allocates them and formats the vector for the item into the buffer. * The buffer is then attached to the log item are then inserted into the * Committed Item List for tracking until the next checkpoint is written out. * * We don't set up region headers during this process; we simply copy the * regions into the flat buffer. We can do this because we still have to do a * formatting step to write the regions into the iclog buffer. Writing the * ophdrs during the iclog write means that we can support splitting large * regions across iclog boundares without needing a change in the format of the * item/region encapsulation. * * Hence what we need to do now is change the rewrite the vector array to point * to the copied region inside the buffer we just allocated. This allows us to * format the regions into the iclog as though they are being formatted * directly out of the objects themselves. 
*/ static void xlog_cil_format_items( struct log *log, struct xfs_log_vec *log_vector, struct xlog_ticket *ticket, xfs_lsn_t *start_lsn) { struct xfs_log_vec *lv; if (start_lsn) *start_lsn = log->l_cilp->xc_ctx->sequence; ASSERT(log_vector); for (lv = log_vector; lv; lv = lv->lv_next) { void *ptr; int index; int len = 0; /* build the vector array and calculate it's length */ IOP_FORMAT(lv->lv_item, lv->lv_iovecp); for (index = 0; index < lv->lv_niovecs; index++) len += lv->lv_iovecp[index].i_len; lv->lv_buf_len = len; lv->lv_buf = kmem_zalloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS); ptr = lv->lv_buf; for (index = 0; index < lv->lv_niovecs; index++) { struct xfs_log_iovec *vec = &lv->lv_iovecp[index]; memcpy(ptr, vec->i_addr, vec->i_len); vec->i_addr = ptr; ptr += vec->i_len; } ASSERT(ptr == lv->lv_buf + lv->lv_buf_len); xlog_cil_insert(log, ticket, lv->lv_item, lv); } } static void xlog_cil_free_logvec( struct xfs_log_vec *log_vector) { struct xfs_log_vec *lv; for (lv = log_vector; lv; ) { struct xfs_log_vec *next = lv->lv_next; kmem_free(lv->lv_buf); kmem_free(lv); lv = next; } } /* * Commit a transaction with the given vector to the Committed Item List. * * To do this, we need to format the item, pin it in memory if required and * account for the space used by the transaction. Once we have done that we * need to release the unused reservation for the transaction, attach the * transaction to the checkpoint context so we carry the busy extents through * to checkpoint completion, and then unlock all the items in the transaction. * * For more specific information about the order of operations in * xfs_log_commit_cil() please refer to the comments in * xfs_trans_commit_iclog(). * * Called with the context lock already held in read mode to lock out * background commit, returns without it held once background commits are * allowed again. 
*/ int xfs_log_commit_cil( struct xfs_mount *mp, struct xfs_trans *tp, struct xfs_log_vec *log_vector, xfs_lsn_t *commit_lsn, int flags) { struct log *log = mp->m_log; int log_flags = 0; int push = 0; if (flags & XFS_TRANS_RELEASE_LOG_RES) log_flags = XFS_LOG_REL_PERM_RESERV; if (XLOG_FORCED_SHUTDOWN(log)) { xlog_cil_free_logvec(log_vector); return XFS_ERROR(EIO); } /* lock out background commit */ down_read(&log->l_cilp->xc_ctx_lock); xlog_cil_format_items(log, log_vector, tp->t_ticket, commit_lsn); /* check we didn't blow the reservation */ if (tp->t_ticket->t_curr_res < 0) xlog_print_tic_res(log->l_mp, tp->t_ticket); /* attach the transaction to the CIL if it has any busy extents */ if (!list_empty(&tp->t_busy)) { spin_lock(&log->l_cilp->xc_cil_lock); list_splice_init(&tp->t_busy, &log->l_cilp->xc_ctx->busy_extents); spin_unlock(&log->l_cilp->xc_cil_lock); } tp->t_commit_lsn = *commit_lsn; xfs_log_done(mp, tp->t_ticket, NULL, log_flags); xfs_trans_unreserve_and_mod_sb(tp); /* check for background commit before unlock */ if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log)) push = 1; up_read(&log->l_cilp->xc_ctx_lock); /* * We need to push CIL every so often so we don't cache more than we * can fit in the log. The limit really is that a checkpoint can't be * more than half the log (the current checkpoint is not allowed to * overwrite the previous checkpoint), but commit latency and memory * usage limit this to a smaller size in most cases. */ if (push) xlog_cil_push(log, 0); return 0; } /* * Mark all items committed and clear busy extents. We free the log vector * chains in a separate pass so that we unpin the log items as quickly as * possible. */ static void xlog_cil_committed( void *args, int abort) { struct xfs_cil_ctx *ctx = args; struct xfs_log_vec *lv; int abortflag = abort ? 
XFS_LI_ABORTED : 0;	/* NOTE(review): tail of xlog_cil_committed; head is outside this view */
	struct xfs_busy_extent	*busyp, *n;

	/* unpin all the log items */
	for (lv = ctx->lv_chain; lv; lv = lv->lv_next ) {
		xfs_trans_item_committed(lv->lv_item, ctx->start_lsn,
					 abortflag);
	}

	/* drop the busy extents this checkpoint was tracking */
	list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
		xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);

	/* unlink the context from the committing list and free everything */
	spin_lock(&ctx->cil->xc_cil_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_cil_lock);

	xlog_cil_free_logvec(ctx->lv_chain);
	kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If the push_now flag is not set,
 * then it is a background flush and so we can choose to ignore it.
 *
 * Returns 0 on success (including the "nothing to do" cases) or EIO after
 * aborting the checkpoint via xlog_cil_committed().
 */
int
xlog_cil_push(
	struct log		*log,
	int			push_now)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_lv;
	int			num_iovecs;
	int			len;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;

	/* delayed logging not enabled => nothing to push */
	if (!cil)
		return 0;

	/* allocate the replacement context before taking any locks */
	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	/* lock out transaction commit, but don't block on background push */
	if (!down_write_trylock(&cil->xc_ctx_lock)) {
		if (!push_now)
			goto out_free_ticket;
		down_write(&cil->xc_ctx_lock);
	}
	ctx = cil->xc_ctx;

	/* check if we've anything to push */
	if (list_empty(&cil->xc_cil))
		goto out_skip;

	/* check for spurious background flush */
	if (!push_now && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		goto out_skip;

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_lv = 0;
	num_iovecs = 0;
	len = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;
		int			i;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		/* chain this item's formatted vector onto ctx->lv_chain */
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;

		/* NOTE(review): num_lv and len are tallied but not used below */
		num_lv++;
		num_iovecs += lv->lv_niovecs;
		for (i = 0; i < lv->lv_niovecs; i++)
			len += lv->lv_iovecp[i].i_len;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_cil_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = (xfs_caddr_t)&thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	/* header space is not accounted for by xlog_write() itself */
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_cil_lock);

	/* write the commit record; -1 means the log is shut down */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
	if (error || commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_cil_lock);
	ctx->commit_lsn = commit_lsn;
	sv_broadcast(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_cil_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
out_free_ticket:
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return XFS_ERROR(EIO);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if a
 * iclog flush is necessary following this call.
 *
 * XXX: Initially, just push the CIL unconditionally and return whatever
 * commit lsn is there. It'll be empty, so this is broken for now.
*/ xfs_lsn_t xlog_cil_push_lsn( struct log *log, xfs_lsn_t push_seq) { struct xfs_cil *cil = log->l_cilp; struct xfs_cil_ctx *ctx; xfs_lsn_t commit_lsn = NULLCOMMITLSN; restart: down_write(&cil->xc_ctx_lock); ASSERT(push_seq <= cil->xc_ctx->sequence); /* check to see if we need to force out the current context */ if (push_seq == cil->xc_ctx->sequence) { up_write(&cil->xc_ctx_lock); xlog_cil_push(log, 1); goto restart; } /* * See if we can find a previous sequence still committing. * We can drop the flush lock as soon as we have the cil lock * because we are now only comparing contexts protected by * the cil lock. * * We need to wait for all previous sequence commits to complete * before allowing the force of push_seq to go ahead. Hence block * on commits for those as well. */ spin_lock(&cil->xc_cil_lock); up_write(&cil->xc_ctx_lock); list_for_each_entry(ctx, &cil->xc_committing, committing) { if (ctx->sequence > push_seq) continue; if (!ctx->commit_lsn) { /* * It is still being pushed! Wait for the push to * complete, then start again from the beginning. */ sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0); goto restart; } if (ctx->sequence != push_seq) continue; /* found it! */ commit_lsn = ctx->commit_lsn; } spin_unlock(&cil->xc_cil_lock); return commit_lsn; } /* * Check if the current log item was first committed in this sequence. * We can't rely on just the log item being in the CIL, we have to check * the recorded commit sequence number. * * Note: for this to be used in a non-racy manner, it has to be called with * CIL flushing locked out. As a result, it should only be used during the * transaction commit process when deciding what to format into the item. 
*/ bool xfs_log_item_in_current_chkpt( struct xfs_log_item *lip) { struct xfs_cil_ctx *ctx; if (!(lip->li_mountp->m_flags & XFS_MOUNT_DELAYLOG)) return false; if (list_empty(&lip->li_cil)) return false; ctx = lip->li_mountp->m_log->l_cilp->xc_ctx; /* * li_seq is written on the first commit of a log item to record the * first checkpoint it is written to. Hence if it is different to the * current sequence, we're in a new checkpoint. */ if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0) return false; return true; }
gpl-2.0
cjp256/ubuntu-linux
security/selinux/ss/ebitmap.c
762
10580
/* * Implementation of the extensible bitmap type. * * Author : Stephen Smalley, <sds@epoch.ncsc.mil> */ /* * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support to import/export the NetLabel category bitmap * * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 */ /* * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com> * Applied standard bit operations to improve bitmap scanning. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include <net/netlabel.h> #include "ebitmap.h" #include "policydb.h" #define BITS_PER_U64 (sizeof(u64) * 8) int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2) { struct ebitmap_node *n1, *n2; if (e1->highbit != e2->highbit) return 0; n1 = e1->node; n2 = e2->node; while (n1 && n2 && (n1->startbit == n2->startbit) && !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) { n1 = n1->next; n2 = n2->next; } if (n1 || n2) return 0; return 1; } int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src) { struct ebitmap_node *n, *new, *prev; ebitmap_init(dst); n = src->node; prev = NULL; while (n) { new = kzalloc(sizeof(*new), GFP_ATOMIC); if (!new) { ebitmap_destroy(dst); return -ENOMEM; } new->startbit = n->startbit; memcpy(new->maps, n->maps, EBITMAP_SIZE / 8); new->next = NULL; if (prev) prev->next = new; else dst->node = new; prev = new; n = n->next; } dst->highbit = src->highbit; return 0; } #ifdef CONFIG_NETLABEL /** * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap * @ebmap: the ebitmap to export * @catmap: the NetLabel category bitmap * * Description: * Export a SELinux extensibile bitmap into a NetLabel category bitmap. * Returns zero on success, negative values on error. 
* */ int ebitmap_netlbl_export(struct ebitmap *ebmap, struct netlbl_lsm_catmap **catmap) { struct ebitmap_node *e_iter = ebmap->node; unsigned long e_map; u32 offset; unsigned int iter; int rc; if (e_iter == NULL) { *catmap = NULL; return 0; } if (*catmap != NULL) netlbl_catmap_free(*catmap); *catmap = NULL; while (e_iter) { offset = e_iter->startbit; for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) { e_map = e_iter->maps[iter]; if (e_map != 0) { rc = netlbl_catmap_setlong(catmap, offset, e_map, GFP_ATOMIC); if (rc != 0) goto netlbl_export_failure; } offset += EBITMAP_UNIT_SIZE; } e_iter = e_iter->next; } return 0; netlbl_export_failure: netlbl_catmap_free(*catmap); return -ENOMEM; } /** * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap * @ebmap: the ebitmap to import * @catmap: the NetLabel category bitmap * * Description: * Import a NetLabel category bitmap into a SELinux extensibile bitmap. * Returns zero on success, negative values on error. * */ int ebitmap_netlbl_import(struct ebitmap *ebmap, struct netlbl_lsm_catmap *catmap) { int rc; struct ebitmap_node *e_iter = NULL; struct ebitmap_node *e_prev = NULL; u32 offset = 0, idx; unsigned long bitmap; for (;;) { rc = netlbl_catmap_getlong(catmap, &offset, &bitmap); if (rc < 0) goto netlbl_import_failure; if (offset == (u32)-1) return 0; if (e_iter == NULL || offset >= e_iter->startbit + EBITMAP_SIZE) { e_prev = e_iter; e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC); if (e_iter == NULL) goto netlbl_import_failure; e_iter->startbit = offset & ~(EBITMAP_SIZE - 1); if (e_prev == NULL) ebmap->node = e_iter; else e_prev->next = e_iter; ebmap->highbit = e_iter->startbit + EBITMAP_SIZE; } /* offset will always be aligned to an unsigned long */ idx = EBITMAP_NODE_INDEX(e_iter, offset); e_iter->maps[idx] = bitmap; /* next */ offset += EBITMAP_UNIT_SIZE; } /* NOTE: we should never reach this return */ return 0; netlbl_import_failure: ebitmap_destroy(ebmap); return -ENOMEM; } #endif /* 
CONFIG_NETLABEL */ /* * Check to see if all the bits set in e2 are also set in e1. Optionally, * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed * last_e2bit. */ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit) { struct ebitmap_node *n1, *n2; int i; if (e1->highbit < e2->highbit) return 0; n1 = e1->node; n2 = e2->node; while (n1 && n2 && (n1->startbit <= n2->startbit)) { if (n1->startbit < n2->startbit) { n1 = n1->next; continue; } for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; ) i--; /* Skip trailing NULL map entries */ if (last_e2bit && (i >= 0)) { u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE + __fls(n2->maps[i]); if (lastsetbit > last_e2bit) return 0; } while (i >= 0) { if ((n1->maps[i] & n2->maps[i]) != n2->maps[i]) return 0; i--; } n1 = n1->next; n2 = n2->next; } if (n2) return 0; return 1; } int ebitmap_get_bit(struct ebitmap *e, unsigned long bit) { struct ebitmap_node *n; if (e->highbit < bit) return 0; n = e->node; while (n && (n->startbit <= bit)) { if ((n->startbit + EBITMAP_SIZE) > bit) return ebitmap_node_get_bit(n, bit); n = n->next; } return 0; } int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value) { struct ebitmap_node *n, *prev, *new; prev = NULL; n = e->node; while (n && n->startbit <= bit) { if ((n->startbit + EBITMAP_SIZE) > bit) { if (value) { ebitmap_node_set_bit(n, bit); } else { unsigned int s; ebitmap_node_clr_bit(n, bit); s = find_first_bit(n->maps, EBITMAP_SIZE); if (s < EBITMAP_SIZE) return 0; /* drop this node from the bitmap */ if (!n->next) { /* * this was the highest map * within the bitmap */ if (prev) e->highbit = prev->startbit + EBITMAP_SIZE; else e->highbit = 0; } if (prev) prev->next = n->next; else e->node = n->next; kfree(n); } return 0; } prev = n; n = n->next; } if (!value) return 0; new = kzalloc(sizeof(*new), GFP_ATOMIC); if (!new) return -ENOMEM; new->startbit = bit - (bit % EBITMAP_SIZE); ebitmap_node_set_bit(new, bit); if (!n) /* this 
node will be the highest map within the bitmap */ e->highbit = new->startbit + EBITMAP_SIZE; if (prev) { new->next = prev->next; prev->next = new; } else { new->next = e->node; e->node = new; } return 0; } void ebitmap_destroy(struct ebitmap *e) { struct ebitmap_node *n, *temp; if (!e) return; n = e->node; while (n) { temp = n; n = n->next; kfree(temp); } e->highbit = 0; e->node = NULL; return; } int ebitmap_read(struct ebitmap *e, void *fp) { struct ebitmap_node *n = NULL; u32 mapunit, count, startbit, index; u64 map; __le32 buf[3]; int rc, i; ebitmap_init(e); rc = next_entry(buf, fp, sizeof buf); if (rc < 0) goto out; mapunit = le32_to_cpu(buf[0]); e->highbit = le32_to_cpu(buf[1]); count = le32_to_cpu(buf[2]); if (mapunit != BITS_PER_U64) { printk(KERN_ERR "SELinux: ebitmap: map size %u does not " "match my size %Zd (high bit was %d)\n", mapunit, BITS_PER_U64, e->highbit); goto bad; } /* round up e->highbit */ e->highbit += EBITMAP_SIZE - 1; e->highbit -= (e->highbit % EBITMAP_SIZE); if (!e->highbit) { e->node = NULL; goto ok; } for (i = 0; i < count; i++) { rc = next_entry(&startbit, fp, sizeof(u32)); if (rc < 0) { printk(KERN_ERR "SELinux: ebitmap: truncated map\n"); goto bad; } startbit = le32_to_cpu(startbit); if (startbit & (mapunit - 1)) { printk(KERN_ERR "SELinux: ebitmap start bit (%d) is " "not a multiple of the map unit size (%u)\n", startbit, mapunit); goto bad; } if (startbit > e->highbit - mapunit) { printk(KERN_ERR "SELinux: ebitmap start bit (%d) is " "beyond the end of the bitmap (%u)\n", startbit, (e->highbit - mapunit)); goto bad; } if (!n || startbit >= n->startbit + EBITMAP_SIZE) { struct ebitmap_node *tmp; tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) { printk(KERN_ERR "SELinux: ebitmap: out of memory\n"); rc = -ENOMEM; goto bad; } /* round down */ tmp->startbit = startbit - (startbit % EBITMAP_SIZE); if (n) n->next = tmp; else e->node = tmp; n = tmp; } else if (startbit <= n->startbit) { printk(KERN_ERR "SELinux: ebitmap: start bit %d" 
" comes after start bit %d\n", startbit, n->startbit); goto bad; } rc = next_entry(&map, fp, sizeof(u64)); if (rc < 0) { printk(KERN_ERR "SELinux: ebitmap: truncated map\n"); goto bad; } map = le64_to_cpu(map); index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE; while (map) { n->maps[index++] = map & (-1UL); map = EBITMAP_SHIFT_UNIT_SIZE(map); } } ok: rc = 0; out: return rc; bad: if (!rc) rc = -EINVAL; ebitmap_destroy(e); goto out; } int ebitmap_write(struct ebitmap *e, void *fp) { struct ebitmap_node *n; u32 count; __le32 buf[3]; u64 map; int bit, last_bit, last_startbit, rc; buf[0] = cpu_to_le32(BITS_PER_U64); count = 0; last_bit = 0; last_startbit = -1; ebitmap_for_each_positive_bit(e, n, bit) { if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) { count++; last_startbit = rounddown(bit, BITS_PER_U64); } last_bit = roundup(bit + 1, BITS_PER_U64); } buf[1] = cpu_to_le32(last_bit); buf[2] = cpu_to_le32(count); rc = put_entry(buf, sizeof(u32), 3, fp); if (rc) return rc; map = 0; last_startbit = INT_MIN; ebitmap_for_each_positive_bit(e, n, bit) { if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) { __le64 buf64[1]; /* this is the very first bit */ if (!map) { last_startbit = rounddown(bit, BITS_PER_U64); map = (u64)1 << (bit - last_startbit); continue; } /* write the last node */ buf[0] = cpu_to_le32(last_startbit); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; buf64[0] = cpu_to_le64(map); rc = put_entry(buf64, sizeof(u64), 1, fp); if (rc) return rc; /* set up for the next node */ map = 0; last_startbit = rounddown(bit, BITS_PER_U64); } map |= (u64)1 << (bit - last_startbit); } /* write the last node */ if (map) { __le64 buf64[1]; /* write the last node */ buf[0] = cpu_to_le32(last_startbit); rc = put_entry(buf, sizeof(u32), 1, fp); if (rc) return rc; buf64[0] = cpu_to_le64(map); rc = put_entry(buf64, sizeof(u64), 1, fp); if (rc) return rc; } return 0; }
gpl-2.0