repo_name
string
path
string
copies
string
size
string
content
string
license
string
hfutxqd/android_kernel_zte_s291_msm8974
arch/arm/mach-tegra/reset.c
2700
2173
/*
 * arch/arm/mach-tegra/reset.c
 *
 * Copyright (C) 2011,2012 NVIDIA Corporation.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#include <mach/iomap.h>
#include <mach/irammap.h>

#include "reset.h"
#include "fuse.h"

/* IRAM address where the CPU reset handler binary is copied. */
#define TEGRA_IRAM_RESET_BASE (TEGRA_IRAM_BASE + \
				TEGRA_IRAM_RESET_HANDLER_OFFSET)

/* Guards against installing the reset handler more than once. */
static bool is_enabled;

/*
 * Copy the CPU reset handler into IRAM and point the EVP CPU reset
 * vector at it, then (on Tegra30 and later) lock the vector against
 * further modification.  Must be called exactly once; a second call
 * trips the BUG_ON below.
 */
static void tegra_cpu_reset_handler_enable(void)
{
	void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE);
	void __iomem *evp_cpu_reset =
		IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE + 0x100);
	void __iomem *sb_ctrl = IO_ADDRESS(TEGRA_SB_BASE);
	u32 reg;

	BUG_ON(is_enabled);
	/* The handler binary must fit in the IRAM area reserved for it. */
	BUG_ON(tegra_cpu_reset_handler_size > TEGRA_IRAM_RESET_HANDLER_SIZE);

	memcpy(iram_base, (void *)__tegra_cpu_reset_handler_start,
	       tegra_cpu_reset_handler_size);

	/*
	 * NOTE: This must be the one and only write to the EVP CPU reset
	 * vector in the entire system.
	 */
	writel(TEGRA_IRAM_RESET_BASE + tegra_cpu_reset_handler_offset,
	       evp_cpu_reset);
	wmb();
	/* Read back to make sure the write has reached the device. */
	reg = readl(evp_cpu_reset);

	/*
	 * Prevent further modifications to the physical reset vector.
	 * NOTE: Has no effect on chips prior to Tegra30.
	 */
	if (tegra_chip_id != TEGRA20) {
		reg = readl(sb_ctrl);
		reg |= 2;
		writel(reg, sb_ctrl);
		wmb();
	}

	is_enabled = true;
}

/*
 * Boot-time setup: publish the present-CPU mask and the secondary
 * startup entry point to the reset handler data area (SMP only),
 * then install the reset handler in IRAM.
 */
void __init tegra_cpu_reset_handler_init(void)
{
#ifdef CONFIG_SMP
	__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
		*((u32 *)cpu_present_mask);
	__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] =
		virt_to_phys((void *)tegra_secondary_startup);
#endif
	tegra_cpu_reset_handler_enable();
}
gpl-2.0
kgp700/Neok-GNexroid-Kernel-JB
arch/arm/mach-omap2/clock2430.c
3212
1834
/* * clock2430.c - OMAP2430-specific clock integration code * * Copyright (C) 2005-2008 Texas Instruments, Inc. * Copyright (C) 2004-2010 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, * Gordon McNutt and RidgeRun, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/clock.h> #include "clock.h" #include "clock2xxx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-24xx.h" /** * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS * @clk: struct clk * being enabled * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator * * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function * passes back the correct CM_IDLEST register address for I2CHS * modules. No return value. */ static void omap2430_clk_i2chs_find_idlest(struct clk *clk, void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val) { *idlest_reg = OMAP2430_CM_REGADDR(CORE_MOD, CM_IDLEST); *idlest_bit = clk->enable_bit; *idlest_val = OMAP24XX_CM_IDLEST_VAL; } /* 2430 I2CHS has non-standard IDLEST register */ const struct clkops clkops_omap2430_i2chs_wait = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .find_idlest = omap2430_clk_i2chs_find_idlest, .find_companion = omap2_clk_dflt_find_companion, };
gpl-2.0
elelinux/hero_kernel
drivers/ide/ide-pci-generic.c
4236
6194
/* * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> * Portions (C) Copyright 2002 Red Hat Inc * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * For the avoidance of doubt the "preferred form" of this code is one which * is in an open non patent encumbered format. Where cryptographic key signing * forms part of the process of creating an executable the information * including keys needed to generate an equivalently functional executable * are deemed to be part of the source code. */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/ide.h> #include <linux/init.h> #define DRV_NAME "ide_pci_generic" static int ide_generic_all; /* Set to claim all devices */ module_param_named(all_generic_ide, ide_generic_all, bool, 0444); MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers."); static void netcell_quirkproc(ide_drive_t *drive) { /* mark words 85-87 as valid */ drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000; } static const struct ide_port_ops netcell_port_ops = { .quirkproc = netcell_quirkproc, }; #define DECLARE_GENERIC_PCI_DEV(extra_flags) \ { \ .name = DRV_NAME, \ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \ extra_flags, \ .swdma_mask = ATA_SWDMA2, \ .mwdma_mask = ATA_MWDMA2, \ .udma_mask = ATA_UDMA6, \ } static const struct ide_port_info generic_chipsets[] __devinitdata = { /* 0: Unknown */ DECLARE_GENERIC_PCI_DEV(0), { /* 1: NS87410 */ .name = DRV_NAME, .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} }, .host_flags = 
IDE_HFLAG_TRUST_BIOS_FOR_DMA, .swdma_mask = ATA_SWDMA2, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, }, /* 2: SAMURAI / HT6565 / HINT_IDE */ DECLARE_GENERIC_PCI_DEV(0), /* 3: UM8673F / UM8886A / UM8886BF */ DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_DMA), /* 4: VIA_IDE / OPTI621V / Piccolo010{2,3,5} */ DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_AUTODMA), { /* 5: VIA8237SATA */ .name = DRV_NAME, .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | IDE_HFLAG_OFF_BOARD, .swdma_mask = ATA_SWDMA2, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, }, { /* 6: Revolution */ .name = DRV_NAME, .port_ops = &netcell_port_ops, .host_flags = IDE_HFLAG_CLEAR_SIMPLEX | IDE_HFLAG_TRUST_BIOS_FOR_DMA | IDE_HFLAG_OFF_BOARD, .swdma_mask = ATA_SWDMA2, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, } }; /** * generic_init_one - called when a PIIX is found * @dev: the generic device * @id: the matching pci id * * Called when the PCI registration layer (or the IDE initialization) * finds a device matching our IDE device tables. 
*/ static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id) { const struct ide_port_info *d = &generic_chipsets[id->driver_data]; int ret = -ENODEV; /* Don't use the generic entry unless instructed to do so */ if (id->driver_data == 0 && ide_generic_all == 0) goto out; switch (dev->vendor) { case PCI_VENDOR_ID_UMC: if (dev->device == PCI_DEVICE_ID_UMC_UM8886A && !(PCI_FUNC(dev->devfn) & 1)) goto out; /* UM8886A/BF pair */ break; case PCI_VENDOR_ID_OPTI: if (dev->device == PCI_DEVICE_ID_OPTI_82C558 && !(PCI_FUNC(dev->devfn) & 1)) goto out; break; case PCI_VENDOR_ID_JMICRON: if (dev->device != PCI_DEVICE_ID_JMICRON_JMB368 && PCI_FUNC(dev->devfn) != 1) goto out; break; case PCI_VENDOR_ID_NS: if (dev->device == PCI_DEVICE_ID_NS_87410 && (dev->class >> 8) != PCI_CLASS_STORAGE_IDE) goto out; break; } if (dev->vendor != PCI_VENDOR_ID_JMICRON) { u16 command; pci_read_config_word(dev, PCI_COMMAND, &command); if (!(command & PCI_COMMAND_IO)) { printk(KERN_INFO "%s %s: skipping disabled " "controller\n", d->name, pci_name(dev)); goto out; } } ret = ide_pci_init_one(dev, d, NULL); out: return ret; } static const struct pci_device_id generic_pci_tbl[] = { { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), 1 }, { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), 2 }, { PCI_VDEVICE(HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), 2 }, { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8673F), 3 }, { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886A), 3 }, { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886BF), 3 }, { PCI_VDEVICE(HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), 2 }, { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C561), 4 }, { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C558), 4 }, #ifdef CONFIG_BLK_DEV_IDE_SATA { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8237_SATA), 5 }, #endif { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), 4 }, { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), 4 }, { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), 4 }, { PCI_VDEVICE(TOSHIBA, 
PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), 4 }, { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), 6 }, /* * Must come last. If you add entries adjust * this table and generic_chipsets[] appropriately. */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 0 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, generic_pci_tbl); static struct pci_driver generic_pci_driver = { .name = "PCI_IDE", .id_table = generic_pci_tbl, .probe = generic_init_one, .remove = ide_pci_remove, .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init generic_ide_init(void) { return ide_pci_register_driver(&generic_pci_driver); } static void __exit generic_ide_exit(void) { pci_unregister_driver(&generic_pci_driver); } module_init(generic_ide_init); module_exit(generic_ide_exit); MODULE_AUTHOR("Andre Hedrick"); MODULE_DESCRIPTION("PCI driver module for generic PCI IDE"); MODULE_LICENSE("GPL");
gpl-2.0
psyke83/android_kernel_samsung_msm-codeaurora
drivers/staging/octeon/cvmx-helper-rgmii.c
4748
17448
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Functions for RGMII/GMII/MII initialization, configuration,
 * and monitoring.
 */
#include <asm/octeon/octeon.h>

#include "cvmx-config.h"

#include "cvmx-mdio.h"
#include "cvmx-pko.h"
#include "cvmx-helper.h"
#include "cvmx-helper-board.h"

#include <asm/octeon/cvmx-npi-defs.h>
#include "cvmx-gmxx-defs.h"
#include "cvmx-asxx-defs.h"
#include "cvmx-dbg-defs.h"

void __cvmx_interrupt_gmxx_enable(int interface);
void __cvmx_interrupt_asxx_enable(int block);

/**
 * Probe RGMII ports and determine the number present
 *
 * @interface: Interface to probe
 *
 * Returns Number of RGMII/GMII/MII ports (0-4).
 */
int __cvmx_helper_rgmii_probe(int interface)
{
	int num_ports = 0;
	union cvmx_gmxx_inf_mode mode;
	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

	if (mode.s.type) {
		if (OCTEON_IS_MODEL(OCTEON_CN38XX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* On CN38XX/CN58XX, type set means SPI, not RGMII. */
			cvmx_dprintf("ERROR: RGMII initialize called in "
				     "SPI interface\n");
		} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
			   || OCTEON_IS_MODEL(OCTEON_CN30XX)
			   || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
			/*
			 * On these chips "type" says we're in
			 * GMII/MII mode. This limits us to 2 ports
			 */
			num_ports = 2;
		} else {
			cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
				     __func__);
		}
	} else {
		if (OCTEON_IS_MODEL(OCTEON_CN38XX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			num_ports = 4;
		} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
			   || OCTEON_IS_MODEL(OCTEON_CN30XX)
			   || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
			num_ports = 3;
		} else {
			cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
				     __func__);
		}
	}
	return num_ports;
}

/**
 * Put an RGMII interface in loopback mode. Internal packets sent
 * out will be received back again on the same port. Externally
 * received packets will echo back out.
 *
 * @port: IPD port number to loop.
 */
void cvmx_helper_rgmii_internal_loopback(int port)
{
	/* IPD port encodes interface in bit 4 and index in bits 0-3. */
	int interface = (port >> 4) & 1;
	int index = port & 0xf;
	uint64_t tmp;
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	gmx_cfg.u64 = 0;
	/* Force 1Gbps full duplex for the loopback. */
	gmx_cfg.s.duplex = 1;
	gmx_cfg.s.slottime = 1;
	gmx_cfg.s.speed = 1;
	cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
	cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
	cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	/* Read-modify-write: set this port's bit, preserve the others. */
	tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
	tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
	tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
	/* Enable the port only after everything else is configured. */
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
}

/**
 * Workaround ASX setup errata with CN38XX pass1
 *
 * @interface: Interface to setup
 * @port:      Port to setup (0..3)
 * @cpu_clock_hz:
 *             Chip frequency in Hertz
 *
 * Returns Zero on success, negative on failure
 */
static int __cvmx_helper_errata_asx_pass1(int interface, int port,
					  int cpu_clock_hz)
{
	/* Set hi water mark as per errata GMX-4 */
	if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000)
		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12);
	else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000)
		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11);
	else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000)
		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10);
	else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000)
		cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9);
	else
		cvmx_dprintf("Illegal clock frequency (%d). "
			     "CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz);
	return 0;
}

/**
 * Configure all of the ASX, GMX, and PKO registers required
 * to get RGMII to function on the supplied interface.
 *
 * @interface: PKO Interface to configure (0 or 1)
 *
 * Returns Zero on success
 */
int __cvmx_helper_rgmii_enable(int interface)
{
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int port;
	struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
	union cvmx_gmxx_inf_mode mode;
	union cvmx_asxx_tx_prt_en asx_tx;
	union cvmx_asxx_rx_prt_en asx_rx;

	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

	if (mode.s.en == 0)
		return -1;
	if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
	     OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
		/* Ignore SPI interfaces */
		return -1;

	/* Configure the ASX registers needed to use the RGMII ports */
	asx_tx.u64 = 0;
	asx_tx.s.prt_en = cvmx_build_mask(num_ports);
	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);

	asx_rx.u64 = 0;
	asx_rx.s.prt_en = cvmx_build_mask(num_ports);
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);

	/* Configure the GMX registers needed to use the RGMII ports */
	for (port = 0; port < num_ports; port++) {
		/* Setting of CVMX_GMXX_TXX_THRESH has been moved to
		   __cvmx_helper_setup_gmx() */

		if (cvmx_octeon_is_pass1())
			__cvmx_helper_errata_asx_pass1(interface, port,
						       sys_info_ptr->
						       cpu_clock_hz);
		else {
			/*
			 * Configure more flexible RGMII preamble
			 * checking. Pass 1 doesn't support this
			 * feature.
			 */
			union cvmx_gmxx_rxx_frm_ctl frm_ctl;
			frm_ctl.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
					  (port, interface));
			/* New field, so must be compile time */
			frm_ctl.s.pre_free = 1;
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
				       frm_ctl.u64);
		}

		/*
		 * Each pause frame transmitted will ask for about 10M
		 * bit times before resume.  If buffer space comes
		 * available before that time has expired, an XON
		 * pause frame (0 time) will be transmitted to restart
		 * the flow.
		 */
		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
			       20000);
		cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
			       (port, interface), 19000);

		/* CN50XX uses a different clock skew setting than the rest. */
		if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
				       16);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
				       16);
		} else {
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
				       24);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
				       24);
		}
	}

	__cvmx_helper_setup_gmx(interface, num_ports);

	/* enable the ports now */
	for (port = 0; port < num_ports; port++) {
		union cvmx_gmxx_prtx_cfg gmx_cfg;
		cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port
					  (interface, port));
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
		gmx_cfg.s.en = 1;
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
			       gmx_cfg.u64);
	}
	__cvmx_interrupt_asxx_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);

	return 0;
}

/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @ipd_port: IPD/PKO port to query
 *
 * Returns Link state
 */
cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_asxx_prt_loop asxx_prt_loop;

	asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
	if (asxx_prt_loop.s.int_loop & (1 << index)) {
		/* Force 1Gbps full duplex on internal loopback */
		cvmx_helper_link_info_t result;
		result.u64 = 0;
		result.s.full_duplex = 1;
		result.s.link_up = 1;
		result.s.speed = 1000;
		return result;
	} else
		return __cvmx_helper_board_link_get(ipd_port);
}

/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @ipd_port:  IPD/PKO port to configure
 * @link_info: The new link state
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_rgmii_link_set(int ipd_port,
				 cvmx_helper_link_info_t link_info)
{
	int result = 0;
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_gmxx_prtx_cfg original_gmx_cfg;
	union cvmx_gmxx_prtx_cfg new_gmx_cfg;
	union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
	union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
	int i;

	/* Ignore speed sets in the simulator */
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
		return 0;

	/* Read the current settings so we know the current enable state */
	original_gmx_cfg.u64 =
	    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	new_gmx_cfg = original_gmx_cfg;

	/* Disable the lowest level RX */
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
		       cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
				     ~(1 << index));

	/* Disable all queues so that TX should become idle */
	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
		pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
		pko_mem_queue_qos.s.pid = ipd_port;
		pko_mem_queue_qos.s.qid = queue;
		/* Save the original QoS mask so it can be restored below. */
		pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
		pko_mem_queue_qos.s.qos_mask = 0;
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
	}

	/* Disable backpressure */
	gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
	gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
	gmx_tx_ovr_bp.s.bp &= ~(1 << index);
	gmx_tx_ovr_bp.s.en |= 1 << index;
	cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
	cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));

	/*
	 * Poll the GMX state machine waiting for it to become
	 * idle. Preferably we should only change speed when it is
	 * idle. If it doesn't become idle we will still do the speed
	 * change, but there is a slight chance that GMX will
	 * lockup.
	 */
	cvmx_write_csr(CVMX_NPI_DBG_SELECT,
		       interface * 0x800 + index * 0x100 + 0x880);
	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
			      ==, 0, 10000);
	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
			      ==, 0, 10000);

	/* Disable the port before we make any changes */
	new_gmx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
	cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

	/* Set full/half duplex */
	if (cvmx_octeon_is_pass1())
		/* Half duplex is broken for 38XX Pass 1 */
		new_gmx_cfg.s.duplex = 1;
	else if (!link_info.s.link_up)
		/* Force full duplex on down links */
		new_gmx_cfg.s.duplex = 1;
	else
		new_gmx_cfg.s.duplex = link_info.s.full_duplex;

	/* Set the link speed. Anything unknown is set to 1Gbps */
	if (link_info.s.speed == 10) {
		new_gmx_cfg.s.slottime = 0;
		new_gmx_cfg.s.speed = 0;
	} else if (link_info.s.speed == 100) {
		new_gmx_cfg.s.slottime = 0;
		new_gmx_cfg.s.speed = 0;
	} else {
		new_gmx_cfg.s.slottime = 1;
		new_gmx_cfg.s.speed = 1;
	}

	/* Adjust the clocks */
	if (link_info.s.speed == 10) {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
	} else if (link_info.s.speed == 100) {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
	} else {
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
			union cvmx_gmxx_inf_mode mode;
			mode.u64 =
			    cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

	/*
	 * Port  .en  .type  .p0mii  Configuration
	 * ----  ---  -----  ------  -----------------------------------------
	 *  X      0     X      X    All links are disabled.
	 *  0      1     X      0    Port 0 is RGMII
	 *  0      1     X      1    Port 0 is MII
	 *  1      1     0      X    Ports 1 and 2 are configured as RGMII ports.
	 *  1      1     1      X    Port 1: GMII/MII; Port 2: disabled. GMII or
	 *                           MII port is selected by GMX_PRT1_CFG[SPEED].
	 */

			/* In MII mode, CLK_CNT = 1. */
			if (((index == 0) && (mode.s.p0mii == 1))
			    || ((index != 0) && (mode.s.type == 1))) {
				cvmx_write_csr(CVMX_GMXX_TXX_CLK
					       (index, interface), 1);
			}
		}
	}

	/* Do a read to make sure all setup stuff is complete */
	cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

	/* Save the new GMX setting without enabling the port */
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

	/* Enable the lowest level RX */
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
		       cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1 <<
									index));

	/* Re-enable the TX path */
	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
			       pko_mem_queue_qos_save[i].u64);
	}

	/* Restore backpressure */
	cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);

	/* Restore the GMX enable state. Port config is complete */
	new_gmx_cfg.s.en = original_gmx_cfg.s.en;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

	return result;
}

/**
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to sent out again.
 *
 * @ipd_port: IPD/PKO port to loopback.
 * @enable_internal:
 *                 Non zero if you want internal loopback
 * @enable_external:
 *                 Non zero if you want external loopback
 *
 * Returns Zero on success, negative on failure.
 */
int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
					   int enable_external)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	int original_enable;
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	union cvmx_asxx_prt_loop asxx_prt_loop;

	/* Read the current enable state and save it */
	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	original_enable = gmx_cfg.s.en;
	/* Force port to be disabled */
	gmx_cfg.s.en = 0;
	if (enable_internal) {
		/* Force speed if we're doing internal loopback */
		gmx_cfg.s.duplex = 1;
		gmx_cfg.s.slottime = 1;
		gmx_cfg.s.speed = 1;
		cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	}
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	/* Set the loopback bits */
	asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
	if (enable_internal)
		asxx_prt_loop.s.int_loop |= 1 << index;
	else
		asxx_prt_loop.s.int_loop &= ~(1 << index);
	if (enable_external)
		asxx_prt_loop.s.ext_loop |= 1 << index;
	else
		asxx_prt_loop.s.ext_loop &= ~(1 << index);
	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);

	/* Force enables in internal loopback */
	if (enable_internal) {
		uint64_t tmp;
		tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
		cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface),
			       (1 << index) | tmp);
		tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
		cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
			       (1 << index) | tmp);
		original_enable = 1;
	}

	/* Restore the enable state */
	gmx_cfg.s.en = original_enable;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	return 0;
}
gpl-2.0
sirmordred/samsung-kernel-msm7x30
drivers/s390/block/dasd_eer.c
5516
20445
/* * Character device driver for extended error reporting. * * Copyright (C) 2005 IBM Corporation * extended error reporting for DASD ECKD devices * Author(s): Stefan Weinhuber <wein@de.ibm.com> */ #define KMSG_COMPONENT "dasd-eckd" #include <linux/init.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/poll.h> #include <linux/mutex.h> #include <linux/err.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/atomic.h> #include <asm/ebcdic.h> #include "dasd_int.h" #include "dasd_eckd.h" #ifdef PRINTK_HEADER #undef PRINTK_HEADER #endif /* PRINTK_HEADER */ #define PRINTK_HEADER "dasd(eer):" /* * SECTION: the internal buffer */ /* * The internal buffer is meant to store obaque blobs of data, so it does * not know of higher level concepts like triggers. * It consists of a number of pages that are used as a ringbuffer. Each data * blob is stored in a simple record that consists of an integer, which * contains the size of the following data, and the data bytes themselfes. * * To allow for multiple independent readers we create one internal buffer * each time the device is opened and destroy the buffer when the file is * closed again. The number of pages used for this buffer is determined by * the module parmeter eer_pages. * * One record can be written to a buffer by using the functions * - dasd_eer_start_record (one time per record to write the size to the * buffer and reserve the space for the data) * - dasd_eer_write_buffer (one or more times per record to write the data) * The data can be written in several steps but you will have to compute * the total size up front for the invocation of dasd_eer_start_record. * If the ringbuffer is full, dasd_eer_start_record will remove the required * number of old records. 
* * A record is typically read in two steps, first read the integer that * specifies the size of the following data, then read the data. * Both can be done by * - dasd_eer_read_buffer * * For all mentioned functions you need to get the bufferlock first and keep * it until a complete record is written or read. * * All information necessary to keep track of an internal buffer is kept in * a struct eerbuffer. The buffer specific to a file pointer is strored in * the private_data field of that file. To be able to write data to all * existing buffers, each buffer is also added to the bufferlist. * If the user does not want to read a complete record in one go, we have to * keep track of the rest of the record. residual stores the number of bytes * that are still to deliver. If the rest of the record is invalidated between * two reads then residual will be set to -1 so that the next read will fail. * All entries in the eerbuffer structure are protected with the bufferlock. * To avoid races between writing to a buffer on the one side and creating * and destroying buffers on the other side, the bufferlock must also be used * to protect the bufferlist. */ static int eer_pages = 5; module_param(eer_pages, int, S_IRUGO|S_IWUSR); struct eerbuffer { struct list_head list; char **buffer; int buffersize; int buffer_page_count; int head; int tail; int residual; }; static LIST_HEAD(bufferlist); static DEFINE_SPINLOCK(bufferlock); static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue); /* * How many free bytes are available on the buffer. * Needs to be called with bufferlock held. */ static int dasd_eer_get_free_bytes(struct eerbuffer *eerb) { if (eerb->head < eerb->tail) return eerb->tail - eerb->head - 1; return eerb->buffersize - eerb->head + eerb->tail -1; } /* * How many bytes of buffer space are used. * Needs to be called with bufferlock held. 
*/ static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb) { if (eerb->head >= eerb->tail) return eerb->head - eerb->tail; return eerb->buffersize - eerb->tail + eerb->head; } /* * The dasd_eer_write_buffer function just copies count bytes of data * to the buffer. Make sure to call dasd_eer_start_record first, to * make sure that enough free space is available. * Needs to be called with bufferlock held. */ static void dasd_eer_write_buffer(struct eerbuffer *eerb, char *data, int count) { unsigned long headindex,localhead; unsigned long rest, len; char *nextdata; nextdata = data; rest = count; while (rest > 0) { headindex = eerb->head / PAGE_SIZE; localhead = eerb->head % PAGE_SIZE; len = min(rest, PAGE_SIZE - localhead); memcpy(eerb->buffer[headindex]+localhead, nextdata, len); nextdata += len; rest -= len; eerb->head += len; if (eerb->head == eerb->buffersize) eerb->head = 0; /* wrap around */ BUG_ON(eerb->head > eerb->buffersize); } } /* * Needs to be called with bufferlock held. */ static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count) { unsigned long tailindex,localtail; unsigned long rest, len, finalcount; char *nextdata; finalcount = min(count, dasd_eer_get_filled_bytes(eerb)); nextdata = data; rest = finalcount; while (rest > 0) { tailindex = eerb->tail / PAGE_SIZE; localtail = eerb->tail % PAGE_SIZE; len = min(rest, PAGE_SIZE - localtail); memcpy(nextdata, eerb->buffer[tailindex] + localtail, len); nextdata += len; rest -= len; eerb->tail += len; if (eerb->tail == eerb->buffersize) eerb->tail = 0; /* wrap around */ BUG_ON(eerb->tail > eerb->buffersize); } return finalcount; } /* * Whenever you want to write a blob of data to the internal buffer you * have to start by using this function first. It will write the number * of bytes that will be written to the buffer. If necessary it will remove * old records to make room for the new one. * Needs to be called with bufferlock held. 
*/ static int dasd_eer_start_record(struct eerbuffer *eerb, int count) { int tailcount; if (count + sizeof(count) > eerb->buffersize) return -ENOMEM; while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) { if (eerb->residual > 0) { eerb->tail += eerb->residual; if (eerb->tail >= eerb->buffersize) eerb->tail -= eerb->buffersize; eerb->residual = -1; } dasd_eer_read_buffer(eerb, (char *) &tailcount, sizeof(tailcount)); eerb->tail += tailcount; if (eerb->tail >= eerb->buffersize) eerb->tail -= eerb->buffersize; } dasd_eer_write_buffer(eerb, (char*) &count, sizeof(count)); return 0; }; /* * Release pages that are not used anymore. */ static void dasd_eer_free_buffer_pages(char **buf, int no_pages) { int i; for (i = 0; i < no_pages; i++) free_page((unsigned long) buf[i]); } /* * Allocate a new set of memory pages. */ static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages) { int i; for (i = 0; i < no_pages; i++) { buf[i] = (char *) get_zeroed_page(GFP_KERNEL); if (!buf[i]) { dasd_eer_free_buffer_pages(buf, i); return -ENOMEM; } } return 0; } /* * SECTION: The extended error reporting functionality */ /* * When a DASD device driver wants to report an error, it calls the * function dasd_eer_write and gives the respective trigger ID as * parameter. Currently there are four kinds of triggers: * * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems * DASD_EER_PPRCSUSPEND: PPRC was suspended * DASD_EER_NOPATH: There is no path to the device left. * DASD_EER_STATECHANGE: The state of the device has changed. * * For the first three triggers all required information can be supplied by * the caller. For these triggers a record is written by the function * dasd_eer_write_standard_trigger. * * The DASD_EER_STATECHANGE trigger is special since a sense subsystem * status ccw need to be executed to gather the necessary sense data first. 
* The dasd_eer_snss function will queue the SNSS request and the request * callback will then call dasd_eer_write with the DASD_EER_STATCHANGE * trigger. * * To avoid memory allocations at runtime, the necessary memory is allocated * when the extended error reporting is enabled for a device (by * dasd_eer_probe). There is one sense subsystem status request for each * eer enabled DASD device. The presence of the cqr in device->eer_cqr * indicates that eer is enable for the device. The use of the snss request * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates * that the cqr is currently in use, dasd_eer_snss cannot start a second * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of * the SNSS request will check the bit and call dasd_eer_snss again. */ #define SNSS_DATA_SIZE 44 #define DASD_EER_BUSID_SIZE 10 struct dasd_eer_header { __u32 total_size; __u32 trigger; __u64 tv_sec; __u64 tv_usec; char busid[DASD_EER_BUSID_SIZE]; } __attribute__ ((packed)); /* * The following function can be used for those triggers that have * all necessary data available when the function is called. * If the parameter cqr is not NULL, the chain of requests will be searched * for valid sense data, and all valid sense data sets will be added to * the triggers data. 
*/ static void dasd_eer_write_standard_trigger(struct dasd_device *device, struct dasd_ccw_req *cqr, int trigger) { struct dasd_ccw_req *temp_cqr; int data_size; struct timeval tv; struct dasd_eer_header header; unsigned long flags; struct eerbuffer *eerb; char *sense; /* go through cqr chain and count the valid sense data sets */ data_size = 0; for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) if (dasd_get_sense(&temp_cqr->irb)) data_size += 32; header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ header.trigger = trigger; do_gettimeofday(&tv); header.tv_sec = tv.tv_sec; header.tv_usec = tv.tv_usec; strncpy(header.busid, dev_name(&device->cdev->dev), DASD_EER_BUSID_SIZE); spin_lock_irqsave(&bufferlock, flags); list_for_each_entry(eerb, &bufferlist, list) { dasd_eer_start_record(eerb, header.total_size); dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header)); for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) { sense = dasd_get_sense(&temp_cqr->irb); if (sense) dasd_eer_write_buffer(eerb, sense, 32); } dasd_eer_write_buffer(eerb, "EOR", 4); } spin_unlock_irqrestore(&bufferlock, flags); wake_up_interruptible(&dasd_eer_read_wait_queue); } /* * This function writes a DASD_EER_STATECHANGE trigger. */ static void dasd_eer_write_snss_trigger(struct dasd_device *device, struct dasd_ccw_req *cqr, int trigger) { int data_size; int snss_rc; struct timeval tv; struct dasd_eer_header header; unsigned long flags; struct eerbuffer *eerb; snss_rc = (cqr->status == DASD_CQR_DONE) ? 
0 : -EIO; if (snss_rc) data_size = 0; else data_size = SNSS_DATA_SIZE; header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ header.trigger = DASD_EER_STATECHANGE; do_gettimeofday(&tv); header.tv_sec = tv.tv_sec; header.tv_usec = tv.tv_usec; strncpy(header.busid, dev_name(&device->cdev->dev), DASD_EER_BUSID_SIZE); spin_lock_irqsave(&bufferlock, flags); list_for_each_entry(eerb, &bufferlist, list) { dasd_eer_start_record(eerb, header.total_size); dasd_eer_write_buffer(eerb, (char *) &header , sizeof(header)); if (!snss_rc) dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE); dasd_eer_write_buffer(eerb, "EOR", 4); } spin_unlock_irqrestore(&bufferlock, flags); wake_up_interruptible(&dasd_eer_read_wait_queue); } /* * This function is called for all triggers. It calls the appropriate * function that writes the actual trigger records. */ void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr, unsigned int id) { if (!device->eer_cqr) return; switch (id) { case DASD_EER_FATALERROR: case DASD_EER_PPRCSUSPEND: dasd_eer_write_standard_trigger(device, cqr, id); break; case DASD_EER_NOPATH: dasd_eer_write_standard_trigger(device, NULL, id); break; case DASD_EER_STATECHANGE: dasd_eer_write_snss_trigger(device, cqr, id); break; default: /* unknown trigger, so we write it without any sense data */ dasd_eer_write_standard_trigger(device, NULL, id); break; } } EXPORT_SYMBOL(dasd_eer_write); /* * Start a sense subsystem status request. * Needs to be called with the device held. */ void dasd_eer_snss(struct dasd_device *device) { struct dasd_ccw_req *cqr; cqr = device->eer_cqr; if (!cqr) /* Device not eer enabled. */ return; if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) { /* Sense subsystem status request in use. 
*/ set_bit(DASD_FLAG_EER_SNSS, &device->flags); return; } /* cdev is already locked, can't use dasd_add_request_head */ clear_bit(DASD_FLAG_EER_SNSS, &device->flags); cqr->status = DASD_CQR_QUEUED; list_add(&cqr->devlist, &device->ccw_queue); dasd_schedule_device_bh(device); } /* * Callback function for use with sense subsystem status request. */ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) { struct dasd_device *device = cqr->startdev; unsigned long flags; dasd_eer_write(device, cqr, DASD_EER_STATECHANGE); spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); if (device->eer_cqr == cqr) { clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); if (test_bit(DASD_FLAG_EER_SNSS, &device->flags)) /* Another SNSS has been requested in the meantime. */ dasd_eer_snss(device); cqr = NULL; } spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (cqr) /* * Extended error recovery has been switched off while * the SNSS request was running. It could even have * been switched off and on again in which case there * is a new ccw in device->eer_cqr. Free the "old" * snss request now. */ dasd_kfree_request(cqr, device); } /* * Enable error reporting on a given device. */ int dasd_eer_enable(struct dasd_device *device) { struct dasd_ccw_req *cqr; unsigned long flags; struct ccw1 *ccw; if (device->eer_cqr) return 0; if (!device->discipline || strcmp(device->discipline->name, "ECKD")) return -EPERM; /* FIXME: -EMEDIUMTYPE ? 
*/ cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, SNSS_DATA_SIZE, device); if (IS_ERR(cqr)) return -ENOMEM; cqr->startdev = device; cqr->retries = 255; cqr->expires = 10 * HZ; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); ccw = cqr->cpaddr; ccw->cmd_code = DASD_ECKD_CCW_SNSS; ccw->count = SNSS_DATA_SIZE; ccw->flags = 0; ccw->cda = (__u32)(addr_t) cqr->data; cqr->buildclk = get_clock(); cqr->status = DASD_CQR_FILLED; cqr->callback = dasd_eer_snss_cb; spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); if (!device->eer_cqr) { device->eer_cqr = cqr; cqr = NULL; } spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (cqr) dasd_kfree_request(cqr, device); return 0; } /* * Disable error reporting on a given device. */ void dasd_eer_disable(struct dasd_device *device) { struct dasd_ccw_req *cqr; unsigned long flags; int in_use; if (!device->eer_cqr) return; spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); cqr = device->eer_cqr; device->eer_cqr = NULL; clear_bit(DASD_FLAG_EER_SNSS, &device->flags); in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); if (cqr && !in_use) dasd_kfree_request(cqr, device); } /* * SECTION: the device operations */ /* * On the one side we need a lock to access our internal buffer, on the * other side a copy_to_user can sleep. So we need to copy the data we have * to transfer in a readbuffer, which is protected by the readbuffer_mutex. 
*/ static char readbuffer[PAGE_SIZE]; static DEFINE_MUTEX(readbuffer_mutex); static int dasd_eer_open(struct inode *inp, struct file *filp) { struct eerbuffer *eerb; unsigned long flags; eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL); if (!eerb) return -ENOMEM; eerb->buffer_page_count = eer_pages; if (eerb->buffer_page_count < 1 || eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { kfree(eerb); DBF_EVENT(DBF_WARNING, "can't open device since module " "parameter eer_pages is smaller than 1 or" " bigger than %d", (int)(INT_MAX / PAGE_SIZE)); return -EINVAL; } eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *), GFP_KERNEL); if (!eerb->buffer) { kfree(eerb); return -ENOMEM; } if (dasd_eer_allocate_buffer_pages(eerb->buffer, eerb->buffer_page_count)) { kfree(eerb->buffer); kfree(eerb); return -ENOMEM; } filp->private_data = eerb; spin_lock_irqsave(&bufferlock, flags); list_add(&eerb->list, &bufferlist); spin_unlock_irqrestore(&bufferlock, flags); return nonseekable_open(inp,filp); } static int dasd_eer_close(struct inode *inp, struct file *filp) { struct eerbuffer *eerb; unsigned long flags; eerb = (struct eerbuffer *) filp->private_data; spin_lock_irqsave(&bufferlock, flags); list_del(&eerb->list); spin_unlock_irqrestore(&bufferlock, flags); dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count); kfree(eerb->buffer); kfree(eerb); return 0; } static ssize_t dasd_eer_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { int tc,rc; int tailcount,effective_count; unsigned long flags; struct eerbuffer *eerb; eerb = (struct eerbuffer *) filp->private_data; if (mutex_lock_interruptible(&readbuffer_mutex)) return -ERESTARTSYS; spin_lock_irqsave(&bufferlock, flags); if (eerb->residual < 0) { /* the remainder of this record */ /* has been deleted */ eerb->residual = 0; spin_unlock_irqrestore(&bufferlock, flags); mutex_unlock(&readbuffer_mutex); return -EIO; } else if 
(eerb->residual > 0) { /* OK we still have a second half of a record to deliver */ effective_count = min(eerb->residual, (int) count); eerb->residual -= effective_count; } else { tc = 0; while (!tc) { tc = dasd_eer_read_buffer(eerb, (char *) &tailcount, sizeof(tailcount)); if (!tc) { /* no data available */ spin_unlock_irqrestore(&bufferlock, flags); mutex_unlock(&readbuffer_mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; rc = wait_event_interruptible( dasd_eer_read_wait_queue, eerb->head != eerb->tail); if (rc) return rc; if (mutex_lock_interruptible(&readbuffer_mutex)) return -ERESTARTSYS; spin_lock_irqsave(&bufferlock, flags); } } WARN_ON(tc != sizeof(tailcount)); effective_count = min(tailcount,(int)count); eerb->residual = tailcount - effective_count; } tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count); WARN_ON(tc != effective_count); spin_unlock_irqrestore(&bufferlock, flags); if (copy_to_user(buf, readbuffer, effective_count)) { mutex_unlock(&readbuffer_mutex); return -EFAULT; } mutex_unlock(&readbuffer_mutex); return effective_count; } static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable) { unsigned int mask; unsigned long flags; struct eerbuffer *eerb; eerb = (struct eerbuffer *) filp->private_data; poll_wait(filp, &dasd_eer_read_wait_queue, ptable); spin_lock_irqsave(&bufferlock, flags); if (eerb->head != eerb->tail) mask = POLLIN | POLLRDNORM ; else mask = 0; spin_unlock_irqrestore(&bufferlock, flags); return mask; } static const struct file_operations dasd_eer_fops = { .open = &dasd_eer_open, .release = &dasd_eer_close, .read = &dasd_eer_read, .poll = &dasd_eer_poll, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice *dasd_eer_dev = NULL; int __init dasd_eer_init(void) { int rc; dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL); if (!dasd_eer_dev) return -ENOMEM; dasd_eer_dev->minor = MISC_DYNAMIC_MINOR; dasd_eer_dev->name = "dasd_eer"; dasd_eer_dev->fops = &dasd_eer_fops; rc = 
misc_register(dasd_eer_dev); if (rc) { kfree(dasd_eer_dev); dasd_eer_dev = NULL; DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not " "register misc device"); return rc; } return 0; } void dasd_eer_exit(void) { if (dasd_eer_dev) { misc_deregister(dasd_eer_dev); kfree(dasd_eer_dev); dasd_eer_dev = NULL; } }
gpl-2.0
crpalmer/android_kernel_samsung_msm8974
arch/mips/pci/msi-octeon.c
7052
12535
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005-2009, 2010 Cavium Networks */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/msi.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-npi-defs.h> #include <asm/octeon/cvmx-pci-defs.h> #include <asm/octeon/cvmx-npei-defs.h> #include <asm/octeon/cvmx-pexp-defs.h> #include <asm/octeon/pci-octeon.h> /* * Each bit in msi_free_irq_bitmask represents a MSI interrupt that is * in use. */ static u64 msi_free_irq_bitmask[4]; /* * Each bit in msi_multiple_irq_bitmask tells that the device using * this bit in msi_free_irq_bitmask is also using the next bit. This * is used so we can disable all of the MSI interrupts when a device * uses multiple. */ static u64 msi_multiple_irq_bitmask[4]; /* * This lock controls updates to msi_free_irq_bitmask and * msi_multiple_irq_bitmask. */ static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock); /* * Number of MSI IRQs used. This variable is set up in * the module init time. */ static int msi_irq_size; /** * Called when a driver request MSI interrupts instead of the * legacy INT A-D. This routine will allocate multiple interrupts * for MSI devices that support them. A device can override this by * programming the MSI control bits [6:4] before calling * pci_enable_msi(). * * @dev: Device requesting MSI interrupts * @desc: MSI descriptor * * Returns 0 on success. */ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) { struct msi_msg msg; u16 control; int configured_private_bits; int request_private_bits; int irq = 0; int irq_step; u64 search_mask; int index; /* * Read the MSI config to figure out how many IRQs this device * wants. Most devices only want 1, which will give * configured_private_bits and request_private_bits equal 0. 
*/ pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &control); /* * If the number of private bits has been configured then use * that value instead of the requested number. This gives the * driver the chance to override the number of interrupts * before calling pci_enable_msi(). */ configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4; if (configured_private_bits == 0) { /* Nothing is configured, so use the hardware requested size */ request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1; } else { /* * Use the number of configured bits, assuming the * driver wanted to override the hardware request * value. */ request_private_bits = configured_private_bits; } /* * The PCI 2.3 spec mandates that there are at most 32 * interrupts. If this device asks for more, only give it one. */ if (request_private_bits > 5) request_private_bits = 0; try_only_one: /* * The IRQs have to be aligned on a power of two based on the * number being requested. */ irq_step = 1 << request_private_bits; /* Mask with one bit for each IRQ */ search_mask = (1 << irq_step) - 1; /* * We're going to search msi_free_irq_bitmask_lock for zero * bits. This represents an MSI interrupt number that isn't in * use. 
*/ spin_lock(&msi_free_irq_bitmask_lock); for (index = 0; index < msi_irq_size/64; index++) { for (irq = 0; irq < 64; irq += irq_step) { if ((msi_free_irq_bitmask[index] & (search_mask << irq)) == 0) { msi_free_irq_bitmask[index] |= search_mask << irq; msi_multiple_irq_bitmask[index] |= (search_mask >> 1) << irq; goto msi_irq_allocated; } } } msi_irq_allocated: spin_unlock(&msi_free_irq_bitmask_lock); /* Make sure the search for available interrupts didn't fail */ if (irq >= 64) { if (request_private_bits) { pr_err("arch_setup_msi_irq: Unable to find %d free interrupts, trying just one", 1 << request_private_bits); request_private_bits = 0; goto try_only_one; } else panic("arch_setup_msi_irq: Unable to find a free MSI interrupt"); } /* MSI interrupts start at logical IRQ OCTEON_IRQ_MSI_BIT0 */ irq += index*64; irq += OCTEON_IRQ_MSI_BIT0; switch (octeon_dma_bar_type) { case OCTEON_DMA_BAR_TYPE_SMALL: /* When not using big bar, Bar 0 is based at 128MB */ msg.address_lo = ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff; msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32; case OCTEON_DMA_BAR_TYPE_BIG: /* When using big bar, Bar 0 is based at 0 */ msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff; msg.address_hi = (0 + CVMX_PCI_MSI_RCV) >> 32; break; case OCTEON_DMA_BAR_TYPE_PCIE: /* When using PCIe, Bar 0 is based at 0 */ /* FIXME CVMX_NPEI_MSI_RCV* other than 0? 
*/ msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff; msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32; break; default: panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type"); } msg.data = irq - OCTEON_IRQ_MSI_BIT0; /* Update the number of IRQs the device has available to it */ control &= ~PCI_MSI_FLAGS_QSIZE; control |= request_private_bits << 4; pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, control); irq_set_msi_desc(irq, desc); write_msi_msg(irq, &msg); return 0; } int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { struct msi_desc *entry; int ret; /* * MSI-X is not supported. */ if (type == PCI_CAP_ID_MSIX) return -EINVAL; /* * If an architecture wants to support multiple MSI, it needs to * override arch_setup_msi_irqs() */ if (type == PCI_CAP_ID_MSI && nvec > 1) return 1; list_for_each_entry(entry, &dev->msi_list, list) { ret = arch_setup_msi_irq(dev, entry); if (ret < 0) return ret; if (ret > 0) return -ENOSPC; } return 0; } /** * Called when a device no longer needs its MSI interrupts. All * MSI interrupts for the device are freed. * * @irq: The devices first irq number. There may be multple in sequence. */ void arch_teardown_msi_irq(unsigned int irq) { int number_irqs; u64 bitmask; int index = 0; int irq0; if ((irq < OCTEON_IRQ_MSI_BIT0) || (irq > msi_irq_size + OCTEON_IRQ_MSI_BIT0)) panic("arch_teardown_msi_irq: Attempted to teardown illegal " "MSI interrupt (%d)", irq); irq -= OCTEON_IRQ_MSI_BIT0; index = irq / 64; irq0 = irq % 64; /* * Count the number of IRQs we need to free by looking at the * msi_multiple_irq_bitmask. Each bit set means that the next * IRQ is also owned by this device. 
*/ number_irqs = 0; while ((irq0 + number_irqs < 64) && (msi_multiple_irq_bitmask[index] & (1ull << (irq0 + number_irqs)))) number_irqs++; number_irqs++; /* Mask with one bit for each IRQ */ bitmask = (1 << number_irqs) - 1; /* Shift the mask to the correct bit location */ bitmask <<= irq0; if ((msi_free_irq_bitmask[index] & bitmask) != bitmask) panic("arch_teardown_msi_irq: Attempted to teardown MSI " "interrupt (%d) not in use", irq); /* Checks are done, update the in use bitmask */ spin_lock(&msi_free_irq_bitmask_lock); msi_free_irq_bitmask[index] &= ~bitmask; msi_multiple_irq_bitmask[index] &= ~bitmask; spin_unlock(&msi_free_irq_bitmask_lock); } static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock); static u64 msi_rcv_reg[4]; static u64 mis_ena_reg[4]; static void octeon_irq_msi_enable_pcie(struct irq_data *data) { u64 en; unsigned long flags; int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0; int irq_index = msi_number >> 6; int irq_bit = msi_number & 0x3f; raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags); en = cvmx_read_csr(mis_ena_reg[irq_index]); en |= 1ull << irq_bit; cvmx_write_csr(mis_ena_reg[irq_index], en); cvmx_read_csr(mis_ena_reg[irq_index]); raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); } static void octeon_irq_msi_disable_pcie(struct irq_data *data) { u64 en; unsigned long flags; int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0; int irq_index = msi_number >> 6; int irq_bit = msi_number & 0x3f; raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags); en = cvmx_read_csr(mis_ena_reg[irq_index]); en &= ~(1ull << irq_bit); cvmx_write_csr(mis_ena_reg[irq_index], en); cvmx_read_csr(mis_ena_reg[irq_index]); raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags); } static struct irq_chip octeon_irq_chip_msi_pcie = { .name = "MSI", .irq_enable = octeon_irq_msi_enable_pcie, .irq_disable = octeon_irq_msi_disable_pcie, }; static void octeon_irq_msi_enable_pci(struct irq_data *data) { /* * Octeon PCI doesn't have the ability to mask/unmask MSI * interrupts 
individually. Instead of masking/unmasking them * in groups of 16, we simple assume MSI devices are well * behaved. MSI interrupts are always enable and the ACK is * assumed to be enough */ } static void octeon_irq_msi_disable_pci(struct irq_data *data) { /* See comment in enable */ } static struct irq_chip octeon_irq_chip_msi_pci = { .name = "MSI", .irq_enable = octeon_irq_msi_enable_pci, .irq_disable = octeon_irq_msi_disable_pci, }; /* * Called by the interrupt handling code when an MSI interrupt * occurs. */ static irqreturn_t __octeon_msi_do_interrupt(int index, u64 msi_bits) { int irq; int bit; bit = fls64(msi_bits); if (bit) { bit--; /* Acknowledge it first. */ cvmx_write_csr(msi_rcv_reg[index], 1ull << bit); irq = bit + OCTEON_IRQ_MSI_BIT0 + 64 * index; do_IRQ(irq); return IRQ_HANDLED; } return IRQ_NONE; } #define OCTEON_MSI_INT_HANDLER_X(x) \ static irqreturn_t octeon_msi_interrupt##x(int cpl, void *dev_id) \ { \ u64 msi_bits = cvmx_read_csr(msi_rcv_reg[(x)]); \ return __octeon_msi_do_interrupt((x), msi_bits); \ } /* * Create octeon_msi_interrupt{0-3} function body */ OCTEON_MSI_INT_HANDLER_X(0); OCTEON_MSI_INT_HANDLER_X(1); OCTEON_MSI_INT_HANDLER_X(2); OCTEON_MSI_INT_HANDLER_X(3); /* * Initializes the MSI interrupt handling code */ int __init octeon_msi_initialize(void) { int irq; struct irq_chip *msi; if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; msi_rcv_reg[3] = CVMX_PEXP_NPEI_MSI_RCV3; mis_ena_reg[0] = CVMX_PEXP_NPEI_MSI_ENB0; mis_ena_reg[1] = CVMX_PEXP_NPEI_MSI_ENB1; mis_ena_reg[2] = CVMX_PEXP_NPEI_MSI_ENB2; mis_ena_reg[3] = CVMX_PEXP_NPEI_MSI_ENB3; msi = &octeon_irq_chip_msi_pcie; } else { msi_rcv_reg[0] = CVMX_NPI_NPI_MSI_RCV; #define INVALID_GENERATE_ADE 0x8700000000000000ULL; msi_rcv_reg[1] = INVALID_GENERATE_ADE; msi_rcv_reg[2] = INVALID_GENERATE_ADE; msi_rcv_reg[3] = INVALID_GENERATE_ADE; mis_ena_reg[0] = 
INVALID_GENERATE_ADE; mis_ena_reg[1] = INVALID_GENERATE_ADE; mis_ena_reg[2] = INVALID_GENERATE_ADE; mis_ena_reg[3] = INVALID_GENERATE_ADE; msi = &octeon_irq_chip_msi_pci; } for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++) irq_set_chip_and_handler(irq, msi, handle_simple_irq); if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0, 0, "MSI[0:63]", octeon_msi_interrupt0)) panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt1, 0, "MSI[64:127]", octeon_msi_interrupt1)) panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt2, 0, "MSI[127:191]", octeon_msi_interrupt2)) panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt3, 0, "MSI[192:255]", octeon_msi_interrupt3)) panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed"); msi_irq_size = 256; } else if (octeon_is_pci_host()) { if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0, 0, "MSI[0:15]", octeon_msi_interrupt0)) panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt0, 0, "MSI[16:31]", octeon_msi_interrupt0)) panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt0, 0, "MSI[32:47]", octeon_msi_interrupt0)) panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed"); if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt0, 0, "MSI[48:63]", octeon_msi_interrupt0)) panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed"); msi_irq_size = 64; } return 0; } subsys_initcall(octeon_msi_initialize);
gpl-2.0
CyanogenMod/android_kernel_sony_msm8974
net/netfilter/ipset/ip_set_getport.c
7308
3479
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Get Layer-4 data from the packets */ #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/icmp.h> #include <linux/icmpv6.h> #include <linux/sctp.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/export.h> /* We must handle non-linear skbs */ static bool get_port(const struct sk_buff *skb, int protocol, unsigned int protooff, bool src, __be16 *port, u8 *proto) { switch (protocol) { case IPPROTO_TCP: { struct tcphdr _tcph; const struct tcphdr *th; th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph); if (th == NULL) /* No choice either */ return false; *port = src ? th->source : th->dest; break; } case IPPROTO_SCTP: { sctp_sctphdr_t _sh; const sctp_sctphdr_t *sh; sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh); if (sh == NULL) /* No choice either */ return false; *port = src ? sh->source : sh->dest; break; } case IPPROTO_UDP: case IPPROTO_UDPLITE: { struct udphdr _udph; const struct udphdr *uh; uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph); if (uh == NULL) /* No choice either */ return false; *port = src ? 
uh->source : uh->dest; break; } case IPPROTO_ICMP: { struct icmphdr _ich; const struct icmphdr *ic; ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich); if (ic == NULL) return false; *port = (__force __be16)htons((ic->type << 8) | ic->code); break; } case IPPROTO_ICMPV6: { struct icmp6hdr _ich; const struct icmp6hdr *ic; ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich); if (ic == NULL) return false; *port = (__force __be16) htons((ic->icmp6_type << 8) | ic->icmp6_code); break; } default: break; } *proto = protocol; return true; } bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto) { const struct iphdr *iph = ip_hdr(skb); unsigned int protooff = ip_hdrlen(skb); int protocol = iph->protocol; /* See comments at tcp_match in ip_tables.c */ if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET)) return false; return get_port(skb, protocol, protooff, src, port, proto); } EXPORT_SYMBOL_GPL(ip_set_get_ip4_port); #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto) { int protoff; u8 nexthdr; __be16 frag_off; nexthdr = ipv6_hdr(skb)->nexthdr; protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off); if (protoff < 0) return false; return get_port(skb, nexthdr, protoff, src, port, proto); } EXPORT_SYMBOL_GPL(ip_set_get_ip6_port); #endif bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port) { bool ret; u8 proto; switch (pf) { case NFPROTO_IPV4: ret = ip_set_get_ip4_port(skb, src, port, &proto); break; case NFPROTO_IPV6: ret = ip_set_get_ip6_port(skb, src, port, &proto); break; default: return false; } if (!ret) return ret; switch (proto) { case IPPROTO_TCP: case IPPROTO_UDP: return true; default: return false; } } EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
gpl-2.0
febycv/htc_kernel_creamed_glacier
drivers/video/console/softcursor.c
10124
2129
/* * linux/drivers/video/console/softcursor.c * * Generic software cursor for frame buffer devices * * Created 14 Nov 2002 by James Simmons * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. */ #include <linux/module.h> #include <linux/string.h> #include <linux/fb.h> #include <linux/slab.h> #include <asm/io.h> #include "fbcon.h" int soft_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct fbcon_ops *ops = info->fbcon_par; unsigned int scan_align = info->pixmap.scan_align - 1; unsigned int buf_align = info->pixmap.buf_align - 1; unsigned int i, size, dsize, s_pitch, d_pitch; struct fb_image *image; u8 *src, *dst; if (info->state != FBINFO_STATE_RUNNING) return 0; s_pitch = (cursor->image.width + 7) >> 3; dsize = s_pitch * cursor->image.height; if (dsize + sizeof(struct fb_image) != ops->cursor_size) { if (ops->cursor_src != NULL) kfree(ops->cursor_src); ops->cursor_size = dsize + sizeof(struct fb_image); ops->cursor_src = kmalloc(ops->cursor_size, GFP_ATOMIC); if (!ops->cursor_src) { ops->cursor_size = 0; return -ENOMEM; } } src = ops->cursor_src + sizeof(struct fb_image); image = (struct fb_image *)ops->cursor_src; *image = cursor->image; d_pitch = (s_pitch + scan_align) & ~scan_align; size = d_pitch * image->height + buf_align; size &= ~buf_align; dst = fb_get_buffer_offset(info, &info->pixmap, size); if (cursor->enable) { switch (cursor->rop) { case ROP_XOR: for (i = 0; i < dsize; i++) src[i] = image->data[i] ^ cursor->mask[i]; break; case ROP_COPY: default: for (i = 0; i < dsize; i++) src[i] = image->data[i] & cursor->mask[i]; break; } } else memcpy(src, image->data, dsize); fb_pad_aligned_buffer(dst, d_pitch, src, s_pitch, image->height); image->data = dst; info->fbops->fb_imageblit(info, image); return 0; } EXPORT_SYMBOL(soft_cursor); MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>"); MODULE_DESCRIPTION("Generic software 
cursor"); MODULE_LICENSE("GPL");
gpl-2.0
asoneofus/v350
FADG_4.39A/kernel/drivers/video/riva/riva_hw.c
12940
79994
/***************************************************************************\ |* *| |* Copyright 1993-1999 NVIDIA, Corporation. All rights reserved. *| |* *| |* NOTICE TO USER: The source code is copyrighted under U.S. and *| |* international laws. Users and possessors of this source code are *| |* hereby granted a nonexclusive, royalty-free copyright license to *| |* use this code in individual and commercial software. *| |* *| |* Any use of this source code must include, in the user documenta- *| |* tion and internal comments to the code, notices to the end user *| |* as follows: *| |* *| |* Copyright 1993-1999 NVIDIA, Corporation. All rights reserved. *| |* *| |* NVIDIA, CORPORATION MAKES NO REPRESENTATION ABOUT THE SUITABILITY *| |* OF THIS SOURCE CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" *| |* WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. NVIDIA, CORPOR- *| |* ATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE, *| |* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGE- *| |* MENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL *| |* NVIDIA, CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT, INCI- *| |* DENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RE- *| |* SULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION *| |* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF *| |* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE. *| |* *| |* U.S. Government End Users. This source code is a "commercial *| |* item," as that term is defined at 48 C.F.R. 2.101 (OCT 1995), *| |* consisting of "commercial computer software" and "commercial *| |* computer software documentation," as such terms are used in *| |* 48 C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Govern- *| |* ment only as a commercial end item. Consistent with 48 C.F.R. *| |* 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), *| |* all U.S. 
Government End Users acquire the source code with only *| |* those rights set forth herein. *| |* *| \***************************************************************************/ /* * GPL licensing note -- nVidia is allowing a liberal interpretation of * the documentation restriction above, to merely say that this nVidia's * copyright and disclaimer should be included with all code derived * from this source. -- Jeff Garzik <jgarzik@pobox.com>, 01/Nov/99 */ /* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/riva_hw.c,v 1.33 2002/08/05 20:47:06 mvojkovi Exp $ */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include "riva_hw.h" #include "riva_tbl.h" #include "nv_type.h" /* * This file is an OS-agnostic file used to make RIVA 128 and RIVA TNT * operate identically (except TNT has more memory and better 3D quality. */ static int nv3Busy ( RIVA_HW_INST *chip ) { return ((NV_RD32(&chip->Rop->FifoFree, 0) < chip->FifoEmptyCount) || NV_RD32(&chip->PGRAPH[0x000006B0/4], 0) & 0x01); } static int nv4Busy ( RIVA_HW_INST *chip ) { return ((NV_RD32(&chip->Rop->FifoFree, 0) < chip->FifoEmptyCount) || NV_RD32(&chip->PGRAPH[0x00000700/4], 0) & 0x01); } static int nv10Busy ( RIVA_HW_INST *chip ) { return ((NV_RD32(&chip->Rop->FifoFree, 0) < chip->FifoEmptyCount) || NV_RD32(&chip->PGRAPH[0x00000700/4], 0) & 0x01); } static void vgaLockUnlock ( RIVA_HW_INST *chip, int Lock ) { U008 cr11; VGA_WR08(chip->PCIO, 0x3D4, 0x11); cr11 = VGA_RD08(chip->PCIO, 0x3D5); if(Lock) cr11 |= 0x80; else cr11 &= ~0x80; VGA_WR08(chip->PCIO, 0x3D5, cr11); } static void nv3LockUnlock ( RIVA_HW_INST *chip, int Lock ) { VGA_WR08(chip->PVIO, 0x3C4, 0x06); VGA_WR08(chip->PVIO, 0x3C5, Lock ? 0x99 : 0x57); vgaLockUnlock(chip, Lock); } static void nv4LockUnlock ( RIVA_HW_INST *chip, int Lock ) { VGA_WR08(chip->PCIO, 0x3D4, 0x1F); VGA_WR08(chip->PCIO, 0x3D5, Lock ? 
0x99 : 0x57); vgaLockUnlock(chip, Lock); } static int ShowHideCursor ( RIVA_HW_INST *chip, int ShowHide ) { int cursor; cursor = chip->CurrentState->cursor1; chip->CurrentState->cursor1 = (chip->CurrentState->cursor1 & 0xFE) | (ShowHide & 0x01); VGA_WR08(chip->PCIO, 0x3D4, 0x31); VGA_WR08(chip->PCIO, 0x3D5, chip->CurrentState->cursor1); return (cursor & 0x01); } /****************************************************************************\ * * * The video arbitration routines calculate some "magic" numbers. Fixes * * the snow seen when accessing the framebuffer without it. * * It just works (I hope). * * * \****************************************************************************/ #define DEFAULT_GR_LWM 100 #define DEFAULT_VID_LWM 100 #define DEFAULT_GR_BURST_SIZE 256 #define DEFAULT_VID_BURST_SIZE 128 #define VIDEO 0 #define GRAPHICS 1 #define MPORT 2 #define ENGINE 3 #define GFIFO_SIZE 320 #define GFIFO_SIZE_128 256 #define MFIFO_SIZE 120 #define VFIFO_SIZE 256 typedef struct { int gdrain_rate; int vdrain_rate; int mdrain_rate; int gburst_size; int vburst_size; char vid_en; char gr_en; int wcmocc, wcgocc, wcvocc, wcvlwm, wcglwm; int by_gfacc; char vid_only_once; char gr_only_once; char first_vacc; char first_gacc; char first_macc; int vocc; int gocc; int mocc; char cur; char engine_en; char converged; int priority; } nv3_arb_info; typedef struct { int graphics_lwm; int video_lwm; int graphics_burst_size; int video_burst_size; int graphics_hi_priority; int media_hi_priority; int rtl_values; int valid; } nv3_fifo_info; typedef struct { char pix_bpp; char enable_video; char gr_during_vid; char enable_mp; int memory_width; int video_scale; int pclk_khz; int mclk_khz; int mem_page_miss; int mem_latency; char mem_aligned; } nv3_sim_state; typedef struct { int graphics_lwm; int video_lwm; int graphics_burst_size; int video_burst_size; int valid; } nv4_fifo_info; typedef struct { int pclk_khz; int mclk_khz; int nvclk_khz; char mem_page_miss; char mem_latency; int 
memory_width; char enable_video; char gr_during_vid; char pix_bpp; char mem_aligned; char enable_mp; } nv4_sim_state; typedef struct { int graphics_lwm; int video_lwm; int graphics_burst_size; int video_burst_size; int valid; } nv10_fifo_info; typedef struct { int pclk_khz; int mclk_khz; int nvclk_khz; char mem_page_miss; char mem_latency; u32 memory_type; int memory_width; char enable_video; char gr_during_vid; char pix_bpp; char mem_aligned; char enable_mp; } nv10_sim_state; static int nv3_iterate(nv3_fifo_info *res_info, nv3_sim_state * state, nv3_arb_info *ainfo) { int iter = 0; int tmp; int vfsize, mfsize, gfsize; int mburst_size = 32; int mmisses, gmisses, vmisses; int misses; int vlwm, glwm, mlwm; int last, next, cur; int max_gfsize ; long ns; vlwm = 0; glwm = 0; mlwm = 0; vfsize = 0; gfsize = 0; cur = ainfo->cur; mmisses = 2; gmisses = 2; vmisses = 2; if (ainfo->gburst_size == 128) max_gfsize = GFIFO_SIZE_128; else max_gfsize = GFIFO_SIZE; max_gfsize = GFIFO_SIZE; while (1) { if (ainfo->vid_en) { if (ainfo->wcvocc > ainfo->vocc) ainfo->wcvocc = ainfo->vocc; if (ainfo->wcvlwm > vlwm) ainfo->wcvlwm = vlwm ; ns = 1000000 * ainfo->vburst_size/(state->memory_width/8)/state->mclk_khz; vfsize = ns * ainfo->vdrain_rate / 1000000; vfsize = ainfo->wcvlwm - ainfo->vburst_size + vfsize; } if (state->enable_mp) { if (ainfo->wcmocc > ainfo->mocc) ainfo->wcmocc = ainfo->mocc; } if (ainfo->gr_en) { if (ainfo->wcglwm > glwm) ainfo->wcglwm = glwm ; if (ainfo->wcgocc > ainfo->gocc) ainfo->wcgocc = ainfo->gocc; ns = 1000000 * (ainfo->gburst_size/(state->memory_width/8))/state->mclk_khz; gfsize = (ns * (long) ainfo->gdrain_rate)/1000000; gfsize = ainfo->wcglwm - ainfo->gburst_size + gfsize; } mfsize = 0; if (!state->gr_during_vid && ainfo->vid_en) if (ainfo->vid_en && (ainfo->vocc < 0) && !ainfo->vid_only_once) next = VIDEO; else if (ainfo->mocc < 0) next = MPORT; else if (ainfo->gocc< ainfo->by_gfacc) next = GRAPHICS; else return (0); else switch (ainfo->priority) { case 
VIDEO: if (ainfo->vid_en && ainfo->vocc<0 && !ainfo->vid_only_once) next = VIDEO; else if (ainfo->gr_en && ainfo->gocc<0 && !ainfo->gr_only_once) next = GRAPHICS; else if (ainfo->mocc<0) next = MPORT; else return (0); break; case GRAPHICS: if (ainfo->gr_en && ainfo->gocc<0 && !ainfo->gr_only_once) next = GRAPHICS; else if (ainfo->vid_en && ainfo->vocc<0 && !ainfo->vid_only_once) next = VIDEO; else if (ainfo->mocc<0) next = MPORT; else return (0); break; default: if (ainfo->mocc<0) next = MPORT; else if (ainfo->gr_en && ainfo->gocc<0 && !ainfo->gr_only_once) next = GRAPHICS; else if (ainfo->vid_en && ainfo->vocc<0 && !ainfo->vid_only_once) next = VIDEO; else return (0); break; } last = cur; cur = next; iter++; switch (cur) { case VIDEO: if (last==cur) misses = 0; else if (ainfo->first_vacc) misses = vmisses; else misses = 1; ainfo->first_vacc = 0; if (last!=cur) { ns = 1000000 * (vmisses*state->mem_page_miss + state->mem_latency)/state->mclk_khz; vlwm = ns * ainfo->vdrain_rate/ 1000000; vlwm = ainfo->vocc - vlwm; } ns = 1000000*(misses*state->mem_page_miss + ainfo->vburst_size)/(state->memory_width/8)/state->mclk_khz; ainfo->vocc = ainfo->vocc + ainfo->vburst_size - ns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gocc - ns*ainfo->gdrain_rate/1000000; ainfo->mocc = ainfo->mocc - ns*ainfo->mdrain_rate/1000000; break; case GRAPHICS: if (last==cur) misses = 0; else if (ainfo->first_gacc) misses = gmisses; else misses = 1; ainfo->first_gacc = 0; if (last!=cur) { ns = 1000000*(gmisses*state->mem_page_miss + state->mem_latency)/state->mclk_khz ; glwm = ns * ainfo->gdrain_rate/1000000; glwm = ainfo->gocc - glwm; } ns = 1000000*(misses*state->mem_page_miss + ainfo->gburst_size/(state->memory_width/8))/state->mclk_khz; ainfo->vocc = ainfo->vocc + 0 - ns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gocc + ainfo->gburst_size - ns*ainfo->gdrain_rate/1000000; ainfo->mocc = ainfo->mocc + 0 - ns*ainfo->mdrain_rate/1000000; break; default: if (last==cur) misses = 0; else if 
(ainfo->first_macc) misses = mmisses; else misses = 1; ainfo->first_macc = 0; ns = 1000000*(misses*state->mem_page_miss + mburst_size/(state->memory_width/8))/state->mclk_khz; ainfo->vocc = ainfo->vocc + 0 - ns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gocc + 0 - ns*ainfo->gdrain_rate/1000000; ainfo->mocc = ainfo->mocc + mburst_size - ns*ainfo->mdrain_rate/1000000; break; } if (iter>100) { ainfo->converged = 0; return (1); } ns = 1000000*ainfo->gburst_size/(state->memory_width/8)/state->mclk_khz; tmp = ns * ainfo->gdrain_rate/1000000; if (abs(ainfo->gburst_size) + ((abs(ainfo->wcglwm) + 16 ) & ~0x7) - tmp > max_gfsize) { ainfo->converged = 0; return (1); } ns = 1000000*ainfo->vburst_size/(state->memory_width/8)/state->mclk_khz; tmp = ns * ainfo->vdrain_rate/1000000; if (abs(ainfo->vburst_size) + (abs(ainfo->wcvlwm + 32) & ~0xf) - tmp> VFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(ainfo->gocc) > max_gfsize) { ainfo->converged = 0; return (1); } if (abs(ainfo->vocc) > VFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(ainfo->mocc) > MFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(vfsize) > VFIFO_SIZE) { ainfo->converged = 0; return (1); } if (abs(gfsize) > max_gfsize) { ainfo->converged = 0; return (1); } if (abs(mfsize) > MFIFO_SIZE) { ainfo->converged = 0; return (1); } } } static char nv3_arb(nv3_fifo_info * res_info, nv3_sim_state * state, nv3_arb_info *ainfo) { long ens, vns, mns, gns; int mmisses, gmisses, vmisses, eburst_size, mburst_size; int refresh_cycle; refresh_cycle = 0; refresh_cycle = 2*(state->mclk_khz/state->pclk_khz) + 5; mmisses = 2; if (state->mem_aligned) gmisses = 2; else gmisses = 3; vmisses = 2; eburst_size = state->memory_width * 1; mburst_size = 32; gns = 1000000 * (gmisses*state->mem_page_miss + state->mem_latency)/state->mclk_khz; ainfo->by_gfacc = gns*ainfo->gdrain_rate/1000000; ainfo->wcmocc = 0; ainfo->wcgocc = 0; ainfo->wcvocc = 0; ainfo->wcvlwm = 0; ainfo->wcglwm = 0; ainfo->engine_en = 1; 
ainfo->converged = 1; if (ainfo->engine_en) { ens = 1000000*(state->mem_page_miss + eburst_size/(state->memory_width/8) +refresh_cycle)/state->mclk_khz; ainfo->mocc = state->enable_mp ? 0-ens*ainfo->mdrain_rate/1000000 : 0; ainfo->vocc = ainfo->vid_en ? 0-ens*ainfo->vdrain_rate/1000000 : 0; ainfo->gocc = ainfo->gr_en ? 0-ens*ainfo->gdrain_rate/1000000 : 0; ainfo->cur = ENGINE; ainfo->first_vacc = 1; ainfo->first_gacc = 1; ainfo->first_macc = 1; nv3_iterate(res_info, state,ainfo); } if (state->enable_mp) { mns = 1000000 * (mmisses*state->mem_page_miss + mburst_size/(state->memory_width/8) + refresh_cycle)/state->mclk_khz; ainfo->mocc = state->enable_mp ? 0 : mburst_size - mns*ainfo->mdrain_rate/1000000; ainfo->vocc = ainfo->vid_en ? 0 : 0- mns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gr_en ? 0: 0- mns*ainfo->gdrain_rate/1000000; ainfo->cur = MPORT; ainfo->first_vacc = 1; ainfo->first_gacc = 1; ainfo->first_macc = 0; nv3_iterate(res_info, state,ainfo); } if (ainfo->gr_en) { ainfo->first_vacc = 1; ainfo->first_gacc = 0; ainfo->first_macc = 1; gns = 1000000*(gmisses*state->mem_page_miss + ainfo->gburst_size/(state->memory_width/8) + refresh_cycle)/state->mclk_khz; ainfo->gocc = ainfo->gburst_size - gns*ainfo->gdrain_rate/1000000; ainfo->vocc = ainfo->vid_en? 0-gns*ainfo->vdrain_rate/1000000 : 0; ainfo->mocc = state->enable_mp ? 0-gns*ainfo->mdrain_rate/1000000: 0; ainfo->cur = GRAPHICS; nv3_iterate(res_info, state,ainfo); } if (ainfo->vid_en) { ainfo->first_vacc = 0; ainfo->first_gacc = 1; ainfo->first_macc = 1; vns = 1000000*(vmisses*state->mem_page_miss + ainfo->vburst_size/(state->memory_width/8) + refresh_cycle)/state->mclk_khz; ainfo->vocc = ainfo->vburst_size - vns*ainfo->vdrain_rate/1000000; ainfo->gocc = ainfo->gr_en? (0-vns*ainfo->gdrain_rate/1000000) : 0; ainfo->mocc = state->enable_mp? 
0-vns*ainfo->mdrain_rate/1000000 :0 ; ainfo->cur = VIDEO; nv3_iterate(res_info, state, ainfo); } if (ainfo->converged) { res_info->graphics_lwm = (int)abs(ainfo->wcglwm) + 16; res_info->video_lwm = (int)abs(ainfo->wcvlwm) + 32; res_info->graphics_burst_size = ainfo->gburst_size; res_info->video_burst_size = ainfo->vburst_size; res_info->graphics_hi_priority = (ainfo->priority == GRAPHICS); res_info->media_hi_priority = (ainfo->priority == MPORT); if (res_info->video_lwm > 160) { res_info->graphics_lwm = 256; res_info->video_lwm = 128; res_info->graphics_burst_size = 64; res_info->video_burst_size = 64; res_info->graphics_hi_priority = 0; res_info->media_hi_priority = 0; ainfo->converged = 0; return (0); } if (res_info->video_lwm > 128) { res_info->video_lwm = 128; } return (1); } else { res_info->graphics_lwm = 256; res_info->video_lwm = 128; res_info->graphics_burst_size = 64; res_info->video_burst_size = 64; res_info->graphics_hi_priority = 0; res_info->media_hi_priority = 0; return (0); } } static char nv3_get_param(nv3_fifo_info *res_info, nv3_sim_state * state, nv3_arb_info *ainfo) { int done, g,v, p; done = 0; for (p=0; p < 2; p++) { for (g=128 ; g > 32; g= g>> 1) { for (v=128; v >=32; v = v>> 1) { ainfo->priority = p; ainfo->gburst_size = g; ainfo->vburst_size = v; done = nv3_arb(res_info, state,ainfo); if (done && (g==128)) if ((res_info->graphics_lwm + g) > 256) done = 0; if (done) goto Done; } } } Done: return done; } static void nv3CalcArbitration ( nv3_fifo_info * res_info, nv3_sim_state * state ) { nv3_fifo_info save_info; nv3_arb_info ainfo; char res_gr, res_vid; ainfo.gr_en = 1; ainfo.vid_en = state->enable_video; ainfo.vid_only_once = 0; ainfo.gr_only_once = 0; ainfo.gdrain_rate = (int) state->pclk_khz * (state->pix_bpp/8); ainfo.vdrain_rate = (int) state->pclk_khz * 2; if (state->video_scale != 0) ainfo.vdrain_rate = ainfo.vdrain_rate/state->video_scale; ainfo.mdrain_rate = 33000; res_info->rtl_values = 0; if (!state->gr_during_vid && 
state->enable_video) { ainfo.gr_only_once = 1; ainfo.gr_en = 1; ainfo.gdrain_rate = 0; res_vid = nv3_get_param(res_info, state, &ainfo); res_vid = ainfo.converged; save_info.video_lwm = res_info->video_lwm; save_info.video_burst_size = res_info->video_burst_size; ainfo.vid_en = 1; ainfo.vid_only_once = 1; ainfo.gr_en = 1; ainfo.gdrain_rate = (int) state->pclk_khz * (state->pix_bpp/8); ainfo.vdrain_rate = 0; res_gr = nv3_get_param(res_info, state, &ainfo); res_gr = ainfo.converged; res_info->video_lwm = save_info.video_lwm; res_info->video_burst_size = save_info.video_burst_size; res_info->valid = res_gr & res_vid; } else { if (!ainfo.gr_en) ainfo.gdrain_rate = 0; if (!ainfo.vid_en) ainfo.vdrain_rate = 0; res_gr = nv3_get_param(res_info, state, &ainfo); res_info->valid = ainfo.converged; } } static void nv3UpdateArbitrationSettings ( unsigned VClk, unsigned pixelDepth, unsigned *burst, unsigned *lwm, RIVA_HW_INST *chip ) { nv3_fifo_info fifo_data; nv3_sim_state sim_data; unsigned int M, N, P, pll, MClk; pll = NV_RD32(&chip->PRAMDAC0[0x00000504/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; MClk = (N * chip->CrystalFreqKHz / M) >> P; sim_data.pix_bpp = (char)pixelDepth; sim_data.enable_video = 0; sim_data.enable_mp = 0; sim_data.video_scale = 1; sim_data.memory_width = (NV_RD32(&chip->PEXTDEV[0x00000000/4], 0) & 0x10) ? 
128 : 64; sim_data.memory_width = 128; sim_data.mem_latency = 9; sim_data.mem_aligned = 1; sim_data.mem_page_miss = 11; sim_data.gr_during_vid = 0; sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; nv3CalcArbitration(&fifo_data, &sim_data); if (fifo_data.valid) { int b = fifo_data.graphics_burst_size >> 4; *burst = 0; while (b >>= 1) (*burst)++; *lwm = fifo_data.graphics_lwm >> 3; } else { *lwm = 0x24; *burst = 0x2; } } static void nv4CalcArbitration ( nv4_fifo_info *fifo, nv4_sim_state *arb ) { int data, pagemiss, cas,width, video_enable, color_key_enable, bpp, align; int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs; int found, mclk_extra, mclk_loop, cbs, m1, p1; int mclk_freq, pclk_freq, nvclk_freq, mp_enable; int us_m, us_n, us_p, video_drain_rate, crtc_drain_rate; int vpm_us, us_video, vlwm, video_fill_us, cpm_us, us_crt,clwm; int craw, vraw; fifo->valid = 1; pclk_freq = arb->pclk_khz; mclk_freq = arb->mclk_khz; nvclk_freq = arb->nvclk_khz; pagemiss = arb->mem_page_miss; cas = arb->mem_latency; width = arb->memory_width >> 6; video_enable = arb->enable_video; color_key_enable = arb->gr_during_vid; bpp = arb->pix_bpp; align = arb->mem_aligned; mp_enable = arb->enable_mp; clwm = 0; vlwm = 0; cbs = 128; pclks = 2; nvclks = 2; nvclks += 2; nvclks += 1; mclks = 5; mclks += 3; mclks += 1; mclks += cas; mclks += 1; mclks += 1; mclks += 1; mclks += 1; mclk_extra = 3; nvclks += 2; nvclks += 1; nvclks += 1; nvclks += 1; if (mp_enable) mclks+=4; nvclks += 0; pclks += 0; found = 0; vbs = 0; while (found != 1) { fifo->valid = 1; found = 1; mclk_loop = mclks+mclk_extra; us_m = mclk_loop *1000*1000 / mclk_freq; us_n = nvclks*1000*1000 / nvclk_freq; us_p = nvclks*1000*1000 / pclk_freq; if (video_enable) { video_drain_rate = pclk_freq * 2; crtc_drain_rate = pclk_freq * bpp/8; vpagemiss = 2; vpagemiss += 1; crtpagemiss = 2; vpm_us = (vpagemiss * pagemiss)*1000*1000/mclk_freq; if (nvclk_freq * 2 > mclk_freq * width) video_fill_us = cbs*1000*1000 / 16 / nvclk_freq ; else 
video_fill_us = cbs*1000*1000 / (8 * width) / mclk_freq; us_video = vpm_us + us_m + us_n + us_p + video_fill_us; vlwm = us_video * video_drain_rate/(1000*1000); vlwm++; vbs = 128; if (vlwm > 128) vbs = 64; if (vlwm > (256-64)) vbs = 32; if (nvclk_freq * 2 > mclk_freq * width) video_fill_us = vbs *1000*1000/ 16 / nvclk_freq ; else video_fill_us = vbs*1000*1000 / (8 * width) / mclk_freq; cpm_us = crtpagemiss * pagemiss *1000*1000/ mclk_freq; us_crt = us_video +video_fill_us +cpm_us +us_m + us_n +us_p ; clwm = us_crt * crtc_drain_rate/(1000*1000); clwm++; } else { crtc_drain_rate = pclk_freq * bpp/8; crtpagemiss = 2; crtpagemiss += 1; cpm_us = crtpagemiss * pagemiss *1000*1000/ mclk_freq; us_crt = cpm_us + us_m + us_n + us_p ; clwm = us_crt * crtc_drain_rate/(1000*1000); clwm++; } m1 = clwm + cbs - 512; p1 = m1 * pclk_freq / mclk_freq; p1 = p1 * bpp / 8; if ((p1 < m1) && (m1 > 0)) { fifo->valid = 0; found = 0; if (mclk_extra ==0) found = 1; mclk_extra--; } else if (video_enable) { if ((clwm > 511) || (vlwm > 255)) { fifo->valid = 0; found = 0; if (mclk_extra ==0) found = 1; mclk_extra--; } } else { if (clwm > 519) { fifo->valid = 0; found = 0; if (mclk_extra ==0) found = 1; mclk_extra--; } } craw = clwm; vraw = vlwm; if (clwm < 384) clwm = 384; if (vlwm < 128) vlwm = 128; data = (int)(clwm); fifo->graphics_lwm = data; fifo->graphics_burst_size = 128; data = (int)((vlwm+15)); fifo->video_lwm = data; fifo->video_burst_size = vbs; } } static void nv4UpdateArbitrationSettings ( unsigned VClk, unsigned pixelDepth, unsigned *burst, unsigned *lwm, RIVA_HW_INST *chip ) { nv4_fifo_info fifo_data; nv4_sim_state sim_data; unsigned int M, N, P, pll, MClk, NVClk, cfg1; pll = NV_RD32(&chip->PRAMDAC0[0x00000504/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; MClk = (N * chip->CrystalFreqKHz / M) >> P; pll = NV_RD32(&chip->PRAMDAC0[0x00000500/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; NVClk = (N * chip->CrystalFreqKHz / 
M) >> P; cfg1 = NV_RD32(&chip->PFB[0x00000204/4], 0); sim_data.pix_bpp = (char)pixelDepth; sim_data.enable_video = 0; sim_data.enable_mp = 0; sim_data.memory_width = (NV_RD32(&chip->PEXTDEV[0x00000000/4], 0) & 0x10) ? 128 : 64; sim_data.mem_latency = (char)cfg1 & 0x0F; sim_data.mem_aligned = 1; sim_data.mem_page_miss = (char)(((cfg1 >> 4) &0x0F) + ((cfg1 >> 31) & 0x01)); sim_data.gr_during_vid = 0; sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; sim_data.nvclk_khz = NVClk; nv4CalcArbitration(&fifo_data, &sim_data); if (fifo_data.valid) { int b = fifo_data.graphics_burst_size >> 4; *burst = 0; while (b >>= 1) (*burst)++; *lwm = fifo_data.graphics_lwm >> 3; } } static void nv10CalcArbitration ( nv10_fifo_info *fifo, nv10_sim_state *arb ) { int data, pagemiss, cas,width, video_enable, color_key_enable, bpp, align; int nvclks, mclks, pclks, vpagemiss, crtpagemiss, vbs; int nvclk_fill, us_extra; int found, mclk_extra, mclk_loop, cbs, m1; int mclk_freq, pclk_freq, nvclk_freq, mp_enable; int us_m, us_m_min, us_n, us_p, video_drain_rate, crtc_drain_rate; int vus_m, vus_n, vus_p; int vpm_us, us_video, vlwm, cpm_us, us_crt,clwm; int clwm_rnd_down; int craw, m2us, us_pipe, us_pipe_min, vus_pipe, p1clk, p2; int pclks_2_top_fifo, min_mclk_extra; int us_min_mclk_extra; fifo->valid = 1; pclk_freq = arb->pclk_khz; /* freq in KHz */ mclk_freq = arb->mclk_khz; nvclk_freq = arb->nvclk_khz; pagemiss = arb->mem_page_miss; cas = arb->mem_latency; width = arb->memory_width/64; video_enable = arb->enable_video; color_key_enable = arb->gr_during_vid; bpp = arb->pix_bpp; align = arb->mem_aligned; mp_enable = arb->enable_mp; clwm = 0; vlwm = 1024; cbs = 512; vbs = 512; pclks = 4; /* lwm detect. */ nvclks = 3; /* lwm -> sync. */ nvclks += 2; /* fbi bus cycles (1 req + 1 busy) */ mclks = 1; /* 2 edge sync. may be very close to edge so just put one. 
*/ mclks += 1; /* arb_hp_req */ mclks += 5; /* ap_hp_req tiling pipeline */ mclks += 2; /* tc_req latency fifo */ mclks += 2; /* fb_cas_n_ memory request to fbio block */ mclks += 7; /* sm_d_rdv data returned from fbio block */ /* fb.rd.d.Put_gc need to accumulate 256 bits for read */ if (arb->memory_type == 0) if (arb->memory_width == 64) /* 64 bit bus */ mclks += 4; else mclks += 2; else if (arb->memory_width == 64) /* 64 bit bus */ mclks += 2; else mclks += 1; if ((!video_enable) && (arb->memory_width == 128)) { mclk_extra = (bpp == 32) ? 31 : 42; /* Margin of error */ min_mclk_extra = 17; } else { mclk_extra = (bpp == 32) ? 8 : 4; /* Margin of error */ /* mclk_extra = 4; */ /* Margin of error */ min_mclk_extra = 18; } nvclks += 1; /* 2 edge sync. may be very close to edge so just put one. */ nvclks += 1; /* fbi_d_rdv_n */ nvclks += 1; /* Fbi_d_rdata */ nvclks += 1; /* crtfifo load */ if(mp_enable) mclks+=4; /* Mp can get in with a burst of 8. */ /* Extra clocks determined by heuristics */ nvclks += 0; pclks += 0; found = 0; while(found != 1) { fifo->valid = 1; found = 1; mclk_loop = mclks+mclk_extra; us_m = mclk_loop *1000*1000 / mclk_freq; /* Mclk latency in us */ us_m_min = mclks * 1000*1000 / mclk_freq; /* Minimum Mclk latency in us */ us_min_mclk_extra = min_mclk_extra *1000*1000 / mclk_freq; us_n = nvclks*1000*1000 / nvclk_freq;/* nvclk latency in us */ us_p = pclks*1000*1000 / pclk_freq;/* nvclk latency in us */ us_pipe = us_m + us_n + us_p; us_pipe_min = us_m_min + us_n + us_p; us_extra = 0; vus_m = mclk_loop *1000*1000 / mclk_freq; /* Mclk latency in us */ vus_n = (4)*1000*1000 / nvclk_freq;/* nvclk latency in us */ vus_p = 0*1000*1000 / pclk_freq;/* pclk latency in us */ vus_pipe = vus_m + vus_n + vus_p; if(video_enable) { video_drain_rate = pclk_freq * 4; /* MB/s */ crtc_drain_rate = pclk_freq * bpp/8; /* MB/s */ vpagemiss = 1; /* self generating page miss */ vpagemiss += 1; /* One higher priority before */ crtpagemiss = 2; /* self generating page 
miss */ if(mp_enable) crtpagemiss += 1; /* if MA0 conflict */ vpm_us = (vpagemiss * pagemiss)*1000*1000/mclk_freq; us_video = vpm_us + vus_m; /* Video has separate read return path */ cpm_us = crtpagemiss * pagemiss *1000*1000/ mclk_freq; us_crt = us_video /* Wait for video */ +cpm_us /* CRT Page miss */ +us_m + us_n +us_p /* other latency */ ; clwm = us_crt * crtc_drain_rate/(1000*1000); clwm++; /* fixed point <= float_point - 1. Fixes that */ } else { crtc_drain_rate = pclk_freq * bpp/8; /* bpp * pclk/8 */ crtpagemiss = 1; /* self generating page miss */ crtpagemiss += 1; /* MA0 page miss */ if(mp_enable) crtpagemiss += 1; /* if MA0 conflict */ cpm_us = crtpagemiss * pagemiss *1000*1000/ mclk_freq; us_crt = cpm_us + us_m + us_n + us_p ; clwm = us_crt * crtc_drain_rate/(1000*1000); clwm++; /* fixed point <= float_point - 1. Fixes that */ /* // // Another concern, only for high pclks so don't do this // with video: // What happens if the latency to fetch the cbs is so large that // fifo empties. In that case we need to have an alternate clwm value // based off the total burst fetch // us_crt = (cbs * 1000 * 1000)/ (8*width)/mclk_freq ; us_crt = us_crt + us_m + us_n + us_p + (4 * 1000 * 1000)/mclk_freq; clwm_mt = us_crt * crtc_drain_rate/(1000*1000); clwm_mt ++; if(clwm_mt > clwm) clwm = clwm_mt; */ /* Finally, a heuristic check when width == 64 bits */ if(width == 1){ nvclk_fill = nvclk_freq * 8; if(crtc_drain_rate * 100 >= nvclk_fill * 102) clwm = 0xfff; /*Large number to fail */ else if(crtc_drain_rate * 100 >= nvclk_fill * 98) { clwm = 1024; cbs = 512; us_extra = (cbs * 1000 * 1000)/ (8*width)/mclk_freq ; } } } /* Overfill check: */ clwm_rnd_down = ((int)clwm/8)*8; if (clwm_rnd_down < clwm) clwm += 8; m1 = clwm + cbs - 1024; /* Amount of overfill */ m2us = us_pipe_min + us_min_mclk_extra; pclks_2_top_fifo = (1024-clwm)/(8*width); /* pclk cycles to drain */ p1clk = m2us * pclk_freq/(1000*1000); p2 = p1clk * bpp / 8; /* bytes drained. 
*/ if((p2 < m1) && (m1 > 0)) { fifo->valid = 0; found = 0; if(min_mclk_extra == 0) { if(cbs <= 32) { found = 1; /* Can't adjust anymore! */ } else { cbs = cbs/2; /* reduce the burst size */ } } else { min_mclk_extra--; } } else { if (clwm > 1023){ /* Have some margin */ fifo->valid = 0; found = 0; if(min_mclk_extra == 0) found = 1; /* Can't adjust anymore! */ else min_mclk_extra--; } } craw = clwm; if(clwm < (1024-cbs+8)) clwm = 1024-cbs+8; data = (int)(clwm); /* printf("CRT LWM: %f bytes, prog: 0x%x, bs: 256\n", clwm, data ); */ fifo->graphics_lwm = data; fifo->graphics_burst_size = cbs; /* printf("VID LWM: %f bytes, prog: 0x%x, bs: %d\n, ", vlwm, data, vbs ); */ fifo->video_lwm = 1024; fifo->video_burst_size = 512; } } static void nv10UpdateArbitrationSettings ( unsigned VClk, unsigned pixelDepth, unsigned *burst, unsigned *lwm, RIVA_HW_INST *chip ) { nv10_fifo_info fifo_data; nv10_sim_state sim_data; unsigned int M, N, P, pll, MClk, NVClk, cfg1; pll = NV_RD32(&chip->PRAMDAC0[0x00000504/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; MClk = (N * chip->CrystalFreqKHz / M) >> P; pll = NV_RD32(&chip->PRAMDAC0[0x00000500/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; NVClk = (N * chip->CrystalFreqKHz / M) >> P; cfg1 = NV_RD32(&chip->PFB[0x00000204/4], 0); sim_data.pix_bpp = (char)pixelDepth; sim_data.enable_video = 0; sim_data.enable_mp = 0; sim_data.memory_type = (NV_RD32(&chip->PFB[0x00000200/4], 0) & 0x01) ? 1 : 0; sim_data.memory_width = (NV_RD32(&chip->PEXTDEV[0x00000000/4], 0) & 0x10) ? 
128 : 64; sim_data.mem_latency = (char)cfg1 & 0x0F; sim_data.mem_aligned = 1; sim_data.mem_page_miss = (char)(((cfg1 >> 4) &0x0F) + ((cfg1 >> 31) & 0x01)); sim_data.gr_during_vid = 0; sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; sim_data.nvclk_khz = NVClk; nv10CalcArbitration(&fifo_data, &sim_data); if (fifo_data.valid) { int b = fifo_data.graphics_burst_size >> 4; *burst = 0; while (b >>= 1) (*burst)++; *lwm = fifo_data.graphics_lwm >> 3; } } static void nForceUpdateArbitrationSettings ( unsigned VClk, unsigned pixelDepth, unsigned *burst, unsigned *lwm, RIVA_HW_INST *chip ) { nv10_fifo_info fifo_data; nv10_sim_state sim_data; unsigned int M, N, P, pll, MClk, NVClk; unsigned int uMClkPostDiv; struct pci_dev *dev; dev = pci_get_bus_and_slot(0, 3); pci_read_config_dword(dev, 0x6C, &uMClkPostDiv); pci_dev_put(dev); uMClkPostDiv = (uMClkPostDiv >> 8) & 0xf; if(!uMClkPostDiv) uMClkPostDiv = 4; MClk = 400000 / uMClkPostDiv; pll = NV_RD32(&chip->PRAMDAC0[0x00000500/4], 0); M = (pll >> 0) & 0xFF; N = (pll >> 8) & 0xFF; P = (pll >> 16) & 0x0F; NVClk = (N * chip->CrystalFreqKHz / M) >> P; sim_data.pix_bpp = (char)pixelDepth; sim_data.enable_video = 0; sim_data.enable_mp = 0; dev = pci_get_bus_and_slot(0, 1); pci_read_config_dword(dev, 0x7C, &sim_data.memory_type); pci_dev_put(dev); sim_data.memory_type = (sim_data.memory_type >> 12) & 1; sim_data.memory_width = 64; sim_data.mem_latency = 3; sim_data.mem_aligned = 1; sim_data.mem_page_miss = 10; sim_data.gr_during_vid = 0; sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; sim_data.nvclk_khz = NVClk; nv10CalcArbitration(&fifo_data, &sim_data); if (fifo_data.valid) { int b = fifo_data.graphics_burst_size >> 4; *burst = 0; while (b >>= 1) (*burst)++; *lwm = fifo_data.graphics_lwm >> 3; } } /****************************************************************************\ * * * RIVA Mode State Routines * * * \****************************************************************************/ /* * Calculate the Video Clock 
parameters for the PLL. */ static int CalcVClock ( int clockIn, int *clockOut, int *mOut, int *nOut, int *pOut, RIVA_HW_INST *chip ) { unsigned lowM, highM, highP; unsigned DeltaNew, DeltaOld; unsigned VClk, Freq; unsigned M, N, P; DeltaOld = 0xFFFFFFFF; VClk = (unsigned)clockIn; if (chip->CrystalFreqKHz == 13500) { lowM = 7; highM = 13 - (chip->Architecture == NV_ARCH_03); } else { lowM = 8; highM = 14 - (chip->Architecture == NV_ARCH_03); } highP = 4 - (chip->Architecture == NV_ARCH_03); for (P = 0; P <= highP; P ++) { Freq = VClk << P; if ((Freq >= 128000) && (Freq <= chip->MaxVClockFreqKHz)) { for (M = lowM; M <= highM; M++) { N = (VClk << P) * M / chip->CrystalFreqKHz; if(N <= 255) { Freq = (chip->CrystalFreqKHz * N / M) >> P; if (Freq > VClk) DeltaNew = Freq - VClk; else DeltaNew = VClk - Freq; if (DeltaNew < DeltaOld) { *mOut = M; *nOut = N; *pOut = P; *clockOut = Freq; DeltaOld = DeltaNew; } } } } } /* non-zero: M/N/P/clock values assigned. zero: error (not set) */ return (DeltaOld != 0xFFFFFFFF); } /* * Calculate extended mode parameters (SVGA) and save in a * mode state structure. */ int CalcStateExt ( RIVA_HW_INST *chip, RIVA_HW_STATE *state, int bpp, int width, int hDisplaySize, int height, int dotClock ) { int pixelDepth; int uninitialized_var(VClk),uninitialized_var(m), uninitialized_var(n), uninitialized_var(p); /* * Save mode parameters. */ state->bpp = bpp; /* this is not bitsPerPixel, it's 8,15,16,32 */ state->width = width; state->height = height; /* * Extended RIVA registers. */ pixelDepth = (bpp + 1)/8; if (!CalcVClock(dotClock, &VClk, &m, &n, &p, chip)) return -EINVAL; switch (chip->Architecture) { case NV_ARCH_03: nv3UpdateArbitrationSettings(VClk, pixelDepth * 8, &(state->arbitration0), &(state->arbitration1), chip); state->cursor0 = 0x00; state->cursor1 = 0x78; state->cursor2 = 0x00000000; state->pllsel = 0x10010100; state->config = ((width + 31)/32) | (((pixelDepth > 2) ? 
3 : pixelDepth) << 8) | 0x1000; state->general = 0x00100100; state->repaint1 = hDisplaySize < 1280 ? 0x06 : 0x02; break; case NV_ARCH_04: nv4UpdateArbitrationSettings(VClk, pixelDepth * 8, &(state->arbitration0), &(state->arbitration1), chip); state->cursor0 = 0x00; state->cursor1 = 0xFC; state->cursor2 = 0x00000000; state->pllsel = 0x10000700; state->config = 0x00001114; state->general = bpp == 16 ? 0x00101100 : 0x00100100; state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00; break; case NV_ARCH_10: case NV_ARCH_20: case NV_ARCH_30: if((chip->Chipset == NV_CHIP_IGEFORCE2) || (chip->Chipset == NV_CHIP_0x01F0)) { nForceUpdateArbitrationSettings(VClk, pixelDepth * 8, &(state->arbitration0), &(state->arbitration1), chip); } else { nv10UpdateArbitrationSettings(VClk, pixelDepth * 8, &(state->arbitration0), &(state->arbitration1), chip); } state->cursor0 = 0x80 | (chip->CursorStart >> 17); state->cursor1 = (chip->CursorStart >> 11) << 2; state->cursor2 = chip->CursorStart >> 24; state->pllsel = 0x10000700; state->config = NV_RD32(&chip->PFB[0x00000200/4], 0); state->general = bpp == 16 ? 0x00101100 : 0x00100100; state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00; break; } /* Paul Richards: below if block borks things in kernel for some reason */ /* Tony: Below is needed to set hardware in DirectColor */ if((bpp != 8) && (chip->Architecture != NV_ARCH_03)) state->general |= 0x00000030; state->vpll = (p << 16) | (n << 8) | m; state->repaint0 = (((width/8)*pixelDepth) & 0x700) >> 3; state->pixel = pixelDepth > 2 ? 3 : pixelDepth; state->offset0 = state->offset1 = state->offset2 = state->offset3 = 0; state->pitch0 = state->pitch1 = state->pitch2 = state->pitch3 = pixelDepth * width; return 0; } /* * Load fixed function state and pre-calculated/stored state. 
*/ #if 0 #define LOAD_FIXED_STATE(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev)/8; i++) \ chip->dev[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1] #define LOAD_FIXED_STATE_8BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++) \ chip->dev[tbl##Table##dev##_8BPP[i][0]] = tbl##Table##dev##_8BPP[i][1] #define LOAD_FIXED_STATE_15BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++) \ chip->dev[tbl##Table##dev##_15BPP[i][0]] = tbl##Table##dev##_15BPP[i][1] #define LOAD_FIXED_STATE_16BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++) \ chip->dev[tbl##Table##dev##_16BPP[i][0]] = tbl##Table##dev##_16BPP[i][1] #define LOAD_FIXED_STATE_32BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++) \ chip->dev[tbl##Table##dev##_32BPP[i][0]] = tbl##Table##dev##_32BPP[i][1] #endif #define LOAD_FIXED_STATE(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev)/8; i++) \ NV_WR32(&chip->dev[tbl##Table##dev[i][0]], 0, tbl##Table##dev[i][1]) #define LOAD_FIXED_STATE_8BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++) \ NV_WR32(&chip->dev[tbl##Table##dev##_8BPP[i][0]], 0, tbl##Table##dev##_8BPP[i][1]) #define LOAD_FIXED_STATE_15BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++) \ NV_WR32(&chip->dev[tbl##Table##dev##_15BPP[i][0]], 0, tbl##Table##dev##_15BPP[i][1]) #define LOAD_FIXED_STATE_16BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_16BPP)/8; i++) \ NV_WR32(&chip->dev[tbl##Table##dev##_16BPP[i][0]], 0, tbl##Table##dev##_16BPP[i][1]) #define LOAD_FIXED_STATE_32BPP(tbl,dev) \ for (i = 0; i < sizeof(tbl##Table##dev##_32BPP)/8; i++) \ NV_WR32(&chip->dev[tbl##Table##dev##_32BPP[i][0]], 0, tbl##Table##dev##_32BPP[i][1]) static void UpdateFifoState ( RIVA_HW_INST *chip ) { int i; switch (chip->Architecture) { case NV_ARCH_04: LOAD_FIXED_STATE(nv4,FIFO); chip->Tri03 = NULL; chip->Tri05 = (RivaTexturedTriangle05 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case NV_ARCH_10: case 
NV_ARCH_20: case NV_ARCH_30: /* * Initialize state for the RivaTriangle3D05 routines. */ LOAD_FIXED_STATE(nv10tri05,PGRAPH); LOAD_FIXED_STATE(nv10,FIFO); chip->Tri03 = NULL; chip->Tri05 = (RivaTexturedTriangle05 __iomem *)&(chip->FIFO[0x0000E000/4]); break; } } static void LoadStateExt ( RIVA_HW_INST *chip, RIVA_HW_STATE *state ) { int i; /* * Load HW fixed function state. */ LOAD_FIXED_STATE(Riva,PMC); LOAD_FIXED_STATE(Riva,PTIMER); switch (chip->Architecture) { case NV_ARCH_03: /* * Make sure frame buffer config gets set before loading PRAMIN. */ NV_WR32(chip->PFB, 0x00000200, state->config); LOAD_FIXED_STATE(nv3,PFIFO); LOAD_FIXED_STATE(nv3,PRAMIN); LOAD_FIXED_STATE(nv3,PGRAPH); switch (state->bpp) { case 15: case 16: LOAD_FIXED_STATE_15BPP(nv3,PRAMIN); LOAD_FIXED_STATE_15BPP(nv3,PGRAPH); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case 24: case 32: LOAD_FIXED_STATE_32BPP(nv3,PRAMIN); LOAD_FIXED_STATE_32BPP(nv3,PGRAPH); chip->Tri03 = NULL; break; case 8: default: LOAD_FIXED_STATE_8BPP(nv3,PRAMIN); LOAD_FIXED_STATE_8BPP(nv3,PGRAPH); chip->Tri03 = NULL; break; } for (i = 0x00000; i < 0x00800; i++) NV_WR32(&chip->PRAMIN[0x00000502 + i], 0, (i << 12) | 0x03); NV_WR32(chip->PGRAPH, 0x00000630, state->offset0); NV_WR32(chip->PGRAPH, 0x00000634, state->offset1); NV_WR32(chip->PGRAPH, 0x00000638, state->offset2); NV_WR32(chip->PGRAPH, 0x0000063C, state->offset3); NV_WR32(chip->PGRAPH, 0x00000650, state->pitch0); NV_WR32(chip->PGRAPH, 0x00000654, state->pitch1); NV_WR32(chip->PGRAPH, 0x00000658, state->pitch2); NV_WR32(chip->PGRAPH, 0x0000065C, state->pitch3); break; case NV_ARCH_04: /* * Make sure frame buffer config gets set before loading PRAMIN. 
*/ NV_WR32(chip->PFB, 0x00000200, state->config); LOAD_FIXED_STATE(nv4,PFIFO); LOAD_FIXED_STATE(nv4,PRAMIN); LOAD_FIXED_STATE(nv4,PGRAPH); switch (state->bpp) { case 15: LOAD_FIXED_STATE_15BPP(nv4,PRAMIN); LOAD_FIXED_STATE_15BPP(nv4,PGRAPH); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case 16: LOAD_FIXED_STATE_16BPP(nv4,PRAMIN); LOAD_FIXED_STATE_16BPP(nv4,PGRAPH); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case 24: case 32: LOAD_FIXED_STATE_32BPP(nv4,PRAMIN); LOAD_FIXED_STATE_32BPP(nv4,PGRAPH); chip->Tri03 = NULL; break; case 8: default: LOAD_FIXED_STATE_8BPP(nv4,PRAMIN); LOAD_FIXED_STATE_8BPP(nv4,PGRAPH); chip->Tri03 = NULL; break; } NV_WR32(chip->PGRAPH, 0x00000640, state->offset0); NV_WR32(chip->PGRAPH, 0x00000644, state->offset1); NV_WR32(chip->PGRAPH, 0x00000648, state->offset2); NV_WR32(chip->PGRAPH, 0x0000064C, state->offset3); NV_WR32(chip->PGRAPH, 0x00000670, state->pitch0); NV_WR32(chip->PGRAPH, 0x00000674, state->pitch1); NV_WR32(chip->PGRAPH, 0x00000678, state->pitch2); NV_WR32(chip->PGRAPH, 0x0000067C, state->pitch3); break; case NV_ARCH_10: case NV_ARCH_20: case NV_ARCH_30: if(chip->twoHeads) { VGA_WR08(chip->PCIO, 0x03D4, 0x44); VGA_WR08(chip->PCIO, 0x03D5, state->crtcOwner); chip->LockUnlock(chip, 0); } LOAD_FIXED_STATE(nv10,PFIFO); LOAD_FIXED_STATE(nv10,PRAMIN); LOAD_FIXED_STATE(nv10,PGRAPH); switch (state->bpp) { case 15: LOAD_FIXED_STATE_15BPP(nv10,PRAMIN); LOAD_FIXED_STATE_15BPP(nv10,PGRAPH); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case 16: LOAD_FIXED_STATE_16BPP(nv10,PRAMIN); LOAD_FIXED_STATE_16BPP(nv10,PGRAPH); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); break; case 24: case 32: LOAD_FIXED_STATE_32BPP(nv10,PRAMIN); LOAD_FIXED_STATE_32BPP(nv10,PGRAPH); chip->Tri03 = NULL; break; case 8: default: LOAD_FIXED_STATE_8BPP(nv10,PRAMIN); LOAD_FIXED_STATE_8BPP(nv10,PGRAPH); chip->Tri03 = NULL; 
break; } if(chip->Architecture == NV_ARCH_10) { NV_WR32(chip->PGRAPH, 0x00000640, state->offset0); NV_WR32(chip->PGRAPH, 0x00000644, state->offset1); NV_WR32(chip->PGRAPH, 0x00000648, state->offset2); NV_WR32(chip->PGRAPH, 0x0000064C, state->offset3); NV_WR32(chip->PGRAPH, 0x00000670, state->pitch0); NV_WR32(chip->PGRAPH, 0x00000674, state->pitch1); NV_WR32(chip->PGRAPH, 0x00000678, state->pitch2); NV_WR32(chip->PGRAPH, 0x0000067C, state->pitch3); NV_WR32(chip->PGRAPH, 0x00000680, state->pitch3); } else { NV_WR32(chip->PGRAPH, 0x00000820, state->offset0); NV_WR32(chip->PGRAPH, 0x00000824, state->offset1); NV_WR32(chip->PGRAPH, 0x00000828, state->offset2); NV_WR32(chip->PGRAPH, 0x0000082C, state->offset3); NV_WR32(chip->PGRAPH, 0x00000850, state->pitch0); NV_WR32(chip->PGRAPH, 0x00000854, state->pitch1); NV_WR32(chip->PGRAPH, 0x00000858, state->pitch2); NV_WR32(chip->PGRAPH, 0x0000085C, state->pitch3); NV_WR32(chip->PGRAPH, 0x00000860, state->pitch3); NV_WR32(chip->PGRAPH, 0x00000864, state->pitch3); NV_WR32(chip->PGRAPH, 0x000009A4, NV_RD32(chip->PFB, 0x00000200)); NV_WR32(chip->PGRAPH, 0x000009A8, NV_RD32(chip->PFB, 0x00000204)); } if(chip->twoHeads) { NV_WR32(chip->PCRTC0, 0x00000860, state->head); NV_WR32(chip->PCRTC0, 0x00002860, state->head2); } NV_WR32(chip->PRAMDAC, 0x00000404, NV_RD32(chip->PRAMDAC, 0x00000404) | (1 << 25)); NV_WR32(chip->PMC, 0x00008704, 1); NV_WR32(chip->PMC, 0x00008140, 0); NV_WR32(chip->PMC, 0x00008920, 0); NV_WR32(chip->PMC, 0x00008924, 0); NV_WR32(chip->PMC, 0x00008908, 0x01ffffff); NV_WR32(chip->PMC, 0x0000890C, 0x01ffffff); NV_WR32(chip->PMC, 0x00001588, 0); NV_WR32(chip->PFB, 0x00000240, 0); NV_WR32(chip->PFB, 0x00000250, 0); NV_WR32(chip->PFB, 0x00000260, 0); NV_WR32(chip->PFB, 0x00000270, 0); NV_WR32(chip->PFB, 0x00000280, 0); NV_WR32(chip->PFB, 0x00000290, 0); NV_WR32(chip->PFB, 0x000002A0, 0); NV_WR32(chip->PFB, 0x000002B0, 0); NV_WR32(chip->PGRAPH, 0x00000B00, NV_RD32(chip->PFB, 0x00000240)); NV_WR32(chip->PGRAPH, 0x00000B04, 
NV_RD32(chip->PFB, 0x00000244)); NV_WR32(chip->PGRAPH, 0x00000B08, NV_RD32(chip->PFB, 0x00000248)); NV_WR32(chip->PGRAPH, 0x00000B0C, NV_RD32(chip->PFB, 0x0000024C)); NV_WR32(chip->PGRAPH, 0x00000B10, NV_RD32(chip->PFB, 0x00000250)); NV_WR32(chip->PGRAPH, 0x00000B14, NV_RD32(chip->PFB, 0x00000254)); NV_WR32(chip->PGRAPH, 0x00000B18, NV_RD32(chip->PFB, 0x00000258)); NV_WR32(chip->PGRAPH, 0x00000B1C, NV_RD32(chip->PFB, 0x0000025C)); NV_WR32(chip->PGRAPH, 0x00000B20, NV_RD32(chip->PFB, 0x00000260)); NV_WR32(chip->PGRAPH, 0x00000B24, NV_RD32(chip->PFB, 0x00000264)); NV_WR32(chip->PGRAPH, 0x00000B28, NV_RD32(chip->PFB, 0x00000268)); NV_WR32(chip->PGRAPH, 0x00000B2C, NV_RD32(chip->PFB, 0x0000026C)); NV_WR32(chip->PGRAPH, 0x00000B30, NV_RD32(chip->PFB, 0x00000270)); NV_WR32(chip->PGRAPH, 0x00000B34, NV_RD32(chip->PFB, 0x00000274)); NV_WR32(chip->PGRAPH, 0x00000B38, NV_RD32(chip->PFB, 0x00000278)); NV_WR32(chip->PGRAPH, 0x00000B3C, NV_RD32(chip->PFB, 0x0000027C)); NV_WR32(chip->PGRAPH, 0x00000B40, NV_RD32(chip->PFB, 0x00000280)); NV_WR32(chip->PGRAPH, 0x00000B44, NV_RD32(chip->PFB, 0x00000284)); NV_WR32(chip->PGRAPH, 0x00000B48, NV_RD32(chip->PFB, 0x00000288)); NV_WR32(chip->PGRAPH, 0x00000B4C, NV_RD32(chip->PFB, 0x0000028C)); NV_WR32(chip->PGRAPH, 0x00000B50, NV_RD32(chip->PFB, 0x00000290)); NV_WR32(chip->PGRAPH, 0x00000B54, NV_RD32(chip->PFB, 0x00000294)); NV_WR32(chip->PGRAPH, 0x00000B58, NV_RD32(chip->PFB, 0x00000298)); NV_WR32(chip->PGRAPH, 0x00000B5C, NV_RD32(chip->PFB, 0x0000029C)); NV_WR32(chip->PGRAPH, 0x00000B60, NV_RD32(chip->PFB, 0x000002A0)); NV_WR32(chip->PGRAPH, 0x00000B64, NV_RD32(chip->PFB, 0x000002A4)); NV_WR32(chip->PGRAPH, 0x00000B68, NV_RD32(chip->PFB, 0x000002A8)); NV_WR32(chip->PGRAPH, 0x00000B6C, NV_RD32(chip->PFB, 0x000002AC)); NV_WR32(chip->PGRAPH, 0x00000B70, NV_RD32(chip->PFB, 0x000002B0)); NV_WR32(chip->PGRAPH, 0x00000B74, NV_RD32(chip->PFB, 0x000002B4)); NV_WR32(chip->PGRAPH, 0x00000B78, NV_RD32(chip->PFB, 0x000002B8)); NV_WR32(chip->PGRAPH, 
0x00000B7C, NV_RD32(chip->PFB, 0x000002BC)); NV_WR32(chip->PGRAPH, 0x00000F40, 0x10000000); NV_WR32(chip->PGRAPH, 0x00000F44, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000040); NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000008); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000200); for (i = 0; i < (3*16); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000040); NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000800); for (i = 0; i < (16*16); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F40, 0x30000000); NV_WR32(chip->PGRAPH, 0x00000F44, 0x00000004); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00006400); for (i = 0; i < (59*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00006800); for (i = 0; i < (47*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00006C00); for (i = 0; i < (3*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00007000); for (i = 0; i < (19*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00007400); for (i = 0; i < (12*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00007800); for (i = 0; i < (12*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00004400); for (i = 0; i < (8*4); i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000000); for (i = 0; i < 16; i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PGRAPH, 0x00000F50, 0x00000040); for (i = 0; i < 4; i++) NV_WR32(chip->PGRAPH, 0x00000F54, 0x00000000); NV_WR32(chip->PCRTC, 0x00000810, state->cursorConfig); if(chip->flatPanel) { if((chip->Chipset & 0x0ff0) == 0x0110) { NV_WR32(chip->PRAMDAC, 0x0528, state->dither); } else if((chip->Chipset & 0x0ff0) >= 0x0170) { NV_WR32(chip->PRAMDAC, 
0x083C, state->dither); } VGA_WR08(chip->PCIO, 0x03D4, 0x53); VGA_WR08(chip->PCIO, 0x03D5, 0); VGA_WR08(chip->PCIO, 0x03D4, 0x54); VGA_WR08(chip->PCIO, 0x03D5, 0); VGA_WR08(chip->PCIO, 0x03D4, 0x21); VGA_WR08(chip->PCIO, 0x03D5, 0xfa); } VGA_WR08(chip->PCIO, 0x03D4, 0x41); VGA_WR08(chip->PCIO, 0x03D5, state->extra); } LOAD_FIXED_STATE(Riva,FIFO); UpdateFifoState(chip); /* * Load HW mode state. */ VGA_WR08(chip->PCIO, 0x03D4, 0x19); VGA_WR08(chip->PCIO, 0x03D5, state->repaint0); VGA_WR08(chip->PCIO, 0x03D4, 0x1A); VGA_WR08(chip->PCIO, 0x03D5, state->repaint1); VGA_WR08(chip->PCIO, 0x03D4, 0x25); VGA_WR08(chip->PCIO, 0x03D5, state->screen); VGA_WR08(chip->PCIO, 0x03D4, 0x28); VGA_WR08(chip->PCIO, 0x03D5, state->pixel); VGA_WR08(chip->PCIO, 0x03D4, 0x2D); VGA_WR08(chip->PCIO, 0x03D5, state->horiz); VGA_WR08(chip->PCIO, 0x03D4, 0x1B); VGA_WR08(chip->PCIO, 0x03D5, state->arbitration0); VGA_WR08(chip->PCIO, 0x03D4, 0x20); VGA_WR08(chip->PCIO, 0x03D5, state->arbitration1); VGA_WR08(chip->PCIO, 0x03D4, 0x30); VGA_WR08(chip->PCIO, 0x03D5, state->cursor0); VGA_WR08(chip->PCIO, 0x03D4, 0x31); VGA_WR08(chip->PCIO, 0x03D5, state->cursor1); VGA_WR08(chip->PCIO, 0x03D4, 0x2F); VGA_WR08(chip->PCIO, 0x03D5, state->cursor2); VGA_WR08(chip->PCIO, 0x03D4, 0x39); VGA_WR08(chip->PCIO, 0x03D5, state->interlace); if(!chip->flatPanel) { NV_WR32(chip->PRAMDAC0, 0x00000508, state->vpll); NV_WR32(chip->PRAMDAC0, 0x0000050C, state->pllsel); if(chip->twoHeads) NV_WR32(chip->PRAMDAC0, 0x00000520, state->vpll2); } else { NV_WR32(chip->PRAMDAC, 0x00000848 , state->scale); } NV_WR32(chip->PRAMDAC, 0x00000600 , state->general); /* * Turn off VBlank enable and reset. */ NV_WR32(chip->PCRTC, 0x00000140, 0); NV_WR32(chip->PCRTC, 0x00000100, chip->VBlankBit); /* * Set interrupt enable. */ NV_WR32(chip->PMC, 0x00000140, chip->EnableIRQ & 0x01); /* * Set current state pointer. */ chip->CurrentState = state; /* * Reset FIFO free and empty counts. 
*/ chip->FifoFreeCount = 0; /* Free count from first subchannel */ chip->FifoEmptyCount = NV_RD32(&chip->Rop->FifoFree, 0); } static void UnloadStateExt ( RIVA_HW_INST *chip, RIVA_HW_STATE *state ) { /* * Save current HW state. */ VGA_WR08(chip->PCIO, 0x03D4, 0x19); state->repaint0 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x1A); state->repaint1 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x25); state->screen = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x28); state->pixel = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x2D); state->horiz = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x1B); state->arbitration0 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x20); state->arbitration1 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x30); state->cursor0 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x31); state->cursor1 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x2F); state->cursor2 = VGA_RD08(chip->PCIO, 0x03D5); VGA_WR08(chip->PCIO, 0x03D4, 0x39); state->interlace = VGA_RD08(chip->PCIO, 0x03D5); state->vpll = NV_RD32(chip->PRAMDAC0, 0x00000508); state->vpll2 = NV_RD32(chip->PRAMDAC0, 0x00000520); state->pllsel = NV_RD32(chip->PRAMDAC0, 0x0000050C); state->general = NV_RD32(chip->PRAMDAC, 0x00000600); state->scale = NV_RD32(chip->PRAMDAC, 0x00000848); state->config = NV_RD32(chip->PFB, 0x00000200); switch (chip->Architecture) { case NV_ARCH_03: state->offset0 = NV_RD32(chip->PGRAPH, 0x00000630); state->offset1 = NV_RD32(chip->PGRAPH, 0x00000634); state->offset2 = NV_RD32(chip->PGRAPH, 0x00000638); state->offset3 = NV_RD32(chip->PGRAPH, 0x0000063C); state->pitch0 = NV_RD32(chip->PGRAPH, 0x00000650); state->pitch1 = NV_RD32(chip->PGRAPH, 0x00000654); state->pitch2 = NV_RD32(chip->PGRAPH, 0x00000658); state->pitch3 = NV_RD32(chip->PGRAPH, 0x0000065C); break; case NV_ARCH_04: state->offset0 = NV_RD32(chip->PGRAPH, 0x00000640); 
state->offset1 = NV_RD32(chip->PGRAPH, 0x00000644); state->offset2 = NV_RD32(chip->PGRAPH, 0x00000648); state->offset3 = NV_RD32(chip->PGRAPH, 0x0000064C); state->pitch0 = NV_RD32(chip->PGRAPH, 0x00000670); state->pitch1 = NV_RD32(chip->PGRAPH, 0x00000674); state->pitch2 = NV_RD32(chip->PGRAPH, 0x00000678); state->pitch3 = NV_RD32(chip->PGRAPH, 0x0000067C); break; case NV_ARCH_10: case NV_ARCH_20: case NV_ARCH_30: state->offset0 = NV_RD32(chip->PGRAPH, 0x00000640); state->offset1 = NV_RD32(chip->PGRAPH, 0x00000644); state->offset2 = NV_RD32(chip->PGRAPH, 0x00000648); state->offset3 = NV_RD32(chip->PGRAPH, 0x0000064C); state->pitch0 = NV_RD32(chip->PGRAPH, 0x00000670); state->pitch1 = NV_RD32(chip->PGRAPH, 0x00000674); state->pitch2 = NV_RD32(chip->PGRAPH, 0x00000678); state->pitch3 = NV_RD32(chip->PGRAPH, 0x0000067C); if(chip->twoHeads) { state->head = NV_RD32(chip->PCRTC0, 0x00000860); state->head2 = NV_RD32(chip->PCRTC0, 0x00002860); VGA_WR08(chip->PCIO, 0x03D4, 0x44); state->crtcOwner = VGA_RD08(chip->PCIO, 0x03D5); } VGA_WR08(chip->PCIO, 0x03D4, 0x41); state->extra = VGA_RD08(chip->PCIO, 0x03D5); state->cursorConfig = NV_RD32(chip->PCRTC, 0x00000810); if((chip->Chipset & 0x0ff0) == 0x0110) { state->dither = NV_RD32(chip->PRAMDAC, 0x0528); } else if((chip->Chipset & 0x0ff0) >= 0x0170) { state->dither = NV_RD32(chip->PRAMDAC, 0x083C); } break; } } static void SetStartAddress ( RIVA_HW_INST *chip, unsigned start ) { NV_WR32(chip->PCRTC, 0x800, start); } static void SetStartAddress3 ( RIVA_HW_INST *chip, unsigned start ) { int offset = start >> 2; int pan = (start & 3) << 1; unsigned char tmp; /* * Unlock extended registers. */ chip->LockUnlock(chip, 0); /* * Set start address. 
*/ VGA_WR08(chip->PCIO, 0x3D4, 0x0D); VGA_WR08(chip->PCIO, 0x3D5, offset); offset >>= 8; VGA_WR08(chip->PCIO, 0x3D4, 0x0C); VGA_WR08(chip->PCIO, 0x3D5, offset); offset >>= 8; VGA_WR08(chip->PCIO, 0x3D4, 0x19); tmp = VGA_RD08(chip->PCIO, 0x3D5); VGA_WR08(chip->PCIO, 0x3D5, (offset & 0x01F) | (tmp & ~0x1F)); VGA_WR08(chip->PCIO, 0x3D4, 0x2D); tmp = VGA_RD08(chip->PCIO, 0x3D5); VGA_WR08(chip->PCIO, 0x3D5, (offset & 0x60) | (tmp & ~0x60)); /* * 4 pixel pan register. */ offset = VGA_RD08(chip->PCIO, chip->IO + 0x0A); VGA_WR08(chip->PCIO, 0x3C0, 0x13); VGA_WR08(chip->PCIO, 0x3C0, pan); } static void nv3SetSurfaces2D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); RIVA_FIFO_FREE(*chip,Tri03,5); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000003); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000004); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000013); } static void nv4SetSurfaces2D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000003); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000004); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } static void nv10SetSurfaces2D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000003); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000004); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } static void nv3SetSurfaces3D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); RIVA_FIFO_FREE(*chip,Tri03,5); 
NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000005); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000006); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000013); } static void nv4SetSurfaces3D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface __iomem *Surface = (RivaSurface __iomem *)&(chip->FIFO[0x0000E000/4]); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000005); NV_WR32(&Surface->Offset, 0, surf0); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000006); NV_WR32(&Surface->Offset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } static void nv10SetSurfaces3D ( RIVA_HW_INST *chip, unsigned surf0, unsigned surf1 ) { RivaSurface3D __iomem *Surfaces3D = (RivaSurface3D __iomem *)&(chip->FIFO[0x0000E000/4]); RIVA_FIFO_FREE(*chip,Tri03,4); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000007); NV_WR32(&Surfaces3D->RenderBufferOffset, 0, surf0); NV_WR32(&Surfaces3D->ZBufferOffset, 0, surf1); NV_WR32(&chip->FIFO[0x00003800], 0, 0x80000014); } /****************************************************************************\ * * * Probe RIVA Chip Configuration * * * \****************************************************************************/ static void nv3GetConfig ( RIVA_HW_INST *chip ) { /* * Fill in chip configuration. */ if (NV_RD32(&chip->PFB[0x00000000/4], 0) & 0x00000020) { if (((NV_RD32(chip->PMC, 0x00000000) & 0xF0) == 0x20) && ((NV_RD32(chip->PMC, 0x00000000) & 0x0F) >= 0x02)) { /* * SDRAM 128 ZX. */ chip->RamBandwidthKBytesPerSec = 800000; switch (NV_RD32(chip->PFB, 0x00000000) & 0x03) { case 2: chip->RamAmountKBytes = 1024 * 4; break; case 1: chip->RamAmountKBytes = 1024 * 2; break; default: chip->RamAmountKBytes = 1024 * 8; break; } } else { chip->RamBandwidthKBytesPerSec = 1000000; chip->RamAmountKBytes = 1024 * 8; } } else { /* * SGRAM 128. 
*/ chip->RamBandwidthKBytesPerSec = 1000000; switch (NV_RD32(chip->PFB, 0x00000000) & 0x00000003) { case 0: chip->RamAmountKBytes = 1024 * 8; break; case 2: chip->RamAmountKBytes = 1024 * 4; break; default: chip->RamAmountKBytes = 1024 * 2; break; } } chip->CrystalFreqKHz = (NV_RD32(chip->PEXTDEV, 0x00000000) & 0x00000040) ? 14318 : 13500; chip->CURSOR = &(chip->PRAMIN[0x00008000/4 - 0x0800/4]); chip->VBlankBit = 0x00000100; chip->MaxVClockFreqKHz = 256000; /* * Set chip functions. */ chip->Busy = nv3Busy; chip->ShowHideCursor = ShowHideCursor; chip->LoadStateExt = LoadStateExt; chip->UnloadStateExt = UnloadStateExt; chip->SetStartAddress = SetStartAddress3; chip->SetSurfaces2D = nv3SetSurfaces2D; chip->SetSurfaces3D = nv3SetSurfaces3D; chip->LockUnlock = nv3LockUnlock; } static void nv4GetConfig ( RIVA_HW_INST *chip ) { /* * Fill in chip configuration. */ if (NV_RD32(chip->PFB, 0x00000000) & 0x00000100) { chip->RamAmountKBytes = ((NV_RD32(chip->PFB, 0x00000000) >> 12) & 0x0F) * 1024 * 2 + 1024 * 2; } else { switch (NV_RD32(chip->PFB, 0x00000000) & 0x00000003) { case 0: chip->RamAmountKBytes = 1024 * 32; break; case 1: chip->RamAmountKBytes = 1024 * 4; break; case 2: chip->RamAmountKBytes = 1024 * 8; break; case 3: default: chip->RamAmountKBytes = 1024 * 16; break; } } switch ((NV_RD32(chip->PFB, 0x00000000) >> 3) & 0x00000003) { case 3: chip->RamBandwidthKBytesPerSec = 800000; break; default: chip->RamBandwidthKBytesPerSec = 1000000; break; } chip->CrystalFreqKHz = (NV_RD32(chip->PEXTDEV, 0x00000000) & 0x00000040) ? 14318 : 13500; chip->CURSOR = &(chip->PRAMIN[0x00010000/4 - 0x0800/4]); chip->VBlankBit = 0x00000001; chip->MaxVClockFreqKHz = 350000; /* * Set chip functions. 
*/ chip->Busy = nv4Busy; chip->ShowHideCursor = ShowHideCursor; chip->LoadStateExt = LoadStateExt; chip->UnloadStateExt = UnloadStateExt; chip->SetStartAddress = SetStartAddress; chip->SetSurfaces2D = nv4SetSurfaces2D; chip->SetSurfaces3D = nv4SetSurfaces3D; chip->LockUnlock = nv4LockUnlock; } static void nv10GetConfig ( RIVA_HW_INST *chip, unsigned int chipset ) { struct pci_dev* dev; u32 amt; #ifdef __BIG_ENDIAN /* turn on big endian register access */ if(!(NV_RD32(chip->PMC, 0x00000004) & 0x01000001)) NV_WR32(chip->PMC, 0x00000004, 0x01000001); #endif /* * Fill in chip configuration. */ if(chipset == NV_CHIP_IGEFORCE2) { dev = pci_get_bus_and_slot(0, 1); pci_read_config_dword(dev, 0x7C, &amt); pci_dev_put(dev); chip->RamAmountKBytes = (((amt >> 6) & 31) + 1) * 1024; } else if(chipset == NV_CHIP_0x01F0) { dev = pci_get_bus_and_slot(0, 1); pci_read_config_dword(dev, 0x84, &amt); pci_dev_put(dev); chip->RamAmountKBytes = (((amt >> 4) & 127) + 1) * 1024; } else { switch ((NV_RD32(chip->PFB, 0x0000020C) >> 20) & 0x000000FF) { case 0x02: chip->RamAmountKBytes = 1024 * 2; break; case 0x04: chip->RamAmountKBytes = 1024 * 4; break; case 0x08: chip->RamAmountKBytes = 1024 * 8; break; case 0x10: chip->RamAmountKBytes = 1024 * 16; break; case 0x20: chip->RamAmountKBytes = 1024 * 32; break; case 0x40: chip->RamAmountKBytes = 1024 * 64; break; case 0x80: chip->RamAmountKBytes = 1024 * 128; break; default: chip->RamAmountKBytes = 1024 * 16; break; } } switch ((NV_RD32(chip->PFB, 0x00000000) >> 3) & 0x00000003) { case 3: chip->RamBandwidthKBytesPerSec = 800000; break; default: chip->RamBandwidthKBytesPerSec = 1000000; break; } chip->CrystalFreqKHz = (NV_RD32(chip->PEXTDEV, 0x0000) & (1 << 6)) ? 
14318 : 13500; switch (chipset & 0x0ff0) { case 0x0170: case 0x0180: case 0x01F0: case 0x0250: case 0x0280: case 0x0300: case 0x0310: case 0x0320: case 0x0330: case 0x0340: if(NV_RD32(chip->PEXTDEV, 0x0000) & (1 << 22)) chip->CrystalFreqKHz = 27000; break; default: break; } chip->CursorStart = (chip->RamAmountKBytes - 128) * 1024; chip->CURSOR = NULL; /* can't set this here */ chip->VBlankBit = 0x00000001; chip->MaxVClockFreqKHz = 350000; /* * Set chip functions. */ chip->Busy = nv10Busy; chip->ShowHideCursor = ShowHideCursor; chip->LoadStateExt = LoadStateExt; chip->UnloadStateExt = UnloadStateExt; chip->SetStartAddress = SetStartAddress; chip->SetSurfaces2D = nv10SetSurfaces2D; chip->SetSurfaces3D = nv10SetSurfaces3D; chip->LockUnlock = nv4LockUnlock; switch(chipset & 0x0ff0) { case 0x0110: case 0x0170: case 0x0180: case 0x01F0: case 0x0250: case 0x0280: case 0x0300: case 0x0310: case 0x0320: case 0x0330: case 0x0340: chip->twoHeads = TRUE; break; default: chip->twoHeads = FALSE; break; } } int RivaGetConfig ( RIVA_HW_INST *chip, unsigned int chipset ) { /* * Save this so future SW know whats it's dealing with. */ chip->Version = RIVA_SW_VERSION; /* * Chip specific configuration. */ switch (chip->Architecture) { case NV_ARCH_03: nv3GetConfig(chip); break; case NV_ARCH_04: nv4GetConfig(chip); break; case NV_ARCH_10: case NV_ARCH_20: case NV_ARCH_30: nv10GetConfig(chip, chipset); break; default: return (-1); } chip->Chipset = chipset; /* * Fill in FIFO pointers. 
*/ chip->Rop = (RivaRop __iomem *)&(chip->FIFO[0x00000000/4]); chip->Clip = (RivaClip __iomem *)&(chip->FIFO[0x00002000/4]); chip->Patt = (RivaPattern __iomem *)&(chip->FIFO[0x00004000/4]); chip->Pixmap = (RivaPixmap __iomem *)&(chip->FIFO[0x00006000/4]); chip->Blt = (RivaScreenBlt __iomem *)&(chip->FIFO[0x00008000/4]); chip->Bitmap = (RivaBitmap __iomem *)&(chip->FIFO[0x0000A000/4]); chip->Line = (RivaLine __iomem *)&(chip->FIFO[0x0000C000/4]); chip->Tri03 = (RivaTexturedTriangle03 __iomem *)&(chip->FIFO[0x0000E000/4]); return (0); }
gpl-2.0
froggy666uk/Froggy_SensMod_CM10.1
fs/partitions/osf.c
12940
1925
/* * fs/partitions/osf.c * * Code extracted from drivers/block/genhd.c * * Copyright (C) 1991-1998 Linus Torvalds * Re-organised Feb 1998 Russell King */ #include "check.h" #include "osf.h" #define MAX_OSF_PARTITIONS 18 int osf_partition(struct parsed_partitions *state) { int i; int slot = 1; unsigned int npartitions; Sector sect; unsigned char *data; struct disklabel { __le32 d_magic; __le16 d_type,d_subtype; u8 d_typename[16]; u8 d_packname[16]; __le32 d_secsize; __le32 d_nsectors; __le32 d_ntracks; __le32 d_ncylinders; __le32 d_secpercyl; __le32 d_secprtunit; __le16 d_sparespertrack; __le16 d_sparespercyl; __le32 d_acylinders; __le16 d_rpm, d_interleave, d_trackskew, d_cylskew; __le32 d_headswitch, d_trkseek, d_flags; __le32 d_drivedata[5]; __le32 d_spare[5]; __le32 d_magic2; __le16 d_checksum; __le16 d_npartitions; __le32 d_bbsize, d_sbsize; struct d_partition { __le32 p_size; __le32 p_offset; __le32 p_fsize; u8 p_fstype; u8 p_frag; __le16 p_cpg; } d_partitions[MAX_OSF_PARTITIONS]; } * label; struct d_partition * partition; data = read_part_sector(state, 0, &sect); if (!data) return -1; label = (struct disklabel *) (data+64); partition = label->d_partitions; if (le32_to_cpu(label->d_magic) != DISKLABELMAGIC) { put_dev_sector(sect); return 0; } if (le32_to_cpu(label->d_magic2) != DISKLABELMAGIC) { put_dev_sector(sect); return 0; } npartitions = le16_to_cpu(label->d_npartitions); if (npartitions > MAX_OSF_PARTITIONS) { put_dev_sector(sect); return 0; } for (i = 0 ; i < npartitions; i++, partition++) { if (slot == state->limit) break; if (le32_to_cpu(partition->p_size)) put_partition(state, slot, le32_to_cpu(partition->p_offset), le32_to_cpu(partition->p_size)); slot++; } strlcat(state->pp_buf, "\n", PAGE_SIZE); put_dev_sector(sect); return 1; }
gpl-2.0
puglia/PMFS-atomic-patch
fs/ecryptfs/inode.c
141
33574
/**
 * eCryptfs: Linux filesystem encryption layer
 *
 * Copyright (C) 1997-2004 Erez Zadok
 * Copyright (C) 2001-2004 Stony Brook University
 * Copyright (C) 2004-2007 International Business Machines Corp.
 *   Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
 *              Michael C. Thompson <mcthomps@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
*/ #include <linux/file.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/dcache.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/crypto.h> #include <linux/fs_stack.h> #include <linux/slab.h> #include <linux/xattr.h> #include <asm/unaligned.h> #include "ecryptfs_kernel.h" static struct dentry *lock_parent(struct dentry *dentry) { struct dentry *dir; dir = dget_parent(dentry); mutex_lock_nested(&(dir->d_inode->i_mutex), I_MUTEX_PARENT); return dir; } static void unlock_dir(struct dentry *dir) { mutex_unlock(&dir->d_inode->i_mutex); dput(dir); } static int ecryptfs_inode_test(struct inode *inode, void *lower_inode) { if (ecryptfs_inode_to_lower(inode) == (struct inode *)lower_inode) return 1; return 0; } static int ecryptfs_inode_set(struct inode *inode, void *opaque) { struct inode *lower_inode = opaque; ecryptfs_set_inode_lower(inode, lower_inode); fsstack_copy_attr_all(inode, lower_inode); /* i_size will be overwritten for encrypted regular files */ fsstack_copy_inode_size(inode, lower_inode); inode->i_ino = lower_inode->i_ino; inode->i_version++; inode->i_mapping->a_ops = &ecryptfs_aops; inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi; if (S_ISLNK(inode->i_mode)) inode->i_op = &ecryptfs_symlink_iops; else if (S_ISDIR(inode->i_mode)) inode->i_op = &ecryptfs_dir_iops; else inode->i_op = &ecryptfs_main_iops; if (S_ISDIR(inode->i_mode)) inode->i_fop = &ecryptfs_dir_fops; else if (special_file(inode->i_mode)) init_special_inode(inode, inode->i_mode, inode->i_rdev); else inode->i_fop = &ecryptfs_main_fops; return 0; } static struct inode *__ecryptfs_get_inode(struct inode *lower_inode, struct super_block *sb) { struct inode *inode; if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb)) return ERR_PTR(-EXDEV); if (!igrab(lower_inode)) return ERR_PTR(-ESTALE); inode = iget5_locked(sb, (unsigned long)lower_inode, ecryptfs_inode_test, ecryptfs_inode_set, lower_inode); if (!inode) { iput(lower_inode); return 
ERR_PTR(-EACCES); } if (!(inode->i_state & I_NEW)) iput(lower_inode); return inode; } struct inode *ecryptfs_get_inode(struct inode *lower_inode, struct super_block *sb) { struct inode *inode = __ecryptfs_get_inode(lower_inode, sb); if (!IS_ERR(inode) && (inode->i_state & I_NEW)) unlock_new_inode(inode); return inode; } /** * ecryptfs_interpose * @lower_dentry: Existing dentry in the lower filesystem * @dentry: ecryptfs' dentry * @sb: ecryptfs's super_block * * Interposes upper and lower dentries. * * Returns zero on success; non-zero otherwise */ static int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, struct super_block *sb) { struct inode *inode = ecryptfs_get_inode(lower_dentry->d_inode, sb); if (IS_ERR(inode)) return PTR_ERR(inode); d_instantiate(dentry, inode); return 0; } static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry, struct inode *inode) { struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir); struct dentry *lower_dir_dentry; int rc; dget(lower_dentry); lower_dir_dentry = lock_parent(lower_dentry); rc = vfs_unlink(lower_dir_inode, lower_dentry); if (rc) { printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc); goto out_unlock; } fsstack_copy_attr_times(dir, lower_dir_inode); set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink); inode->i_ctime = dir->i_ctime; d_drop(dentry); out_unlock: unlock_dir(lower_dir_dentry); dput(lower_dentry); return rc; } /** * ecryptfs_do_create * @directory_inode: inode of the new file's dentry's parent in ecryptfs * @ecryptfs_dentry: New file's dentry in ecryptfs * @mode: The mode of the new file * @nd: nameidata of ecryptfs' parent's dentry & vfsmount * * Creates the underlying file and the eCryptfs inode which will link to * it. It will also update the eCryptfs directory inode to mimic the * stat of the lower directory inode. 
* * Returns the new eCryptfs inode on success; an ERR_PTR on error condition */ static struct inode * ecryptfs_do_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, umode_t mode) { int rc; struct dentry *lower_dentry; struct dentry *lower_dir_dentry; struct inode *inode; lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); lower_dir_dentry = lock_parent(lower_dentry); if (IS_ERR(lower_dir_dentry)) { ecryptfs_printk(KERN_ERR, "Error locking directory of " "dentry\n"); inode = ERR_CAST(lower_dir_dentry); goto out; } rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, true); if (rc) { printk(KERN_ERR "%s: Failure to create dentry in lower fs; " "rc = [%d]\n", __func__, rc); inode = ERR_PTR(rc); goto out_lock; } inode = __ecryptfs_get_inode(lower_dentry->d_inode, directory_inode->i_sb); if (IS_ERR(inode)) { vfs_unlink(lower_dir_dentry->d_inode, lower_dentry); goto out_lock; } fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode); fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode); out_lock: unlock_dir(lower_dir_dentry); out: return inode; } /** * ecryptfs_initialize_file * * Cause the file to be changed from a basic empty file to an ecryptfs * file with a header and first data page. 
* * Returns zero on success */ int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry, struct inode *ecryptfs_inode) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; int rc = 0; if (S_ISDIR(ecryptfs_inode->i_mode)) { ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); goto out; } ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n"); rc = ecryptfs_new_file_context(ecryptfs_inode); if (rc) { ecryptfs_printk(KERN_ERR, "Error creating new file " "context; rc = [%d]\n", rc); goto out; } rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode); if (rc) { printk(KERN_ERR "%s: Error attempting to initialize " "the lower file for the dentry with name " "[%s]; rc = [%d]\n", __func__, ecryptfs_dentry->d_name.name, rc); goto out; } rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode); if (rc) printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); ecryptfs_put_lower_file(ecryptfs_inode); out: return rc; } /** * ecryptfs_create * @dir: The inode of the directory in which to create the file. * @dentry: The eCryptfs dentry * @mode: The mode of the new file. * * Creates a new file. 
* * Returns zero on success; non-zero on error condition */ static int ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, umode_t mode, bool excl) { struct inode *ecryptfs_inode; int rc; ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode); if (unlikely(IS_ERR(ecryptfs_inode))) { ecryptfs_printk(KERN_WARNING, "Failed to create file in" "lower filesystem\n"); rc = PTR_ERR(ecryptfs_inode); goto out; } /* At this point, a file exists on "disk"; we need to make sure * that this on disk file is prepared to be an ecryptfs file */ rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode); if (rc) { ecryptfs_do_unlink(directory_inode, ecryptfs_dentry, ecryptfs_inode); make_bad_inode(ecryptfs_inode); unlock_new_inode(ecryptfs_inode); iput(ecryptfs_inode); goto out; } unlock_new_inode(ecryptfs_inode); d_instantiate(ecryptfs_dentry, ecryptfs_inode); out: return rc; } static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode) { struct ecryptfs_crypt_stat *crypt_stat; int rc; rc = ecryptfs_get_lower_file(dentry, inode); if (rc) { printk(KERN_ERR "%s: Error attempting to initialize " "the lower file for the dentry with name " "[%s]; rc = [%d]\n", __func__, dentry->d_name.name, rc); return rc; } crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; /* TODO: lock for crypt_stat comparison */ if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)) ecryptfs_set_default_sizes(crypt_stat); rc = ecryptfs_read_and_validate_header_region(inode); ecryptfs_put_lower_file(inode); if (rc) { rc = ecryptfs_read_and_validate_xattr_region(dentry, inode); if (!rc) crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR; } /* Must return 0 to allow non-eCryptfs files to be looked up, too */ return 0; } /** * ecryptfs_lookup_interpose - Dentry interposition for a lookup */ static int ecryptfs_lookup_interpose(struct dentry *dentry, struct dentry *lower_dentry, struct inode *dir_inode) { struct inode *inode, *lower_inode = 
lower_dentry->d_inode; struct ecryptfs_dentry_info *dentry_info; struct vfsmount *lower_mnt; int rc = 0; dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL); if (!dentry_info) { printk(KERN_ERR "%s: Out of memory whilst attempting " "to allocate ecryptfs_dentry_info struct\n", __func__); dput(lower_dentry); return -ENOMEM; } lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent)); fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode); BUG_ON(!d_count(lower_dentry)); ecryptfs_set_dentry_private(dentry, dentry_info); ecryptfs_set_dentry_lower(dentry, lower_dentry); ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt); if (!lower_dentry->d_inode) { /* We want to add because we couldn't find in lower */ d_add(dentry, NULL); return 0; } inode = __ecryptfs_get_inode(lower_inode, dir_inode->i_sb); if (IS_ERR(inode)) { printk(KERN_ERR "%s: Error interposing; rc = [%ld]\n", __func__, PTR_ERR(inode)); return PTR_ERR(inode); } if (S_ISREG(inode->i_mode)) { rc = ecryptfs_i_size_read(dentry, inode); if (rc) { make_bad_inode(inode); return rc; } } if (inode->i_state & I_NEW) unlock_new_inode(inode); d_add(dentry, inode); return rc; } /** * ecryptfs_lookup * @ecryptfs_dir_inode: The eCryptfs directory inode * @ecryptfs_dentry: The eCryptfs dentry that we are looking up * @ecryptfs_nd: nameidata; may be NULL * * Find a file on disk. If the file does not exist, then we'll add it to the * dentry cache and continue on to read it from the disk. 
*/ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, struct dentry *ecryptfs_dentry, unsigned int flags) { char *encrypted_and_encoded_name = NULL; size_t encrypted_and_encoded_name_size; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; struct dentry *lower_dir_dentry, *lower_dentry; int rc = 0; lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); mutex_lock(&lower_dir_dentry->d_inode->i_mutex); lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name, lower_dir_dentry, ecryptfs_dentry->d_name.len); mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, ecryptfs_dentry->d_name.name); goto out; } if (lower_dentry->d_inode) goto interpose; mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_dentry->d_sb)->mount_crypt_stat; if (!(mount_crypt_stat && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES))) goto interpose; dput(lower_dentry); rc = ecryptfs_encrypt_and_encode_filename( &encrypted_and_encoded_name, &encrypted_and_encoded_name_size, NULL, mount_crypt_stat, ecryptfs_dentry->d_name.name, ecryptfs_dentry->d_name.len); if (rc) { printk(KERN_ERR "%s: Error attempting to encrypt and encode " "filename; rc = [%d]\n", __func__, rc); goto out; } mutex_lock(&lower_dir_dentry->d_inode->i_mutex); lower_dentry = lookup_one_len(encrypted_and_encoded_name, lower_dir_dentry, encrypted_and_encoded_name_size); mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, encrypted_and_encoded_name); goto out; } interpose: rc = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry, ecryptfs_dir_inode); out: kfree(encrypted_and_encoded_name); return ERR_PTR(rc); } static int ecryptfs_link(struct 
dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { struct dentry *lower_old_dentry; struct dentry *lower_new_dentry; struct dentry *lower_dir_dentry; u64 file_size_save; int rc; file_size_save = i_size_read(old_dentry->d_inode); lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry); lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry); dget(lower_old_dentry); dget(lower_new_dentry); lower_dir_dentry = lock_parent(lower_new_dentry); rc = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode, lower_new_dentry); if (rc || !lower_new_dentry->d_inode) goto out_lock; rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb); if (rc) goto out_lock; fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); set_nlink(old_dentry->d_inode, ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink); i_size_write(new_dentry->d_inode, file_size_save); out_lock: unlock_dir(lower_dir_dentry); dput(lower_new_dentry); dput(lower_old_dentry); return rc; } static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry) { return ecryptfs_do_unlink(dir, dentry, dentry->d_inode); } static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { int rc; struct dentry *lower_dentry; struct dentry *lower_dir_dentry; char *encoded_symname; size_t encoded_symlen; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; lower_dentry = ecryptfs_dentry_to_lower(dentry); dget(lower_dentry); lower_dir_dentry = lock_parent(lower_dentry); mount_crypt_stat = &ecryptfs_superblock_to_private( dir->i_sb)->mount_crypt_stat; rc = ecryptfs_encrypt_and_encode_filename(&encoded_symname, &encoded_symlen, NULL, mount_crypt_stat, symname, strlen(symname)); if (rc) goto out_lock; rc = vfs_symlink(lower_dir_dentry->d_inode, lower_dentry, encoded_symname); kfree(encoded_symname); if (rc || !lower_dentry->d_inode) goto out_lock; rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb); if (rc) goto 
out_lock; fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); out_lock: unlock_dir(lower_dir_dentry); dput(lower_dentry); if (!dentry->d_inode) d_drop(dentry); return rc; } static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { int rc; struct dentry *lower_dentry; struct dentry *lower_dir_dentry; lower_dentry = ecryptfs_dentry_to_lower(dentry); lower_dir_dentry = lock_parent(lower_dentry); rc = vfs_mkdir(lower_dir_dentry->d_inode, lower_dentry, mode); if (rc || !lower_dentry->d_inode) goto out; rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb); if (rc) goto out; fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); set_nlink(dir, lower_dir_dentry->d_inode->i_nlink); out: unlock_dir(lower_dir_dentry); if (!dentry->d_inode) d_drop(dentry); return rc; } static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry) { struct dentry *lower_dentry; struct dentry *lower_dir_dentry; int rc; lower_dentry = ecryptfs_dentry_to_lower(dentry); dget(dentry); lower_dir_dentry = lock_parent(lower_dentry); dget(lower_dentry); rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry); dput(lower_dentry); if (!rc && dentry->d_inode) clear_nlink(dentry->d_inode); fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); set_nlink(dir, lower_dir_dentry->d_inode->i_nlink); unlock_dir(lower_dir_dentry); if (!rc) d_drop(dentry); dput(dentry); return rc; } static int ecryptfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { int rc; struct dentry *lower_dentry; struct dentry *lower_dir_dentry; lower_dentry = ecryptfs_dentry_to_lower(dentry); lower_dir_dentry = lock_parent(lower_dentry); rc = vfs_mknod(lower_dir_dentry->d_inode, lower_dentry, mode, dev); if (rc || !lower_dentry->d_inode) goto out; rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb); if (rc) goto out; fsstack_copy_attr_times(dir, 
lower_dir_dentry->d_inode); fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode); out: unlock_dir(lower_dir_dentry); if (!dentry->d_inode) d_drop(dentry); return rc; } static int ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { int rc; struct dentry *lower_old_dentry; struct dentry *lower_new_dentry; struct dentry *lower_old_dir_dentry; struct dentry *lower_new_dir_dentry; struct dentry *trap = NULL; struct inode *target_inode; lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry); lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry); dget(lower_old_dentry); dget(lower_new_dentry); lower_old_dir_dentry = dget_parent(lower_old_dentry); lower_new_dir_dentry = dget_parent(lower_new_dentry); target_inode = new_dentry->d_inode; trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry); /* source should not be ancestor of target */ if (trap == lower_old_dentry) { rc = -EINVAL; goto out_lock; } /* target should not be ancestor of source */ if (trap == lower_new_dentry) { rc = -ENOTEMPTY; goto out_lock; } rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry, lower_new_dir_dentry->d_inode, lower_new_dentry); if (rc) goto out_lock; if (target_inode) fsstack_copy_attr_all(target_inode, ecryptfs_inode_to_lower(target_inode)); fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode); if (new_dir != old_dir) fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode); out_lock: unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); dput(lower_new_dir_dentry); dput(lower_old_dir_dentry); dput(lower_new_dentry); dput(lower_old_dentry); return rc; } static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, size_t *bufsiz) { struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); char *lower_buf; mm_segment_t old_fs; int rc; lower_buf = kmalloc(PATH_MAX, GFP_KERNEL); if (!lower_buf) { rc = -ENOMEM; goto out; } old_fs = get_fs(); set_fs(get_ds()); rc = 
lower_dentry->d_inode->i_op->readlink(lower_dentry, (char __user *)lower_buf, PATH_MAX); set_fs(old_fs); if (rc < 0) goto out; rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry->d_sb, lower_buf, rc); out: kfree(lower_buf); return rc; } static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd) { char *buf; size_t len = PATH_MAX; int rc; rc = ecryptfs_readlink_lower(dentry, &buf, &len); if (rc) goto out; fsstack_copy_attr_atime(dentry->d_inode, ecryptfs_dentry_to_lower(dentry)->d_inode); buf[len] = '\0'; out: nd_set_link(nd, buf); return NULL; } static void ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr) { char *buf = nd_get_link(nd); if (!IS_ERR(buf)) { /* Free the char* */ kfree(buf); } } /** * upper_size_to_lower_size * @crypt_stat: Crypt_stat associated with file * @upper_size: Size of the upper file * * Calculate the required size of the lower file based on the * specified size of the upper file. This calculation is based on the * number of headers in the underlying file and the extent size. * * Returns Calculated size of the lower file. */ static loff_t upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat, loff_t upper_size) { loff_t lower_size; lower_size = ecryptfs_lower_header_size(crypt_stat); if (upper_size != 0) { loff_t num_extents; num_extents = upper_size >> crypt_stat->extent_shift; if (upper_size & ~crypt_stat->extent_mask) num_extents++; lower_size += (num_extents * crypt_stat->extent_size); } return lower_size; } /** * truncate_upper * @dentry: The ecryptfs layer dentry * @ia: Address of the ecryptfs inode's attributes * @lower_ia: Address of the lower inode's attributes * * Function to handle truncations modifying the size of the file. Note * that the file sizes are interpolated. When expanding, we are simply * writing strings of 0's out. When truncating, we truncate the upper * inode and update the lower_ia according to the page index * interpolations. 
If ATTR_SIZE is set in lower_ia->ia_valid upon return, * the caller must use lower_ia in a call to notify_change() to perform * the truncation of the lower inode. * * Returns zero on success; non-zero otherwise */ static int truncate_upper(struct dentry *dentry, struct iattr *ia, struct iattr *lower_ia) { int rc = 0; struct inode *inode = dentry->d_inode; struct ecryptfs_crypt_stat *crypt_stat; loff_t i_size = i_size_read(inode); loff_t lower_size_before_truncate; loff_t lower_size_after_truncate; if (unlikely((ia->ia_size == i_size))) { lower_ia->ia_valid &= ~ATTR_SIZE; return 0; } rc = ecryptfs_get_lower_file(dentry, inode); if (rc) return rc; crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat; /* Switch on growing or shrinking file */ if (ia->ia_size > i_size) { char zero[] = { 0x00 }; lower_ia->ia_valid &= ~ATTR_SIZE; /* Write a single 0 at the last position of the file; * this triggers code that will fill in 0's throughout * the intermediate portion of the previous end of the * file and the new and of the file */ rc = ecryptfs_write(inode, zero, (ia->ia_size - 1), 1); } else { /* ia->ia_size < i_size_read(inode) */ /* We're chopping off all the pages down to the page * in which ia->ia_size is located. Fill in the end of * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to * PAGE_CACHE_SIZE with zeros. 
*/ size_t num_zeros = (PAGE_CACHE_SIZE - (ia->ia_size & ~PAGE_CACHE_MASK)); if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { truncate_setsize(inode, ia->ia_size); lower_ia->ia_size = ia->ia_size; lower_ia->ia_valid |= ATTR_SIZE; goto out; } if (num_zeros) { char *zeros_virt; zeros_virt = kzalloc(num_zeros, GFP_KERNEL); if (!zeros_virt) { rc = -ENOMEM; goto out; } rc = ecryptfs_write(inode, zeros_virt, ia->ia_size, num_zeros); kfree(zeros_virt); if (rc) { printk(KERN_ERR "Error attempting to zero out " "the remainder of the end page on " "reducing truncate; rc = [%d]\n", rc); goto out; } } truncate_setsize(inode, ia->ia_size); rc = ecryptfs_write_inode_size_to_metadata(inode); if (rc) { printk(KERN_ERR "Problem with " "ecryptfs_write_inode_size_to_metadata; " "rc = [%d]\n", rc); goto out; } /* We are reducing the size of the ecryptfs file, and need to * know if we need to reduce the size of the lower file. */ lower_size_before_truncate = upper_size_to_lower_size(crypt_stat, i_size); lower_size_after_truncate = upper_size_to_lower_size(crypt_stat, ia->ia_size); if (lower_size_after_truncate < lower_size_before_truncate) { lower_ia->ia_size = lower_size_after_truncate; lower_ia->ia_valid |= ATTR_SIZE; } else lower_ia->ia_valid &= ~ATTR_SIZE; } out: ecryptfs_put_lower_file(inode); return rc; } static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset) { struct ecryptfs_crypt_stat *crypt_stat; loff_t lower_oldsize, lower_newsize; crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; lower_oldsize = upper_size_to_lower_size(crypt_stat, i_size_read(inode)); lower_newsize = upper_size_to_lower_size(crypt_stat, offset); if (lower_newsize > lower_oldsize) { /* * The eCryptfs inode and the new *lower* size are mixed here * because we may not have the lower i_mutex held and/or it may * not be appropriate to call inode_newsize_ok() with inodes * from other filesystems. 
*/ return inode_newsize_ok(inode, lower_newsize); } return 0; } /** * ecryptfs_truncate * @dentry: The ecryptfs layer dentry * @new_length: The length to expand the file to * * Simple function that handles the truncation of an eCryptfs inode and * its corresponding lower inode. * * Returns zero on success; non-zero otherwise */ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) { struct iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = new_length }; struct iattr lower_ia = { .ia_valid = 0 }; int rc; rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length); if (rc) return rc; rc = truncate_upper(dentry, &ia, &lower_ia); if (!rc && lower_ia.ia_valid & ATTR_SIZE) { struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); mutex_lock(&lower_dentry->d_inode->i_mutex); rc = notify_change(lower_dentry, &lower_ia); mutex_unlock(&lower_dentry->d_inode->i_mutex); } return rc; } static int ecryptfs_permission(struct inode *inode, int mask) { return inode_permission(ecryptfs_inode_to_lower(inode), mask); } /** * ecryptfs_setattr * @dentry: dentry handle to the inode to modify * @ia: Structure with flags of what to change and values * * Updates the metadata of an inode. If the update is to the size * i.e. truncation, then ecryptfs_truncate will handle the size modification * of both the ecryptfs inode and the lower inode. * * All other metadata changes will be passed right to the lower filesystem, * and we will just update our inode to look like the lower. 
*/ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) { int rc = 0; struct dentry *lower_dentry; struct iattr lower_ia; struct inode *inode; struct inode *lower_inode; struct ecryptfs_crypt_stat *crypt_stat; crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat; if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) ecryptfs_init_crypt_stat(crypt_stat); inode = dentry->d_inode; lower_inode = ecryptfs_inode_to_lower(inode); lower_dentry = ecryptfs_dentry_to_lower(dentry); mutex_lock(&crypt_stat->cs_mutex); if (S_ISDIR(dentry->d_inode->i_mode)) crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); else if (S_ISREG(dentry->d_inode->i_mode) && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED) || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) { struct ecryptfs_mount_crypt_stat *mount_crypt_stat; mount_crypt_stat = &ecryptfs_superblock_to_private( dentry->d_sb)->mount_crypt_stat; rc = ecryptfs_get_lower_file(dentry, inode); if (rc) { mutex_unlock(&crypt_stat->cs_mutex); goto out; } rc = ecryptfs_read_metadata(dentry); ecryptfs_put_lower_file(inode); if (rc) { if (!(mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { rc = -EIO; printk(KERN_WARNING "Either the lower file " "is not in a valid eCryptfs format, " "or the key could not be retrieved. " "Plaintext passthrough mode is not " "enabled; returning -EIO\n"); mutex_unlock(&crypt_stat->cs_mutex); goto out; } rc = 0; crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED | ECRYPTFS_ENCRYPTED); } } mutex_unlock(&crypt_stat->cs_mutex); rc = inode_change_ok(inode, ia); if (rc) goto out; if (ia->ia_valid & ATTR_SIZE) { rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size); if (rc) goto out; } memcpy(&lower_ia, ia, sizeof(lower_ia)); if (ia->ia_valid & ATTR_FILE) lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file); if (ia->ia_valid & ATTR_SIZE) { rc = truncate_upper(dentry, ia, &lower_ia); if (rc < 0) goto out; } /* * mode change is for clearing setuid/setgid bits. 
Allow lower fs * to interpret this in its own way. */ if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) lower_ia.ia_valid &= ~ATTR_MODE; mutex_lock(&lower_dentry->d_inode->i_mutex); rc = notify_change(lower_dentry, &lower_ia); mutex_unlock(&lower_dentry->d_inode->i_mutex); out: fsstack_copy_attr_all(inode, lower_inode); return rc; } static int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct ecryptfs_mount_crypt_stat *mount_crypt_stat; int rc = 0; mount_crypt_stat = &ecryptfs_superblock_to_private( dentry->d_sb)->mount_crypt_stat; generic_fillattr(dentry->d_inode, stat); if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { char *target; size_t targetsiz; rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz); if (!rc) { kfree(target); stat->size = targetsiz; } } return rc; } static int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct kstat lower_stat; int rc; rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat); if (!rc) { fsstack_copy_attr_all(dentry->d_inode, ecryptfs_inode_to_lower(dentry->d_inode)); generic_fillattr(dentry->d_inode, stat); stat->blocks = lower_stat.blocks; } return rc; } int ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { int rc = 0; struct dentry *lower_dentry; lower_dentry = ecryptfs_dentry_to_lower(dentry); if (!lower_dentry->d_inode->i_op->setxattr) { rc = -EOPNOTSUPP; goto out; } rc = vfs_setxattr(lower_dentry, name, value, size, flags); if (!rc) fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode); out: return rc; } ssize_t ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, void *value, size_t size) { int rc = 0; if (!lower_dentry->d_inode->i_op->getxattr) { rc = -EOPNOTSUPP; goto out; } mutex_lock(&lower_dentry->d_inode->i_mutex); rc = lower_dentry->d_inode->i_op->getxattr(lower_dentry, name, value, size); 
mutex_unlock(&lower_dentry->d_inode->i_mutex); out: return rc; } static ssize_t ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) { return ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry), name, value, size); } static ssize_t ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size) { int rc = 0; struct dentry *lower_dentry; lower_dentry = ecryptfs_dentry_to_lower(dentry); if (!lower_dentry->d_inode->i_op->listxattr) { rc = -EOPNOTSUPP; goto out; } mutex_lock(&lower_dentry->d_inode->i_mutex); rc = lower_dentry->d_inode->i_op->listxattr(lower_dentry, list, size); mutex_unlock(&lower_dentry->d_inode->i_mutex); out: return rc; } static int ecryptfs_removexattr(struct dentry *dentry, const char *name) { int rc = 0; struct dentry *lower_dentry; lower_dentry = ecryptfs_dentry_to_lower(dentry); if (!lower_dentry->d_inode->i_op->removexattr) { rc = -EOPNOTSUPP; goto out; } mutex_lock(&lower_dentry->d_inode->i_mutex); rc = lower_dentry->d_inode->i_op->removexattr(lower_dentry, name); mutex_unlock(&lower_dentry->d_inode->i_mutex); out: return rc; } const struct inode_operations ecryptfs_symlink_iops = { .readlink = generic_readlink, .follow_link = ecryptfs_follow_link, .put_link = ecryptfs_put_link, .permission = ecryptfs_permission, .setattr = ecryptfs_setattr, .getattr = ecryptfs_getattr_link, .setxattr = ecryptfs_setxattr, .getxattr = ecryptfs_getxattr, .listxattr = ecryptfs_listxattr, .removexattr = ecryptfs_removexattr }; const struct inode_operations ecryptfs_dir_iops = { .create = ecryptfs_create, .lookup = ecryptfs_lookup, .link = ecryptfs_link, .unlink = ecryptfs_unlink, .symlink = ecryptfs_symlink, .mkdir = ecryptfs_mkdir, .rmdir = ecryptfs_rmdir, .mknod = ecryptfs_mknod, .rename = ecryptfs_rename, .permission = ecryptfs_permission, .setattr = ecryptfs_setattr, .setxattr = ecryptfs_setxattr, .getxattr = ecryptfs_getxattr, .listxattr = ecryptfs_listxattr, .removexattr = ecryptfs_removexattr }; const struct 
inode_operations ecryptfs_main_iops = { .permission = ecryptfs_permission, .setattr = ecryptfs_setattr, .getattr = ecryptfs_getattr, .setxattr = ecryptfs_setxattr, .getxattr = ecryptfs_getxattr, .listxattr = ecryptfs_listxattr, .removexattr = ecryptfs_removexattr };
gpl-2.0
embeddedEnsicaen/kernel
arch/arm/mach-ep93xx/edb93xx.c
141
10766
/* * arch/arm/mach-ep93xx/edb93xx.c * Cirrus Logic EDB93xx Development Board support. * * EDB93XX, EDB9301, EDB9307A * Copyright (C) 2008-2009 H Hartley Sweeten <hsweeten@visionengravers.com> * * EDB9302 * Copyright (C) 2006 George Kashperko <george@chas.com.ua> * * EDB9302A, EDB9315, EDB9315A * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * * EDB9307 * Copyright (C) 2007 Herbert Valerio Riedel <hvr@gnu.org> * * EDB9312 * Copyright (C) 2006 Infosys Technologies Limited * Toufeeq Hussain <toufeeq_hussain@infosys.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/i2c-gpio.h> #include <linux/spi/spi.h> #include <sound/cs4271.h> #include <mach/hardware.h> #include <linux/platform_data/video-ep93xx.h> #include <linux/platform_data/spi-ep93xx.h> #include <mach/gpio-ep93xx.h> #include <asm/hardware/vic.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "soc.h" static void __init edb93xx_register_flash(void) { if (machine_is_edb9307() || machine_is_edb9312() || machine_is_edb9315()) { ep93xx_register_flash(4, EP93XX_CS6_PHYS_BASE, SZ_32M); } else { ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_16M); } } static struct ep93xx_eth_data __initdata edb93xx_eth_data = { .phy_id = 1, }; /************************************************************************* * EDB93xx i2c peripheral handling *************************************************************************/ static struct i2c_gpio_platform_data __initdata edb93xx_i2c_gpio_data = { .sda_pin = EP93XX_GPIO_LINE_EEDAT, .sda_is_open_drain = 0, .scl_pin = EP93XX_GPIO_LINE_EECLK, .scl_is_open_drain = 0, .udelay = 0, /* default to 100 kHz */ 
.timeout = 0, /* default to 100 ms */ }; static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = { { I2C_BOARD_INFO("isl1208", 0x6f), }, }; static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = { { I2C_BOARD_INFO("ds1337", 0x68), }, }; static void __init edb93xx_register_i2c(void) { if (machine_is_edb9302a() || machine_is_edb9307a() || machine_is_edb9315a()) { ep93xx_register_i2c(&edb93xx_i2c_gpio_data, edb93xxa_i2c_board_info, ARRAY_SIZE(edb93xxa_i2c_board_info)); } else if (machine_is_edb9302() || machine_is_edb9307() || machine_is_edb9312() || machine_is_edb9315()) { ep93xx_register_i2c(&edb93xx_i2c_gpio_data, edb93xx_i2c_board_info, ARRAY_SIZE(edb93xx_i2c_board_info)); } } /************************************************************************* * EDB93xx SPI peripheral handling *************************************************************************/ static struct cs4271_platform_data edb93xx_cs4271_data = { .gpio_nreset = -EINVAL, /* filled in later */ }; static int edb93xx_cs4271_hw_setup(struct spi_device *spi) { return gpio_request_one(EP93XX_GPIO_LINE_EGPIO6, GPIOF_OUT_INIT_HIGH, spi->modalias); } static void edb93xx_cs4271_hw_cleanup(struct spi_device *spi) { gpio_free(EP93XX_GPIO_LINE_EGPIO6); } static void edb93xx_cs4271_hw_cs_control(struct spi_device *spi, int value) { gpio_set_value(EP93XX_GPIO_LINE_EGPIO6, value); } static struct ep93xx_spi_chip_ops edb93xx_cs4271_hw = { .setup = edb93xx_cs4271_hw_setup, .cleanup = edb93xx_cs4271_hw_cleanup, .cs_control = edb93xx_cs4271_hw_cs_control, }; static struct spi_board_info edb93xx_spi_board_info[] __initdata = { { .modalias = "cs4271", .platform_data = &edb93xx_cs4271_data, .controller_data = &edb93xx_cs4271_hw, .max_speed_hz = 6000000, .bus_num = 0, .chip_select = 0, .mode = SPI_MODE_3, }, }; static struct ep93xx_spi_info edb93xx_spi_info __initdata = { .num_chipselect = ARRAY_SIZE(edb93xx_spi_board_info), }; static void __init edb93xx_register_spi(void) { if 
(machine_is_edb9301() || machine_is_edb9302()) edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO1; else if (machine_is_edb9302a() || machine_is_edb9307a()) edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_H(2); else if (machine_is_edb9315a()) edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO14; ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info, ARRAY_SIZE(edb93xx_spi_board_info)); } /************************************************************************* * EDB93xx I2S *************************************************************************/ static struct platform_device edb93xx_audio_device = { .name = "edb93xx-audio", .id = -1, }; static int __init edb93xx_has_audio(void) { return (machine_is_edb9301() || machine_is_edb9302() || machine_is_edb9302a() || machine_is_edb9307a() || machine_is_edb9315a()); } static void __init edb93xx_register_i2s(void) { if (edb93xx_has_audio()) { ep93xx_register_i2s(); platform_device_register(&edb93xx_audio_device); } } /************************************************************************* * EDB93xx pwm *************************************************************************/ static void __init edb93xx_register_pwm(void) { if (machine_is_edb9301() || machine_is_edb9302() || machine_is_edb9302a()) { /* EP9301 and EP9302 only have pwm.1 (EGPIO14) */ ep93xx_register_pwm(0, 1); } else if (machine_is_edb9307() || machine_is_edb9307a()) { /* EP9307 only has pwm.0 (PWMOUT) */ ep93xx_register_pwm(1, 0); } else { /* EP9312 and EP9315 have both */ ep93xx_register_pwm(1, 1); } } /************************************************************************* * EDB93xx framebuffer *************************************************************************/ static struct ep93xxfb_mach_info __initdata edb93xxfb_info = { .num_modes = EP93XXFB_USE_MODEDB, .bpp = 16, .flags = 0, }; static int __init edb93xx_has_fb(void) { /* These platforms have an ep93xx with video capability */ return machine_is_edb9307() || 
machine_is_edb9307a() || machine_is_edb9312() || machine_is_edb9315() || machine_is_edb9315a(); } static void __init edb93xx_register_fb(void) { if (!edb93xx_has_fb()) return; if (machine_is_edb9307a() || machine_is_edb9315a()) edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0; else edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3; ep93xx_register_fb(&edb93xxfb_info); } /************************************************************************* * EDB93xx IDE *************************************************************************/ static int __init edb93xx_has_ide(void) { /* * Although EDB9312 and EDB9315 do have IDE capability, they have * INTRQ line wired as pull-up, which makes using IDE interface * problematic. */ return machine_is_edb9312() || machine_is_edb9315() || machine_is_edb9315a(); } static void __init edb93xx_register_ide(void) { if (!edb93xx_has_ide()) return; ep93xx_register_ide(); } static void __init edb93xx_init_machine(void) { ep93xx_init_devices(); edb93xx_register_flash(); ep93xx_register_eth(&edb93xx_eth_data, 1); edb93xx_register_i2c(); edb93xx_register_spi(); edb93xx_register_i2s(); edb93xx_register_pwm(); edb93xx_register_fb(); edb93xx_register_ide(); } #ifdef CONFIG_MACH_EDB9301 MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board") /* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .handle_irq = vic_handle_irq, .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9302 MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board") /* Maintainer: George Kashperko <george@chas.com.ua> */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .handle_irq = vic_handle_irq, .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, MACHINE_END #endif #ifdef 
CONFIG_MACH_EDB9302A MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board") /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .handle_irq = vic_handle_irq, .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9307 MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board") /* Maintainer: Herbert Valerio Riedel <hvr@gnu.org> */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .handle_irq = vic_handle_irq, .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9307A MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board") /* Maintainer: H Hartley Sweeten <hsweeten@visionengravers.com> */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .handle_irq = vic_handle_irq, .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9312 MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board") /* Maintainer: Toufeeq Hussain <toufeeq_hussain@infosys.com> */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .handle_irq = vic_handle_irq, .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, MACHINE_END #endif #ifdef CONFIG_MACH_EDB9315 MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board") /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .handle_irq = vic_handle_irq, .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, MACHINE_END #endif #ifdef 
CONFIG_MACH_EDB9315A MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board") /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .handle_irq = vic_handle_irq, .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, MACHINE_END #endif
gpl-2.0
yohanes/Acer-BeTouch-E130-Linux-Kernel
drivers/net/wireless/rt2x00/rt2x00leds.c
141
6592
/* Copyright (C) 2004 - 2008 rt2x00 SourceForge Project <http://rt2x00.serialmonkey.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Module: rt2x00lib Abstract: rt2x00 led specific routines. */ #include <linux/kernel.h> #include <linux/module.h> #include "rt2x00.h" #include "rt2x00lib.h" void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) { struct rt2x00_led *led = &rt2x00dev->led_qual; unsigned int brightness; if ((led->type != LED_TYPE_QUALITY) || !(led->flags & LED_REGISTERED)) return; /* * Led handling requires a positive value for the rssi, * to do that correctly we need to add the correction. */ rssi += rt2x00dev->rssi_offset; /* * Get the rssi level, this is used to convert the rssi * to a LED value inside the range LED_OFF - LED_FULL. */ if (rssi <= 30) rssi = 0; else if (rssi <= 39) rssi = 1; else if (rssi <= 49) rssi = 2; else if (rssi <= 53) rssi = 3; else if (rssi <= 63) rssi = 4; else rssi = 5; /* * Note that we must _not_ send LED_OFF since the driver * is going to calculate the value and might use it in a * division. */ brightness = ((LED_FULL / 6) * rssi) + 1; if (brightness != led->led_dev.brightness) { led->led_dev.brightness_set(&led->led_dev, brightness); led->led_dev.brightness = brightness; } } static void rt2x00led_led_simple(struct rt2x00_led *led, bool enabled) { unsigned int brightness = enabled ? 
LED_FULL : LED_OFF; if (!(led->flags & LED_REGISTERED)) return; led->led_dev.brightness_set(&led->led_dev, brightness); led->led_dev.brightness = brightness; } void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled) { if (rt2x00dev->led_qual.type == LED_TYPE_ACTIVITY) rt2x00led_led_simple(&rt2x00dev->led_qual, enabled); } void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled) { if (rt2x00dev->led_assoc.type == LED_TYPE_ASSOC) rt2x00led_led_simple(&rt2x00dev->led_assoc, enabled); } void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled) { if (rt2x00dev->led_radio.type == LED_TYPE_RADIO) rt2x00led_led_simple(&rt2x00dev->led_radio, enabled); } static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, const char *name) { struct device *device = wiphy_dev(rt2x00dev->hw->wiphy); int retval; led->led_dev.name = name; led->led_dev.brightness = LED_OFF; retval = led_classdev_register(device, &led->led_dev); if (retval) { ERROR(rt2x00dev, "Failed to register led handler.\n"); return retval; } led->flags |= LED_REGISTERED; return 0; } void rt2x00leds_register(struct rt2x00_dev *rt2x00dev) { char dev_name[16]; char name[32]; int retval; unsigned long on_period; unsigned long off_period; snprintf(dev_name, sizeof(dev_name), "%s-%s", rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy)); if (rt2x00dev->led_radio.flags & LED_INITIALIZED) { snprintf(name, sizeof(name), "%s:radio", dev_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_radio, name); if (retval) goto exit_fail; } if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) { snprintf(name, sizeof(name), "%s:assoc", dev_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_assoc, name); if (retval) goto exit_fail; } if (rt2x00dev->led_qual.flags & LED_INITIALIZED) { snprintf(name, sizeof(name), "%s:quality", dev_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_qual, name); if (retval) goto exit_fail; } /* 
* Initialize blink time to default value: * On period: 70ms * Off period: 30ms */ if (rt2x00dev->led_radio.led_dev.blink_set) { on_period = 70; off_period = 30; rt2x00dev->led_radio.led_dev.blink_set( &rt2x00dev->led_radio.led_dev, &on_period, &off_period); } return; exit_fail: rt2x00leds_unregister(rt2x00dev); } static void rt2x00leds_unregister_led(struct rt2x00_led *led) { led_classdev_unregister(&led->led_dev); /* * This might look weird, but when we are unregistering while * suspended the led is already off, and since we haven't * fully resumed yet, access to the device might not be * possible yet. */ if (!(led->led_dev.flags & LED_SUSPENDED)) led->led_dev.brightness_set(&led->led_dev, LED_OFF); led->flags &= ~LED_REGISTERED; } void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev) { if (rt2x00dev->led_qual.flags & LED_REGISTERED) rt2x00leds_unregister_led(&rt2x00dev->led_qual); if (rt2x00dev->led_assoc.flags & LED_REGISTERED) rt2x00leds_unregister_led(&rt2x00dev->led_assoc); if (rt2x00dev->led_radio.flags & LED_REGISTERED) rt2x00leds_unregister_led(&rt2x00dev->led_radio); } static inline void rt2x00leds_suspend_led(struct rt2x00_led *led) { led_classdev_suspend(&led->led_dev); /* This shouldn't be needed, but just to be safe */ led->led_dev.brightness_set(&led->led_dev, LED_OFF); led->led_dev.brightness = LED_OFF; } void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev) { if (rt2x00dev->led_qual.flags & LED_REGISTERED) rt2x00leds_suspend_led(&rt2x00dev->led_qual); if (rt2x00dev->led_assoc.flags & LED_REGISTERED) rt2x00leds_suspend_led(&rt2x00dev->led_assoc); if (rt2x00dev->led_radio.flags & LED_REGISTERED) rt2x00leds_suspend_led(&rt2x00dev->led_radio); } static inline void rt2x00leds_resume_led(struct rt2x00_led *led) { led_classdev_resume(&led->led_dev); /* Device might have enabled the LEDS during resume */ led->led_dev.brightness_set(&led->led_dev, LED_OFF); led->led_dev.brightness = LED_OFF; } void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev) { if 
(rt2x00dev->led_radio.flags & LED_REGISTERED) rt2x00leds_resume_led(&rt2x00dev->led_radio); if (rt2x00dev->led_assoc.flags & LED_REGISTERED) rt2x00leds_resume_led(&rt2x00dev->led_assoc); if (rt2x00dev->led_qual.flags & LED_REGISTERED) rt2x00leds_resume_led(&rt2x00dev->led_qual); }
gpl-2.0
chuukai/ponyo-kernel-nAa
kernel/rcutree.c
397
52492
/* * Read-Copy Update mechanism for mutual exclusion * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright IBM Corporation, 2008 * * Authors: Dipankar Sarma <dipankar@in.ibm.com> * Manfred Spraul <manfred@colorfullife.com> * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version * * Based on the original work by Paul McKenney <paulmck@us.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/rcupdate.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/nmi.h> #include <asm/atomic.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/completion.h> #include <linux/moduleparam.h> #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/mutex.h> #include <linux/time.h> #include "rcutree.h" /* Data structures. */ #define RCU_STATE_INITIALIZER(name) { \ .level = { &name.node[0] }, \ .levelcnt = { \ NUM_RCU_LVL_0, /* root of hierarchy. 
*/ \ NUM_RCU_LVL_1, \ NUM_RCU_LVL_2, \ NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ }, \ .signaled = RCU_GP_IDLE, \ .gpnum = -300, \ .completed = -300, \ .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ .orphan_cbs_list = NULL, \ .orphan_cbs_tail = &name.orphan_cbs_list, \ .orphan_qlen = 0, \ .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \ .n_force_qs = 0, \ .n_force_qs_ngp = 0, \ } struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); /* * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s * permit this function to be invoked without holding the root rcu_node * structure's ->lock, but of course results can be subject to change. */ static int rcu_gp_in_progress(struct rcu_state *rsp) { return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum); } /* * Note a quiescent state. Because we do not need to know * how many quiescent states passed, just if there was at least * one since the start of the grace period, this just sets a flag. */ void rcu_sched_qs(int cpu) { struct rcu_data *rdp; rdp = &per_cpu(rcu_sched_data, cpu); rdp->passed_quiesc_completed = rdp->completed; barrier(); rdp->passed_quiesc = 1; rcu_preempt_note_context_switch(cpu); } void rcu_bh_qs(int cpu) { struct rcu_data *rdp; rdp = &per_cpu(rcu_bh_data, cpu); rdp->passed_quiesc_completed = rdp->completed; barrier(); rdp->passed_quiesc = 1; } #ifdef CONFIG_NO_HZ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { .dynticks_nesting = 1, .dynticks = 1, }; #endif /* #ifdef CONFIG_NO_HZ */ static int blimit = 10; /* Maximum callbacks per softirq. */ static int qhimark = 10000; /* If this many pending, ignore blimit. */ static int qlowmark = 100; /* Once only this many pending, use blimit. 
*/ module_param(blimit, int, 0); module_param(qhimark, int, 0); module_param(qlowmark, int, 0); static void force_quiescent_state(struct rcu_state *rsp, int relaxed); static int rcu_pending(int cpu); /* * Return the number of RCU-sched batches processed thus far for debug & stats. */ long rcu_batches_completed_sched(void) { return rcu_sched_state.completed; } EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); /* * Return the number of RCU BH batches processed thus far for debug & stats. */ long rcu_batches_completed_bh(void) { return rcu_bh_state.completed; } EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); /* * Does the CPU have callbacks ready to be invoked? */ static int cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp) { return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]; } /* * Does the current CPU require a yet-as-unscheduled grace period? */ static int cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) { return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp); } /* * Return the root node of the specified rcu_state structure. */ static struct rcu_node *rcu_get_root(struct rcu_state *rsp) { return &rsp->node[0]; } /* * Record the specified "completed" value, which is later used to validate * dynticks counter manipulations and CPU-offline checks. Specify * "rsp->completed - 1" to unconditionally invalidate any future dynticks * manipulations and CPU-offline checks. Such invalidation is useful at * the beginning of a grace period. */ static void dyntick_record_completed(struct rcu_state *rsp, long comp) { rsp->dynticks_completed = comp; } #ifdef CONFIG_SMP /* * Recall the previously recorded value of the completion for dynticks. */ static long dyntick_recall_completed(struct rcu_state *rsp) { return rsp->dynticks_completed; } /* * If the specified CPU is offline, tell the caller that it is in * a quiescent state. Otherwise, whack it with a reschedule IPI. 
* Grace periods can end up waiting on an offline CPU when that * CPU is in the process of coming online -- it will be added to the * rcu_node bitmasks before it actually makes it online. The same thing * can happen while a CPU is in the process of coming online. Because this * race is quite rare, we check for it after detecting that the grace * period has been delayed rather than checking each and every CPU * each and every time we start a new grace period. */ static int rcu_implicit_offline_qs(struct rcu_data *rdp) { /* * If the CPU is offline, it is in a quiescent state. We can * trust its state not to change because interrupts are disabled. */ if (cpu_is_offline(rdp->cpu)) { rdp->offline_fqs++; return 1; } /* If preemptable RCU, no point in sending reschedule IPI. */ if (rdp->preemptable) return 0; /* The CPU is online, so send it a reschedule IPI. */ if (rdp->cpu != smp_processor_id()) smp_send_reschedule(rdp->cpu); else set_need_resched(); rdp->resched_ipi++; return 0; } #endif /* #ifdef CONFIG_SMP */ #ifdef CONFIG_NO_HZ /** * rcu_enter_nohz - inform RCU that current CPU is entering nohz * * Enter nohz mode, in other words, -leave- the mode in which RCU * read-side critical sections can occur. (Though RCU read-side * critical sections can occur in irq handlers in nohz mode, a possibility * handled by rcu_irq_enter() and rcu_irq_exit()). */ void rcu_enter_nohz(void) { unsigned long flags; struct rcu_dynticks *rdtp; smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ local_irq_save(flags); rdtp = &__get_cpu_var(rcu_dynticks); rdtp->dynticks++; rdtp->dynticks_nesting--; WARN_ON_ONCE(rdtp->dynticks & 0x1); local_irq_restore(flags); } /* * rcu_exit_nohz - inform RCU that current CPU is leaving nohz * * Exit nohz mode, in other words, -enter- the mode in which RCU * read-side critical sections normally occur. 
*/ void rcu_exit_nohz(void) { unsigned long flags; struct rcu_dynticks *rdtp; local_irq_save(flags); rdtp = &__get_cpu_var(rcu_dynticks); rdtp->dynticks++; rdtp->dynticks_nesting++; WARN_ON_ONCE(!(rdtp->dynticks & 0x1)); local_irq_restore(flags); smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ } /** * rcu_nmi_enter - inform RCU of entry to NMI context * * If the CPU was idle with dynamic ticks active, and there is no * irq handler running, this updates rdtp->dynticks_nmi to let the * RCU grace-period handling know that the CPU is active. */ void rcu_nmi_enter(void) { struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); if (rdtp->dynticks & 0x1) return; rdtp->dynticks_nmi++; WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1)); smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ } /** * rcu_nmi_exit - inform RCU of exit from NMI context * * If the CPU was idle with dynamic ticks active, and there is no * irq handler running, this updates rdtp->dynticks_nmi to let the * RCU grace-period handling know that the CPU is no longer active. */ void rcu_nmi_exit(void) { struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); if (rdtp->dynticks & 0x1) return; smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ rdtp->dynticks_nmi++; WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1); } /** * rcu_irq_enter - inform RCU of entry to hard irq context * * If the CPU was idle with dynamic ticks active, this updates the * rdtp->dynticks to let the RCU handling know that the CPU is active. 
*/ void rcu_irq_enter(void) { struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); if (rdtp->dynticks_nesting++) return; rdtp->dynticks++; WARN_ON_ONCE(!(rdtp->dynticks & 0x1)); smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ } /** * rcu_irq_exit - inform RCU of exit from hard irq context * * If the CPU was idle with dynamic ticks active, update the rdp->dynticks * to put let the RCU handling be aware that the CPU is going back to idle * with no ticks. */ void rcu_irq_exit(void) { struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks); if (--rdtp->dynticks_nesting) return; smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ rdtp->dynticks++; WARN_ON_ONCE(rdtp->dynticks & 0x1); /* If the interrupt queued a callback, get out of dyntick mode. */ if (__get_cpu_var(rcu_sched_data).nxtlist || __get_cpu_var(rcu_bh_data).nxtlist) set_need_resched(); } #ifdef CONFIG_SMP /* * Snapshot the specified CPU's dynticks counter so that we can later * credit them with an implicit quiescent state. Return 1 if this CPU * is in dynticks idle mode, which is an extended quiescent state. */ static int dyntick_save_progress_counter(struct rcu_data *rdp) { int ret; int snap; int snap_nmi; snap = rdp->dynticks->dynticks; snap_nmi = rdp->dynticks->dynticks_nmi; smp_mb(); /* Order sampling of snap with end of grace period. */ rdp->dynticks_snap = snap; rdp->dynticks_nmi_snap = snap_nmi; ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0); if (ret) rdp->dynticks_fqs++; return ret; } /* * Return true if the specified CPU has passed through a quiescent * state by virtue of being in or having passed through an dynticks * idle state since the last call to dyntick_save_progress_counter() * for this same CPU. 
*/ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) { long curr; long curr_nmi; long snap; long snap_nmi; curr = rdp->dynticks->dynticks; snap = rdp->dynticks_snap; curr_nmi = rdp->dynticks->dynticks_nmi; snap_nmi = rdp->dynticks_nmi_snap; smp_mb(); /* force ordering with cpu entering/leaving dynticks. */ /* * If the CPU passed through or entered a dynticks idle phase with * no active irq/NMI handlers, then we can safely pretend that the CPU * already acknowledged the request to pass through a quiescent * state. Either way, that CPU cannot possibly be in an RCU * read-side critical section that started before the beginning * of the current RCU grace period. */ if ((curr != snap || (curr & 0x1) == 0) && (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) { rdp->dynticks_fqs++; return 1; } /* Go check for the CPU being offline. */ return rcu_implicit_offline_qs(rdp); } #endif /* #ifdef CONFIG_SMP */ #else /* #ifdef CONFIG_NO_HZ */ #ifdef CONFIG_SMP static int dyntick_save_progress_counter(struct rcu_data *rdp) { return 0; } static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) { return rcu_implicit_offline_qs(rdp); } #endif /* #ifdef CONFIG_SMP */ #endif /* #else #ifdef CONFIG_NO_HZ */ #ifdef CONFIG_RCU_CPU_STALL_DETECTOR static void record_gp_stall_check_time(struct rcu_state *rsp) { rsp->gp_start = jiffies; rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK; } static void print_other_cpu_stall(struct rcu_state *rsp) { int cpu; long delta; unsigned long flags; struct rcu_node *rnp = rcu_get_root(rsp); /* Only let one CPU complain about others per time interval. */ spin_lock_irqsave(&rnp->lock, flags); delta = jiffies - rsp->jiffies_stall; if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) { spin_unlock_irqrestore(&rnp->lock, flags); return; } rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; /* * Now rat on any tasks that got kicked up to the root rcu_node * due to CPU offlining. 
*/ rcu_print_task_stall(rnp); spin_unlock_irqrestore(&rnp->lock, flags); /* OK, time to rat on our buddy... */ printk(KERN_ERR "INFO: RCU detected CPU stalls:"); rcu_for_each_leaf_node(rsp, rnp) { rcu_print_task_stall(rnp); if (rnp->qsmask == 0) continue; for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) if (rnp->qsmask & (1UL << cpu)) printk(" %d", rnp->grplo + cpu); } printk(" (detected by %d, t=%ld jiffies)\n", smp_processor_id(), (long)(jiffies - rsp->gp_start)); trigger_all_cpu_backtrace(); force_quiescent_state(rsp, 0); /* Kick them all. */ } static void print_cpu_stall(struct rcu_state *rsp) { unsigned long flags; struct rcu_node *rnp = rcu_get_root(rsp); printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", smp_processor_id(), jiffies - rsp->gp_start); trigger_all_cpu_backtrace(); spin_lock_irqsave(&rnp->lock, flags); if ((long)(jiffies - rsp->jiffies_stall) >= 0) rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; spin_unlock_irqrestore(&rnp->lock, flags); set_need_resched(); /* kick ourselves to get things going. */ } static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) { long delta; struct rcu_node *rnp; delta = jiffies - rsp->jiffies_stall; rnp = rdp->mynode; if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { /* We haven't checked in, so go dump stack. */ print_cpu_stall(rsp); } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) { /* They had two time units to dump stack, so complain. */ print_other_cpu_stall(rsp); } } #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ static void record_gp_stall_check_time(struct rcu_state *rsp) { } static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) { } #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ /* * Update CPU-local rcu_data state to record the newly noticed grace period. * This is used both when we started the grace period and when we notice * that someone else started the grace period. 
The caller must hold the * ->lock of the leaf rcu_node structure corresponding to the current CPU, * and must have irqs disabled. */ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) { if (rdp->gpnum != rnp->gpnum) { rdp->qs_pending = 1; rdp->passed_quiesc = 0; rdp->gpnum = rnp->gpnum; } } static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) { unsigned long flags; struct rcu_node *rnp; local_irq_save(flags); rnp = rdp->mynode; if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */ !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ local_irq_restore(flags); return; } __note_new_gpnum(rsp, rnp, rdp); spin_unlock_irqrestore(&rnp->lock, flags); } /* * Did someone else start a new RCU grace period start since we last * checked? Update local state appropriately if so. Must be called * on the CPU corresponding to rdp. */ static int check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp) { unsigned long flags; int ret = 0; local_irq_save(flags); if (rdp->gpnum != rsp->gpnum) { note_new_gpnum(rsp, rdp); ret = 1; } local_irq_restore(flags); return ret; } /* * Advance this CPU's callbacks, but only if the current grace period * has ended. This may be called only from the CPU to whom the rdp * belongs. In addition, the corresponding leaf rcu_node structure's * ->lock must be held by the caller, with irqs disabled. */ static void __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) { /* Did another grace period end? */ if (rdp->completed != rnp->completed) { /* Advance callbacks. No harm if list empty. */ rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; /* Remember that we saw this grace-period completion. 
*/ rdp->completed = rnp->completed; } } /* * Advance this CPU's callbacks, but only if the current grace period * has ended. This may be called only from the CPU to whom the rdp * belongs. */ static void rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) { unsigned long flags; struct rcu_node *rnp; local_irq_save(flags); rnp = rdp->mynode; if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */ !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ local_irq_restore(flags); return; } __rcu_process_gp_end(rsp, rnp, rdp); spin_unlock_irqrestore(&rnp->lock, flags); } /* * Do per-CPU grace-period initialization for running CPU. The caller * must hold the lock of the leaf rcu_node structure corresponding to * this CPU. */ static void rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) { /* Prior grace period ended, so advance callbacks for current CPU. */ __rcu_process_gp_end(rsp, rnp, rdp); /* * Because this CPU just now started the new grace period, we know * that all of its callbacks will be covered by this upcoming grace * period, even the ones that were registered arbitrarily recently. * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. * * Other CPUs cannot be sure exactly when the grace period started. * Therefore, their recently registered callbacks must pass through * an additional RCU_NEXT_READY stage, so that they will be handled * by the next RCU grace period. */ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; /* Set state so that this CPU will detect the next quiescent state. */ __note_new_gpnum(rsp, rnp, rdp); } /* * Start a new RCU grace period if warranted, re-initializing the hierarchy * in preparation for detecting the next grace period. The caller must hold * the root node's ->lock, which is released before return. Hard irqs must * be disabled. 
*/ static void rcu_start_gp(struct rcu_state *rsp, unsigned long flags) __releases(rcu_get_root(rsp)->lock) { struct rcu_data *rdp = rsp->rda[smp_processor_id()]; struct rcu_node *rnp = rcu_get_root(rsp); if (!cpu_needs_another_gp(rsp, rdp)) { spin_unlock_irqrestore(&rnp->lock, flags); return; } /* Advance to a new grace period and initialize state. */ rsp->gpnum++; WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT); rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; record_gp_stall_check_time(rsp); dyntick_record_completed(rsp, rsp->completed - 1); /* Special-case the common single-level case. */ if (NUM_RCU_NODES == 1) { rcu_preempt_check_blocked_tasks(rnp); rnp->qsmask = rnp->qsmaskinit; rnp->gpnum = rsp->gpnum; rnp->completed = rsp->completed; rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ rcu_start_gp_per_cpu(rsp, rnp, rdp); spin_unlock_irqrestore(&rnp->lock, flags); return; } spin_unlock(&rnp->lock); /* leave irqs disabled. */ /* Exclude any concurrent CPU-hotplug operations. */ spin_lock(&rsp->onofflock); /* irqs already disabled. */ /* * Set the quiescent-state-needed bits in all the rcu_node * structures for all currently online CPUs in breadth-first * order, starting from the root rcu_node structure. This * operation relies on the layout of the hierarchy within the * rsp->node[] array. Note that other CPUs will access only * the leaves of the hierarchy, which still indicate that no * grace period is in progress, at least until the corresponding * leaf node has been initialized. In addition, we have excluded * CPU-hotplug operations. * * Note that the grace period cannot complete until we finish * the initialization process, as there will be at least one * qsmask bit set in the root node until that time, namely the * one corresponding to this CPU, due to the fact that we have * irqs disabled. 
*/ rcu_for_each_node_breadth_first(rsp, rnp) { spin_lock(&rnp->lock); /* irqs already disabled. */ rcu_preempt_check_blocked_tasks(rnp); rnp->qsmask = rnp->qsmaskinit; rnp->gpnum = rsp->gpnum; rnp->completed = rsp->completed; if (rnp == rdp->mynode) rcu_start_gp_per_cpu(rsp, rnp, rdp); spin_unlock(&rnp->lock); /* irqs remain disabled. */ } rnp = rcu_get_root(rsp); spin_lock(&rnp->lock); /* irqs already disabled. */ rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ spin_unlock(&rnp->lock); /* irqs remain disabled. */ spin_unlock_irqrestore(&rsp->onofflock, flags); } /* * Clean up after the prior grace period and let rcu_start_gp() start up * the next grace period if one is needed. Note that the caller must * hold rnp->lock, as required by rcu_start_gp(), which will release it. */ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) __releases(rcu_get_root(rsp)->lock) { WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); rsp->completed = rsp->gpnum; rsp->signaled = RCU_GP_IDLE; rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ } /* * Similar to cpu_quiet(), for which it is a helper function. Allows * a group of CPUs to be quieted at one go, though all the CPUs in the * group must be represented by the same leaf rcu_node structure. * That structure's lock must be held upon entry, and it is released * before return. */ static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, unsigned long flags) __releases(rnp->lock) { struct rcu_node *rnp_c; /* Walk up the rcu_node hierarchy. */ for (;;) { if (!(rnp->qsmask & mask)) { /* Our bit has already been cleared, so done. */ spin_unlock_irqrestore(&rnp->lock, flags); return; } rnp->qsmask &= ~mask; if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { /* Other bits still set at this level, so done. */ spin_unlock_irqrestore(&rnp->lock, flags); return; } mask = rnp->grpmask; if (rnp->parent == NULL) { /* No more levels. Exit loop holding root lock. 
*/ break; } spin_unlock_irqrestore(&rnp->lock, flags); rnp_c = rnp; rnp = rnp->parent; spin_lock_irqsave(&rnp->lock, flags); WARN_ON_ONCE(rnp_c->qsmask); } /* * Get here if we are the last CPU to pass through a quiescent * state for this grace period. Invoke cpu_quiet_msk_finish() * to clean up and start the next grace period if one is needed. */ cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */ } /* * Record a quiescent state for the specified CPU, which must either be * the current CPU. The lastcomp argument is used to make sure we are * still in the grace period of interest. We don't want to end the current * grace period based on quiescent states detected in an earlier grace * period! */ static void cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) { unsigned long flags; unsigned long mask; struct rcu_node *rnp; rnp = rdp->mynode; spin_lock_irqsave(&rnp->lock, flags); if (lastcomp != ACCESS_ONCE(rsp->completed)) { /* * Someone beat us to it for this grace period, so leave. * The race with GP start is resolved by the fact that we * hold the leaf rcu_node lock, so that the per-CPU bits * cannot yet be initialized -- so we would simply find our * CPU's bit already cleared in cpu_quiet_msk() if this race * occurred. */ rdp->passed_quiesc = 0; /* try again later! */ spin_unlock_irqrestore(&rnp->lock, flags); return; } mask = rdp->grpmask; if ((rnp->qsmask & mask) == 0) { spin_unlock_irqrestore(&rnp->lock, flags); } else { rdp->qs_pending = 0; /* * This GP can't end until cpu checks in, so all of our * callbacks can be processed during the next GP. */ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ } } /* * Check to see if there is a new grace period of which this CPU * is not yet aware, and if so, set up local rcu_data state for it. 
* Otherwise, see if this CPU has just passed through its first * quiescent state for this grace period, and record that fact if so. */ static void rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) { /* If there is now a new grace period, record and return. */ if (check_for_new_grace_period(rsp, rdp)) return; /* * Does this CPU still need to do its part for current grace period? * If no, return and let the other CPUs do their part as well. */ if (!rdp->qs_pending) return; /* * Was there a quiescent state since the beginning of the grace * period? If no, then exit and wait for the next call. */ if (!rdp->passed_quiesc) return; /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); } #ifdef CONFIG_HOTPLUG_CPU /* * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the * specified flavor of RCU. The callbacks will be adopted by the next * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever * comes first. Because this is invoked from the CPU_DYING notifier, * irqs are already disabled. */ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) { int i; struct rcu_data *rdp = rsp->rda[smp_processor_id()]; if (rdp->nxtlist == NULL) return; /* irqs disabled, so comparison is stable. */ spin_lock(&rsp->onofflock); /* irqs already disabled. */ *rsp->orphan_cbs_tail = rdp->nxtlist; rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL]; rdp->nxtlist = NULL; for (i = 0; i < RCU_NEXT_SIZE; i++) rdp->nxttail[i] = &rdp->nxtlist; rsp->orphan_qlen += rdp->qlen; rdp->qlen = 0; spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ } /* * Adopt previously orphaned RCU callbacks. 
*/ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) { unsigned long flags; struct rcu_data *rdp; spin_lock_irqsave(&rsp->onofflock, flags); rdp = rsp->rda[smp_processor_id()]; if (rsp->orphan_cbs_list == NULL) { spin_unlock_irqrestore(&rsp->onofflock, flags); return; } *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list; rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail; rdp->qlen += rsp->orphan_qlen; rsp->orphan_cbs_list = NULL; rsp->orphan_cbs_tail = &rsp->orphan_cbs_list; rsp->orphan_qlen = 0; spin_unlock_irqrestore(&rsp->onofflock, flags); } /* * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy * and move all callbacks from the outgoing CPU to the current one. */ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) { unsigned long flags; long lastcomp; unsigned long mask; struct rcu_data *rdp = rsp->rda[cpu]; struct rcu_node *rnp; /* Exclude any attempts to start a new grace period. */ spin_lock_irqsave(&rsp->onofflock, flags); /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */ mask = rdp->grpmask; /* rnp->grplo is constant. */ do { spin_lock(&rnp->lock); /* irqs already disabled. */ rnp->qsmaskinit &= ~mask; if (rnp->qsmaskinit != 0) { spin_unlock(&rnp->lock); /* irqs remain disabled. */ break; } /* * If there was a task blocking the current grace period, * and if all CPUs have checked in, we need to propagate * the quiescent state up the rcu_node hierarchy. But that * is inconvenient at the moment due to deadlock issues if * this should end the current grace period. So set the * offlined CPU's bit in ->qsmask in order to force the * next force_quiescent_state() invocation to clean up this * mess in a deadlock-free manner. */ if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask) rnp->qsmask |= mask; mask = rnp->grpmask; spin_unlock(&rnp->lock); /* irqs remain disabled. 
*/ rnp = rnp->parent; } while (rnp != NULL); lastcomp = rsp->completed; spin_unlock_irqrestore(&rsp->onofflock, flags); rcu_adopt_orphan_cbs(rsp); } /* * Remove the specified CPU from the RCU hierarchy and move any pending * callbacks that it might have to the current CPU. This code assumes * that at least one CPU in the system will remain running at all times. * Any attempt to offline -all- CPUs is likely to strand RCU callbacks. */ static void rcu_offline_cpu(int cpu) { __rcu_offline_cpu(cpu, &rcu_sched_state); __rcu_offline_cpu(cpu, &rcu_bh_state); rcu_preempt_offline_cpu(cpu); } #else /* #ifdef CONFIG_HOTPLUG_CPU */ static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) { } static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) { } static void rcu_offline_cpu(int cpu) { } #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ /* * Invoke any RCU callbacks that have made it to the end of their grace * period. Thottle as specified by rdp->blimit. */ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) { unsigned long flags; struct rcu_head *next, *list, **tail; int count; /* If no callbacks are ready, just return.*/ if (!cpu_has_callbacks_ready_to_invoke(rdp)) return; /* * Extract the list of ready callbacks, disabling to prevent * races with call_rcu() from interrupt handlers. */ local_irq_save(flags); list = rdp->nxtlist; rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL]; *rdp->nxttail[RCU_DONE_TAIL] = NULL; tail = rdp->nxttail[RCU_DONE_TAIL]; for (count = RCU_NEXT_SIZE - 1; count >= 0; count--) if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL]) rdp->nxttail[count] = &rdp->nxtlist; local_irq_restore(flags); /* Invoke callbacks. */ count = 0; while (list) { next = list->next; prefetch(next); list->func(list); list = next; if (++count >= rdp->blimit) break; } local_irq_save(flags); /* Update count, and requeue any remaining callbacks. 
*/ rdp->qlen -= count; if (list != NULL) { *tail = rdp->nxtlist; rdp->nxtlist = list; for (count = 0; count < RCU_NEXT_SIZE; count++) if (&rdp->nxtlist == rdp->nxttail[count]) rdp->nxttail[count] = tail; else break; } /* Reinstate batch limit if we have worked down the excess. */ if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) rdp->blimit = blimit; /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) { rdp->qlen_last_fqs_check = 0; rdp->n_force_qs_snap = rsp->n_force_qs; } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) rdp->qlen_last_fqs_check = rdp->qlen; local_irq_restore(flags); /* Re-raise the RCU softirq if there are callbacks remaining. */ if (cpu_has_callbacks_ready_to_invoke(rdp)) raise_softirq(RCU_SOFTIRQ); } /* * Check to see if this CPU is in a non-context-switch quiescent state * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). * Also schedule the RCU softirq handler. * * This function must be called with hardirqs disabled. It is normally * invoked from the scheduling-clock interrupt. If rcu_pending returns * false, there is no point in invoking rcu_check_callbacks(). */ void rcu_check_callbacks(int cpu, int user) { if (!rcu_pending(cpu)) return; /* if nothing for RCU to do. */ if (user || (idle_cpu(cpu) && rcu_scheduler_active && !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { /* * Get here if this CPU took its interrupt from user * mode or from the idle loop, and if this is not a * nested interrupt. In this case, the CPU is in * a quiescent state, so note it. * * No memory barrier is required here because both * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local * variables that other CPUs neither access nor modify, * at least not while the corresponding CPU is online. 
*/ rcu_sched_qs(cpu); rcu_bh_qs(cpu); } else if (!in_softirq()) { /* * Get here if this CPU did not take its interrupt from * softirq, in other words, if it is not interrupting * a rcu_bh read-side critical section. This is an _bh * critical section, so note it. */ rcu_bh_qs(cpu); } rcu_preempt_check_callbacks(cpu); raise_softirq(RCU_SOFTIRQ); } #ifdef CONFIG_SMP /* * Scan the leaf rcu_node structures, processing dyntick state for any that * have not yet encountered a quiescent state, using the function specified. * Returns 1 if the current grace period ends while scanning (possibly * because we made it end). */ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp, int (*f)(struct rcu_data *)) { unsigned long bit; int cpu; unsigned long flags; unsigned long mask; struct rcu_node *rnp; rcu_for_each_leaf_node(rsp, rnp) { mask = 0; spin_lock_irqsave(&rnp->lock, flags); if (rsp->completed != lastcomp) { spin_unlock_irqrestore(&rnp->lock, flags); return 1; } if (rnp->qsmask == 0) { spin_unlock_irqrestore(&rnp->lock, flags); continue; } cpu = rnp->grplo; bit = 1; for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) mask |= bit; } if (mask != 0 && rsp->completed == lastcomp) { /* cpu_quiet_msk() releases rnp->lock. */ cpu_quiet_msk(mask, rsp, rnp, flags); continue; } spin_unlock_irqrestore(&rnp->lock, flags); } return 0; } /* * Force quiescent states on reluctant CPUs, and also detect which * CPUs are in dyntick-idle mode. */ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) { unsigned long flags; long lastcomp; struct rcu_node *rnp = rcu_get_root(rsp); u8 signaled; u8 forcenow; if (!rcu_gp_in_progress(rsp)) return; /* No grace period in progress, nothing to force. */ if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ return; /* Someone else is already on the job. 
*/ } if (relaxed && (long)(rsp->jiffies_force_qs - jiffies) >= 0) goto unlock_ret; /* no emergency and done recently. */ rsp->n_force_qs++; spin_lock(&rnp->lock); lastcomp = rsp->completed; signaled = rsp->signaled; rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; if (lastcomp == rsp->gpnum) { rsp->n_force_qs_ngp++; spin_unlock(&rnp->lock); goto unlock_ret; /* no GP in progress, time updated. */ } spin_unlock(&rnp->lock); switch (signaled) { case RCU_GP_IDLE: case RCU_GP_INIT: break; /* grace period idle or initializing, ignore. */ case RCU_SAVE_DYNTICK: if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) break; /* So gcc recognizes the dead code. */ /* Record dyntick-idle state. */ if (rcu_process_dyntick(rsp, lastcomp, dyntick_save_progress_counter)) goto unlock_ret; /* fall into next case. */ case RCU_SAVE_COMPLETED: /* Update state, record completion counter. */ forcenow = 0; spin_lock(&rnp->lock); if (lastcomp == rsp->completed && rsp->signaled == signaled) { rsp->signaled = RCU_FORCE_QS; dyntick_record_completed(rsp, lastcomp); forcenow = signaled == RCU_SAVE_COMPLETED; } spin_unlock(&rnp->lock); if (!forcenow) break; /* fall into next case. */ case RCU_FORCE_QS: /* Check dyntick-idle state, send IPI to laggarts. */ if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp), rcu_implicit_dynticks_qs)) goto unlock_ret; /* Leave state in case more forcing is required. */ break; } unlock_ret: spin_unlock_irqrestore(&rsp->fqslock, flags); } #else /* #ifdef CONFIG_SMP */ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) { set_need_resched(); } #endif /* #else #ifdef CONFIG_SMP */ /* * This does the RCU processing work from softirq context for the * specified rcu_state and rcu_data structures. This may be called * only from the CPU to whom the rdp belongs. 
*/ static void __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) { unsigned long flags; WARN_ON_ONCE(rdp->beenonline == 0); /* * If an RCU GP has gone long enough, go check for dyntick * idle CPUs and, if needed, send resched IPIs. */ if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) force_quiescent_state(rsp, 1); /* * Advance callbacks in response to end of earlier grace * period that some other CPU ended. */ rcu_process_gp_end(rsp, rdp); /* Update RCU state based on any recent quiescent states. */ rcu_check_quiescent_state(rsp, rdp); /* Does this CPU require a not-yet-started grace period? */ if (cpu_needs_another_gp(rsp, rdp)) { spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags); rcu_start_gp(rsp, flags); /* releases above lock */ } /* If there are callbacks ready, invoke them. */ rcu_do_batch(rsp, rdp); } /* * Do softirq processing for the current CPU. */ static void rcu_process_callbacks(struct softirq_action *unused) { /* * Memory references from any prior RCU read-side critical sections * executed by the interrupted code must be seen before any RCU * grace-period manipulations below. */ smp_mb(); /* See above block comment. */ __rcu_process_callbacks(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); rcu_preempt_process_callbacks(); /* * Memory references from any later RCU read-side critical sections * executed by the interrupted code must be seen after any RCU * grace-period manipulations above. */ smp_mb(); /* See above block comment. */ } static void __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), struct rcu_state *rsp) { unsigned long flags; struct rcu_data *rdp; head->func = func; head->next = NULL; smp_mb(); /* Ensure RCU update seen before callback registry. */ /* * Opportunistically note grace-period endings and beginnings. 
* Note that we might see a beginning right after we see an * end, but never vice versa, since this CPU has to pass through * a quiescent state betweentimes. */ local_irq_save(flags); rdp = rsp->rda[smp_processor_id()]; rcu_process_gp_end(rsp, rdp); check_for_new_grace_period(rsp, rdp); /* Add the callback to our list. */ *rdp->nxttail[RCU_NEXT_TAIL] = head; rdp->nxttail[RCU_NEXT_TAIL] = &head->next; /* Start a new grace period if one not already started. */ if (!rcu_gp_in_progress(rsp)) { unsigned long nestflag; struct rcu_node *rnp_root = rcu_get_root(rsp); spin_lock_irqsave(&rnp_root->lock, nestflag); rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ } /* * Force the grace period if too many callbacks or too long waiting. * Enforce hysteresis, and don't invoke force_quiescent_state() * if some other CPU has recently done so. Also, don't bother * invoking force_quiescent_state() if the newly enqueued callback * is the only one waiting for a grace period to complete. */ if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { rdp->blimit = LONG_MAX; if (rsp->n_force_qs == rdp->n_force_qs_snap && *rdp->nxttail[RCU_DONE_TAIL] != head) force_quiescent_state(rsp, 0); rdp->n_force_qs_snap = rsp->n_force_qs; rdp->qlen_last_fqs_check = rdp->qlen; } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) force_quiescent_state(rsp, 1); local_irq_restore(flags); } /* * Queue an RCU-sched callback for invocation after a grace period. */ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) { __call_rcu(head, func, &rcu_sched_state); } EXPORT_SYMBOL_GPL(call_rcu_sched); /* * Queue an RCU for invocation after a quicker grace period. 
*/ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) { __call_rcu(head, func, &rcu_bh_state); } EXPORT_SYMBOL_GPL(call_rcu_bh); /* * Check to see if there is any immediate RCU-related work to be done * by the current CPU, for the specified type of RCU, returning 1 if so. * The checks are in order of increasing expense: checks that can be * carried out against CPU-local state are performed first. However, * we must check for CPU stalls first, else we might not get a chance. */ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) { rdp->n_rcu_pending++; /* Check for CPU stalls, if enabled. */ check_cpu_stall(rsp, rdp); /* Is the RCU core waiting for a quiescent state from this CPU? */ if (rdp->qs_pending) { rdp->n_rp_qs_pending++; return 1; } /* Does this CPU have callbacks ready to invoke? */ if (cpu_has_callbacks_ready_to_invoke(rdp)) { rdp->n_rp_cb_ready++; return 1; } /* Has RCU gone idle with this CPU needing another grace period? */ if (cpu_needs_another_gp(rsp, rdp)) { rdp->n_rp_cpu_needs_gp++; return 1; } /* Has another RCU grace period completed? */ if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */ rdp->n_rp_gp_completed++; return 1; } /* Has a new RCU grace period started? */ if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */ rdp->n_rp_gp_started++; return 1; } /* Has an RCU GP gone long enough to send resched IPIs &c? */ if (rcu_gp_in_progress(rsp) && ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) { rdp->n_rp_need_fqs++; return 1; } /* nothing to do */ rdp->n_rp_need_nothing++; return 0; } /* * Check to see if there is any immediate RCU-related work to be done * by the current CPU, returning 1 if so. This function is part of the * RCU implementation; it is -not- an exported member of the RCU API. 
*/ static int rcu_pending(int cpu) { return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) || __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) || rcu_preempt_pending(cpu); } /* * Check to see if any future RCU-related work will need to be done * by the current CPU, even if none need be done immediately, returning * 1 if so. This function is part of the RCU implementation; it is -not- * an exported member of the RCU API. */ int rcu_needs_cpu(int cpu) { /* RCU callbacks either ready or pending? */ return per_cpu(rcu_sched_data, cpu).nxtlist || per_cpu(rcu_bh_data, cpu).nxtlist || rcu_preempt_needs_cpu(cpu); } static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; static atomic_t rcu_barrier_cpu_count; static DEFINE_MUTEX(rcu_barrier_mutex); static struct completion rcu_barrier_completion; static void rcu_barrier_callback(struct rcu_head *notused) { if (atomic_dec_and_test(&rcu_barrier_cpu_count)) complete(&rcu_barrier_completion); } /* * Called with preemption disabled, and from cross-cpu IRQ context. */ static void rcu_barrier_func(void *type) { int cpu = smp_processor_id(); struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); void (*call_rcu_func)(struct rcu_head *head, void (*func)(struct rcu_head *head)); atomic_inc(&rcu_barrier_cpu_count); call_rcu_func = type; call_rcu_func(head, rcu_barrier_callback); } /* * Orchestrate the specified type of RCU barrier, waiting for all * RCU callbacks of the specified type to complete. */ static void _rcu_barrier(struct rcu_state *rsp, void (*call_rcu_func)(struct rcu_head *head, void (*func)(struct rcu_head *head))) { BUG_ON(in_interrupt()); /* Take mutex to serialize concurrent rcu_barrier() requests. */ mutex_lock(&rcu_barrier_mutex); init_completion(&rcu_barrier_completion); /* * Initialize rcu_barrier_cpu_count to 1, then invoke * rcu_barrier_func() on each CPU, so that each CPU also has * incremented rcu_barrier_cpu_count. 
Only then is it safe to * decrement rcu_barrier_cpu_count -- otherwise the first CPU * might complete its grace period before all of the other CPUs * did their increment, causing this function to return too * early. */ atomic_set(&rcu_barrier_cpu_count, 1); preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */ rcu_adopt_orphan_cbs(rsp); on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1); preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */ if (atomic_dec_and_test(&rcu_barrier_cpu_count)) complete(&rcu_barrier_completion); wait_for_completion(&rcu_barrier_completion); mutex_unlock(&rcu_barrier_mutex); } /** * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. */ void rcu_barrier_bh(void) { _rcu_barrier(&rcu_bh_state, call_rcu_bh); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); /** * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. */ void rcu_barrier_sched(void) { _rcu_barrier(&rcu_sched_state, call_rcu_sched); } EXPORT_SYMBOL_GPL(rcu_barrier_sched); /* * Do boot-time initialization of a CPU's per-CPU RCU data. */ static void __init rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) { unsigned long flags; int i; struct rcu_data *rdp = rsp->rda[cpu]; struct rcu_node *rnp = rcu_get_root(rsp); /* Set up local state, ensuring consistent view of global state. */ spin_lock_irqsave(&rnp->lock, flags); rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); rdp->nxtlist = NULL; for (i = 0; i < RCU_NEXT_SIZE; i++) rdp->nxttail[i] = &rdp->nxtlist; rdp->qlen = 0; #ifdef CONFIG_NO_HZ rdp->dynticks = &per_cpu(rcu_dynticks, cpu); #endif /* #ifdef CONFIG_NO_HZ */ rdp->cpu = cpu; spin_unlock_irqrestore(&rnp->lock, flags); } /* * Initialize a CPU's per-CPU RCU data. Note that only one online or * offline event can be happening at a given time. Note also that we * can accept some slop in the rsp->completed access due to the fact * that this CPU cannot possibly have any RCU callbacks in flight yet. 
*/ static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) { unsigned long flags; unsigned long mask; struct rcu_data *rdp = rsp->rda[cpu]; struct rcu_node *rnp = rcu_get_root(rsp); /* Set up local state, ensuring consistent view of global state. */ spin_lock_irqsave(&rnp->lock, flags); rdp->passed_quiesc = 0; /* We could be racing with new GP, */ rdp->qs_pending = 1; /* so set up to respond to current GP. */ rdp->beenonline = 1; /* We have now been online. */ rdp->preemptable = preemptable; rdp->qlen_last_fqs_check = 0; rdp->n_force_qs_snap = rsp->n_force_qs; rdp->blimit = blimit; spin_unlock(&rnp->lock); /* irqs remain disabled. */ /* * A new grace period might start here. If so, we won't be part * of it, but that is OK, as we are currently in a quiescent state. */ /* Exclude any attempts to start a new GP on large systems. */ spin_lock(&rsp->onofflock); /* irqs already disabled. */ /* Add CPU to rcu_node bitmasks. */ rnp = rdp->mynode; mask = rdp->grpmask; do { /* Exclude any attempts to start a new GP on small systems. */ spin_lock(&rnp->lock); /* irqs already disabled. */ rnp->qsmaskinit |= mask; mask = rnp->grpmask; if (rnp == rdp->mynode) { rdp->gpnum = rnp->completed; /* if GP in progress... */ rdp->completed = rnp->completed; rdp->passed_quiesc_completed = rnp->completed - 1; } spin_unlock(&rnp->lock); /* irqs already disabled. */ rnp = rnp->parent; } while (rnp != NULL && !(rnp->qsmaskinit & mask)); spin_unlock_irqrestore(&rsp->onofflock, flags); } static void __cpuinit rcu_online_cpu(int cpu) { rcu_init_percpu_data(cpu, &rcu_sched_state, 0); rcu_init_percpu_data(cpu, &rcu_bh_state, 0); rcu_preempt_init_percpu_data(cpu); } /* * Handle CPU online/offline notification events. 
*/ int __cpuinit rcu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { long cpu = (long)hcpu; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: rcu_online_cpu(cpu); break; case CPU_DYING: case CPU_DYING_FROZEN: /* * preempt_disable() in _rcu_barrier() prevents stop_machine(), * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);" * returns, all online cpus have queued rcu_barrier_func(). * The dying CPU clears its cpu_online_mask bit and * moves all of its RCU callbacks to ->orphan_cbs_list * in the context of stop_machine(), so subsequent calls * to _rcu_barrier() will adopt these callbacks and only * then queue rcu_barrier_func() on all remaining CPUs. */ rcu_send_cbs_to_orphanage(&rcu_bh_state); rcu_send_cbs_to_orphanage(&rcu_sched_state); rcu_preempt_send_cbs_to_orphanage(); break; case CPU_DEAD: case CPU_DEAD_FROZEN: case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: rcu_offline_cpu(cpu); break; default: break; } return NOTIFY_OK; } /* * Compute the per-level fanout, either using the exact fanout specified * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. */ #ifdef CONFIG_RCU_FANOUT_EXACT static void __init rcu_init_levelspread(struct rcu_state *rsp) { int i; for (i = NUM_RCU_LVLS - 1; i >= 0; i--) rsp->levelspread[i] = CONFIG_RCU_FANOUT; } #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */ static void __init rcu_init_levelspread(struct rcu_state *rsp) { int ccur; int cprv; int i; cprv = NR_CPUS; for (i = NUM_RCU_LVLS - 1; i >= 0; i--) { ccur = rsp->levelcnt[i]; rsp->levelspread[i] = (cprv + ccur - 1) / ccur; cprv = ccur; } } #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */ /* * Helper function for rcu_init() that initializes one rcu_state structure. */ static void __init rcu_init_one(struct rcu_state *rsp) { int cpustride = 1; int i; int j; struct rcu_node *rnp; /* Initialize the level-tracking arrays. 
*/ for (i = 1; i < NUM_RCU_LVLS; i++) rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; rcu_init_levelspread(rsp); /* Initialize the elements themselves, starting from the leaves. */ for (i = NUM_RCU_LVLS - 1; i >= 0; i--) { cpustride *= rsp->levelspread[i]; rnp = rsp->level[i]; for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { if (rnp != rcu_get_root(rsp)) spin_lock_init(&rnp->lock); rnp->gpnum = 0; rnp->qsmask = 0; rnp->qsmaskinit = 0; rnp->grplo = j * cpustride; rnp->grphi = (j + 1) * cpustride - 1; if (rnp->grphi >= NR_CPUS) rnp->grphi = NR_CPUS - 1; if (i == 0) { rnp->grpnum = 0; rnp->grpmask = 0; rnp->parent = NULL; } else { rnp->grpnum = j % rsp->levelspread[i - 1]; rnp->grpmask = 1UL << rnp->grpnum; rnp->parent = rsp->level[i - 1] + j / rsp->levelspread[i - 1]; } rnp->level = i; INIT_LIST_HEAD(&rnp->blocked_tasks[0]); INIT_LIST_HEAD(&rnp->blocked_tasks[1]); } } spin_lock_init(&rcu_get_root(rsp)->lock); } /* * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used * nowhere else! Assigns leaf node pointers into each CPU's rcu_data * structure. */ #define RCU_INIT_FLAVOR(rsp, rcu_data) \ do { \ int i; \ int j; \ struct rcu_node *rnp; \ \ rcu_init_one(rsp); \ rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ j = 0; \ for_each_possible_cpu(i) { \ if (i > rnp[j].grphi) \ j++; \ per_cpu(rcu_data, i).mynode = &rnp[j]; \ (rsp)->rda[i] = &per_cpu(rcu_data, i); \ rcu_boot_init_percpu_data(i, rsp); \ } \ } while (0) void __init __rcu_init(void) { rcu_bootup_announce(); #ifdef CONFIG_RCU_CPU_STALL_DETECTOR printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); __rcu_init_preempt(); open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); } #include "rcutree_plugin.h"
gpl-2.0
cwyy/kernel
drivers/i2c/chips/pca963x.c
397
11521
/* pca963x.c - 4-bit I2C-bus LED driver * * Copyright (C) 2008 HTC Corporation. * Author: Shan-Fu Chiou <sfchiou@gmail.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/leds.h> #include <linux/spinlock.h> #include <linux/workqueue.h> static uint8_t address[] = { 0x02, 0x03, 0x04 }; static DEFINE_SPINLOCK(pca963x_lock); enum op_t { OP_SET_BLINK, OP_SET_GRPPWM, OP_SET_GRPFREQ, OP_SET_BLUE_BRIGHTNESS, OP_SET_GREEN_BRIGHTNESS, OP_SET_RED_BRIGHTNESS, }; enum power_mode { MODE_SLEEP, MODE_NORMAL, }; struct pca963x_t { uint8_t colors[3]; uint8_t blink; uint8_t grppwm; uint8_t grpfreq; }; struct pca963x_data { struct pca963x_t data; uint8_t dirty; uint8_t status; enum power_mode mode; struct work_struct work; struct i2c_client *client; struct led_classdev leds[3]; /* blue, green, red */ }; static ssize_t pca963x_blink_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct pca963x_data *pca963x = i2c_get_clientdata(client); if (((pca963x->dirty >> OP_SET_BLINK) & 0x01)) flush_scheduled_work(); return sprintf(buf, "%u\n", pca963x->data.blink); } static ssize_t pca963x_blink_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct pca963x_data *pca963x = i2c_get_clientdata(client); int val = -1; sscanf(buf, "%u", &val); if (val < 0 || val > 1) return -EINVAL; 
spin_lock(&pca963x_lock); pca963x->dirty |= 1 << OP_SET_BLINK; pca963x->data.blink = val; spin_unlock(&pca963x_lock); schedule_work(&pca963x->work); return count; } static DEVICE_ATTR(blink, 0644, pca963x_blink_show, pca963x_blink_store); static ssize_t pca963x_grpfreq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct pca963x_data *pca963x = i2c_get_clientdata(client); if (((pca963x->dirty >> OP_SET_GRPFREQ) & 0x01)) flush_scheduled_work(); return sprintf(buf, "%u\n", pca963x->data.grpfreq); } static ssize_t pca963x_grpfreq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct pca963x_data *pca963x = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); if (val > 0xff) return -EINVAL; spin_lock(&pca963x_lock); pca963x->dirty |= 1 << OP_SET_GRPFREQ; pca963x->data.grpfreq = val; spin_unlock(&pca963x_lock); schedule_work(&pca963x->work); return count; } static DEVICE_ATTR(grpfreq, 0644, pca963x_grpfreq_show, pca963x_grpfreq_store); static ssize_t pca963x_grppwm_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct pca963x_data *pca963x = i2c_get_clientdata(client); if (((pca963x->dirty >> OP_SET_GRPPWM) & 0x01)) flush_scheduled_work(); return sprintf(buf, "%u\n", pca963x->data.grppwm); } static ssize_t pca963x_grppwm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct pca963x_data *pca963x = i2c_get_clientdata(client); unsigned long val = simple_strtoul(buf, NULL, 10); if (val > 0xff) return -EINVAL; spin_lock(&pca963x_lock); pca963x->dirty |= 1 << OP_SET_GRPPWM; pca963x->data.grppwm = val; spin_unlock(&pca963x_lock); schedule_work(&pca963x->work); return count; } static DEVICE_ATTR(grppwm, 0644, pca963x_grppwm_show, 
pca963x_grppwm_store); static void led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct pca963x_data *pca963x; int idx = 2; spin_lock(&pca963x_lock); if (!strcmp(led_cdev->name, "blue")) { idx = 0; } else if (!strcmp(led_cdev->name, "green")) { idx = 1; } else { idx = 2; } pca963x = container_of(led_cdev, struct pca963x_data, leds[idx]); pca963x->data.colors[idx] = brightness; pca963x->dirty |= (1 << (OP_SET_BLUE_BRIGHTNESS + idx)); spin_unlock(&pca963x_lock); schedule_work(&pca963x->work); } static void pca963x_update_brightness(struct pca963x_data *pca963x, int idx, int brightness) { if (brightness > LED_OFF) { if (brightness == LED_FULL) { pca963x->status &= ~(1 << idx); pca963x->status |= (1 << (idx + 4)); } else { pca963x->status |= (1 << idx); pca963x->status &= ~(1 << (idx + 4)); } } else { pca963x->status &= ~(1 << idx); pca963x->status &= ~(1 << (idx + 4)); } i2c_smbus_write_byte_data(pca963x->client, address[idx], brightness); } static void pca963x_work_func(struct work_struct *work) { int ret; uint8_t dirty = 0; struct pca963x_t work_data; struct pca963x_data *pca963x = container_of(work, struct pca963x_data, work); spin_lock(&pca963x_lock); work_data = pca963x->data; dirty = pca963x->dirty; pca963x->dirty = 0; spin_unlock(&pca963x_lock); ret = i2c_smbus_read_byte_data(pca963x->client, 0x00); /* check if should switch to normal mode */ if (!pca963x->mode) { i2c_smbus_write_byte_data(pca963x->client, 0x00, 0x01); pca963x->mode = MODE_NORMAL; i2c_smbus_write_byte_data(pca963x->client, 0x08, 0xFF); } if ((dirty >> OP_SET_BLINK) & 0x01) { ret = i2c_smbus_read_byte_data(pca963x->client, 0x01); if (work_data.blink) /* enable blinking */ i2c_smbus_write_byte_data(pca963x->client, 0x01, ret | 0x20); else { /* set group duty cycle control to default */ i2c_smbus_write_byte_data(pca963x->client, 0x06, 0xFF); /* set group frequency to default */ i2c_smbus_write_byte_data(pca963x->client, 0x07, 0x00); /* enable dimming */ 
i2c_smbus_write_byte_data(pca963x->client, 0x01, ret & 0xDF); } } if ((dirty >> OP_SET_GRPPWM) & 0x01) { i2c_smbus_write_byte_data(pca963x->client, 0x06, work_data.grppwm); } if ((dirty >> OP_SET_GRPFREQ) & 0x01) { i2c_smbus_write_byte_data(pca963x->client, 0x07, work_data.grpfreq); } if ((dirty >> OP_SET_BLUE_BRIGHTNESS) & 0x01) pca963x_update_brightness(pca963x, 0, work_data.colors[0]); if ((dirty >> OP_SET_GREEN_BRIGHTNESS) & 0x01) pca963x_update_brightness(pca963x, 1, work_data.colors[1]); if ((dirty >> OP_SET_RED_BRIGHTNESS) & 0x01) pca963x_update_brightness(pca963x, 2, work_data.colors[2]); /* check if could go to low power mode */ if (((pca963x->status & 0x0F) == 0) && (!work_data.blink)) { i2c_smbus_write_byte_data(pca963x->client, 0x08, 0xAA); i2c_smbus_write_byte_data(pca963x->client, 0x00, 0x11); pca963x->mode = MODE_SLEEP; } } static void set_pca963x_default(struct i2c_client *client) { i2c_smbus_write_byte_data(client, 0x00, 0x01); i2c_smbus_write_byte_data(client, 0x01, 0x00); /* set all LEDx brightness off */ i2c_smbus_write_byte_data(client, address[0], LED_OFF); i2c_smbus_write_byte_data(client, address[1], LED_OFF); i2c_smbus_write_byte_data(client, address[2], LED_OFF); /* set group duty cycle control to default */ i2c_smbus_write_byte_data(client, 0x06, 0xFF); /* set group frequency to default */ i2c_smbus_write_byte_data(client, 0x07, 0x00); /* * set LEDx individual brightness and group dimming/blinking * can be controlled by * its PWMx register and GRPPWM registers. */ i2c_smbus_write_byte_data(client, 0x08, 0xFF); /* low power mode. 
oscillator off */ i2c_smbus_write_byte_data(client, 0x00, 0x11); } static int pca963x_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = 0; struct pca963x_data *pca963x; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { ret = -ENODEV; goto exit; } pca963x = kzalloc(sizeof(struct pca963x_data), GFP_KERNEL); if (pca963x == NULL) { ret = -ENOMEM; goto err_alloc_failed; } INIT_WORK(&pca963x->work, pca963x_work_func); pca963x->client = client; pca963x->leds[0].name = "blue"; pca963x->leds[0].brightness = LED_OFF; pca963x->leds[0].brightness_set = led_brightness_set; pca963x->leds[1].name = "green"; pca963x->leds[1].brightness = LED_OFF; pca963x->leds[1].brightness_set = led_brightness_set; pca963x->leds[2].name = "red"; pca963x->leds[2].brightness = LED_OFF; pca963x->leds[2].brightness_set = led_brightness_set; pca963x->dirty = 0; pca963x->status = 0; pca963x->data.colors[0] = LED_OFF; pca963x->data.colors[1] = LED_OFF; pca963x->data.colors[2] = LED_OFF; pca963x->data.blink = 0; pca963x->data.grppwm = 0; pca963x->data.grpfreq = 0; i2c_set_clientdata(client, pca963x); set_pca963x_default(client); pca963x->mode = MODE_SLEEP; /* blue */ ret = led_classdev_register(&client->dev, &pca963x->leds[0]); if (ret < 0) { printk(KERN_ERR "pca963x: led_classdev_register failed\n"); goto err_led0_classdev_register_failed; } /* green */ ret = led_classdev_register(&client->dev, &pca963x->leds[1]); if (ret < 0) { printk(KERN_ERR "pca963x: led_classdev_register failed\n"); goto err_led1_classdev_register_failed; } /* red */ ret = led_classdev_register(&client->dev, &pca963x->leds[2]); if (ret < 0) { printk(KERN_ERR "pca963x: led_classdev_register failed\n"); goto err_led2_classdev_register_failed; } ret = device_create_file(&client->dev, &dev_attr_blink); ret = device_create_file(&client->dev, &dev_attr_grppwm); ret = device_create_file(&client->dev, &dev_attr_grpfreq); return 0; err_led2_classdev_register_failed: 
led_classdev_unregister(&pca963x->leds[2]); err_led1_classdev_register_failed: led_classdev_unregister(&pca963x->leds[1]); err_led0_classdev_register_failed: led_classdev_unregister(&pca963x->leds[0]); err_alloc_failed: kfree(pca963x); exit: return ret; } static int pca963x_suspend(struct i2c_client *client, pm_message_t mesg) { flush_scheduled_work(); return 0; } static int pca963x_remove(struct i2c_client *client) { struct pca963x_data *pca963x = i2c_get_clientdata(client); cancel_work_sync(&pca963x->work); device_remove_file(&client->dev, &dev_attr_blink); device_remove_file(&client->dev, &dev_attr_grppwm); device_remove_file(&client->dev, &dev_attr_grpfreq); set_pca963x_default(client); led_classdev_unregister(&pca963x->leds[0]); led_classdev_unregister(&pca963x->leds[1]); led_classdev_unregister(&pca963x->leds[2]); kfree(pca963x); return 0; } static const struct i2c_device_id pca963x_id[] = { { "pca963x", 0 }, { } }; static struct i2c_driver pca963x_driver = { .driver = { .name = "pca963x", }, .probe = pca963x_probe, .suspend = pca963x_suspend, .remove = pca963x_remove, .id_table = pca963x_id, }; static int __init pca963x_init(void) { return i2c_add_driver(&pca963x_driver); } static void __exit pca963x_exit(void) { i2c_del_driver(&pca963x_driver); } MODULE_AUTHOR("Shan-Fu Chiou <sfchiou@gmail.com>"); MODULE_DESCRIPTION("pca963x driver"); MODULE_LICENSE("GPL"); module_init(pca963x_init); module_exit(pca963x_exit);
gpl-2.0
dzo/android_kernel_huawei_u8800-1
drivers/net/ethernet/msm/msm_rmnet_sdio.c
397
17598
/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * RMNET SDIO Module. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/wakelock.h> #include <linux/if_arp.h> #include <linux/msm_rmnet.h> #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> #endif #include <mach/sdio_dmux.h> /* Debug message support */ static int msm_rmnet_sdio_debug_mask; module_param_named(debug_enable, msm_rmnet_sdio_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #define DEBUG_MASK_LVL0 (1U << 0) #define DEBUG_MASK_LVL1 (1U << 1) #define DEBUG_MASK_LVL2 (1U << 2) #define DBG(m, x...) do { \ if (msm_rmnet_sdio_debug_mask & m) \ pr_info(x); \ } while (0) #define DBG0(x...) DBG(DEBUG_MASK_LVL0, x) #define DBG1(x...) DBG(DEBUG_MASK_LVL1, x) #define DBG2(x...) 
DBG(DEBUG_MASK_LVL2, x) /* Configure device instances */ #define RMNET_DEVICE_COUNT (8) /* allow larger frames */ #define RMNET_DATA_LEN 2000 #define DEVICE_ID_INVALID -1 #define DEVICE_INACTIVE 0 #define DEVICE_ACTIVE 1 #define HEADROOM_FOR_SDIO 8 /* for mux header */ #define HEADROOM_FOR_QOS 8 #define TAILROOM 8 /* for padding by mux layer */ struct rmnet_private { struct net_device_stats stats; uint32_t ch_id; #ifdef CONFIG_MSM_RMNET_DEBUG ktime_t last_packet; unsigned long wakeups_xmit; unsigned long wakeups_rcv; unsigned long timeout_us; #endif struct sk_buff *skb; spinlock_t lock; spinlock_t tx_queue_lock; struct tasklet_struct tsklt; u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */ uint8_t device_up; uint8_t in_reset; }; #ifdef CONFIG_MSM_RMNET_DEBUG static unsigned long timeout_us; #ifdef CONFIG_HAS_EARLYSUSPEND /* * If early suspend is enabled then we specify two timeout values, * screen on (default), and screen is off. */ static unsigned long timeout_suspend_us; static struct device *rmnet0; /* Set timeout in us when the screen is off. 
*/ static ssize_t timeout_suspend_store(struct device *d, struct device_attribute *attr, const char *buf, size_t n) { timeout_suspend_us = strict_strtoul(buf, NULL, 10); return n; } static ssize_t timeout_suspend_show(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us); } static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show, timeout_suspend_store); static void rmnet_early_suspend(struct early_suspend *handler) { if (rmnet0) { struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0)); p->timeout_us = timeout_suspend_us; } } static void rmnet_late_resume(struct early_suspend *handler) { if (rmnet0) { struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0)); p->timeout_us = timeout_us; } } static struct early_suspend rmnet_power_suspend = { .suspend = rmnet_early_suspend, .resume = rmnet_late_resume, }; static int __init rmnet_late_init(void) { register_early_suspend(&rmnet_power_suspend); return 0; } late_initcall(rmnet_late_init); #endif /* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */ static int rmnet_cause_wakeup(struct rmnet_private *p) { int ret = 0; ktime_t now; if (p->timeout_us == 0) /* Check if disabled */ return 0; /* Use real (wall) time. */ now = ktime_get_real(); if (ktime_us_delta(now, p->last_packet) > p->timeout_us) ret = 1; p->last_packet = now; return ret; } static ssize_t wakeups_xmit_show(struct device *d, struct device_attribute *attr, char *buf) { struct rmnet_private *p = netdev_priv(to_net_dev(d)); return sprintf(buf, "%lu\n", p->wakeups_xmit); } DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL); static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr, char *buf) { struct rmnet_private *p = netdev_priv(to_net_dev(d)); return sprintf(buf, "%lu\n", p->wakeups_rcv); } DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL); /* Set timeout in us. 
*/ static ssize_t timeout_store(struct device *d, struct device_attribute *attr, const char *buf, size_t n) { #ifndef CONFIG_HAS_EARLYSUSPEND struct rmnet_private *p = netdev_priv(to_net_dev(d)); p->timeout_us = timeout_us = strict_strtoul(buf, NULL, 10); #else /* If using early suspend/resume hooks do not write the value on store. */ timeout_us = strict_strtoul(buf, NULL, 10); #endif return n; } static ssize_t timeout_show(struct device *d, struct device_attribute *attr, char *buf) { struct rmnet_private *p = netdev_priv(to_net_dev(d)); p = netdev_priv(to_net_dev(d)); return sprintf(buf, "%lu\n", timeout_us); } DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store); #endif /* Forward declaration */ static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev) { __be16 protocol = 0; skb->dev = dev; /* Determine L3 protocol */ switch (skb->data[0] & 0xf0) { case 0x40: protocol = htons(ETH_P_IP); break; case 0x60: protocol = htons(ETH_P_IPV6); break; default: pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x", dev->name, skb->data[0] & 0xf0); /* skb will be dropped in upper layer for unknown protocol */ } return protocol; } static int count_this_packet(void *_hdr, int len) { struct ethhdr *hdr = _hdr; if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP)) return 0; return 1; } static int sdio_update_reset_state(struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); int new_state; new_state = msm_sdio_is_channel_in_reset(p->ch_id); if (p->in_reset != new_state) { p->in_reset = (uint8_t)new_state; if (p->in_reset) netif_carrier_off(dev); else netif_carrier_on(dev); return 1; } return 0; } /* Rx Callback, Called in Work Queue context */ static void sdio_recv_notify(void *dev, struct sk_buff *skb) { struct rmnet_private *p = netdev_priv(dev); unsigned long flags; u32 opmode; if (skb) { skb->dev = dev; /* Handle Rx frame format */ 
spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); if (RMNET_IS_MODE_IP(opmode)) { /* Driver in IP mode */ skb->protocol = rmnet_ip_type_trans(skb, dev); } else { /* Driver in Ethernet mode */ skb->protocol = eth_type_trans(skb, dev); } if (RMNET_IS_MODE_IP(opmode) || count_this_packet(skb->data, skb->len)) { #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_rcv += rmnet_cause_wakeup(p); #endif p->stats.rx_packets++; p->stats.rx_bytes += skb->len; } DBG1("[%s] Rx packet #%lu len=%d\n", ((struct net_device *)dev)->name, p->stats.rx_packets, skb->len); /* Deliver to network stack */ netif_rx(skb); } else { spin_lock_irqsave(&p->lock, flags); if (!sdio_update_reset_state((struct net_device *)dev)) pr_err("[%s] %s: No skb received", ((struct net_device *)dev)->name, __func__); spin_unlock_irqrestore(&p->lock, flags); } } static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); int sdio_ret; struct QMI_QOS_HDR_S *qmih; u32 opmode; unsigned long flags; if (!netif_carrier_ok(dev)) { pr_err("[%s] %s: channel in reset", dev->name, __func__); goto xmit_out; } /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */ spin_lock_irqsave(&p->lock, flags); opmode = p->operation_mode; spin_unlock_irqrestore(&p->lock, flags); if (RMNET_IS_MODE_QOS(opmode)) { qmih = (struct QMI_QOS_HDR_S *) skb_push(skb, sizeof(struct QMI_QOS_HDR_S)); qmih->version = 1; qmih->flags = 0; qmih->flow_id = skb->mark; } dev->trans_start = jiffies; sdio_ret = msm_sdio_dmux_write(p->ch_id, skb); if (sdio_ret != 0) { pr_err("[%s] %s: write returned error %d", dev->name, __func__, sdio_ret); goto xmit_out; } if (count_this_packet(skb->data, skb->len)) { p->stats.tx_packets++; p->stats.tx_bytes += skb->len; #ifdef CONFIG_MSM_RMNET_DEBUG p->wakeups_xmit += rmnet_cause_wakeup(p); #endif } DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n", dev->name, p->stats.tx_packets, skb->len, skb->mark); return 
0; xmit_out: dev_kfree_skb_any(skb); p->stats.tx_errors++; return 0; } static void sdio_write_done(void *dev, struct sk_buff *skb) { struct rmnet_private *p = netdev_priv(dev); unsigned long flags; if (skb) dev_kfree_skb_any(skb); if (!p->in_reset) { DBG1("%s: write complete skb=%p\n", __func__, skb); spin_lock_irqsave(&p->tx_queue_lock, flags); if (netif_queue_stopped(dev) && msm_sdio_dmux_is_ch_low(p->ch_id)) { DBG0("%s: Low WM hit, waking queue=%p\n", __func__, skb); netif_wake_queue(dev); } spin_unlock_irqrestore(&p->tx_queue_lock, flags); } else { DBG1("%s: write in reset skb=%p\n", __func__, skb); } } static int __rmnet_open(struct net_device *dev) { int r; struct rmnet_private *p = netdev_priv(dev); DBG0("[%s] __rmnet_open()\n", dev->name); if (!p->device_up) { r = msm_sdio_dmux_open(p->ch_id, dev, sdio_recv_notify, sdio_write_done); if (r < 0) return -ENODEV; } p->device_up = DEVICE_ACTIVE; return 0; } static int rmnet_open(struct net_device *dev) { int rc = 0; DBG0("[%s] rmnet_open()\n", dev->name); rc = __rmnet_open(dev); if (rc == 0) netif_start_queue(dev); return rc; } static int __rmnet_close(struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); int rc = 0; if (p->device_up) { /* do not close rmnet port once up, this causes remote side to hang if tried to open again */ /* rc = msm_sdio_dmux_close(p->ch_id); */ p->device_up = DEVICE_INACTIVE; return rc; } else return -EBADF; } static int rmnet_stop(struct net_device *dev) { DBG0("[%s] rmnet_stop()\n", dev->name); __rmnet_close(dev); netif_stop_queue(dev); return 0; } static int rmnet_change_mtu(struct net_device *dev, int new_mtu) { if (0 > new_mtu || RMNET_DATA_LEN < new_mtu) return -EINVAL; DBG0("[%s] MTU change: old=%d new=%d\n", dev->name, dev->mtu, new_mtu); dev->mtu = new_mtu; return 0; } static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); unsigned long flags; if (netif_queue_stopped(dev)) { pr_err("[%s]fatal: rmnet_xmit 
called when " "netif_queue is stopped", dev->name); return 0; } _rmnet_xmit(skb, dev); spin_lock_irqsave(&p->tx_queue_lock, flags); if (msm_sdio_dmux_is_ch_full(p->ch_id)) { netif_stop_queue(dev); DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb); } spin_unlock_irqrestore(&p->tx_queue_lock, flags); return 0; } static struct net_device_stats *rmnet_get_stats(struct net_device *dev) { struct rmnet_private *p = netdev_priv(dev); return &p->stats; } static void rmnet_set_multicast_list(struct net_device *dev) { } static void rmnet_tx_timeout(struct net_device *dev) { pr_warning("[%s] rmnet_tx_timeout()\n", dev->name); } static const struct net_device_ops rmnet_ops_ether = { .ndo_open = rmnet_open, .ndo_stop = rmnet_stop, .ndo_start_xmit = rmnet_xmit, .ndo_get_stats = rmnet_get_stats, .ndo_set_rx_mode = rmnet_set_multicast_list, .ndo_tx_timeout = rmnet_tx_timeout, .ndo_do_ioctl = rmnet_ioctl, .ndo_change_mtu = rmnet_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static const struct net_device_ops rmnet_ops_ip = { .ndo_open = rmnet_open, .ndo_stop = rmnet_stop, .ndo_start_xmit = rmnet_xmit, .ndo_get_stats = rmnet_get_stats, .ndo_set_rx_mode = rmnet_set_multicast_list, .ndo_tx_timeout = rmnet_tx_timeout, .ndo_do_ioctl = rmnet_ioctl, .ndo_change_mtu = rmnet_change_mtu, .ndo_set_mac_address = 0, .ndo_validate_addr = 0, }; static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct rmnet_private *p = netdev_priv(dev); u32 old_opmode = p->operation_mode; unsigned long flags; int prev_mtu = dev->mtu; int rc = 0; /* Process IOCTL command */ switch (cmd) { case RMNET_IOCTL_SET_LLP_ETHERNET: /* Set Ethernet protocol */ /* Perform Ethernet config only if in IP mode currently*/ if (p->operation_mode & RMNET_MODE_LLP_IP) { ether_setup(dev); random_ether_addr(dev->dev_addr); dev->mtu = prev_mtu; dev->netdev_ops = &rmnet_ops_ether; spin_lock_irqsave(&p->lock, flags); p->operation_mode &= ~RMNET_MODE_LLP_IP; 
p->operation_mode |= RMNET_MODE_LLP_ETH; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): " "set Ethernet protocol mode\n", dev->name); } break; case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */ /* Perform IP config only if in Ethernet mode currently*/ if (p->operation_mode & RMNET_MODE_LLP_ETH) { /* Undo config done in ether_setup() */ dev->header_ops = 0; /* No header */ dev->type = ARPHRD_RAWIP; dev->hard_header_len = 0; dev->mtu = prev_mtu; dev->addr_len = 0; dev->flags &= ~(IFF_BROADCAST| IFF_MULTICAST); dev->needed_headroom = HEADROOM_FOR_SDIO + HEADROOM_FOR_QOS; dev->needed_tailroom = TAILROOM; dev->netdev_ops = &rmnet_ops_ip; spin_lock_irqsave(&p->lock, flags); p->operation_mode &= ~RMNET_MODE_LLP_ETH; p->operation_mode |= RMNET_MODE_LLP_IP; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): " "set IP protocol mode\n", dev->name); } break; case RMNET_IOCTL_GET_LLP: /* Get link protocol state */ ifr->ifr_ifru.ifru_data = (void *)(p->operation_mode & (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP)); break; case RMNET_IOCTL_SET_QOS_ENABLE: /* Set QoS header enabled */ spin_lock_irqsave(&p->lock, flags); p->operation_mode |= RMNET_MODE_QOS; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n", dev->name); break; case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */ spin_lock_irqsave(&p->lock, flags); p->operation_mode &= ~RMNET_MODE_QOS; spin_unlock_irqrestore(&p->lock, flags); DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n", dev->name); break; case RMNET_IOCTL_GET_QOS: /* Get QoS header state */ ifr->ifr_ifru.ifru_data = (void *)(p->operation_mode & RMNET_MODE_QOS); break; case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */ ifr->ifr_ifru.ifru_data = (void *)p->operation_mode; break; case RMNET_IOCTL_OPEN: /* Open transport port */ rc = __rmnet_open(dev); DBG0("[%s] rmnet_ioctl(): open transport port\n", dev->name); break; case RMNET_IOCTL_CLOSE: /* Close transport port 
*/ rc = __rmnet_close(dev); DBG0("[%s] rmnet_ioctl(): close transport port\n", dev->name); break; default: pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]", dev->name, cmd); return -EINVAL; } DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n", dev->name, __func__, cmd, old_opmode, p->operation_mode); return rc; } static void __init rmnet_setup(struct net_device *dev) { /* Using Ethernet mode by default */ dev->netdev_ops = &rmnet_ops_ether; ether_setup(dev); /* set this after calling ether_setup */ dev->mtu = RMNET_DATA_LEN; dev->needed_headroom = HEADROOM_FOR_SDIO + HEADROOM_FOR_QOS ; dev->needed_tailroom = TAILROOM; random_ether_addr(dev->dev_addr); dev->watchdog_timeo = 1000; /* 10 seconds? */ } static int __init rmnet_init(void) { int ret; struct device *d; struct net_device *dev; struct rmnet_private *p; unsigned n; pr_info("%s: SDIO devices[%d]\n", __func__, RMNET_DEVICE_COUNT); #ifdef CONFIG_MSM_RMNET_DEBUG timeout_us = 0; #ifdef CONFIG_HAS_EARLYSUSPEND timeout_suspend_us = 0; #endif #endif for (n = 0; n < RMNET_DEVICE_COUNT; n++) { dev = alloc_netdev(sizeof(struct rmnet_private), "rmnet_sdio%d", rmnet_setup); if (!dev) return -ENOMEM; d = &(dev->dev); p = netdev_priv(dev); /* Initial config uses Ethernet */ p->operation_mode = RMNET_MODE_LLP_ETH; p->ch_id = n; spin_lock_init(&p->lock); spin_lock_init(&p->tx_queue_lock); #ifdef CONFIG_MSM_RMNET_DEBUG p->timeout_us = timeout_us; p->wakeups_xmit = p->wakeups_rcv = 0; #endif ret = register_netdev(dev); if (ret) { free_netdev(dev); return ret; } #ifdef CONFIG_MSM_RMNET_DEBUG if (device_create_file(d, &dev_attr_timeout)) continue; if (device_create_file(d, &dev_attr_wakeups_xmit)) continue; if (device_create_file(d, &dev_attr_wakeups_rcv)) continue; #ifdef CONFIG_HAS_EARLYSUSPEND if (device_create_file(d, &dev_attr_timeout_suspend)) continue; /* Only care about rmnet0 for suspend/resume tiemout hooks. 
*/ if (n == 0) rmnet0 = d; #endif #endif } return 0; } module_init(rmnet_init); MODULE_DESCRIPTION("MSM RMNET SDIO TRANSPORT"); MODULE_LICENSE("GPL v2");
gpl-2.0
theme/linux
arch/cris/kernel/irq.c
1421
1597
/* * * linux/arch/cris/kernel/irq.c * * Copyright (c) 2000,2007 Axis Communications AB * * Authors: Bjorn Wesen (bjornw@axis.com) * * This file contains the code used by various IRQ handling routines: * asking for different IRQs should be done through these routines * instead of just grabbing them. Thus setups with different IRQ numbers * shouldn't result in any weird surprises, and installing new handlers * should be easier. * */ /* * IRQs are in fact implemented a bit like signal handlers for the kernel. * Naturally it's not a 1:1 relation, but there are similarities. */ #include <linux/module.h> #include <linux/ptrace.h> #include <linux/irq.h> #include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/timex.h> #include <linux/random.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <asm/io.h> #include <arch/system.h> /* called by the assembler IRQ entry functions defined in irq.h * to dispatch the interrupts to registered handlers */ asmlinkage void do_IRQ(int irq, struct pt_regs * regs) { unsigned long sp; struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); sp = rdsp(); if (unlikely((sp & (PAGE_SIZE - 1)) < (PAGE_SIZE/8))) { printk("do_IRQ: stack overflow: %lX\n", sp); show_stack(NULL, (unsigned long *)sp); } generic_handle_irq(irq); irq_exit(); set_irq_regs(old_regs); } void weird_irq(void) { local_irq_disable(); printk("weird irq\n"); while(1); }
gpl-2.0
elelinux/hero_kernel
drivers/pci/hotplug/pcihp_slot.c
2701
7231
/*
 * Copyright (C) 1995,2001 Compaq Computer Corporation
 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
 * Copyright (C) 2001 IBM Corp.
 * Copyright (C) 2003-2004 Intel Corporation
 * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

/* Fallback type-0 settings used when the platform provides none. */
static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

/*
 * Apply conventional-PCI (hpp_type0) hotplug parameters to a newly
 * added device: cache line size, latency timer, and SERR/PERR enables
 * in the command register (and bridge control for PCI-PCI bridges).
 */
static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp) {
		/*
		 * Perhaps we *should* use default settings for PCIe, but
		 * pciehp didn't, so we won't either.
		 */
		if (pci_is_pcie(dev))
			return;
		dev_info(&dev->dev, "using default PCI settings\n");
		hpp = &pci_default_type0;
	}

	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	else
		pci_cmd &= ~PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	else
		pci_cmd &= ~PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		else
			pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		else
			pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

/* PCI-X (hpp_type1) parameters are intentionally not implemented. */
static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
	if (hpp)
		dev_warn(&dev->dev, "PCI-X settings not supported\n");
}

/*
 * Apply PCIe (hpp_type2) hotplug parameters: Device Control, Link
 * Control (bridges only), and the AER mask/severity/capability
 * registers.  Each register is updated as (value & and_mask) | or_mask.
 */
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u16 reg16;
	u32 reg32;

	if (!hpp)
		return;

	/* Find PCI Express capability */
	pos = pci_pcie_cap(dev);
	if (!pos)
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/* Initialize Device Control Register */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
	reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);

	/* Initialize Link Control Register */
	if (dev->subordinate) {
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &reg16);
		reg16 = (reg16 & hpp->pci_exp_lnkctl_and)
			| hpp->pci_exp_lnkctl_or;
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

/* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device
 *
 * Returns 0 on success or when no PCIe capability is found; -1 when the
 * parent's payload size exceeds the device's maximum capability.
 *
 * NOTE(review): dev->bus->self is used without a NULL check.  It can be
 * NULL for a device directly on a root bus; hotplug slots normally sit
 * below a bridge, but confirm before reusing this helper elsewhere.
 */
static int pci_set_payload(struct pci_dev *dev)
{
       int pos, ppos;
       u16 pctl, psz;
       u16 dctl, dsz, dcap, dmax;
       struct pci_dev *parent;

       parent = dev->bus->self;

       pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
       if (!pos)
               return 0;

       /* Read Device MaxPayload capability and setting */
       pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl);
       pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap);
       /* DEVCTL payload field is bits 7:5; DEVCAP max payload is bits 2:0 */
       dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
       dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD);

       /* Read Parent MaxPayload setting */
       ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
       if (!ppos)
               return 0;
       pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
       psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

       /* If parent payload > device max payload -> error
        * If parent payload > device payload -> set speed
        * If parent payload <= device payload -> do nothing
        */
       if (psz > dmax)
               return -1;
       else if (psz > dsz) {
               /* Encoded size N means 128 << N bytes */
               dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz);
               pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
                                     (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) +
                                     (psz << 5));
       }
       return 0;
}

/*
 * Configure a hot-added device (and, for bridges, its whole subtree):
 * align MaxPayload with the parent, then apply platform hotplug
 * parameters (PCIe first, then PCI-X, then conventional PCI).
 */
void pci_configure_slot(struct pci_dev *dev)
{
	struct pci_dev *cdev;
	struct hotplug_params hpp;
	int ret;

	/* Only normal functions and PCI-PCI bridges are configured here. */
	if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
			(dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
			(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
		return;

	ret = pci_set_payload(dev);
	if (ret)
		dev_warn(&dev->dev, "could not set device max payload\n");

	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
	if (ret)
		dev_warn(&dev->dev, "no hotplug settings from platform\n");

	program_hpp_type2(dev, hpp.t2);
	program_hpp_type1(dev, hpp.t1);
	program_hpp_type0(dev, hpp.t0);

	/* Recurse into the secondary bus of a bridge. */
	if (dev->subordinate) {
		list_for_each_entry(cdev, &dev->subordinate->devices,
				    bus_list)
			pci_configure_slot(cdev);
	}
}
EXPORT_SYMBOL_GPL(pci_configure_slot);
gpl-2.0
houzhenggang/bcm63xx-next
drivers/oprofile/cpu_buffer.c
2957
10842
/** * @file cpu_buffer.c * * @remark Copyright 2002-2009 OProfile authors * @remark Read the file COPYING * * @author John Levon <levon@movementarian.org> * @author Barry Kasindorf <barry.kasindorf@amd.com> * @author Robert Richter <robert.richter@amd.com> * * Each CPU has a local buffer that stores PC value/event * pairs. We also log context switches when we notice them. * Eventually each CPU's buffer is processed into the global * event buffer by sync_buffer(). * * We use a local buffer for two reasons: an NMI or similar * interrupt cannot synchronise, and high sampling rates * would lead to catastrophic global synchronisation if * a global buffer was used. */ #include <linux/sched.h> #include <linux/oprofile.h> #include <linux/errno.h> #include "event_buffer.h" #include "cpu_buffer.h" #include "buffer_sync.h" #include "oprof.h" #define OP_BUFFER_FLAGS 0 static struct ring_buffer *op_ring_buffer; DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); static void wq_sync_buffer(struct work_struct *work); #define DEFAULT_TIMER_EXPIRE (HZ / 10) static int work_enabled; unsigned long oprofile_get_cpu_buffer_size(void) { return oprofile_cpu_buffer_size; } void oprofile_cpu_buffer_inc_smpl_lost(void) { struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); cpu_buf->sample_lost_overflow++; } void free_cpu_buffers(void) { if (op_ring_buffer) ring_buffer_free(op_ring_buffer); op_ring_buffer = NULL; } #define RB_EVENT_HDR_SIZE 4 int alloc_cpu_buffers(void) { int i; unsigned long buffer_size = oprofile_cpu_buffer_size; unsigned long byte_size = buffer_size * (sizeof(struct op_sample) + RB_EVENT_HDR_SIZE); op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); if (!op_ring_buffer) goto fail; for_each_possible_cpu(i) { struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); b->last_task = NULL; b->last_is_kernel = -1; b->tracing = 0; b->buffer_size = buffer_size; b->sample_received = 0; b->sample_lost_overflow = 0; b->backtrace_aborted = 0; 
b->sample_invalid_eip = 0; b->cpu = i; INIT_DELAYED_WORK(&b->work, wq_sync_buffer); } return 0; fail: free_cpu_buffers(); return -ENOMEM; } void start_cpu_work(void) { int i; work_enabled = 1; for_each_online_cpu(i) { struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); /* * Spread the work by 1 jiffy per cpu so they dont all * fire at once. */ schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i); } } void end_cpu_work(void) { work_enabled = 0; } void flush_cpu_work(void) { int i; for_each_online_cpu(i) { struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); /* these works are per-cpu, no need for flush_sync */ flush_delayed_work(&b->work); } } /* * This function prepares the cpu buffer to write a sample. * * Struct op_entry is used during operations on the ring buffer while * struct op_sample contains the data that is stored in the ring * buffer. Struct entry can be uninitialized. The function reserves a * data array that is specified by size. Use * op_cpu_buffer_write_commit() after preparing the sample. In case of * errors a null pointer is returned, otherwise the pointer to the * sample. 
* */ struct op_sample *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) { entry->event = ring_buffer_lock_reserve (op_ring_buffer, sizeof(struct op_sample) + size * sizeof(entry->sample->data[0])); if (!entry->event) return NULL; entry->sample = ring_buffer_event_data(entry->event); entry->size = size; entry->data = entry->sample->data; return entry->sample; } int op_cpu_buffer_write_commit(struct op_entry *entry) { return ring_buffer_unlock_commit(op_ring_buffer, entry->event); } struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) { struct ring_buffer_event *e; e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL); if (!e) return NULL; entry->event = e; entry->sample = ring_buffer_event_data(e); entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) / sizeof(entry->sample->data[0]); entry->data = entry->sample->data; return entry->sample; } unsigned long op_cpu_buffer_entries(int cpu) { return ring_buffer_entries_cpu(op_ring_buffer, cpu); } static int op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace, int is_kernel, struct task_struct *task) { struct op_entry entry; struct op_sample *sample; unsigned long flags; int size; flags = 0; if (backtrace) flags |= TRACE_BEGIN; /* notice a switch from user->kernel or vice versa */ is_kernel = !!is_kernel; if (cpu_buf->last_is_kernel != is_kernel) { cpu_buf->last_is_kernel = is_kernel; flags |= KERNEL_CTX_SWITCH; if (is_kernel) flags |= IS_KERNEL; } /* notice a task switch */ if (cpu_buf->last_task != task) { cpu_buf->last_task = task; flags |= USER_CTX_SWITCH; } if (!flags) /* nothing to do */ return 0; if (flags & USER_CTX_SWITCH) size = 1; else size = 0; sample = op_cpu_buffer_write_reserve(&entry, size); if (!sample) return -ENOMEM; sample->eip = ESCAPE_CODE; sample->event = flags; if (size) op_cpu_buffer_add_data(&entry, (unsigned long)task); op_cpu_buffer_write_commit(&entry); return 0; } static inline int op_add_sample(struct 
oprofile_cpu_buffer *cpu_buf, unsigned long pc, unsigned long event) { struct op_entry entry; struct op_sample *sample; sample = op_cpu_buffer_write_reserve(&entry, 0); if (!sample) return -ENOMEM; sample->eip = pc; sample->event = event; return op_cpu_buffer_write_commit(&entry); } /* * This must be safe from any context. * * is_kernel is needed because on some architectures you cannot * tell if you are in kernel or user space simply by looking at * pc. We tag this in the buffer by generating kernel enter/exit * events whenever is_kernel changes */ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, unsigned long backtrace, int is_kernel, unsigned long event, struct task_struct *task) { struct task_struct *tsk = task ? task : current; cpu_buf->sample_received++; if (pc == ESCAPE_CODE) { cpu_buf->sample_invalid_eip++; return 0; } if (op_add_code(cpu_buf, backtrace, is_kernel, tsk)) goto fail; if (op_add_sample(cpu_buf, pc, event)) goto fail; return 1; fail: cpu_buf->sample_lost_overflow++; return 0; } static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) { cpu_buf->tracing = 1; } static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf) { cpu_buf->tracing = 0; } static inline void __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel, struct task_struct *task) { struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); unsigned long backtrace = oprofile_backtrace_depth; /* * if log_sample() fail we can't backtrace since we lost the * source of this event */ if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task)) /* failed */ return; if (!backtrace) return; oprofile_begin_trace(cpu_buf); oprofile_ops.backtrace(regs, backtrace); oprofile_end_trace(cpu_buf); } void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel, struct task_struct *task) { __oprofile_add_ext_sample(pc, 
regs, event, is_kernel, task); } void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel) { __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL); } void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) { int is_kernel; unsigned long pc; if (likely(regs)) { is_kernel = !user_mode(regs); pc = profile_pc(regs); } else { is_kernel = 0; /* This value will not be used */ pc = ESCAPE_CODE; /* as this causes an early return. */ } __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL); } /* * Add samples with data to the ring buffer. * * Use oprofile_add_data(&entry, val) to add data and * oprofile_write_commit(&entry) to commit the sample. */ void oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, unsigned long pc, int code, int size) { struct op_sample *sample; int is_kernel = !user_mode(regs); struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); cpu_buf->sample_received++; /* no backtraces for samples with data */ if (op_add_code(cpu_buf, 0, is_kernel, current)) goto fail; sample = op_cpu_buffer_write_reserve(entry, size + 2); if (!sample) goto fail; sample->eip = ESCAPE_CODE; sample->event = 0; /* no flags */ op_cpu_buffer_add_data(entry, code); op_cpu_buffer_add_data(entry, pc); return; fail: entry->event = NULL; cpu_buf->sample_lost_overflow++; } int oprofile_add_data(struct op_entry *entry, unsigned long val) { if (!entry->event) return 0; return op_cpu_buffer_add_data(entry, val); } int oprofile_add_data64(struct op_entry *entry, u64 val) { if (!entry->event) return 0; if (op_cpu_buffer_get_size(entry) < 2) /* * the function returns 0 to indicate a too small * buffer, even if there is some space left */ return 0; if (!op_cpu_buffer_add_data(entry, (u32)val)) return 0; return op_cpu_buffer_add_data(entry, (u32)(val >> 32)); } int oprofile_write_commit(struct op_entry *entry) { if (!entry->event) return -EINVAL; return 
op_cpu_buffer_write_commit(entry); } void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) { struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); log_sample(cpu_buf, pc, 0, is_kernel, event, NULL); } void oprofile_add_trace(unsigned long pc) { struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); if (!cpu_buf->tracing) return; /* * broken frame can give an eip with the same value as an * escape code, abort the trace if we get it */ if (pc == ESCAPE_CODE) goto fail; if (op_add_sample(cpu_buf, pc, 0)) goto fail; return; fail: cpu_buf->tracing = 0; cpu_buf->backtrace_aborted++; return; } /* * This serves to avoid cpu buffer overflow, and makes sure * the task mortuary progresses * * By using schedule_delayed_work_on and then schedule_delayed_work * we guarantee this will stay on the correct cpu */ static void wq_sync_buffer(struct work_struct *work) { struct oprofile_cpu_buffer *b = container_of(work, struct oprofile_cpu_buffer, work.work); if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) { cancel_delayed_work(&b->work); return; } sync_buffer(b->cpu); /* don't re-add the work if we're shutting down */ if (work_enabled) schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE); }
gpl-2.0
GlitchKernel/Glitch
drivers/mmc/host/jz4740_mmc.c
3213
25186
/* * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> * JZ4740 SD/MMC controller driver * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/mmc/host.h> #include <linux/err.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/scatterlist.h> #include <linux/clk.h> #include <linux/bitops.h> #include <linux/gpio.h> #include <asm/mach-jz4740/gpio.h> #include <asm/cacheflush.h> #include <linux/dma-mapping.h> #include <asm/mach-jz4740/jz4740_mmc.h> #define JZ_REG_MMC_STRPCL 0x00 #define JZ_REG_MMC_STATUS 0x04 #define JZ_REG_MMC_CLKRT 0x08 #define JZ_REG_MMC_CMDAT 0x0C #define JZ_REG_MMC_RESTO 0x10 #define JZ_REG_MMC_RDTO 0x14 #define JZ_REG_MMC_BLKLEN 0x18 #define JZ_REG_MMC_NOB 0x1C #define JZ_REG_MMC_SNOB 0x20 #define JZ_REG_MMC_IMASK 0x24 #define JZ_REG_MMC_IREG 0x28 #define JZ_REG_MMC_CMD 0x2C #define JZ_REG_MMC_ARG 0x30 #define JZ_REG_MMC_RESP_FIFO 0x34 #define JZ_REG_MMC_RXFIFO 0x38 #define JZ_REG_MMC_TXFIFO 0x3C #define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7) #define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6) #define JZ_MMC_STRPCL_START_READWAIT BIT(5) #define JZ_MMC_STRPCL_STOP_READWAIT BIT(4) #define JZ_MMC_STRPCL_RESET BIT(3) #define JZ_MMC_STRPCL_START_OP BIT(2) #define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0)) #define JZ_MMC_STRPCL_CLOCK_STOP BIT(0) #define JZ_MMC_STRPCL_CLOCK_START BIT(1) #define JZ_MMC_STATUS_IS_RESETTING BIT(15) #define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14) #define JZ_MMC_STATUS_PRG_DONE BIT(13) #define 
JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12) #define JZ_MMC_STATUS_END_CMD_RES BIT(11) #define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10) #define JZ_MMC_STATUS_IS_READWAIT BIT(9) #define JZ_MMC_STATUS_CLK_EN BIT(8) #define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7) #define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6) #define JZ_MMC_STATUS_CRC_RES_ERR BIT(5) #define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4) #define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3) #define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2) #define JZ_MMC_STATUS_TIMEOUT_RES BIT(1) #define JZ_MMC_STATUS_TIMEOUT_READ BIT(0) #define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0)) #define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2)) #define JZ_MMC_CMDAT_IO_ABORT BIT(11) #define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10) #define JZ_MMC_CMDAT_DMA_EN BIT(8) #define JZ_MMC_CMDAT_INIT BIT(7) #define JZ_MMC_CMDAT_BUSY BIT(6) #define JZ_MMC_CMDAT_STREAM BIT(5) #define JZ_MMC_CMDAT_WRITE BIT(4) #define JZ_MMC_CMDAT_DATA_EN BIT(3) #define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0)) #define JZ_MMC_CMDAT_RSP_R1 1 #define JZ_MMC_CMDAT_RSP_R2 2 #define JZ_MMC_CMDAT_RSP_R3 3 #define JZ_MMC_IRQ_SDIO BIT(7) #define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6) #define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5) #define JZ_MMC_IRQ_END_CMD_RES BIT(2) #define JZ_MMC_IRQ_PRG_DONE BIT(1) #define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0) #define JZ_MMC_CLK_RATE 24000000 enum jz4740_mmc_state { JZ4740_MMC_STATE_READ_RESPONSE, JZ4740_MMC_STATE_TRANSFER_DATA, JZ4740_MMC_STATE_SEND_STOP, JZ4740_MMC_STATE_DONE, }; struct jz4740_mmc_host { struct mmc_host *mmc; struct platform_device *pdev; struct jz4740_mmc_platform_data *pdata; struct clk *clk; int irq; int card_detect_irq; struct resource *mem; void __iomem *base; struct mmc_request *req; struct mmc_command *cmd; unsigned long waiting; uint32_t cmdat; uint16_t irq_mask; spinlock_t lock; struct timer_list timeout_timer; struct sg_mapping_iter miter; enum jz4740_mmc_state state; }; static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host, 
unsigned int irq, bool enabled) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (enabled) host->irq_mask &= ~irq; else host->irq_mask |= irq; spin_unlock_irqrestore(&host->lock, flags); writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK); } static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host, bool start_transfer) { uint16_t val = JZ_MMC_STRPCL_CLOCK_START; if (start_transfer) val |= JZ_MMC_STRPCL_START_OP; writew(val, host->base + JZ_REG_MMC_STRPCL); } static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host) { uint32_t status; unsigned int timeout = 1000; writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL); do { status = readl(host->base + JZ_REG_MMC_STATUS); } while (status & JZ_MMC_STATUS_CLK_EN && --timeout); } static void jz4740_mmc_reset(struct jz4740_mmc_host *host) { uint32_t status; unsigned int timeout = 1000; writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL); udelay(10); do { status = readl(host->base + JZ_REG_MMC_STATUS); } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout); } static void jz4740_mmc_request_done(struct jz4740_mmc_host *host) { struct mmc_request *req; req = host->req; host->req = NULL; mmc_request_done(host->mmc, req); } static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host, unsigned int irq) { unsigned int timeout = 0x800; uint16_t status; do { status = readw(host->base + JZ_REG_MMC_IREG); } while (!(status & irq) && --timeout); if (timeout == 0) { set_bit(0, &host->waiting); mod_timer(&host->timeout_timer, jiffies + 5*HZ); jz4740_mmc_set_irq_enabled(host, irq, true); return true; } return false; } static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host, struct mmc_data *data) { int status; status = readl(host->base + JZ_REG_MMC_STATUS); if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) { if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) { host->req->cmd->error = -ETIMEDOUT; data->error = -ETIMEDOUT; } else { host->req->cmd->error = -EIO; 
data->error = -EIO; } } } static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host, struct mmc_data *data) { struct sg_mapping_iter *miter = &host->miter; void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO; uint32_t *buf; bool timeout; size_t i, j; while (sg_miter_next(miter)) { buf = miter->addr; i = miter->length / 4; j = i / 8; i = i & 0x7; while (j) { timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ); if (unlikely(timeout)) goto poll_timeout; writel(buf[0], fifo_addr); writel(buf[1], fifo_addr); writel(buf[2], fifo_addr); writel(buf[3], fifo_addr); writel(buf[4], fifo_addr); writel(buf[5], fifo_addr); writel(buf[6], fifo_addr); writel(buf[7], fifo_addr); buf += 8; --j; } if (unlikely(i)) { timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ); if (unlikely(timeout)) goto poll_timeout; while (i) { writel(*buf, fifo_addr); ++buf; --i; } } data->bytes_xfered += miter->length; } sg_miter_stop(miter); return false; poll_timeout: miter->consumed = (void *)buf - miter->addr; data->bytes_xfered += miter->consumed; sg_miter_stop(miter); return true; } static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host, struct mmc_data *data) { struct sg_mapping_iter *miter = &host->miter; void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO; uint32_t *buf; uint32_t d; uint16_t status; size_t i, j; unsigned int timeout; while (sg_miter_next(miter)) { buf = miter->addr; i = miter->length; j = i / 32; i = i & 0x1f; while (j) { timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ); if (unlikely(timeout)) goto poll_timeout; buf[0] = readl(fifo_addr); buf[1] = readl(fifo_addr); buf[2] = readl(fifo_addr); buf[3] = readl(fifo_addr); buf[4] = readl(fifo_addr); buf[5] = readl(fifo_addr); buf[6] = readl(fifo_addr); buf[7] = readl(fifo_addr); buf += 8; --j; } if (unlikely(i)) { timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ); if (unlikely(timeout)) goto poll_timeout; while (i >= 4) { *buf++ = readl(fifo_addr); i -= 4; } if 
(unlikely(i > 0)) { d = readl(fifo_addr); memcpy(buf, &d, i); } } data->bytes_xfered += miter->length; /* This can go away once MIPS implements * flush_kernel_dcache_page */ flush_dcache_page(miter->page); } sg_miter_stop(miter); /* For whatever reason there is sometime one word more in the fifo then * requested */ timeout = 1000; status = readl(host->base + JZ_REG_MMC_STATUS); while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) { d = readl(fifo_addr); status = readl(host->base + JZ_REG_MMC_STATUS); } return false; poll_timeout: miter->consumed = (void *)buf - miter->addr; data->bytes_xfered += miter->consumed; sg_miter_stop(miter); return true; } static void jz4740_mmc_timeout(unsigned long data) { struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data; if (!test_and_clear_bit(0, &host->waiting)) return; jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false); host->req->cmd->error = -ETIMEDOUT; jz4740_mmc_request_done(host); } static void jz4740_mmc_read_response(struct jz4740_mmc_host *host, struct mmc_command *cmd) { int i; uint16_t tmp; void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO; if (cmd->flags & MMC_RSP_136) { tmp = readw(fifo_addr); for (i = 0; i < 4; ++i) { cmd->resp[i] = tmp << 24; tmp = readw(fifo_addr); cmd->resp[i] |= tmp << 8; tmp = readw(fifo_addr); cmd->resp[i] |= tmp >> 8; } } else { cmd->resp[0] = readw(fifo_addr) << 24; cmd->resp[0] |= readw(fifo_addr) << 8; cmd->resp[0] |= readw(fifo_addr) & 0xff; } } static void jz4740_mmc_send_command(struct jz4740_mmc_host *host, struct mmc_command *cmd) { uint32_t cmdat = host->cmdat; host->cmdat &= ~JZ_MMC_CMDAT_INIT; jz4740_mmc_clock_disable(host); host->cmd = cmd; if (cmd->flags & MMC_RSP_BUSY) cmdat |= JZ_MMC_CMDAT_BUSY; switch (mmc_resp_type(cmd)) { case MMC_RSP_R1B: case MMC_RSP_R1: cmdat |= JZ_MMC_CMDAT_RSP_R1; break; case MMC_RSP_R2: cmdat |= JZ_MMC_CMDAT_RSP_R2; break; case MMC_RSP_R3: cmdat |= JZ_MMC_CMDAT_RSP_R3; break; default: break; } if (cmd->data) { 
cmdat |= JZ_MMC_CMDAT_DATA_EN; if (cmd->data->flags & MMC_DATA_WRITE) cmdat |= JZ_MMC_CMDAT_WRITE; if (cmd->data->flags & MMC_DATA_STREAM) cmdat |= JZ_MMC_CMDAT_STREAM; writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN); writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB); } writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD); writel(cmd->arg, host->base + JZ_REG_MMC_ARG); writel(cmdat, host->base + JZ_REG_MMC_CMDAT); jz4740_mmc_clock_enable(host, 1); } static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host) { struct mmc_command *cmd = host->req->cmd; struct mmc_data *data = cmd->data; int direction; if (data->flags & MMC_DATA_READ) direction = SG_MITER_TO_SG; else direction = SG_MITER_FROM_SG; sg_miter_start(&host->miter, data->sg, data->sg_len, direction); } static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) { struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid; struct mmc_command *cmd = host->req->cmd; struct mmc_request *req = host->req; bool timeout = false; if (cmd->error) host->state = JZ4740_MMC_STATE_DONE; switch (host->state) { case JZ4740_MMC_STATE_READ_RESPONSE: if (cmd->flags & MMC_RSP_PRESENT) jz4740_mmc_read_response(host, cmd); if (!cmd->data) break; jz_mmc_prepare_data_transfer(host); case JZ4740_MMC_STATE_TRANSFER_DATA: if (cmd->data->flags & MMC_DATA_READ) timeout = jz4740_mmc_read_data(host, cmd->data); else timeout = jz4740_mmc_write_data(host, cmd->data); if (unlikely(timeout)) { host->state = JZ4740_MMC_STATE_TRANSFER_DATA; break; } jz4740_mmc_transfer_check_state(host, cmd->data); timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE); if (unlikely(timeout)) { host->state = JZ4740_MMC_STATE_SEND_STOP; break; } writew(JZ_MMC_IRQ_DATA_TRAN_DONE, host->base + JZ_REG_MMC_IREG); case JZ4740_MMC_STATE_SEND_STOP: if (!req->stop) break; jz4740_mmc_send_command(host, req->stop); timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_PRG_DONE); if (timeout) { host->state = JZ4740_MMC_STATE_DONE; break; } case 
JZ4740_MMC_STATE_DONE: break; } if (!timeout) jz4740_mmc_request_done(host); return IRQ_HANDLED; } static irqreturn_t jz_mmc_irq(int irq, void *devid) { struct jz4740_mmc_host *host = devid; struct mmc_command *cmd = host->cmd; uint16_t irq_reg, status, tmp; irq_reg = readw(host->base + JZ_REG_MMC_IREG); tmp = irq_reg; irq_reg &= ~host->irq_mask; tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ | JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE); if (tmp != irq_reg) writew(tmp & ~irq_reg, host->base + JZ_REG_MMC_IREG); if (irq_reg & JZ_MMC_IRQ_SDIO) { writew(JZ_MMC_IRQ_SDIO, host->base + JZ_REG_MMC_IREG); mmc_signal_sdio_irq(host->mmc); irq_reg &= ~JZ_MMC_IRQ_SDIO; } if (host->req && cmd && irq_reg) { if (test_and_clear_bit(0, &host->waiting)) { del_timer(&host->timeout_timer); status = readl(host->base + JZ_REG_MMC_STATUS); if (status & JZ_MMC_STATUS_TIMEOUT_RES) { cmd->error = -ETIMEDOUT; } else if (status & JZ_MMC_STATUS_CRC_RES_ERR) { cmd->error = -EIO; } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR | JZ_MMC_STATUS_CRC_WRITE_ERROR)) { if (cmd->data) cmd->data->error = -EIO; cmd->error = -EIO; } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR | JZ_MMC_STATUS_CRC_WRITE_ERROR)) { if (cmd->data) cmd->data->error = -EIO; cmd->error = -EIO; } jz4740_mmc_set_irq_enabled(host, irq_reg, false); writew(irq_reg, host->base + JZ_REG_MMC_IREG); return IRQ_WAKE_THREAD; } } return IRQ_HANDLED; } static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate) { int div = 0; int real_rate; jz4740_mmc_clock_disable(host); clk_set_rate(host->clk, JZ_MMC_CLK_RATE); real_rate = clk_get_rate(host->clk); while (real_rate > rate && div < 7) { ++div; real_rate >>= 1; } writew(div, host->base + JZ_REG_MMC_CLKRT); return real_rate; } static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req) { struct jz4740_mmc_host *host = mmc_priv(mmc); host->req = req; writew(0xffff, host->base + JZ_REG_MMC_IREG); writew(JZ_MMC_IRQ_END_CMD_RES, host->base 
+ JZ_REG_MMC_IREG); jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true); host->state = JZ4740_MMC_STATE_READ_RESPONSE; set_bit(0, &host->waiting); mod_timer(&host->timeout_timer, jiffies + 5*HZ); jz4740_mmc_send_command(host, req->cmd); } static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct jz4740_mmc_host *host = mmc_priv(mmc); if (ios->clock) jz4740_mmc_set_clock_rate(host, ios->clock); switch (ios->power_mode) { case MMC_POWER_UP: jz4740_mmc_reset(host); if (gpio_is_valid(host->pdata->gpio_power)) gpio_set_value(host->pdata->gpio_power, !host->pdata->power_active_low); host->cmdat |= JZ_MMC_CMDAT_INIT; clk_enable(host->clk); break; case MMC_POWER_ON: break; default: if (gpio_is_valid(host->pdata->gpio_power)) gpio_set_value(host->pdata->gpio_power, host->pdata->power_active_low); clk_disable(host->clk); break; } switch (ios->bus_width) { case MMC_BUS_WIDTH_1: host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT; break; case MMC_BUS_WIDTH_4: host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT; break; default: break; } } static int jz4740_mmc_get_ro(struct mmc_host *mmc) { struct jz4740_mmc_host *host = mmc_priv(mmc); if (!gpio_is_valid(host->pdata->gpio_read_only)) return -ENOSYS; return gpio_get_value(host->pdata->gpio_read_only) ^ host->pdata->read_only_active_low; } static int jz4740_mmc_get_cd(struct mmc_host *mmc) { struct jz4740_mmc_host *host = mmc_priv(mmc); if (!gpio_is_valid(host->pdata->gpio_card_detect)) return -ENOSYS; return gpio_get_value(host->pdata->gpio_card_detect) ^ host->pdata->card_detect_active_low; } static irqreturn_t jz4740_mmc_card_detect_irq(int irq, void *devid) { struct jz4740_mmc_host *host = devid; mmc_detect_change(host->mmc, HZ / 2); return IRQ_HANDLED; } static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct jz4740_mmc_host *host = mmc_priv(mmc); jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable); } static const struct mmc_host_ops jz4740_mmc_ops = { .request = 
jz4740_mmc_request, .set_ios = jz4740_mmc_set_ios, .get_ro = jz4740_mmc_get_ro, .get_cd = jz4740_mmc_get_cd, .enable_sdio_irq = jz4740_mmc_enable_sdio_irq, }; static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = { JZ_GPIO_BULK_PIN(MSC_CMD), JZ_GPIO_BULK_PIN(MSC_CLK), JZ_GPIO_BULK_PIN(MSC_DATA0), JZ_GPIO_BULK_PIN(MSC_DATA1), JZ_GPIO_BULK_PIN(MSC_DATA2), JZ_GPIO_BULK_PIN(MSC_DATA3), }; static int __devinit jz4740_mmc_request_gpio(struct device *dev, int gpio, const char *name, bool output, int value) { int ret; if (!gpio_is_valid(gpio)) return 0; ret = gpio_request(gpio, name); if (ret) { dev_err(dev, "Failed to request %s gpio: %d\n", name, ret); return ret; } if (output) gpio_direction_output(gpio, value); else gpio_direction_input(gpio); return 0; } static int __devinit jz4740_mmc_request_gpios(struct platform_device *pdev) { int ret; struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; if (!pdata) return 0; ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_card_detect, "MMC detect change", false, 0); if (ret) goto err; ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_read_only, "MMC read only", false, 0); if (ret) goto err_free_gpio_card_detect; ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power, "MMC read only", true, pdata->power_active_low); if (ret) goto err_free_gpio_read_only; return 0; err_free_gpio_read_only: if (gpio_is_valid(pdata->gpio_read_only)) gpio_free(pdata->gpio_read_only); err_free_gpio_card_detect: if (gpio_is_valid(pdata->gpio_card_detect)) gpio_free(pdata->gpio_card_detect); err: return ret; } static int __devinit jz4740_mmc_request_cd_irq(struct platform_device *pdev, struct jz4740_mmc_host *host) { struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; if (!gpio_is_valid(pdata->gpio_card_detect)) return 0; host->card_detect_irq = gpio_to_irq(pdata->gpio_card_detect); if (host->card_detect_irq < 0) { dev_warn(&pdev->dev, "Failed to get card detect irq\n"); return 0; } return 
request_irq(host->card_detect_irq, jz4740_mmc_card_detect_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "MMC card detect", host); } static void jz4740_mmc_free_gpios(struct platform_device *pdev) { struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; if (!pdata) return; if (gpio_is_valid(pdata->gpio_power)) gpio_free(pdata->gpio_power); if (gpio_is_valid(pdata->gpio_read_only)) gpio_free(pdata->gpio_read_only); if (gpio_is_valid(pdata->gpio_card_detect)) gpio_free(pdata->gpio_card_detect); } static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host) { size_t num_pins = ARRAY_SIZE(jz4740_mmc_pins); if (host->pdata && host->pdata->data_1bit) num_pins -= 3; return num_pins; } static int __devinit jz4740_mmc_probe(struct platform_device* pdev) { int ret; struct mmc_host *mmc; struct jz4740_mmc_host *host; struct jz4740_mmc_platform_data *pdata; pdata = pdev->dev.platform_data; mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev); if (!mmc) { dev_err(&pdev->dev, "Failed to alloc mmc host structure\n"); return -ENOMEM; } host = mmc_priv(mmc); host->pdata = pdata; host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { ret = host->irq; dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret); goto err_free_host; } host->clk = clk_get(&pdev->dev, "mmc"); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); dev_err(&pdev->dev, "Failed to get mmc clock\n"); goto err_free_host; } host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!host->mem) { ret = -ENOENT; dev_err(&pdev->dev, "Failed to get base platform memory\n"); goto err_clk_put; } host->mem = request_mem_region(host->mem->start, resource_size(host->mem), pdev->name); if (!host->mem) { ret = -EBUSY; dev_err(&pdev->dev, "Failed to request base memory region\n"); goto err_clk_put; } host->base = ioremap_nocache(host->mem->start, resource_size(host->mem)); if (!host->base) { ret = -EBUSY; dev_err(&pdev->dev, "Failed to ioremap base memory\n"); goto 
err_release_mem_region; } ret = jz_gpio_bulk_request(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); if (ret) { dev_err(&pdev->dev, "Failed to request mmc pins: %d\n", ret); goto err_iounmap; } ret = jz4740_mmc_request_gpios(pdev); if (ret) goto err_gpio_bulk_free; mmc->ops = &jz4740_mmc_ops; mmc->f_min = JZ_MMC_CLK_RATE / 128; mmc->f_max = JZ_MMC_CLK_RATE; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = (pdata && pdata->data_1bit) ? 0 : MMC_CAP_4_BIT_DATA; mmc->caps |= MMC_CAP_SDIO_IRQ; mmc->max_blk_size = (1 << 10) - 1; mmc->max_blk_count = (1 << 15) - 1; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_segs = 128; mmc->max_seg_size = mmc->max_req_size; host->mmc = mmc; host->pdev = pdev; spin_lock_init(&host->lock); host->irq_mask = 0xffff; ret = jz4740_mmc_request_cd_irq(pdev, host); if (ret) { dev_err(&pdev->dev, "Failed to request card detect irq\n"); goto err_free_gpios; } ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0, dev_name(&pdev->dev), host); if (ret) { dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); goto err_free_card_detect_irq; } jz4740_mmc_reset(host); jz4740_mmc_clock_disable(host); setup_timer(&host->timeout_timer, jz4740_mmc_timeout, (unsigned long)host); /* It is not important when it times out, it just needs to timeout. 
*/ set_timer_slack(&host->timeout_timer, HZ); platform_set_drvdata(pdev, host); ret = mmc_add_host(mmc); if (ret) { dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret); goto err_free_irq; } dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n"); return 0; err_free_irq: free_irq(host->irq, host); err_free_card_detect_irq: if (host->card_detect_irq >= 0) free_irq(host->card_detect_irq, host); err_free_gpios: jz4740_mmc_free_gpios(pdev); err_gpio_bulk_free: jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); err_iounmap: iounmap(host->base); err_release_mem_region: release_mem_region(host->mem->start, resource_size(host->mem)); err_clk_put: clk_put(host->clk); err_free_host: platform_set_drvdata(pdev, NULL); mmc_free_host(mmc); return ret; } static int __devexit jz4740_mmc_remove(struct platform_device *pdev) { struct jz4740_mmc_host *host = platform_get_drvdata(pdev); del_timer_sync(&host->timeout_timer); jz4740_mmc_set_irq_enabled(host, 0xff, false); jz4740_mmc_reset(host); mmc_remove_host(host->mmc); free_irq(host->irq, host); if (host->card_detect_irq >= 0) free_irq(host->card_detect_irq, host); jz4740_mmc_free_gpios(pdev); jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); iounmap(host->base); release_mem_region(host->mem->start, resource_size(host->mem)); clk_put(host->clk); platform_set_drvdata(pdev, NULL); mmc_free_host(host->mmc); return 0; } #ifdef CONFIG_PM static int jz4740_mmc_suspend(struct device *dev) { struct jz4740_mmc_host *host = dev_get_drvdata(dev); mmc_suspend_host(host->mmc); jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); return 0; } static int jz4740_mmc_resume(struct device *dev) { struct jz4740_mmc_host *host = dev_get_drvdata(dev); jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); mmc_resume_host(host->mmc); return 0; } const struct dev_pm_ops jz4740_mmc_pm_ops = { .suspend = jz4740_mmc_suspend, .resume = jz4740_mmc_resume, .poweroff = jz4740_mmc_suspend, .restore = 
jz4740_mmc_resume, }; #define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops) #else #define JZ4740_MMC_PM_OPS NULL #endif static struct platform_driver jz4740_mmc_driver = { .probe = jz4740_mmc_probe, .remove = __devexit_p(jz4740_mmc_remove), .driver = { .name = "jz4740-mmc", .owner = THIS_MODULE, .pm = JZ4740_MMC_PM_OPS, }, }; static int __init jz4740_mmc_init(void) { return platform_driver_register(&jz4740_mmc_driver); } module_init(jz4740_mmc_init); static void __exit jz4740_mmc_exit(void) { platform_driver_unregister(&jz4740_mmc_driver); } module_exit(jz4740_mmc_exit); MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
gpl-2.0
garwynn/SC02E_LJF_Kernel
arch/um/kernel/process.c
3469
9603
/*
 * UML (User-Mode Linux) core process/thread management: context switch,
 * kernel-thread creation, the idle loop, and the /proc/sysemu interface.
 * (Identity inferred from content — arch/um/kernel/process.c; the metadata
 * filename is unknown. TODO confirm against the originating tree.)
 *
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

/* Host-side pid of the userspace helper process for this CPU. */
static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

/*
 * Map a host pid back to the CPU index recorded in cpu_tasks[],
 * or -1 if no CPU currently runs that pid.
 */
int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

/* Release a kernel stack previously obtained from alloc_stack(). */
void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

/*
 * Allocate a kernel stack of 2^order pages.  @atomic selects GFP_ATOMIC
 * for callers that cannot sleep.  Returns 0 on allocation failure.
 */
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

/*
 * Create a kernel thread running fn(arg).  The function/argument are
 * stashed in current->thread.request and picked up by
 * new_thread_handler() in the child after do_fork().
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}

/* Record @task (and its host pid) as the current task for its CPU slot. */
static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

/*
 * The UML context switch.  Loops because a switched-to thread may itself
 * have a saved_task to hand control to (the saved_task mechanism); returns
 * the previously scheduled task for schedule_tail().
 */
void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to = next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		to = current->thread.saved_task;
		from = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}

/* Run deferred work (reschedule / signals) before returning to userspace. */
void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

/* Nothing to tear down per-thread on UML. */
void exit_thread(void)
{
}

void *get_current(void)
{
	return current;
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.  Entry point of a fresh kernel thread: runs the
 * function stashed by kernel_thread().
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}

/*
 * Called magically, see new_thread_handler above.  First code run in a
 * forked user process; drops straight into userspace().
 */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

/*
 * Set up a child's thread state.  Forked user processes copy the parent's
 * registers and resume in fork_handler(); kernel threads get a safe
 * register set and start in new_thread_handler().
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct * p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	}
	else {
		get_safe_registers(p->thread.regs.regs.gp);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

/* Run @proc in the initial thread context; kmalloc is off while it runs. */
void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

/* UML idle loop: stop the tick, sleep the host until the next timer. */
void default_idle(void)
{
	unsigned long long nsecs;

	while (1) {
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		tick_nohz_stop_sched_tick(1);
		nsecs = disable_timer();
		idle_sleep(nsecs);
		tick_nohz_restart_sched_tick();
	}
}

void cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	default_idle();
}

/* Nonzero if the caller must not sleep (atomic/irq/interrupt context). */
int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

/* Nonzero if @sp lies outside the current kernel stack, i.e. userspace. */
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

/* Invoke the UML exitcalls in reverse registration order. */
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}

/* Thin uaccess wrappers exported for use from UML OS-side code. */
int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

/* SIGIO on SMP: dispatch the IPI; returns 1 on non-boot CPUs. */
int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

/* sysemu mode: 0..2, capped at what the host ptrace supports. */
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

/* Register /proc/sysemu (mode 0600) when sysemu is supported. */
int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;
	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL)
	{
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

/*
 * 0 = not single-stepping, 1 = stepping into a syscall,
 * 2 = ordinary PT_DTRACE single-step.
 */
int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

/*
 * Walk @p's kernel stack to find the first return address above the
 * scheduler — the classic "where is it sleeping" heuristic for /proc.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

/* Dump the host-side FP registers of @t's userspace process for a core. */
int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}
gpl-2.0
sebirdman/m7_kernel_dev
kernel/compat.c
4749
31208
/*
 *  linux/kernel/compat.c
 *
 *  Kernel compatibility routines for e.g. 32 bit syscall support
 *  on 64 bit kernels.
 *
 *  Copyright (C) 2002-2003 Stephen Rothwell, IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/sched.h>	/* for MAX_SCHEDULE_TIMEOUT */
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
#include <linux/export.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>
#include <linux/times.h>
#include <linux/ptrace.h>
#include <linux/gfp.h>

#include <asm/uaccess.h>

/*
 * Get/set struct timeval with struct timespec on the native side
 */
static int compat_get_timeval_convert(struct timespec *o,
				      struct compat_timeval __user *i)
{
	long usec;

	if (get_user(o->tv_sec, &i->tv_sec) ||
	    get_user(usec, &i->tv_usec))
		return -EFAULT;
	o->tv_nsec = usec * 1000;
	return 0;
}

static int compat_put_timeval_convert(struct compat_timeval __user *o,
				      struct timeval *i)
{
	return (put_user(i->tv_sec, &o->tv_sec) ||
		put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
}

/*
 * Copy a compat struct timex from user space into a native one.
 * Note: txc->tai is not read here; it is output-only (see compat_put_timex).
 */
static int compat_get_timex(struct timex *txc,
			    struct compat_timex __user *utp)
{
	memset(txc, 0, sizeof(struct timex));

	if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
			__get_user(txc->modes, &utp->modes) ||
			__get_user(txc->offset, &utp->offset) ||
			__get_user(txc->freq, &utp->freq) ||
			__get_user(txc->maxerror, &utp->maxerror) ||
			__get_user(txc->esterror, &utp->esterror) ||
			__get_user(txc->status, &utp->status) ||
			__get_user(txc->constant, &utp->constant) ||
			__get_user(txc->precision, &utp->precision) ||
			__get_user(txc->tolerance, &utp->tolerance) ||
			__get_user(txc->time.tv_sec, &utp->time.tv_sec) ||
			__get_user(txc->time.tv_usec, &utp->time.tv_usec) ||
			__get_user(txc->tick, &utp->tick) ||
			__get_user(txc->ppsfreq, &utp->ppsfreq) ||
			__get_user(txc->jitter, &utp->jitter) ||
			__get_user(txc->shift, &utp->shift) ||
			__get_user(txc->stabil, &utp->stabil) ||
			__get_user(txc->jitcnt, &utp->jitcnt) ||
			__get_user(txc->calcnt, &utp->calcnt) ||
			__get_user(txc->errcnt, &utp->errcnt) ||
			__get_user(txc->stbcnt, &utp->stbcnt))
		return -EFAULT;

	return 0;
}

/* Copy a native struct timex back to a compat one in user space. */
static int compat_put_timex(struct compat_timex __user *utp,
			    struct timex *txc)
{
	if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
			__put_user(txc->modes, &utp->modes) ||
			__put_user(txc->offset, &utp->offset) ||
			__put_user(txc->freq, &utp->freq) ||
			__put_user(txc->maxerror, &utp->maxerror) ||
			__put_user(txc->esterror, &utp->esterror) ||
			__put_user(txc->status, &utp->status) ||
			__put_user(txc->constant, &utp->constant) ||
			__put_user(txc->precision, &utp->precision) ||
			__put_user(txc->tolerance, &utp->tolerance) ||
			__put_user(txc->time.tv_sec, &utp->time.tv_sec) ||
			__put_user(txc->time.tv_usec, &utp->time.tv_usec) ||
			__put_user(txc->tick, &utp->tick) ||
			__put_user(txc->ppsfreq, &utp->ppsfreq) ||
			__put_user(txc->jitter, &utp->jitter) ||
			__put_user(txc->shift, &utp->shift) ||
			__put_user(txc->stabil, &utp->stabil) ||
			__put_user(txc->jitcnt, &utp->jitcnt) ||
			__put_user(txc->calcnt, &utp->calcnt) ||
			__put_user(txc->errcnt, &utp->errcnt) ||
			__put_user(txc->stbcnt, &utp->stbcnt) ||
			__put_user(txc->tai, &utp->tai))
		return -EFAULT;
	return 0;
}

/* 32-bit gettimeofday(2): fill compat timeval and/or native timezone. */
asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz)
{
	if (tv) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (compat_put_timeval_convert(tv, &ktv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}

	return 0;
}

/* 32-bit settimeofday(2): convert then defer to do_sys_settimeofday(). */
asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
		struct timezone __user *tz)
{
	struct timespec kts;
	struct timezone ktz;

	if (tv) {
		if (compat_get_timeval_convert(&kts, tv))
			return -EFAULT;
	}
	if (tz) {
		if (copy_from_user(&ktz, tz, sizeof(ktz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}

/* Field-wise conversions between native and compat timeval/timespec. */
int get_compat_timeval(struct timeval *tv, const struct compat_timeval __user *ctv)
{
	return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
			__get_user(tv->tv_sec, &ctv->tv_sec) ||
			__get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(get_compat_timeval);

int put_compat_timeval(const struct timeval *tv, struct compat_timeval __user *ctv)
{
	return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) ||
			__put_user(tv->tv_sec, &ctv->tv_sec) ||
			__put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_compat_timeval);

int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
			__get_user(ts->tv_sec, &cts->tv_sec) ||
			__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(get_compat_timespec);

int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
{
	return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
			__put_user(ts->tv_sec, &cts->tv_sec) ||
			__put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_compat_timespec);

/*
 * The compat_{get,put}_time{val,spec} variants honour
 * COMPAT_USE_64BIT_TIME, copying raw 64-bit structs when set.
 */
int compat_get_timeval(struct timeval *tv, const void __user *utv)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0;
	else
		return get_compat_timeval(tv, utv);
}
EXPORT_SYMBOL_GPL(compat_get_timeval);

int compat_put_timeval(const struct timeval *tv, void __user *utv)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0;
	else
		return put_compat_timeval(tv, utv);
}
EXPORT_SYMBOL_GPL(compat_put_timeval);

int compat_get_timespec(struct timespec *ts, const void __user *uts)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_from_user(ts, uts, sizeof *ts) ? -EFAULT : 0;
	else
		return get_compat_timespec(ts, uts);
}
EXPORT_SYMBOL_GPL(compat_get_timespec);

int compat_put_timespec(const struct timespec *ts, void __user *uts)
{
	if (COMPAT_USE_64BIT_TIME)
		return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0;
	else
		return put_compat_timespec(ts, uts);
}
EXPORT_SYMBOL_GPL(compat_put_timespec);

/*
 * Restart a compat nanosleep: run the native restart under KERNEL_DS with
 * a kernel-side rmt buffer, then convert the remainder back to compat.
 */
static long compat_nanosleep_restart(struct restart_block *restart)
{
	struct compat_timespec __user *rmtp;
	struct timespec rmt;
	mm_segment_t oldfs;
	long ret;

	restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep_restart(restart);
	set_fs(oldfs);

	if (ret) {
		rmtp = restart->nanosleep.compat_rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}

/* 32-bit nanosleep(2); same set_fs trick to reuse hrtimer_nanosleep(). */
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
				     struct compat_timespec __user *rmtp)
{
	struct timespec tu, rmt;
	mm_segment_t oldfs;
	long ret;

	if (get_compat_timespec(&tu, rqtp))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep(&tu,
				rmtp ? (struct timespec __user *)&rmt : NULL,
				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	set_fs(oldfs);

	if (ret) {
		struct restart_block *restart
			= &current_thread_info()->restart_block;

		restart->fn = compat_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}

/* compat <-> native itimerval conversion helpers. */
static inline long get_compat_itimerval(struct itimerval *o,
		struct compat_itimerval __user *i)
{
	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
		(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
		 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
		 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
		 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

static inline long put_compat_itimerval(struct compat_itimerval __user *o,
		struct itimerval *i)
{
	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
		(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
		 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
		 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
		 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}

asmlinkage long compat_sys_getitimer(int which,
		struct compat_itimerval __user *it)
{
	struct itimerval kit;
	int error;

	error = do_getitimer(which, &kit);
	if (!error && put_compat_itimerval(it, &kit))
		error = -EFAULT;
	return error;
}

asmlinkage long compat_sys_setitimer(int which,
		struct compat_itimerval __user *in,
		struct compat_itimerval __user *out)
{
	struct itimerval kin, kout;
	int error;

	if (in) {
		if (get_compat_itimerval(&kin, in))
			return -EFAULT;
	} else
		memset(&kin, 0, sizeof(kin));

	error = do_setitimer(which, &kin, out ? &kout : NULL);
	if (error || !out)
		return error;
	if (put_compat_itimerval(out, &kout))
		return -EFAULT;
	return 0;
}

static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

/* 32-bit times(2): convert each clock_t field individually. */
asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

/*
 * Assumption: old_sigset_t and compat_old_sigset_t are both
 * types that can be passed to put_user()/get_user().
 */

asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
{
	old_sigset_t s;
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sigpending((old_sigset_t __user *) &s);
	set_fs(old_fs);
	if (ret == 0)
		ret = put_user(s, set);
	return ret;
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/*
 * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the
 * blocked set of signals to the supplied signal set
 */
static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set)
{
	memcpy(blocked->sig, &set, sizeof(set));
}

asmlinkage long compat_sys_sigprocmask(int how,
				       compat_old_sigset_t __user *nset,
				       compat_old_sigset_t __user *oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (get_user(new_set, nset))
			return -EFAULT;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			compat_sig_setmask(&new_blocked, new_set);
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (put_user(old_set, oset))
			return -EFAULT;
	}

	return 0;
}

#endif

/* 32-bit setrlimit(2); compat infinity maps to native RLIM_INFINITY. */
asmlinkage long compat_sys_setrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;

	if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
	    __get_user(r.rlim_cur, &rlim->rlim_cur) ||
	    __get_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;

	if (r.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	if (r.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	return do_prlimit(current, resource, &r, NULL);
}

#ifdef COMPAT_RLIM_OLD_INFINITY

asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_old_getrlimit(resource, &r);
	set_fs(old_fs);

	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

#endif

asmlinkage long compat_sys_getrlimit(unsigned int resource,
		struct compat_rlimit __user *rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r.rlim_cur = COMPAT_RLIM_INFINITY;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r.rlim_max = COMPAT_RLIM_INFINITY;

		if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
		    __put_user(r.rlim_cur, &rlim->rlim_cur) ||
		    __put_user(r.rlim_max, &rlim->rlim_max))
			return -EFAULT;
	}
	return ret;
}

/* Copy a native rusage to a compat rusage in user space, field by field. */
int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
	if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
	    __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
	    __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
	    __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
	    __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
	    __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
	    __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
	    __put_user(r->ru_idrss, &ru->ru_idrss) ||
	    __put_user(r->ru_isrss, &ru->ru_isrss) ||
	    __put_user(r->ru_minflt, &ru->ru_minflt) ||
	    __put_user(r->ru_majflt, &ru->ru_majflt) ||
	    __put_user(r->ru_nswap, &ru->ru_nswap) ||
	    __put_user(r->ru_inblock, &ru->ru_inblock) ||
	    __put_user(r->ru_oublock, &ru->ru_oublock) ||
	    __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
	    __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
	    __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
	    __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
	    __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
		return -EFAULT;
	return 0;
}

asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
{
	struct rusage r;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_getrusage(who, (struct rusage __user *) &r);
	set_fs(old_fs);

	if (ret)
		return ret;

	if (put_compat_rusage(&r, ru))
		return -EFAULT;

	return 0;
}

/* 32-bit wait4(2): native call under KERNEL_DS, then convert rusage. */
asmlinkage long
compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
	struct compat_rusage __user *ru)
{
	if (!ru) {
		return sys_wait4(pid, stat_addr, options, NULL);
	} else {
		struct rusage r;
		int ret;
		unsigned int status;
		mm_segment_t old_fs = get_fs();

		set_fs (KERNEL_DS);
		ret = sys_wait4(pid,
				(stat_addr ?
				 (unsigned int __user *) &status : NULL),
				options, (struct rusage __user *) &r);
		set_fs (old_fs);

		if (ret > 0) {
			if (put_compat_rusage(&r, ru))
				return -EFAULT;
			if (stat_addr && put_user(status, stat_addr))
				return -EFAULT;
		}
		return ret;
	}
}

/* 32-bit waitid(2): native call under KERNEL_DS, then 32-bit siginfo. */
asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
		struct compat_siginfo __user *uinfo, int options,
		struct compat_rusage __user *uru)
{
	siginfo_t info;
	struct rusage ru;
	long ret;
	mm_segment_t old_fs = get_fs();

	memset(&info, 0, sizeof(info));

	set_fs(KERNEL_DS);
	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
			 uru ? (struct rusage __user *)&ru : NULL);
	set_fs(old_fs);

	if ((ret < 0) || (info.si_signo == 0))
		return ret;

	if (uru) {
		ret = put_compat_rusage(&ru, uru);
		if (ret)
			return ret;
	}

	BUG_ON(info.si_code & __SI_MASK);
	info.si_code |= __SI_CHLD;
	return copy_siginfo_to_user32(uinfo, &info);
}

/*
 * Read a user-supplied compat cpumask of @len bytes into @new_mask,
 * zero-filling when the user buffer is shorter than the kernel mask.
 */
static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
				    unsigned len, struct cpumask *new_mask)
{
	unsigned long *k;

	if (len < cpumask_size())
		memset(new_mask, 0, cpumask_size());
	else if (len > cpumask_size())
		len = cpumask_size();

	k = cpumask_bits(new_mask);
	return compat_get_bitmap(k, user_mask_ptr, len * 8);
}

asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
					     unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval)
		goto out;

	retval = sched_setaffinity(pid, new_mask);
out:
	free_cpumask_var(new_mask);
	return retval;
}

asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
					     compat_ulong_t __user *user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(compat_ulong_t)-1))
		return -EINVAL;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());

		if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

/* compat <-> native itimerspec conversion (two timespecs each way). */
int get_compat_itimerspec(struct itimerspec *dst,
			  const struct compat_itimerspec __user *src)
{
	if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
	    get_compat_timespec(&dst->it_value, &src->it_value))
		return -EFAULT;
	return 0;
}

int put_compat_itimerspec(struct compat_itimerspec __user *dst,
			  const struct itimerspec *src)
{
	if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
	    put_compat_timespec(&src->it_value, &dst->it_value))
		return -EFAULT;
	return 0;
}

/*
 * 32-bit timer_create(2): rewrite the compat sigevent into a native one
 * placed in compat_alloc_user_space(), then call the native syscall.
 */
long compat_sys_timer_create(clockid_t which_clock,
			struct compat_sigevent __user *timer_event_spec,
			timer_t __user *created_timer_id)
{
	struct sigevent __user *event = NULL;

	if (timer_event_spec) {
		struct sigevent kevent;

		event = compat_alloc_user_space(sizeof(*event));
		if (get_compat_sigevent(&kevent, timer_event_spec) ||
		    copy_to_user(event, &kevent, sizeof(*event)))
			return -EFAULT;
	}

	return sys_timer_create(which_clock, event, created_timer_id);
}

long compat_sys_timer_settime(timer_t timer_id, int flags,
			  struct compat_itimerspec __user *new,
			  struct compat_itimerspec __user *old)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec newts, oldts;

	if (!new)
		return -EINVAL;
	if (get_compat_itimerspec(&newts, new))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_settime(timer_id, flags,
				(struct itimerspec __user *) &newts,
				(struct itimerspec __user *) &oldts);
	set_fs(oldfs);
	if (!err && old && put_compat_itimerspec(old, &oldts))
		return -EFAULT;
	return err;
}

long compat_sys_timer_gettime(timer_t timer_id,
		struct compat_itimerspec __user *setting)
{
	long err;
	mm_segment_t oldfs;
	struct itimerspec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_timer_gettime(timer_id,
				(struct itimerspec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_itimerspec(setting, &ts))
		return -EFAULT;
	return err;
}
/* 32-bit clock_settime(2): convert, then native call under KERNEL_DS. */
long compat_sys_clock_settime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	if (get_compat_timespec(&ts, tp))
		return -EFAULT;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_settime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	return err;
}

long compat_sys_clock_gettime(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_gettime(which_clock,
				(struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}

/* 32-bit clock_adjtime(2): timex is copied back even on adjtime error. */
long compat_sys_clock_adjtime(clockid_t which_clock,
		struct compat_timex __user *utp)
{
	struct timex txc;
	mm_segment_t oldfs;
	int err, ret;

	err = compat_get_timex(&txc, utp);
	if (err)
		return err;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
	set_fs(oldfs);

	err = compat_put_timex(utp, &txc);
	if (err)
		return err;

	return ret;
}

long compat_sys_clock_getres(clockid_t which_clock,
		struct compat_timespec __user *tp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec ts;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_getres(which_clock,
			       (struct timespec __user *) &ts);
	set_fs(oldfs);
	if (!err && tp && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}

/*
 * Restart a compat clock_nanosleep; re-arms itself as the restart fn
 * when the sleep is interrupted again.
 */
static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
	long err;
	mm_segment_t oldfs;
	struct timespec tu;
	struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;

	restart->nanosleep.rmtp = (struct timespec __user *) &tu;
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = clock_nanosleep_restart(restart);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&tu, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart->fn = compat_clock_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;
	}
	return err;
}

long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
			    struct compat_timespec __user *rqtp,
			    struct compat_timespec __user *rmtp)
{
	long err;
	mm_segment_t oldfs;
	struct timespec in, out;
	struct restart_block *restart;

	if (get_compat_timespec(&in, rqtp))
		return -EFAULT;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	err = sys_clock_nanosleep(which_clock, flags,
				  (struct timespec __user *) &in,
				  (struct timespec __user *) &out);
	set_fs(oldfs);

	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
	    put_compat_timespec(&out, rmtp))
		return -EFAULT;

	if (err == -ERESTART_RESTARTBLOCK) {
		restart = &current_thread_info()->restart_block;
		restart->fn = compat_clock_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;
	}
	return err;
}

/*
 * We currently only need the following fields from the sigevent
 * structure: sigev_value, sigev_signo, sig_notify and (sometimes
 * sigev_notify_thread_id).  The others are handled in user mode.
 * We also assume that copying sigev_value.sival_int is sufficient
 * to keep all the bits of sigev_value.sival_ptr intact.
 */
int get_compat_sigevent(struct sigevent *event,
		const struct compat_sigevent __user *u_event)
{
	memset(event, 0, sizeof(*event));
	return (!access_ok(VERIFY_READ, u_event, sizeof(*u_event)) ||
		__get_user(event->sigev_value.sival_int,
			&u_event->sigev_value.sival_int) ||
		__get_user(event->sigev_signo, &u_event->sigev_signo) ||
		__get_user(event->sigev_notify, &u_event->sigev_notify) ||
		__get_user(event->sigev_notify_thread_id,
			&u_event->sigev_notify_thread_id))
		? -EFAULT : 0;
}

/*
 * Read a user-space bitmap of compat_ulong_t words into native
 * unsigned longs, zero-padding past the end of the user bitmap.
 */
long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = 0;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			/*
			 * We dont want to read past the end of the userspace
			 * bitmap. We must however ensure the end of the
			 * kernel bitmap is zeroed.
			 */
			if (nr_compat_longs-- > 0) {
				if (__get_user(um, umask))
					return -EFAULT;
			} else {
				um = 0;
			}

			umask++;
			m |= (long)um << (j * BITS_PER_COMPAT_LONG);
		}
		*mask++ = m;
	}

	return 0;
}

/* Inverse of compat_get_bitmap: write native longs as compat words. */
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
		       unsigned long bitmap_size)
{
	int i, j;
	unsigned long m;
	compat_ulong_t um;
	unsigned long nr_compat_longs;

	/* align bitmap up to nearest compat_long_t boundary */
	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);

	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
		return -EFAULT;

	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
		m = *mask++;

		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
			um = m;

			/*
			 * We dont want to write past the end of the userspace
			 * bitmap.
			 */
			if (nr_compat_longs-- > 0) {
				if (__put_user(um, umask))
					return -EFAULT;
			}

			umask++;
			/*
			 * Two half-width shifts: a single full-width shift
			 * would be undefined when sizeof(m) == sizeof(um).
			 */
			m >>= 4*sizeof(um);
			m >>= 4*sizeof(um);
		}
	}

	return 0;
}

/* Expand a compat sigset into a native one, word-pair by word-pair. */
void
sigset_from_compat (sigset_t *set, compat_sigset_t *compat)
{
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 );
		/* fall through */
	case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 );
		/* fall through */
	case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 );
		/* fall through */
	case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 );
	}
}
EXPORT_SYMBOL_GPL(sigset_from_compat);

asmlinkage long
compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
		struct compat_siginfo __user *uinfo,
		struct compat_timespec __user *uts, compat_size_t sigsetsize)
{
	compat_sigset_t s32;
	sigset_t s;
	struct timespec t;
	siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&s, &s32);

	if (uts) {
		if (get_compat_timespec(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

asmlinkage long
compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
			     struct compat_siginfo __user *uinfo)
{
	siginfo_t info;

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */

asmlinkage long compat_sys_time(compat_time_t __user * tloc)
{
	compat_time_t i;
	struct timeval tv;

	do_gettimeofday(&tv);
	i = tv.tv_sec;

	if (tloc) {
		if (put_user(i,tloc))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return i;
}

asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
{
	struct timespec tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime(&tv, NULL);
	if (err)
		return err;

	do_settimeofday(&tv);
	return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */

#ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
		compat_size_t sigsetsize)
{
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	current->saved_sigmask = current->blocked;
	set_current_blocked(&newset);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */

asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
{
	struct timex txc;
	int err, ret;

	err = compat_get_timex(&txc, utp);
	if (err)
		return err;

	ret = do_adjtimex(&txc);

	err = compat_put_timex(utp, &txc);
	if (err)
		return err;

	return ret;
}

#ifdef CONFIG_NUMA
/*
 * 32-bit move_pages(2): rewrite the compat pointer array into a native
 * one in compat_alloc_user_space() before calling the real syscall.
 */
asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
		compat_uptr_t __user *pages32,
		const int __user *nodes,
		int __user *status,
		int flags)
{
	const void __user * __user *pages;
	int i;

	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
	for (i = 0; i < nr_pages; i++) {
		compat_uptr_t p;

		if (get_user(p, pages32 + i) ||
			put_user(compat_ptr(p), pages + i))
			return -EFAULT;
	}
	return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
			compat_ulong_t maxnode,
			const compat_ulong_t __user *old_nodes,
			const compat_ulong_t __user *new_nodes)
{
	unsigned long __user *old = NULL;
	unsigned long __user *new = NULL;
	nodemask_t tmp_mask;
	unsigned long nr_bits;
	unsigned long size;

	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
	if (old_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
			return -EFAULT;
		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
		if (new_nodes)
			new = old + size / sizeof(unsigned long);
		if (copy_to_user(old, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	if (new_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
			return -EFAULT;
		if (new == NULL)
			new = compat_alloc_user_space(size);
		if (copy_to_user(new, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	return sys_migrate_pages(pid, nr_bits + 1, old, new);
}
#endif

/* 32-bit view of struct sysinfo; layout must match userspace ABI. */
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

asmlinkage long
compat_sys_sysinfo(struct compat_sysinfo __user *info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 *  down if needed
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user (s.uptime, &info->uptime) ||
	    __put_user (s.loads[0], &info->loads[0]) ||
	    __put_user (s.loads[1], &info->loads[1]) ||
	    __put_user (s.loads[2], &info->loads[2]) ||
	    __put_user (s.totalram, &info->totalram) ||
	    __put_user (s.freeram, &info->freeram) ||
	    __put_user (s.sharedram, &info->sharedram) ||
	    __put_user (s.bufferram, &info->bufferram) ||
	    __put_user (s.totalswap, &info->totalswap) ||
	    __put_user (s.freeswap, &info->freeswap) ||
	    __put_user (s.procs, &info->procs) ||
	    __put_user (s.totalhigh, &info->totalhigh) ||
	    __put_user (s.freehigh, &info->freehigh) ||
	    __put_user (s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}

/*
 * Allocate user-space memory for the duration of a single system call,
 * in order to marshall parameters inside a compat thunk.
 */
void __user *compat_alloc_user_space(unsigned long len)
{
	void __user *ptr;

	/* If len would occupy more than half of the entire compat space... */
	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
		return NULL;

	ptr = arch_compat_alloc_user_space(len);

	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
		return NULL;

	return ptr;
}
EXPORT_SYMBOL_GPL(compat_alloc_user_space);
gpl-2.0
TheNotOnly/linux-3.5
arch/arm/mach-mmp/brownstone.c
5005
5416
/* * linux/arch/arm/mach-mmp/brownstone.c * * Support for the Marvell Brownstone Development Platform. * * Copyright (C) 2009-2010 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * publishhed by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/regulator/machine.h> #include <linux/regulator/max8649.h> #include <linux/regulator/fixed.h> #include <linux/mfd/max8925.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/addr-map.h> #include <mach/mfp-mmp2.h> #include <mach/mmp2.h> #include <mach/irqs.h> #include "common.h" #define BROWNSTONE_NR_IRQS (MMP_NR_IRQS + 40) #define GPIO_5V_ENABLE (89) static unsigned long brownstone_pin_config[] __initdata = { /* UART1 */ GPIO29_UART1_RXD, GPIO30_UART1_TXD, /* UART3 */ GPIO51_UART3_RXD, GPIO52_UART3_TXD, /* DFI */ GPIO168_DFI_D0, GPIO167_DFI_D1, GPIO166_DFI_D2, GPIO165_DFI_D3, GPIO107_DFI_D4, GPIO106_DFI_D5, GPIO105_DFI_D6, GPIO104_DFI_D7, GPIO111_DFI_D8, GPIO164_DFI_D9, GPIO163_DFI_D10, GPIO162_DFI_D11, GPIO161_DFI_D12, GPIO110_DFI_D13, GPIO109_DFI_D14, GPIO108_DFI_D15, GPIO143_ND_nCS0, GPIO144_ND_nCS1, GPIO147_ND_nWE, GPIO148_ND_nRE, GPIO150_ND_ALE, GPIO149_ND_CLE, GPIO112_ND_RDY0, GPIO160_ND_RDY1, /* PMIC */ PMIC_PMIC_INT | MFP_LPM_EDGE_FALL, /* MMC0 */ GPIO131_MMC1_DAT3 | MFP_PULL_HIGH, GPIO132_MMC1_DAT2 | MFP_PULL_HIGH, GPIO133_MMC1_DAT1 | MFP_PULL_HIGH, GPIO134_MMC1_DAT0 | MFP_PULL_HIGH, GPIO136_MMC1_CMD | MFP_PULL_HIGH, GPIO139_MMC1_CLK, GPIO140_MMC1_CD | MFP_PULL_LOW, GPIO141_MMC1_WP | MFP_PULL_LOW, /* MMC1 */ GPIO37_MMC2_DAT3 | MFP_PULL_HIGH, GPIO38_MMC2_DAT2 | MFP_PULL_HIGH, GPIO39_MMC2_DAT1 | MFP_PULL_HIGH, GPIO40_MMC2_DAT0 | MFP_PULL_HIGH, GPIO41_MMC2_CMD | MFP_PULL_HIGH, GPIO42_MMC2_CLK, /* MMC2 */ GPIO165_MMC3_DAT7 | MFP_PULL_HIGH, GPIO162_MMC3_DAT6 | MFP_PULL_HIGH, 
GPIO166_MMC3_DAT5 | MFP_PULL_HIGH, GPIO163_MMC3_DAT4 | MFP_PULL_HIGH, GPIO167_MMC3_DAT3 | MFP_PULL_HIGH, GPIO164_MMC3_DAT2 | MFP_PULL_HIGH, GPIO168_MMC3_DAT1 | MFP_PULL_HIGH, GPIO111_MMC3_DAT0 | MFP_PULL_HIGH, GPIO112_MMC3_CMD | MFP_PULL_HIGH, GPIO151_MMC3_CLK, /* 5V regulator */ GPIO89_GPIO, }; static struct regulator_consumer_supply max8649_supply[] = { REGULATOR_SUPPLY("vcc_core", NULL), }; static struct regulator_init_data max8649_init_data = { .constraints = { .name = "vcc_core range", .min_uV = 1150000, .max_uV = 1280000, .always_on = 1, .boot_on = 1, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, }, .num_consumer_supplies = 1, .consumer_supplies = &max8649_supply[0], }; static struct max8649_platform_data brownstone_max8649_info = { .mode = 2, /* VID1 = 1, VID0 = 0 */ .extclk = 0, .ramp_timing = MAX8649_RAMP_32MV, .regulator = &max8649_init_data, }; static struct regulator_consumer_supply brownstone_v_5vp_supplies[] = { REGULATOR_SUPPLY("v_5vp", NULL), }; static struct regulator_init_data brownstone_v_5vp_data = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(brownstone_v_5vp_supplies), .consumer_supplies = brownstone_v_5vp_supplies, }; static struct fixed_voltage_config brownstone_v_5vp = { .supply_name = "v_5vp", .microvolts = 5000000, .gpio = GPIO_5V_ENABLE, .enable_high = 1, .enabled_at_boot = 1, .init_data = &brownstone_v_5vp_data, }; static struct platform_device brownstone_v_5vp_device = { .name = "reg-fixed-voltage", .id = 1, .dev = { .platform_data = &brownstone_v_5vp, }, }; static struct max8925_platform_data brownstone_max8925_info = { .irq_base = MMP_NR_IRQS, }; static struct i2c_board_info brownstone_twsi1_info[] = { [0] = { .type = "max8649", .addr = 0x60, .platform_data = &brownstone_max8649_info, }, [1] = { .type = "max8925", .addr = 0x3c, .irq = IRQ_MMP2_PMIC, .platform_data = &brownstone_max8925_info, }, }; static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = { .clk_delay_cycles = 0x1f, 
}; static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc2 = { .clk_delay_cycles = 0x1f, .flags = PXA_FLAG_CARD_PERMANENT | PXA_FLAG_SD_8_BIT_CAPABLE_SLOT, }; static struct sram_platdata mmp2_asram_platdata = { .pool_name = "asram", .granularity = SRAM_GRANULARITY, }; static struct sram_platdata mmp2_isram_platdata = { .pool_name = "isram", .granularity = SRAM_GRANULARITY, }; static void __init brownstone_init(void) { mfp_config(ARRAY_AND_SIZE(brownstone_pin_config)); /* on-chip devices */ mmp2_add_uart(1); mmp2_add_uart(3); platform_device_register(&mmp2_device_gpio); mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info)); mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */ mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */ mmp2_add_asram(&mmp2_asram_platdata); mmp2_add_isram(&mmp2_isram_platdata); /* enable 5v regulator */ platform_device_register(&brownstone_v_5vp_device); } MACHINE_START(BROWNSTONE, "Brownstone Development Platform") /* Maintainer: Haojian Zhuang <haojian.zhuang@marvell.com> */ .map_io = mmp_map_io, .nr_irqs = BROWNSTONE_NR_IRQS, .init_irq = mmp2_init_irq, .timer = &mmp2_timer, .init_machine = brownstone_init, .restart = mmp_restart, MACHINE_END
gpl-2.0
Kenepo/roots_kk_lge_msm8974
arch/arm/mach-ixp4xx/nslu2-setup.c
5005
7157
/* * arch/arm/mach-ixp4xx/nslu2-setup.c * * NSLU2 board-setup * * Copyright (C) 2008 Rod Whitby <rod@whitby.id.au> * * based on ixdp425-setup.c: * Copyright (C) 2003-2004 MontaVista Software, Inc. * based on nslu2-power.c: * Copyright (C) 2005 Tower Technologies * * Author: Mark Rakes <mrakes at mac.com> * Author: Rod Whitby <rod@whitby.id.au> * Author: Alessandro Zummo <a.zummo@towertech.it> * Maintainers: http://www.nslu2-linux.org/ * */ #include <linux/gpio.h> #include <linux/if_ether.h> #include <linux/irq.h> #include <linux/serial.h> #include <linux/serial_8250.h> #include <linux/leds.h> #include <linux/reboot.h> #include <linux/i2c.h> #include <linux/i2c-gpio.h> #include <linux/io.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/time.h> #define NSLU2_SDA_PIN 7 #define NSLU2_SCL_PIN 6 /* NSLU2 Timer */ #define NSLU2_FREQ 66000000 /* Buttons */ #define NSLU2_PB_GPIO 5 /* power button */ #define NSLU2_PO_GPIO 8 /* power off */ #define NSLU2_RB_GPIO 12 /* reset button */ /* Buzzer */ #define NSLU2_GPIO_BUZZ 4 /* LEDs */ #define NSLU2_LED_RED_GPIO 0 #define NSLU2_LED_GRN_GPIO 1 #define NSLU2_LED_DISK1_GPIO 3 #define NSLU2_LED_DISK2_GPIO 2 static struct flash_platform_data nslu2_flash_data = { .map_name = "cfi_probe", .width = 2, }; static struct resource nslu2_flash_resource = { .flags = IORESOURCE_MEM, }; static struct platform_device nslu2_flash = { .name = "IXP4XX-Flash", .id = 0, .dev.platform_data = &nslu2_flash_data, .num_resources = 1, .resource = &nslu2_flash_resource, }; static struct i2c_gpio_platform_data nslu2_i2c_gpio_data = { .sda_pin = NSLU2_SDA_PIN, .scl_pin = NSLU2_SCL_PIN, }; static struct i2c_board_info __initdata nslu2_i2c_board_info [] = { { I2C_BOARD_INFO("x1205", 0x6f), }, }; static struct gpio_led nslu2_led_pins[] = { { .name = "nslu2:green:ready", .gpio = NSLU2_LED_GRN_GPIO, }, { .name = "nslu2:red:status", .gpio = NSLU2_LED_RED_GPIO, }, { .name = "nslu2:green:disk-1", .gpio = 
NSLU2_LED_DISK1_GPIO, .active_low = true, }, { .name = "nslu2:green:disk-2", .gpio = NSLU2_LED_DISK2_GPIO, .active_low = true, }, }; static struct gpio_led_platform_data nslu2_led_data = { .num_leds = ARRAY_SIZE(nslu2_led_pins), .leds = nslu2_led_pins, }; static struct platform_device nslu2_leds = { .name = "leds-gpio", .id = -1, .dev.platform_data = &nslu2_led_data, }; static struct platform_device nslu2_i2c_gpio = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = &nslu2_i2c_gpio_data, }, }; static struct platform_device nslu2_beeper = { .name = "ixp4xx-beeper", .id = NSLU2_GPIO_BUZZ, .num_resources = 0, }; static struct resource nslu2_uart_resources[] = { { .start = IXP4XX_UART1_BASE_PHYS, .end = IXP4XX_UART1_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, }, { .start = IXP4XX_UART2_BASE_PHYS, .end = IXP4XX_UART2_BASE_PHYS + 0x0fff, .flags = IORESOURCE_MEM, } }; static struct plat_serial8250_port nslu2_uart_data[] = { { .mapbase = IXP4XX_UART1_BASE_PHYS, .membase = (char *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART1, .flags = UPF_BOOT_AUTOCONF, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { .mapbase = IXP4XX_UART2_BASE_PHYS, .membase = (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET, .irq = IRQ_IXP4XX_UART2, .flags = UPF_BOOT_AUTOCONF, .iotype = UPIO_MEM, .regshift = 2, .uartclk = IXP4XX_UART_XTAL, }, { } }; static struct platform_device nslu2_uart = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev.platform_data = nslu2_uart_data, .num_resources = 2, .resource = nslu2_uart_resources, }; /* Built-in 10/100 Ethernet MAC interfaces */ static struct eth_plat_info nslu2_plat_eth[] = { { .phy = 1, .rxq = 3, .txreadyq = 20, } }; static struct platform_device nslu2_eth[] = { { .name = "ixp4xx_eth", .id = IXP4XX_ETH_NPEB, .dev.platform_data = nslu2_plat_eth, } }; static struct platform_device *nslu2_devices[] __initdata = { &nslu2_i2c_gpio, &nslu2_flash, &nslu2_beeper, &nslu2_leds, &nslu2_eth[0], }; static void 
nslu2_power_off(void) { /* This causes the box to drop the power and go dead. */ /* enable the pwr cntl gpio */ gpio_line_config(NSLU2_PO_GPIO, IXP4XX_GPIO_OUT); /* do the deed */ gpio_line_set(NSLU2_PO_GPIO, IXP4XX_GPIO_HIGH); } static irqreturn_t nslu2_power_handler(int irq, void *dev_id) { /* Signal init to do the ctrlaltdel action, this will bypass init if * it hasn't started and do a kernel_restart. */ ctrl_alt_del(); return IRQ_HANDLED; } static irqreturn_t nslu2_reset_handler(int irq, void *dev_id) { /* This is the paper-clip reset, it shuts the machine down directly. */ machine_power_off(); return IRQ_HANDLED; } static void __init nslu2_timer_init(void) { /* The xtal on this machine is non-standard. */ ixp4xx_timer_freq = NSLU2_FREQ; /* Call standard timer_init function. */ ixp4xx_timer_init(); } static struct sys_timer nslu2_timer = { .init = nslu2_timer_init, }; static void __init nslu2_init(void) { uint8_t __iomem *f; int i; ixp4xx_sys_init(); nslu2_flash_resource.start = IXP4XX_EXP_BUS_BASE(0); nslu2_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1; i2c_register_board_info(0, nslu2_i2c_board_info, ARRAY_SIZE(nslu2_i2c_board_info)); /* * This is only useful on a modified machine, but it is valuable * to have it first in order to see debug messages, and so that * it does *not* get removed if platform_add_devices fails! 
*/ (void)platform_device_register(&nslu2_uart); platform_add_devices(nslu2_devices, ARRAY_SIZE(nslu2_devices)); pm_power_off = nslu2_power_off; if (request_irq(gpio_to_irq(NSLU2_RB_GPIO), &nslu2_reset_handler, IRQF_DISABLED | IRQF_TRIGGER_LOW, "NSLU2 reset button", NULL) < 0) { printk(KERN_DEBUG "Reset Button IRQ %d not available\n", gpio_to_irq(NSLU2_RB_GPIO)); } if (request_irq(gpio_to_irq(NSLU2_PB_GPIO), &nslu2_power_handler, IRQF_DISABLED | IRQF_TRIGGER_HIGH, "NSLU2 power button", NULL) < 0) { printk(KERN_DEBUG "Power Button IRQ %d not available\n", gpio_to_irq(NSLU2_PB_GPIO)); } /* * Map in a portion of the flash and read the MAC address. * Since it is stored in BE in the flash itself, we need to * byteswap it if we're in LE mode. */ f = ioremap(IXP4XX_EXP_BUS_BASE(0), 0x40000); if (f) { for (i = 0; i < 6; i++) #ifdef __ARMEB__ nslu2_plat_eth[0].hwaddr[i] = readb(f + 0x3FFB0 + i); #else nslu2_plat_eth[0].hwaddr[i] = readb(f + 0x3FFB0 + (i^3)); #endif iounmap(f); } printk(KERN_INFO "NSLU2: Using MAC address %pM for port 0\n", nslu2_plat_eth[0].hwaddr); } MACHINE_START(NSLU2, "Linksys NSLU2") /* Maintainer: www.nslu2-linux.org */ .atag_offset = 0x100, .map_io = ixp4xx_map_io, .init_early = ixp4xx_init_early, .init_irq = ixp4xx_init_irq, .timer = &nslu2_timer, .init_machine = nslu2_init, #if defined(CONFIG_PCI) .dma_zone_size = SZ_64M, #endif .restart = ixp4xx_restart, MACHINE_END
gpl-2.0
vakkov/android_kernel_samsung_tuna
drivers/crypto/caam/error.c
7053
8106
/* * CAAM Error Reporting * * Copyright 2009-2011 Freescale Semiconductor, Inc. */ #include "compat.h" #include "regs.h" #include "intern.h" #include "desc.h" #include "jr.h" #include "error.h" #define SPRINTFCAT(str, format, param, max_alloc) \ { \ char *tmp; \ \ tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ sprintf(tmp, format, param); \ strcat(str, tmp); \ kfree(tmp); \ } static void report_jump_idx(u32 status, char *outstr) { u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> JRSTA_DECOERR_INDEX_SHIFT; if (status & JRSTA_DECOERR_JUMP) strcat(outstr, "jump tgt desc idx "); else strcat(outstr, "desc idx "); SPRINTFCAT(outstr, "%d: ", idx, sizeof("255")); } static void report_ccb_status(u32 status, char *outstr) { char *cha_id_list[] = { "", "AES", "DES, 3DES", "ARC4", "MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512", "RNG", "SNOW f8", "Kasumi f8, f9", "All Public Key Algorithms", "CRC", "SNOW f9", }; char *err_id_list[] = { "None. No error.", "Mode error.", "Data size error.", "Key size error.", "PKHA A memory size error.", "PKHA B memory size error.", "Data arrived out of sequence error.", "PKHA divide-by-zero error.", "PKHA modulus even error.", "DES key parity error.", "ICV check failed.", "Hardware error.", "Unsupported CCM AAD size.", "Class 1 CHA is not reset", "Invalid CHA combination was selected", "Invalid CHA selected.", }; u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> JRSTA_CCBERR_CHAID_SHIFT; u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; report_jump_idx(status, outstr); if (cha_id < ARRAY_SIZE(cha_id_list)) { SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id], strlen(cha_id_list[cha_id])); } else { SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ", cha_id, sizeof("ff")); } if (err_id < ARRAY_SIZE(err_id_list)) { SPRINTFCAT(outstr, "%s", err_id_list[err_id], strlen(err_id_list[err_id])); } else { SPRINTFCAT(outstr, "unidentified err_id value 0x%02x", err_id, sizeof("ff")); } } static void report_jump_status(u32 status, char *outstr) { 
SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__)); } static void report_deco_status(u32 status, char *outstr) { const struct { u8 value; char *error_text; } desc_error_list[] = { { 0x00, "None. No error." }, { 0x01, "SGT Length Error. The descriptor is trying to read " "more data than is contained in the SGT table." }, { 0x02, "Reserved." }, { 0x03, "Job Ring Control Error. There is a bad value in the " "Job Ring Control register." }, { 0x04, "Invalid Descriptor Command. The Descriptor Command " "field is invalid." }, { 0x05, "Reserved." }, { 0x06, "Invalid KEY Command" }, { 0x07, "Invalid LOAD Command" }, { 0x08, "Invalid STORE Command" }, { 0x09, "Invalid OPERATION Command" }, { 0x0A, "Invalid FIFO LOAD Command" }, { 0x0B, "Invalid FIFO STORE Command" }, { 0x0C, "Invalid MOVE Command" }, { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is " "invalid because the target is not a Job Header " "Command, or the jump is from a Trusted Descriptor to " "a Job Descriptor, or because the target Descriptor " "contains a Shared Descriptor." }, { 0x0E, "Invalid MATH Command" }, { 0x0F, "Invalid SIGNATURE Command" }, { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR " "Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO " "LOAD, or SEQ FIFO STORE decremented the input or " "output sequence length below 0. This error may result " "if a built-in PROTOCOL Command has encountered a " "malformed PDU." }, { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."}, { 0x12, "Shared Descriptor Header Error" }, { 0x13, "Header Error. Invalid length or parity, or certain " "other problems." }, { 0x14, "Burster Error. Burster has gotten to an illegal " "state" }, { 0x15, "Context Register Length Error. The descriptor is " "trying to read or write past the end of the Context " "Register. 
A SEQ LOAD or SEQ STORE with the VLF bit " "set was executed with too large a length in the " "variable length register (VSOL for SEQ STORE or VSIL " "for SEQ LOAD)." }, { 0x16, "DMA Error" }, { 0x17, "Reserved." }, { 0x1A, "Job failed due to JR reset" }, { 0x1B, "Job failed due to Fail Mode" }, { 0x1C, "DECO Watchdog timer timeout error" }, { 0x1D, "DECO tried to copy a key from another DECO but the " "other DECO's Key Registers were locked" }, { 0x1E, "DECO attempted to copy data from a DECO that had an " "unmasked Descriptor error" }, { 0x1F, "LIODN error. DECO was trying to share from itself or " "from another DECO but the two Non-SEQ LIODN values " "didn't match or the 'shared from' DECO's Descriptor " "required that the SEQ LIODNs be the same and they " "aren't." }, { 0x20, "DECO has completed a reset initiated via the DRR " "register" }, { 0x21, "Nonce error. When using EKT (CCM) key encryption " "option in the FIFO STORE Command, the Nonce counter " "reached its maximum value and this encryption mode " "can no longer be used." }, { 0x22, "Meta data is too large (> 511 bytes) for TLS decap " "(input frame; block ciphers) and IPsec decap (output " "frame, when doing the next header byte update) and " "DCRC (output frame)." }, { 0x80, "DNR (do not run) error" }, { 0x81, "undefined protocol command" }, { 0x82, "invalid setting in PDB" }, { 0x83, "Anti-replay LATE error" }, { 0x84, "Anti-replay REPLAY error" }, { 0x85, "Sequence number overflow" }, { 0x86, "Sigver invalid signature" }, { 0x87, "DSA Sign Illegal test descriptor" }, { 0x88, "Protocol Format Error - A protocol has seen an error " "in the format of data received. When running RSA, " "this means that formatting with random padding was " "used, and did not follow the form: 0x00, 0x02, 8-to-N " "bytes of non-zero pad, 0x00, F data." }, { 0x89, "Protocol Size Error - A protocol has seen an error in " "size. 
When running RSA, pdb size N < (size of F) when " "no formatting is used; or pdb size N < (F + 11) when " "formatting is used." }, { 0xC1, "Blob Command error: Undefined mode" }, { 0xC2, "Blob Command error: Secure Memory Blob mode error" }, { 0xC4, "Blob Command error: Black Blob key or input size " "error" }, { 0xC5, "Blob Command error: Invalid key destination" }, { 0xC8, "Blob Command error: Trusted/Secure mode error" }, { 0xF0, "IPsec TTL or hop limit field either came in as 0, " "or was decremented to 0" }, { 0xF1, "3GPP HFN matches or exceeds the Threshold" }, }; u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK; int i; report_jump_idx(status, outstr); for (i = 0; i < ARRAY_SIZE(desc_error_list); i++) if (desc_error_list[i].value == desc_error) break; if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) { SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text, strlen(desc_error_list[i].error_text)); } else { SPRINTFCAT(outstr, "unidentified error value 0x%02x", desc_error, sizeof("ff")); } } static void report_jr_status(u32 status, char *outstr) { SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__)); } static void report_cond_code_status(u32 status, char *outstr) { SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__)); } char *caam_jr_strstatus(char *outstr, u32 status) { struct stat_src { void (*report_ssed)(u32 status, char *outstr); char *error; } status_src[] = { { NULL, "No error" }, { NULL, NULL }, { report_ccb_status, "CCB" }, { report_jump_status, "Jump" }, { report_deco_status, "DECO" }, { NULL, NULL }, { report_jr_status, "Job Ring" }, { report_cond_code_status, "Condition Code" }, }; u32 ssrc = status >> JRSTA_SSRC_SHIFT; sprintf(outstr, "%s: ", status_src[ssrc].error); if (status_src[ssrc].report_ssed) status_src[ssrc].report_ssed(status, outstr); return outstr; } EXPORT_SYMBOL(caam_jr_strstatus);
gpl-2.0
itead/ITEADSW_kernel
scripts/dtc/libfdt/fdt.c
7309
5370
/* * libfdt - Flat Device Tree manipulation * Copyright (C) 2006 David Gibson, IBM Corporation. * * libfdt is dual licensed: you can use it either under the terms of * the GPL, or the BSD license, at your option. * * a) This library is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, * MA 02110-1301 USA * * Alternatively, * * b) Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "libfdt_env.h" #include <fdt.h> #include <libfdt.h> #include "libfdt_internal.h" int fdt_check_header(const void *fdt) { if (fdt_magic(fdt) == FDT_MAGIC) { /* Complete tree */ if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION) return -FDT_ERR_BADVERSION; if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION) return -FDT_ERR_BADVERSION; } else if (fdt_magic(fdt) == FDT_SW_MAGIC) { /* Unfinished sequential-write blob */ if (fdt_size_dt_struct(fdt) == 0) return -FDT_ERR_BADSTATE; } else { return -FDT_ERR_BADMAGIC; } return 0; } const void *fdt_offset_ptr(const void *fdt, int offset, int len) { const char *p; if (fdt_version(fdt) >= 0x11) if (((offset + len) < offset) || ((offset + len) > fdt_size_dt_struct(fdt))) return NULL; p = _fdt_offset_ptr(fdt, offset); if (p + len < p) return NULL; return p; } uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset) { const uint32_t *tagp, *lenp; uint32_t tag; const char *p; if (offset % FDT_TAGSIZE) return -1; tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE); if (! tagp) return FDT_END; /* premature end */ tag = fdt32_to_cpu(*tagp); offset += FDT_TAGSIZE; switch (tag) { case FDT_BEGIN_NODE: /* skip name */ do { p = fdt_offset_ptr(fdt, offset++, 1); } while (p && (*p != '\0')); if (! p) return FDT_END; break; case FDT_PROP: lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp)); if (! 
lenp) return FDT_END; /* skip name offset, length and value */ offset += 2*FDT_TAGSIZE + fdt32_to_cpu(*lenp); break; } if (nextoffset) *nextoffset = FDT_TAGALIGN(offset); return tag; } int _fdt_check_node_offset(const void *fdt, int offset) { if ((offset < 0) || (offset % FDT_TAGSIZE) || (fdt_next_tag(fdt, offset, &offset) != FDT_BEGIN_NODE)) return -FDT_ERR_BADOFFSET; return offset; } int fdt_next_node(const void *fdt, int offset, int *depth) { int nextoffset = 0; uint32_t tag; if (offset >= 0) if ((nextoffset = _fdt_check_node_offset(fdt, offset)) < 0) return nextoffset; do { offset = nextoffset; tag = fdt_next_tag(fdt, offset, &nextoffset); switch (tag) { case FDT_PROP: case FDT_NOP: break; case FDT_BEGIN_NODE: if (depth) (*depth)++; break; case FDT_END_NODE: if (depth) (*depth)--; break; case FDT_END: return -FDT_ERR_NOTFOUND; default: return -FDT_ERR_BADSTRUCTURE; } } while (tag != FDT_BEGIN_NODE); return offset; } const char *_fdt_find_string(const char *strtab, int tabsize, const char *s) { int len = strlen(s) + 1; const char *last = strtab + tabsize - len; const char *p; for (p = strtab; p <= last; p++) if (memcmp(p, s, len) == 0) return p; return NULL; } int fdt_move(const void *fdt, void *buf, int bufsize) { FDT_CHECK_HEADER(fdt); if (fdt_totalsize(fdt) > bufsize) return -FDT_ERR_NOSPACE; memmove(buf, fdt, fdt_totalsize(fdt)); return 0; }
gpl-2.0
alexforsale/android_kernel_xiaomi_armani
arch/avr32/boards/merisc/merisc_sysfs.c
9101
1335
/* * Merisc sysfs exports * * Copyright (C) 2008 Martinsson Elektronik AB * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/timer.h> #include <linux/err.h> #include <linux/ctype.h> #include "merisc.h" static ssize_t merisc_model_show(struct class *class, char *buf) { ssize_t ret = 0; sprintf(buf, "%s\n", merisc_model()); ret = strlen(buf) + 1; return ret; } static ssize_t merisc_revision_show(struct class *class, char *buf) { ssize_t ret = 0; sprintf(buf, "%s\n", merisc_revision()); ret = strlen(buf) + 1; return ret; } static struct class_attribute merisc_class_attrs[] = { __ATTR(model, S_IRUGO, merisc_model_show, NULL), __ATTR(revision, S_IRUGO, merisc_revision_show, NULL), __ATTR_NULL, }; struct class merisc_class = { .name = "merisc", .owner = THIS_MODULE, .class_attrs = merisc_class_attrs, }; static int __init merisc_sysfs_init(void) { int status; status = class_register(&merisc_class); if (status < 0) return status; return 0; } postcore_initcall(merisc_sysfs_init);
gpl-2.0
Snuzzo/Kitten_Kernel
drivers/misc/iwmc3200top/log.c
9101
8042
/* * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver * drivers/misc/iwmc3200top/log.c * * Copyright (C) 2009 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com> * - * */ #include <linux/kernel.h> #include <linux/mmc/sdio_func.h> #include <linux/slab.h> #include <linux/ctype.h> #include "fw-msg.h" #include "iwmc3200top.h" #include "log.h" /* Maximal hexadecimal string size of the FW memdump message */ #define LOG_MSG_SIZE_MAX 12400 /* iwmct_logdefs is a global used by log macros */ u8 iwmct_logdefs[LOG_SRC_MAX]; static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX]; static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask) { int i; if (src < size) logdefs[src] = logmask; else if (src == LOG_SRC_ALL) for (i = 0; i < size; i++) logdefs[i] = logmask; else return -1; return 0; } int iwmct_log_set_filter(u8 src, u8 logmask) { return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask); } int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return _log_set_log_filter(iwmct_fw_logdefs, FW_LOG_SRC_MAX, src, logmask); } static int log_msg_format_hex(char *str, int slen, u8 *ibuf, int ilen, char *pref) { int pos = 0; int i; int len; for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++) str[pos] = pref[i]; for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++) 
len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]); if (i < ilen) return -1; return 0; } /* NOTE: This function is not thread safe. Currently it's called only from sdio rx worker - no race there */ void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len) { struct top_msg *msg; static char logbuf[LOG_MSG_SIZE_MAX]; msg = (struct top_msg *)buf; if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) { LOG_ERROR(priv, FW_MSG, "Log message from TOP " "is too short %d (expected %zd)\n", len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)); return; } if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] & BIT(msg->u.log.log_hdr.severity)) || !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity))) return; switch (msg->hdr.category) { case COMM_CATEGORY_TESTABILITY: if (!(iwmct_logdefs[LOG_SRC_TST] & BIT(msg->u.log.log_hdr.severity))) return; if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf, le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr), "<TST>")) LOG_WARNING(priv, TST, "TOP TST message is too long, truncating..."); LOG_WARNING(priv, TST, "%s\n", logbuf); break; case COMM_CATEGORY_DEBUG: if (msg->hdr.opcode == OP_DBG_ZSTR_MSG) LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>", ((u8 *)msg) + sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)); else { if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf, le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr), "<DBG>")) LOG_WARNING(priv, FW_MSG, "TOP DBG message is too long," "truncating..."); LOG_WARNING(priv, FW_MSG, "%s\n", logbuf); } break; default: break; } } static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size) { int i, pos, len; for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) { len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,", i, logdefs[i]); pos += len; } buf[pos-1] = '\n'; buf[pos] = '\0'; if (i < logdefsz) return -1; return 0; } int log_get_filter_str(char *buf, int size) { return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size); } int 
log_get_fw_filter_str(char *buf, int size) { return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size); } #define HEXADECIMAL_RADIX 16 #define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */ ssize_t show_iwmct_log_level(struct device *d, struct device_attribute *attr, char *buf) { struct iwmct_priv *priv = dev_get_drvdata(d); char *str_buf; int buf_size; ssize_t ret; buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1; str_buf = kzalloc(buf_size, GFP_KERNEL); if (!str_buf) { LOG_ERROR(priv, DEBUGFS, "failed to allocate %d bytes\n", buf_size); ret = -ENOMEM; goto exit; } if (log_get_filter_str(str_buf, buf_size) < 0) { ret = -EINVAL; goto exit; } ret = sprintf(buf, "%s", str_buf); exit: kfree(str_buf); return ret; } ssize_t store_iwmct_log_level(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct iwmct_priv *priv = dev_get_drvdata(d); char *token, *str_buf = NULL; long val; ssize_t ret = count; u8 src, mask; if (!count) goto exit; str_buf = kzalloc(count, GFP_KERNEL); if (!str_buf) { LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n", count); ret = -ENOMEM; goto exit; } memcpy(str_buf, buf, count); while ((token = strsep(&str_buf, ",")) != NULL) { while (isspace(*token)) ++token; if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) { LOG_ERROR(priv, DEBUGFS, "failed to convert string to long %s\n", token); ret = -EINVAL; goto exit; } mask = val & 0xFF; src = (val & 0XFF00) >> 8; iwmct_log_set_filter(src, mask); } exit: kfree(str_buf); return ret; } ssize_t show_iwmct_log_level_fw(struct device *d, struct device_attribute *attr, char *buf) { struct iwmct_priv *priv = dev_get_drvdata(d); char *str_buf; int buf_size; ssize_t ret; buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2; str_buf = kzalloc(buf_size, GFP_KERNEL); if (!str_buf) { LOG_ERROR(priv, DEBUGFS, "failed to allocate %d bytes\n", buf_size); ret = -ENOMEM; goto exit; } if (log_get_fw_filter_str(str_buf, buf_size) < 0) { ret = -EINVAL; goto exit; } 
ret = sprintf(buf, "%s", str_buf); exit: kfree(str_buf); return ret; } ssize_t store_iwmct_log_level_fw(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct iwmct_priv *priv = dev_get_drvdata(d); struct top_msg cmd; char *token, *str_buf = NULL; ssize_t ret = count; u16 cmdlen = 0; int i; long val; u8 src, mask; if (!count) goto exit; str_buf = kzalloc(count, GFP_KERNEL); if (!str_buf) { LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n", count); ret = -ENOMEM; goto exit; } memcpy(str_buf, buf, count); cmd.hdr.type = COMM_TYPE_H2D; cmd.hdr.category = COMM_CATEGORY_DEBUG; cmd.hdr.opcode = CMD_DBG_LOG_LEVEL; for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) && (i < FW_LOG_SRC_MAX); i++) { while (isspace(*token)) ++token; if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) { LOG_ERROR(priv, DEBUGFS, "failed to convert string to long %s\n", token); ret = -EINVAL; goto exit; } mask = val & 0xFF; /* LSB */ src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */ iwmct_log_set_fw_filter(src, mask); cmd.u.logdefs[i].logsource = src; cmd.u.logdefs[i].sevmask = mask; } cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0])); cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr)); ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen); if (ret) { LOG_ERROR(priv, DEBUGFS, "Failed to send %d bytes of fwcmd, ret=%zd\n", cmdlen, ret); goto exit; } else LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen); ret = count; exit: kfree(str_buf); return ret; }
gpl-2.0
aloeffler/linux308
arch/avr32/mm/cache.c
10125
3817
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/highmem.h>
#include <linux/unistd.h>

#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/syscalls.h>

/*
 * If you attempt to flush anything more than this, you need superuser
 * privileges.  The value is completely arbitrary.
 */
#define CACHEFLUSH_MAX_LEN	1024

/*
 * Discard D-cache lines covering [start, start+size) without writing
 * them back.  Partially covered lines at either end are flushed
 * (written back) instead, so no valid data outside the region is lost.
 */
void invalidate_dcache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz, mask;

	linesz = boot_cpu_data.dcache.linesz;
	mask = linesz - 1;

	/* when first and/or last cachelines are shared, flush them
	 * instead of invalidating ... never discard valid data!
	 */
	begin = (unsigned long)start;
	end = begin + size;

	if (begin & mask) {
		flush_dcache_line(start);
		begin += linesz;
	}
	if (end & mask) {
		flush_dcache_line((void *)end);
		end &= ~mask;
	}

	/* remaining cachelines only need invalidation */
	for (v = begin; v < end; v += linesz)
		invalidate_dcache_line((void *)v);
	flush_write_buffer();
}

/*
 * Write back (clean) every D-cache line touching [start, start+size).
 * The range is widened to whole cache lines before iterating.
 */
void clean_dcache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz;

	linesz = boot_cpu_data.dcache.linesz;
	begin = (unsigned long)start & ~(linesz - 1);
	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

	for (v = begin; v < end; v += linesz)
		clean_dcache_line((void *)v);
	flush_write_buffer();
}

/*
 * Write back and invalidate every D-cache line touching the region.
 */
void flush_dcache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz;

	linesz = boot_cpu_data.dcache.linesz;
	begin = (unsigned long)start & ~(linesz - 1);
	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

	for (v = begin; v < end; v += linesz)
		flush_dcache_line((void *)v);
	flush_write_buffer();
}

/*
 * Invalidate every I-cache line touching the region.  No write-back is
 * needed for the instruction cache, hence no flush_write_buffer() here.
 */
void invalidate_icache_region(void *start, size_t size)
{
	unsigned long v, begin, end, linesz;

	linesz = boot_cpu_data.icache.linesz;
	begin = (unsigned long)start & ~(linesz - 1);
	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);

	for (v = begin; v < end; v += linesz)
		invalidate_icache_line((void *)v);
}

/*
 * Push modified code from the D-cache to memory and drop stale I-cache
 * lines, line by line.  start/end must already be line-aligned.
 * NOTE(review): the D-cache line size is used for both caches here --
 * presumably the two line sizes match on these cores; confirm.
 */
static inline void __flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long v, linesz;

	linesz = boot_cpu_data.dcache.linesz;
	for (v = start; v < end; v += linesz) {
		clean_dcache_line((void *)v);
		invalidate_icache_line((void *)v);
	}

	flush_write_buffer();
}

/*
 * This one is called after a module has been loaded.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long linesz;

	linesz = boot_cpu_data.dcache.linesz;
	/* widen to whole cache lines before doing the per-line work */
	__flush_icache_range(start & ~(linesz - 1),
			     (end + linesz - 1) & ~(linesz - 1));
}

/*
 * This one is called from __do_fault() and do_swap_page().
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* only executable mappings need I-cache coherency */
	if (vma->vm_flags & VM_EXEC) {
		void *v = page_address(page);
		__flush_icache_range((unsigned long)v,
				     (unsigned long)v + PAGE_SIZE);
	}
}

/*
 * cacheflush(2) system call: flush the I-cache for a user range.
 * Ranges longer than CACHEFLUSH_MAX_LEN require CAP_SYS_ADMIN.
 * Returns 0, -EPERM, -EFAULT, or -EINVAL for an unknown operation.
 */
asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len)
{
	int ret;

	if (len > CACHEFLUSH_MAX_LEN) {
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	}

	ret = -EFAULT;
	if (!access_ok(VERIFY_WRITE, addr, len))
		goto out;

	switch (operation) {
	case CACHE_IFLUSH:
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + len);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

out:
	return ret;
}

/*
 * Copy data into a user page (e.g. for ptrace pokes) and keep the
 * I-cache coherent when the target mapping is executable.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + len);
}
gpl-2.0
OnePlusOSS/android_kernel_oneplus_one
drivers/ide/ide-devsets.c
12941
3915
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>

/* Serializes access to device settings (taken by ioctl/sysfs paths). */
DEFINE_MUTEX(ide_setting_mtx);

ide_devset_get(io_32bit, io_32bit);

/*
 * Set the 32-bit I/O mode.  Rejected with -EPERM when the drive forbids
 * it, -EINVAL for out-of-range values.
 */
static int set_io_32bit(ide_drive_t *drive, int arg)
{
	if (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT)
		return -EPERM;

	if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1))
		return -EINVAL;

	drive->io_32bit = arg;

	return 0;
}

ide_devset_get_flag(ksettings, IDE_DFLAG_KEEP_SETTINGS);

/* Toggle the "keep settings over reset" flag (0 or 1 only). */
static int set_ksettings(ide_drive_t *drive, int arg)
{
	if (arg < 0 || arg > 1)
		return -EINVAL;

	if (arg)
		drive->dev_flags |= IDE_DFLAG_KEEP_SETTINGS;
	else
		drive->dev_flags &= ~IDE_DFLAG_KEEP_SETTINGS;

	return 0;
}

ide_devset_get_flag(using_dma, IDE_DFLAG_USING_DMA);

/*
 * Enable (arg == 1) or disable (arg == 0) DMA for the drive.  Fails with
 * -EPERM when DMA is unsupported by the drive or the host, -EIO when
 * enabling DMA fails.  Without CONFIG_BLK_DEV_IDEDMA only argument
 * validation is performed and -EPERM is always returned.
 */
static int set_using_dma(ide_drive_t *drive, int arg)
{
#ifdef CONFIG_BLK_DEV_IDEDMA
	int err = -EPERM;

	if (arg < 0 || arg > 1)
		return -EINVAL;

	if (ata_id_has_dma(drive->id) == 0)
		goto out;

	if (drive->hwif->dma_ops == NULL)
		goto out;

	err = 0;

	if (arg) {
		if (ide_set_dma(drive))
			err = -EIO;
	} else
		ide_dma_off(drive);

out:
	return err;
#else
	if (arg < 0 || arg > 1)
		return -EINVAL;

	return -EPERM;
#endif
}

/*
 * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
 */
static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
{
	/* special out-of-spec PIO values certain hosts repurpose */
	switch (req_pio) {
	case 202:
	case 201:
	case 200:
	case 102:
	case 101:
	case 100:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
	case 9:
	case 8:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
	case 7:
	case 6:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
	default:
		return 0;
	}
}

/*
 * Apply a PIO mode request.  "Abused" values go straight to the port
 * ops; normal values go through ide_set_pio(), optionally re-enabling
 * DMA afterwards for hosts that lose it on a PIO change.
 */
static int set_pio_mode(ide_drive_t *drive, int arg)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;

	if (arg < 0 || arg > 255)
		return -EINVAL;

	if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
	    (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
		return -ENOSYS;

	if (set_pio_mode_abuse(drive->hwif, arg)) {
		drive->pio_mode = arg + XFER_PIO_0;

		if (arg == 8 || arg == 9) {
			unsigned long flags;

			/* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
			spin_lock_irqsave(&hwif->lock, flags);
			port_ops->set_pio_mode(hwif, drive);
			spin_unlock_irqrestore(&hwif->lock, flags);
		} else
			port_ops->set_pio_mode(hwif, drive);
	} else {
		int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);

		ide_set_pio(drive, arg);

		if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
			if (keep_dma)
				ide_dma_on(drive);
		}
	}

	return 0;
}

ide_devset_get_flag(unmaskirq, IDE_DFLAG_UNMASK);

/* Toggle IRQ unmasking during data transfers (0 or 1 only). */
static int set_unmaskirq(ide_drive_t *drive, int arg)
{
	if (drive->dev_flags & IDE_DFLAG_NO_UNMASK)
		return -EPERM;

	if (arg < 0 || arg > 1)
		return -EINVAL;

	if (arg)
		drive->dev_flags |= IDE_DFLAG_UNMASK;
	else
		drive->dev_flags &= ~IDE_DFLAG_UNMASK;

	return 0;
}

ide_ext_devset_rw_sync(io_32bit, io_32bit);
ide_ext_devset_rw_sync(keepsettings, ksettings);
ide_ext_devset_rw_sync(unmaskirq, unmaskirq);
ide_ext_devset_rw_sync(using_dma, using_dma);
__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode);

/*
 * Apply a device setting.  Non-DS_SYNC settings are applied directly;
 * DS_SYNC settings are queued as a REQ_DEVSET_EXEC special request so
 * they execute synchronized with the request queue (see ide_do_devset).
 * The setter and its int argument travel inside rq->special / rq->cmd.
 */
int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
		       int arg)
{
	struct request_queue *q = drive->queue;
	struct request *rq;
	int ret = 0;

	if (!(setting->flags & DS_SYNC))
		return setting->set(drive, arg);

	rq = blk_get_request(q, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_len = 5;
	rq->cmd[0] = REQ_DEVSET_EXEC;
	*(int *)&rq->cmd[1] = arg;
	rq->special = setting->set;

	if (blk_execute_rq(q, NULL, rq, 0))
		ret = rq->errors;
	blk_put_request(rq);

	return ret;
}

/*
 * Request-queue side of ide_devset_execute(): unpack the setter function
 * pointer and argument from the request and run it, recording any error.
 */
ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
{
	int err, (*setfunc)(ide_drive_t *, int) = rq->special;

	err = setfunc(drive, *(int *)&rq->cmd[1]);
	if (err)
		rq->errors = err;
	ide_complete_rq(drive, err, blk_rq_bytes(rq));
	return ide_stopped;
}
gpl-2.0
kirananto/android_kernel_cyanogen_msm8916-1
drivers/gud/MobiCoreKernelApi/connection.c
910
4750
/*
 * Copyright (c) 2013 TRUSTONIC LIMITED
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/semaphore.h>
#include <linux/time.h>
#include <net/sock.h>
#include <net/net_namespace.h>

#include "connection.h"
#include "common.h"

/* Define the initial state of the Data Available Semaphore */
#define SEM_NO_DATA_AVAILABLE 0

/*
 * Allocate and register a new netlink connection object.  Returns NULL
 * on allocation failure; on success the connection is already inserted
 * into the global table via mcapi_insert_connection().
 */
struct connection *connection_new(void)
{
	struct connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (conn == NULL) {
		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
		return NULL;
	}
	conn->sequence_magic = mcapi_unique_id();
	mutex_init(&conn->data_lock);
	/* the semaphore starts "empty": no data available yet */
	sema_init(&conn->data_available_sem, SEM_NO_DATA_AVAILABLE);

	mcapi_insert_connection(conn);
	return conn;
}

/*
 * Unregister and free a connection, releasing any pending skb.
 * Safe to call with NULL.
 */
void connection_cleanup(struct connection *conn)
{
	if (!conn)
		return;

	kfree_skb(conn->skb);

	mcapi_remove_connection(conn->sequence_magic);
	kfree(conn);
}

/*
 * "Connect" to a peer: netlink is connectionless, so this only records
 * the destination pid.  Always succeeds.
 */
bool connection_connect(struct connection *conn, pid_t dest)
{
	/* Nothing to connect */
	conn->peer_pid = dest;
	return true;
}

/*
 * Copy up to @len bytes of buffered message data into @buffer and
 * advance the internal cursor; frees the skb once fully consumed.
 * Returns the number of bytes copied.  Caller must hold data_lock.
 * NOTE(review): ret is size_t (unsigned) but is initialized to -1;
 * that sentinel is overwritten on both branches here, yet the same
 * pattern below stores -1/-2 as huge unsigned values -- verify callers.
 */
size_t connection_read_data_msg(struct connection *conn, void *buffer,
				uint32_t len)
{
	size_t ret = -1;
	MCDRV_DBG_VERBOSE(mc_kapi,
			  "reading connection data %u, connection data left %u",
			  len, conn->data_len);
	/* trying to read more than the left data */
	if (len > conn->data_len) {
		ret = conn->data_len;
		memcpy(buffer, conn->data_start, conn->data_len);
		conn->data_len = 0;
	} else {
		ret = len;
		memcpy(buffer, conn->data_start, len);
		conn->data_len -= len;
		conn->data_start += len;
	}

	if (conn->data_len == 0) {
		/* everything consumed: release the backing skb */
		conn->data_start = NULL;
		kfree_skb(conn->skb);
		conn->skb = NULL;
	}
	MCDRV_DBG_VERBOSE(mc_kapi, "read %zu", ret);
	return ret;
}

/* Blocking read with no timeout (-1 waits forever, see below). */
size_t connection_read_datablock(struct connection *conn, void *buffer,
				 uint32_t len)
{
	return connection_read_data(conn, buffer, len, -1);
}

/*
 * Wait up to @timeout ms for data on the connection, then copy up to
 * @len bytes into @buffer.  Returns bytes read, or (size_t)-2 on
 * timeout and (size_t)-1 on interruption -- see NOTE(review) above
 * about these negative sentinels in an unsigned return type.
 */
size_t connection_read_data(struct connection *conn, void *buffer,
			    uint32_t len, int32_t timeout)
{
	size_t ret = 0;

	MCDRV_ASSERT(buffer != NULL);
	MCDRV_ASSERT(conn->socket_descriptor != NULL);

	MCDRV_DBG_VERBOSE(mc_kapi, "read data len = %u for PID = %u",
			  len, conn->sequence_magic);
	do {
		/*
		 * Wait until data is available or timeout
		 * msecs_to_jiffies(-1) -> wait forever for the sem
		 */
		if (down_timeout(&(conn->data_available_sem),
				 msecs_to_jiffies(timeout))) {
			MCDRV_DBG_VERBOSE(mc_kapi,
					  "Timeout reading the data sem");
			ret = -2;
			break;
		}

		if (mutex_lock_interruptible(&(conn->data_lock))) {
			MCDRV_DBG_ERROR(mc_kapi,
					"interrupted reading the data sem");
			ret = -1;
			break;
		}

		/* Have data, use it */
		if (conn->data_len > 0)
			ret = connection_read_data_msg(conn, buffer, len);

		mutex_unlock(&(conn->data_lock));

		/* There is still some data left */
		if (conn->data_len > 0)
			up(&conn->data_available_sem);
	} while (0);

	return ret;
}

/*
 * Send @len bytes to the peer as a unicast netlink message.  Returns
 * @len on success, -1 on allocation failure.  netlink_unicast() owns
 * and frees the skb on the success path.
 */
size_t connection_write_data(struct connection *conn, void *buffer,
			     uint32_t len)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int ret = 0;

	MCDRV_DBG_VERBOSE(mc_kapi, "buffer length %u from pid %u\n",
			  len, conn->sequence_magic);
	do {
		skb = nlmsg_new(NLMSG_SPACE(len), GFP_KERNEL);
		if (!skb) {
			ret = -1;
			break;
		}

		nlh = nlmsg_put(skb, 0, conn->sequence_magic, 2,
				NLMSG_LENGTH(len), NLM_F_REQUEST);
		if (!nlh) {
			ret = -1;
			kfree_skb(skb);
			break;
		}
		memcpy(NLMSG_DATA(nlh), buffer, len);

		/* netlink_unicast frees skb */
		netlink_unicast(conn->socket_descriptor, skb,
				conn->peer_pid, MSG_DONTWAIT);
		ret = len;
	} while (0);

	return ret;
}

/*
 * Attach an incoming skb to the connection and signal readers.  Any
 * previously buffered (unread) skb is dropped and replaced.  Returns 0
 * on success, -1 when interrupted while taking the data lock.
 */
int connection_process(struct connection *conn, struct sk_buff *skb)
{
	int ret = 0;
	do {
		if (mutex_lock_interruptible(&(conn->data_lock))) {
			MCDRV_DBG_ERROR(mc_kapi,
					"Interrupted getting data semaphore!");
			ret = -1;
			break;
		}

		kfree_skb(conn->skb);

		/* Get a reference to the incoming skb */
		conn->skb = skb_get(skb);
		if (conn->skb) {
			conn->data_msg = nlmsg_hdr(conn->skb);
			conn->data_len = NLMSG_PAYLOAD(conn->data_msg, 0);
			conn->data_start = NLMSG_DATA(conn->data_msg);
			/* wake up any reader blocked in connection_read_data() */
			up(&(conn->data_available_sem));
		}
		mutex_unlock(&(conn->data_lock));
		ret = 0;
	} while (0);
	return ret;
}
gpl-2.0
ML-Design/android-2.6.35
arch/s390/kernel/irq.c
1678
2374
/* * arch/s390/kernel/irq.c * * Copyright IBM Corp. 2004,2007 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), * Thomas Spatzier (tspat@de.ibm.com) * * This file contains interrupt related functions. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/interrupt.h> #include <linux/seq_file.h> #include <linux/cpu.h> #include <linux/proc_fs.h> #include <linux/profile.h> /* * show_interrupts is needed by /proc/interrupts. */ int show_interrupts(struct seq_file *p, void *v) { static const char *intrclass_names[] = { "EXT", "I/O", }; int i = *(loff_t *) v, j; get_online_cpus(); if (i == 0) { seq_puts(p, " "); for_each_online_cpu(j) seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); } if (i < NR_IRQS) { seq_printf(p, "%s: ", intrclass_names[i]); #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); #endif seq_putc(p, '\n'); } put_online_cpus(); return 0; } /* * For compatibilty only. S/390 specific setup of interrupts et al. is done * much later in init_channel_subsystem(). */ void __init init_IRQ(void) { /* nothing... */ } /* * Switch to the asynchronous interrupt stack for softirq execution. */ asmlinkage void do_softirq(void) { unsigned long flags, old, new; if (in_interrupt()) return; local_irq_save(flags); if (local_softirq_pending()) { /* Get current stack pointer. */ asm volatile("la %0,0(15)" : "=a" (old)); /* Check against async. stack address range. */ new = S390_lowcore.async_stack; if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) { /* Need to switch to the async. stack. */ new -= STACK_FRAME_OVERHEAD; ((struct stack_frame *) new)->back_chain = old; asm volatile(" la 15,0(%0)\n" " basr 14,%2\n" " la 15,0(%1)\n" : : "a" (new), "a" (old), "a" (__do_softirq) : "0", "1", "2", "3", "4", "5", "14", "cc", "memory" ); } else /* We are already on the async stack. 
*/ __do_softirq(); } local_irq_restore(flags); } #ifdef CONFIG_PROC_FS void init_irq_proc(void) { struct proc_dir_entry *root_irq_dir; root_irq_dir = proc_mkdir("irq", NULL); create_prof_cpu_mask(root_irq_dir); } #endif
gpl-2.0
bubby323/samsung-kernel-c1spr
arch/mips/math-emu/sp_tint.c
1678
3059
/* IEEE754 floating point arithmetic * single precision */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * http://www.algor.co.uk * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include <linux/kernel.h> #include "ieee754sp.h" int ieee754sp_tint(ieee754sp x) { COMPXSP; CLEARCX; EXPLODEXSP; FLUSHXSP; switch (xc) { case IEEE754_CLASS_SNAN: case IEEE754_CLASS_QNAN: case IEEE754_CLASS_INF: SETCX(IEEE754_INVALID_OPERATION); return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); case IEEE754_CLASS_ZERO: return 0; case IEEE754_CLASS_DNORM: case IEEE754_CLASS_NORM: break; } if (xe >= 31) { /* look for valid corner case */ if (xe == 31 && xs && xm == SP_HIDDEN_BIT) return -0x80000000; /* Set invalid. We will only use overflow for floating point overflow */ SETCX(IEEE754_INVALID_OPERATION); return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); } /* oh gawd */ if (xe > SP_MBITS) { xm <<= xe - SP_MBITS; } else { u32 residue; int round; int sticky; int odd; if (xe < -1) { residue = xm; round = 0; sticky = residue != 0; xm = 0; } else { /* Shifting a u32 32 times does not work, * so we do it in two steps. 
Be aware that xe * may be -1 */ residue = xm << (xe + 1); residue <<= 31 - SP_MBITS; round = (residue >> 31) != 0; sticky = (residue << 1) != 0; xm >>= SP_MBITS - xe; } odd = (xm & 0x1) != 0x0; switch (ieee754_csr.rm) { case IEEE754_RN: if (round && (sticky || odd)) xm++; break; case IEEE754_RZ: break; case IEEE754_RU: /* toward +Infinity */ if ((round || sticky) && !xs) xm++; break; case IEEE754_RD: /* toward -Infinity */ if ((round || sticky) && xs) xm++; break; } if ((xm >> 31) != 0) { /* This can happen after rounding */ SETCX(IEEE754_INVALID_OPERATION); return ieee754si_xcpt(ieee754si_indef(), "sp_tint", x); } if (round || sticky) SETCX(IEEE754_INEXACT); } if (xs) return -xm; else return xm; } unsigned int ieee754sp_tuns(ieee754sp x) { ieee754sp hb = ieee754sp_1e31(); /* what if x < 0 ?? */ if (ieee754sp_lt(x, hb)) return (unsigned) ieee754sp_tint(x); return (unsigned) ieee754sp_tint(ieee754sp_sub(x, hb)) | ((unsigned) 1 << 31); }
gpl-2.0
mythos234/NB_ET_Kernel
drivers/usb/host/ehci-tegra.c
1934
22530
/* * EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs * * Copyright (C) 2010 Google, Inc. * Copyright (C) 2009 - 2013 NVIDIA Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * */ #include <linux/clk.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/platform_data/tegra_usb.h> #include <linux/irq.h> #include <linux/usb/otg.h> #include <linux/gpio.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/pm_runtime.h> #include <linux/usb/ehci_def.h> #include <linux/usb/tegra_usb_phy.h> #include <linux/clk/tegra.h> #define TEGRA_USB_BASE 0xC5000000 #define TEGRA_USB2_BASE 0xC5004000 #define TEGRA_USB3_BASE 0xC5008000 /* PORTSC registers */ #define TEGRA_USB_PORTSC1 0x184 #define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30) #define TEGRA_USB_PORTSC1_PHCD (1 << 23) #define TEGRA_USB_DMA_ALIGN 32 struct tegra_ehci_hcd { struct ehci_hcd *ehci; struct tegra_usb_phy *phy; struct clk *clk; struct usb_phy *transceiver; int host_resumed; int port_resuming; bool needs_double_reset; enum tegra_usb_phy_port_speed port_speed; }; static void tegra_ehci_power_up(struct usb_hcd *hcd) { struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); clk_prepare_enable(tegra->clk); usb_phy_set_suspend(hcd->phy, 0); tegra->host_resumed = 1; } static void tegra_ehci_power_down(struct usb_hcd *hcd) { struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); tegra->host_resumed = 0; usb_phy_set_suspend(hcd->phy, 1); clk_disable_unprepare(tegra->clk); } static int 
tegra_ehci_internal_port_reset( struct ehci_hcd *ehci, u32 __iomem *portsc_reg ) { u32 temp; unsigned long flags; int retval = 0; int i, tries; u32 saved_usbintr; spin_lock_irqsave(&ehci->lock, flags); saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable); /* disable USB interrupt */ ehci_writel(ehci, 0, &ehci->regs->intr_enable); spin_unlock_irqrestore(&ehci->lock, flags); /* * Here we have to do Port Reset at most twice for * Port Enable bit to be set. */ for (i = 0; i < 2; i++) { temp = ehci_readl(ehci, portsc_reg); temp |= PORT_RESET; ehci_writel(ehci, temp, portsc_reg); mdelay(10); temp &= ~PORT_RESET; ehci_writel(ehci, temp, portsc_reg); mdelay(1); tries = 100; do { mdelay(1); /* * Up to this point, Port Enable bit is * expected to be set after 2 ms waiting. * USB1 usually takes extra 45 ms, for safety, * we take 100 ms as timeout. */ temp = ehci_readl(ehci, portsc_reg); } while (!(temp & PORT_PE) && tries--); if (temp & PORT_PE) break; } if (i == 2) retval = -ETIMEDOUT; /* * Clear Connect Status Change bit if it's set. * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared. */ if (temp & PORT_CSC) ehci_writel(ehci, PORT_CSC, portsc_reg); /* * Write to clear any interrupt status bits that might be set * during port reset. 
*/ temp = ehci_readl(ehci, &ehci->regs->status); ehci_writel(ehci, temp, &ehci->regs->status); /* restore original interrupt enable bits */ ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable); return retval; } static int tegra_ehci_hub_control( struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength ) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); u32 __iomem *status_reg; u32 temp; unsigned long flags; int retval = 0; status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1]; spin_lock_irqsave(&ehci->lock, flags); if (typeReq == GetPortStatus) { temp = ehci_readl(ehci, status_reg); if (tegra->port_resuming && !(temp & PORT_SUSPEND)) { /* Resume completed, re-enable disconnect detection */ tegra->port_resuming = 0; tegra_usb_phy_postresume(hcd->phy); } } else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { temp = ehci_readl(ehci, status_reg); if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { retval = -EPIPE; goto done; } temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E); temp |= PORT_WKDISC_E | PORT_WKOC_E; ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); /* * If a transaction is in progress, there may be a delay in * suspending the port. Poll until the port is suspended. */ if (handshake(ehci, status_reg, PORT_SUSPEND, PORT_SUSPEND, 5000)) pr_err("%s: timeout waiting for SUSPEND\n", __func__); set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports); goto done; } /* For USB1 port we need to issue Port Reset twice internally */ if (tegra->needs_double_reset && (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) { spin_unlock_irqrestore(&ehci->lock, flags); return tegra_ehci_internal_port_reset(ehci, status_reg); } /* * Tegra host controller will time the resume operation to clear the bit * when the port control state switches to HS or FS Idle. 
This behavior * is different from EHCI where the host controller driver is required * to set this bit to a zero after the resume duration is timed in the * driver. */ else if (typeReq == ClearPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { temp = ehci_readl(ehci, status_reg); if ((temp & PORT_RESET) || !(temp & PORT_PE)) { retval = -EPIPE; goto done; } if (!(temp & PORT_SUSPEND)) goto done; /* Disable disconnect detection during port resume */ tegra_usb_phy_preresume(hcd->phy); ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25); temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); /* start resume signalling */ ehci_writel(ehci, temp | PORT_RESUME, status_reg); set_bit(wIndex-1, &ehci->resuming_ports); spin_unlock_irqrestore(&ehci->lock, flags); msleep(20); spin_lock_irqsave(&ehci->lock, flags); /* Poll until the controller clears RESUME and SUSPEND */ if (handshake(ehci, status_reg, PORT_RESUME, 0, 2000)) pr_err("%s: timeout waiting for RESUME\n", __func__); if (handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000)) pr_err("%s: timeout waiting for SUSPEND\n", __func__); ehci->reset_done[wIndex-1] = 0; clear_bit(wIndex-1, &ehci->resuming_ports); tegra->port_resuming = 1; goto done; } spin_unlock_irqrestore(&ehci->lock, flags); /* Handle the hub control events here */ return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); done: spin_unlock_irqrestore(&ehci->lock, flags); return retval; } static void tegra_ehci_restart(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); ehci_reset(ehci); /* setup the frame list and Async q heads */ ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list); ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next); /* setup the command register and set the controller in RUN mode */ ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET); ehci->command |= CMD_RUN; ehci_writel(ehci, ehci->command, &ehci->regs->command); down_write(&ehci_cf_port_reset_rwsem); ehci_writel(ehci, FLAG_CF, 
&ehci->regs->configured_flag); /* flush posted writes */ ehci_readl(ehci, &ehci->regs->command); up_write(&ehci_cf_port_reset_rwsem); } static void tegra_ehci_shutdown(struct usb_hcd *hcd) { struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); /* ehci_shutdown touches the USB controller registers, make sure * controller has clocks to it */ if (!tegra->host_resumed) tegra_ehci_power_up(hcd); ehci_shutdown(hcd); } static int tegra_ehci_setup(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); /* EHCI registers start at offset 0x100 */ ehci->caps = hcd->regs + 0x100; /* switch to host mode */ hcd->has_tt = 1; return ehci_setup(hcd); } struct dma_aligned_buffer { void *kmalloc_ptr; void *old_xfer_buffer; u8 data[0]; }; static void free_dma_aligned_buffer(struct urb *urb) { struct dma_aligned_buffer *temp; if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) return; temp = container_of(urb->transfer_buffer, struct dma_aligned_buffer, data); if (usb_urb_dir_in(urb)) memcpy(temp->old_xfer_buffer, temp->data, urb->transfer_buffer_length); urb->transfer_buffer = temp->old_xfer_buffer; kfree(temp->kmalloc_ptr); urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; } static int alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) { struct dma_aligned_buffer *temp, *kmalloc_ptr; size_t kmalloc_size; if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 || !((uintptr_t)urb->transfer_buffer & (TEGRA_USB_DMA_ALIGN - 1))) return 0; /* Allocate a buffer with enough padding for alignment */ kmalloc_size = urb->transfer_buffer_length + sizeof(struct dma_aligned_buffer) + TEGRA_USB_DMA_ALIGN - 1; kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); if (!kmalloc_ptr) return -ENOMEM; /* Position our struct dma_aligned_buffer such that data is aligned */ temp = PTR_ALIGN(kmalloc_ptr + 1, TEGRA_USB_DMA_ALIGN) - 1; temp->kmalloc_ptr = kmalloc_ptr; temp->old_xfer_buffer = urb->transfer_buffer; if (usb_urb_dir_out(urb)) memcpy(temp->data, 
urb->transfer_buffer, urb->transfer_buffer_length); urb->transfer_buffer = temp->data; urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; return 0; } static int tegra_ehci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { int ret; ret = alloc_dma_aligned_buffer(urb, mem_flags); if (ret) return ret; ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); if (ret) free_dma_aligned_buffer(urb); return ret; } static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) { usb_hcd_unmap_urb_for_dma(hcd, urb); free_dma_aligned_buffer(urb); } static const struct hc_driver tegra_ehci_hc_driver = { .description = hcd_name, .product_desc = "Tegra EHCI Host Controller", .hcd_priv_size = sizeof(struct ehci_hcd), .flags = HCD_USB2 | HCD_MEMORY, /* standard ehci functions */ .irq = ehci_irq, .start = ehci_run, .stop = ehci_stop, .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, .endpoint_reset = ehci_endpoint_reset, .get_frame_number = ehci_get_frame, .hub_status_data = ehci_hub_status_data, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, /* modified ehci functions for tegra */ .reset = tegra_ehci_setup, .shutdown = tegra_ehci_shutdown, .map_urb_for_dma = tegra_ehci_map_urb_for_dma, .unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma, .hub_control = tegra_ehci_hub_control, #ifdef CONFIG_PM .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, #endif }; static int setup_vbus_gpio(struct platform_device *pdev, struct tegra_ehci_platform_data *pdata) { int err = 0; int gpio; gpio = pdata->vbus_gpio; if (!gpio_is_valid(gpio)) gpio = of_get_named_gpio(pdev->dev.of_node, "nvidia,vbus-gpio", 0); if (!gpio_is_valid(gpio)) return 0; err = gpio_request(gpio, "vbus_gpio"); if (err) { dev_err(&pdev->dev, "can't request vbus gpio %d", gpio); return err; } err = gpio_direction_output(gpio, 1); if 
(err) { dev_err(&pdev->dev, "can't enable vbus\n"); return err; } return err; } #ifdef CONFIG_PM static int controller_suspend(struct device *dev) { struct tegra_ehci_hcd *tegra = platform_get_drvdata(to_platform_device(dev)); struct ehci_hcd *ehci = tegra->ehci; struct usb_hcd *hcd = ehci_to_hcd(ehci); struct ehci_regs __iomem *hw = ehci->regs; unsigned long flags; if (time_before(jiffies, ehci->next_statechange)) msleep(10); ehci_halt(ehci); spin_lock_irqsave(&ehci->lock, flags); tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3; clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); spin_unlock_irqrestore(&ehci->lock, flags); tegra_ehci_power_down(hcd); return 0; } static int controller_resume(struct device *dev) { struct tegra_ehci_hcd *tegra = platform_get_drvdata(to_platform_device(dev)); struct ehci_hcd *ehci = tegra->ehci; struct usb_hcd *hcd = ehci_to_hcd(ehci); struct ehci_regs __iomem *hw = ehci->regs; unsigned long val; set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); tegra_ehci_power_up(hcd); if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) { /* Wait for the phy to detect new devices * before we restart the controller */ msleep(10); goto restart; } /* Force the phy to keep data lines in suspend state */ tegra_ehci_phy_restore_start(hcd->phy, tegra->port_speed); /* Enable host mode */ tdi_reset(ehci); /* Enable Port Power */ val = readl(&hw->port_status[0]); val |= PORT_POWER; writel(val, &hw->port_status[0]); udelay(10); /* Check if the phy resume from LP0. When the phy resume from LP0 * USB register will be reset. 
*/ if (!readl(&hw->async_next)) { /* Program the field PTC based on the saved speed mode */ val = readl(&hw->port_status[0]); val &= ~PORT_TEST(~0); if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH) val |= PORT_TEST_FORCE; else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL) val |= PORT_TEST(6); else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW) val |= PORT_TEST(7); writel(val, &hw->port_status[0]); udelay(10); /* Disable test mode by setting PTC field to NORMAL_OP */ val = readl(&hw->port_status[0]); val &= ~PORT_TEST(~0); writel(val, &hw->port_status[0]); udelay(10); } /* Poll until CCS is enabled */ if (handshake(ehci, &hw->port_status[0], PORT_CONNECT, PORT_CONNECT, 2000)) { pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__); goto restart; } /* Poll until PE is enabled */ if (handshake(ehci, &hw->port_status[0], PORT_PE, PORT_PE, 2000)) { pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__); goto restart; } /* Clear the PCI status, to avoid an interrupt taken upon resume */ val = readl(&hw->status); val |= STS_PCD; writel(val, &hw->status); /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */ val = readl(&hw->port_status[0]); if ((val & PORT_POWER) && (val & PORT_PE)) { val |= PORT_SUSPEND; writel(val, &hw->port_status[0]); /* Wait until port suspend completes */ if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND, PORT_SUSPEND, 1000)) { pr_err("%s: timeout waiting for PORT_SUSPEND\n", __func__); goto restart; } } tegra_ehci_phy_restore_end(hcd->phy); goto done; restart: if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH) tegra_ehci_phy_restore_end(hcd->phy); tegra_ehci_restart(hcd); done: tegra_usb_phy_preresume(hcd->phy); tegra->port_resuming = 1; return 0; } static int tegra_ehci_suspend(struct device *dev) { struct tegra_ehci_hcd *tegra = platform_get_drvdata(to_platform_device(dev)); struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci); int rc = 0; /* * When system sleep is supported and USB 
controller wakeup is * implemented: If the controller is runtime-suspended and the * wakeup setting needs to be changed, call pm_runtime_resume(). */ if (HCD_HW_ACCESSIBLE(hcd)) rc = controller_suspend(dev); return rc; } static int tegra_ehci_resume(struct device *dev) { int rc; rc = controller_resume(dev); if (rc == 0) { pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); } return rc; } static int tegra_ehci_runtime_suspend(struct device *dev) { return controller_suspend(dev); } static int tegra_ehci_runtime_resume(struct device *dev) { return controller_resume(dev); } static const struct dev_pm_ops tegra_ehci_pm_ops = { .suspend = tegra_ehci_suspend, .resume = tegra_ehci_resume, .runtime_suspend = tegra_ehci_runtime_suspend, .runtime_resume = tegra_ehci_runtime_resume, }; #endif /* Bits of PORTSC1, which will get cleared by writing 1 into them */ #define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) static void tegra_ehci_set_pts(struct usb_phy *x, u8 pts_val) { unsigned long val; struct usb_hcd *hcd = bus_to_hcd(x->otg->host); void __iomem *base = hcd->regs; val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS; val &= ~TEGRA_USB_PORTSC1_PTS(3); val |= TEGRA_USB_PORTSC1_PTS(pts_val & 3); writel(val, base + TEGRA_USB_PORTSC1); } static void tegra_ehci_set_phcd(struct usb_phy *x, bool enable) { unsigned long val; struct usb_hcd *hcd = bus_to_hcd(x->otg->host); void __iomem *base = hcd->regs; val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS; if (enable) val |= TEGRA_USB_PORTSC1_PHCD; else val &= ~TEGRA_USB_PORTSC1_PHCD; writel(val, base + TEGRA_USB_PORTSC1); } static int tegra_ehci_probe(struct platform_device *pdev) { struct resource *res; struct usb_hcd *hcd; struct tegra_ehci_hcd *tegra; struct tegra_ehci_platform_data *pdata; int err = 0; int irq; int instance = pdev->id; struct usb_phy *u_phy; pdata = pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "Platform data missing\n"); return 
-EINVAL; } /* Right now device-tree probed devices don't get dma_mask set. * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); setup_vbus_gpio(pdev, pdata); tegra = devm_kzalloc(&pdev->dev, sizeof(struct tegra_ehci_hcd), GFP_KERNEL); if (!tegra) return -ENOMEM; hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "Unable to create HCD\n"); return -ENOMEM; } platform_set_drvdata(pdev, tegra); tegra->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(tegra->clk)) { dev_err(&pdev->dev, "Can't get ehci clock\n"); err = PTR_ERR(tegra->clk); goto fail_clk; } err = clk_prepare_enable(tegra->clk); if (err) goto fail_clk; tegra_periph_reset_assert(tegra->clk); udelay(1); tegra_periph_reset_deassert(tegra->clk); tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node, "nvidia,needs-double-reset"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Failed to get I/O memory\n"); err = -ENXIO; goto fail_io; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!hcd->regs) { dev_err(&pdev->dev, "Failed to remap I/O memory\n"); err = -ENOMEM; goto fail_io; } /* This is pretty ugly and needs to be fixed when we do only * device-tree probing. Old code relies on the platform_device * numbering that we lack for device-tree-instantiated devices. 
*/ if (instance < 0) { switch (res->start) { case TEGRA_USB_BASE: instance = 0; break; case TEGRA_USB2_BASE: instance = 1; break; case TEGRA_USB3_BASE: instance = 2; break; default: err = -ENODEV; dev_err(&pdev->dev, "unknown usb instance\n"); goto fail_io; } } tegra->phy = tegra_usb_phy_open(&pdev->dev, instance, hcd->regs, pdata->phy_config, TEGRA_USB_PHY_MODE_HOST, tegra_ehci_set_pts, tegra_ehci_set_phcd); if (IS_ERR(tegra->phy)) { dev_err(&pdev->dev, "Failed to open USB phy\n"); err = -ENXIO; goto fail_io; } hcd->phy = u_phy = &tegra->phy->u_phy; usb_phy_init(hcd->phy); u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg), GFP_KERNEL); if (!u_phy->otg) { dev_err(&pdev->dev, "Failed to alloc memory for otg\n"); err = -ENOMEM; goto fail_io; } u_phy->otg->host = hcd_to_bus(hcd); err = usb_phy_set_suspend(hcd->phy, 0); if (err) { dev_err(&pdev->dev, "Failed to power on the phy\n"); goto fail_phy; } tegra->host_resumed = 1; tegra->ehci = hcd_to_ehci(hcd); irq = platform_get_irq(pdev, 0); if (!irq) { dev_err(&pdev->dev, "Failed to get IRQ\n"); err = -ENODEV; goto fail_phy; } if (pdata->operating_mode == TEGRA_USB_OTG) { tegra->transceiver = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2); if (!IS_ERR(tegra->transceiver)) otg_set_host(tegra->transceiver->otg, &hcd->self); } else { tegra->transceiver = ERR_PTR(-ENODEV); } err = usb_add_hcd(hcd, irq, IRQF_SHARED); if (err) { dev_err(&pdev->dev, "Failed to add USB HCD\n"); goto fail; } pm_runtime_set_active(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); /* Don't skip the pm_runtime_forbid call if wakeup isn't working */ /* if (!pdata->power_down_on_bus_suspend) */ pm_runtime_forbid(&pdev->dev); pm_runtime_enable(&pdev->dev); pm_runtime_put_sync(&pdev->dev); return err; fail: if (!IS_ERR(tegra->transceiver)) otg_set_host(tegra->transceiver->otg, NULL); fail_phy: usb_phy_shutdown(hcd->phy); fail_io: clk_disable_unprepare(tegra->clk); fail_clk: usb_put_hcd(hcd); return err; } static int tegra_ehci_remove(struct 
platform_device *pdev) { struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev); struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci); pm_runtime_get_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); if (!IS_ERR(tegra->transceiver)) otg_set_host(tegra->transceiver->otg, NULL); usb_phy_shutdown(hcd->phy); usb_remove_hcd(hcd); usb_put_hcd(hcd); clk_disable_unprepare(tegra->clk); return 0; } static void tegra_ehci_hcd_shutdown(struct platform_device *pdev) { struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev); struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci); if (hcd->driver->shutdown) hcd->driver->shutdown(hcd); } static struct of_device_id tegra_ehci_of_match[] = { { .compatible = "nvidia,tegra20-ehci", }, { }, }; static struct platform_driver tegra_ehci_driver = { .probe = tegra_ehci_probe, .remove = tegra_ehci_remove, .shutdown = tegra_ehci_hcd_shutdown, .driver = { .name = "tegra-ehci", .of_match_table = tegra_ehci_of_match, #ifdef CONFIG_PM .pm = &tegra_ehci_pm_ops, #endif } };
gpl-2.0
KylinUI/android_kernel_lge_mako
arch/arm/mach-msm/acpuclock-fsm9xxx.c
3214
1671
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_device.h> #include <mach/board.h> #include "acpuclock.h" /* Registers */ #define PLL1_CTL_ADDR (MSM_CLK_CTL_BASE + 0x604) static unsigned long acpuclk_9xxx_get_rate(int cpu) { unsigned int pll1_ctl; unsigned int pll1_l, pll1_div2; unsigned int pll1_khz; pll1_ctl = readl_relaxed(PLL1_CTL_ADDR); pll1_l = ((pll1_ctl >> 3) & 0x3f) * 2; pll1_div2 = pll1_ctl & 0x20000; pll1_khz = 19200 * pll1_l; if (pll1_div2) pll1_khz >>= 1; return pll1_khz; } static struct acpuclk_data acpuclk_9xxx_data = { .get_rate = acpuclk_9xxx_get_rate, }; static int __init acpuclk_9xxx_probe(struct platform_device *pdev) { acpuclk_register(&acpuclk_9xxx_data); pr_info("ACPU running at %lu KHz\n", acpuclk_get_rate(0)); return 0; } static struct platform_driver acpuclk_9xxx_driver = { .driver = { .name = "acpuclk-9xxx", .owner = THIS_MODULE, }, }; static int __init acpuclk_9xxx_init(void) { return platform_driver_probe(&acpuclk_9xxx_driver, acpuclk_9xxx_probe); } device_initcall(acpuclk_9xxx_init);
gpl-2.0
cile/pyramid-kernel
drivers/i2c/busses/i2c-ixp2000.c
3982
4373
/* * drivers/i2c/busses/i2c-ixp2000.c * * I2C adapter for IXP2000 systems using GPIOs for I2C bus * * Author: Deepak Saxena <dsaxena@plexity.net> * Based on IXDP2400 code by: Naeem M. Afzal <naeem.m.afzal@intel.com> * Made generic by: Jeff Daly <jeffrey.daly@intel.com> * * Copyright (c) 2003-2004 MontaVista Software Inc. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * * From Jeff Daly: * * I2C adapter driver for Intel IXDP2xxx platforms. This should work for any * IXP2000 platform if it uses the HW GPIO in the same manner. Basically, * SDA and SCL GPIOs have external pullups. Setting the respective GPIO to * an input will make the signal a '1' via the pullup. Setting them to * outputs will pull them down. * * The GPIOs are open drain signals and are used as configuration strap inputs * during power-up so there's generally a buffer on the board that needs to be * 'enabled' to drive the GPIOs. 
*/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/slab.h> #include <mach/hardware.h> /* Pick up IXP2000-specific bits */ #include <mach/gpio.h> static inline int ixp2000_scl_pin(void *data) { return ((struct ixp2000_i2c_pins*)data)->scl_pin; } static inline int ixp2000_sda_pin(void *data) { return ((struct ixp2000_i2c_pins*)data)->sda_pin; } static void ixp2000_bit_setscl(void *data, int val) { int i = 5000; if (val) { gpio_line_config(ixp2000_scl_pin(data), GPIO_IN); while(!gpio_line_get(ixp2000_scl_pin(data)) && i--); } else { gpio_line_config(ixp2000_scl_pin(data), GPIO_OUT); } } static void ixp2000_bit_setsda(void *data, int val) { if (val) { gpio_line_config(ixp2000_sda_pin(data), GPIO_IN); } else { gpio_line_config(ixp2000_sda_pin(data), GPIO_OUT); } } static int ixp2000_bit_getscl(void *data) { return gpio_line_get(ixp2000_scl_pin(data)); } static int ixp2000_bit_getsda(void *data) { return gpio_line_get(ixp2000_sda_pin(data)); } struct ixp2000_i2c_data { struct ixp2000_i2c_pins *gpio_pins; struct i2c_adapter adapter; struct i2c_algo_bit_data algo_data; }; static int ixp2000_i2c_remove(struct platform_device *plat_dev) { struct ixp2000_i2c_data *drv_data = platform_get_drvdata(plat_dev); platform_set_drvdata(plat_dev, NULL); i2c_del_adapter(&drv_data->adapter); kfree(drv_data); return 0; } static int ixp2000_i2c_probe(struct platform_device *plat_dev) { int err; struct ixp2000_i2c_pins *gpio = plat_dev->dev.platform_data; struct ixp2000_i2c_data *drv_data = kzalloc(sizeof(struct ixp2000_i2c_data), GFP_KERNEL); if (!drv_data) return -ENOMEM; drv_data->gpio_pins = gpio; drv_data->algo_data.data = gpio; drv_data->algo_data.setsda = ixp2000_bit_setsda; drv_data->algo_data.setscl = ixp2000_bit_setscl; drv_data->algo_data.getsda = ixp2000_bit_getsda; drv_data->algo_data.getscl = ixp2000_bit_getscl; drv_data->algo_data.udelay = 6; 
drv_data->algo_data.timeout = HZ; strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name, sizeof(drv_data->adapter.name)); drv_data->adapter.algo_data = &drv_data->algo_data, drv_data->adapter.dev.parent = &plat_dev->dev; gpio_line_config(gpio->sda_pin, GPIO_IN); gpio_line_config(gpio->scl_pin, GPIO_IN); gpio_line_set(gpio->scl_pin, 0); gpio_line_set(gpio->sda_pin, 0); if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) { dev_err(&plat_dev->dev, "Could not install, error %d\n", err); kfree(drv_data); return err; } platform_set_drvdata(plat_dev, drv_data); return 0; } static struct platform_driver ixp2000_i2c_driver = { .probe = ixp2000_i2c_probe, .remove = ixp2000_i2c_remove, .driver = { .name = "IXP2000-I2C", .owner = THIS_MODULE, }, }; static int __init ixp2000_i2c_init(void) { return platform_driver_register(&ixp2000_i2c_driver); } static void __exit ixp2000_i2c_exit(void) { platform_driver_unregister(&ixp2000_i2c_driver); } module_init(ixp2000_i2c_init); module_exit(ixp2000_i2c_exit); MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>"); MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:IXP2000-I2C");
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHW-M130K
drivers/net/irda/sa1100_ir.c
4238
22082
/* * linux/drivers/net/irda/sa1100_ir.c * * Copyright (C) 2000-2001 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Infra-red driver for the StrongARM SA1100 embedded microprocessor * * Note that we don't have to worry about the SA1111's DMA bugs in here, * so we use the straight forward dma_map_* functions with a null pointer. * * This driver takes one kernel command line parameter, sa1100ir=, with * the following options: * max_rate:baudrate - set the maximum baud rate * power_leve:level - set the transmitter power level * tx_lpm:0|1 - set transmit low power mode */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/rtnetlink.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <net/irda/irda.h> #include <net/irda/wrapper.h> #include <net/irda/irda_device.h> #include <asm/irq.h> #include <mach/dma.h> #include <mach/hardware.h> #include <asm/mach/irda.h> static int power_level = 3; static int tx_lpm; static int max_rate = 4000000; struct sa1100_irda { unsigned char hscr0; unsigned char utcr4; unsigned char power; unsigned char open; int speed; int newspeed; struct sk_buff *txskb; struct sk_buff *rxskb; dma_addr_t txbuf_dma; dma_addr_t rxbuf_dma; dma_regs_t *txdma; dma_regs_t *rxdma; struct device *dev; struct irda_platform_data *pdata; struct irlap_cb *irlap; struct qos_info qos; iobuff_t tx_buff; iobuff_t rx_buff; }; #define IS_FIR(si) ((si)->speed >= 4000000) #define HPSIR_MAX_RXLEN 2047 /* * Allocate and map the receive buffer, unless it is already allocated. 
*/ static int sa1100_irda_rx_alloc(struct sa1100_irda *si) { if (si->rxskb) return 0; si->rxskb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC); if (!si->rxskb) { printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n"); return -ENOMEM; } /* * Align any IP headers that may be contained * within the frame. */ skb_reserve(si->rxskb, 1); si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data, HPSIR_MAX_RXLEN, DMA_FROM_DEVICE); return 0; } /* * We want to get here as soon as possible, and get the receiver setup. * We use the existing buffer. */ static void sa1100_irda_rx_dma_start(struct sa1100_irda *si) { if (!si->rxskb) { printk(KERN_ERR "sa1100_ir: rx buffer went missing\n"); return; } /* * First empty receive FIFO */ Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; /* * Enable the DMA, receiver and receive interrupt. */ sa1100_clear_dma(si->rxdma); sa1100_start_dma(si->rxdma, si->rxbuf_dma, HPSIR_MAX_RXLEN); Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_RXE; } /* * Set the IrDA communications speed. */ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed) { unsigned long flags; int brd, ret = -EINVAL; switch (speed) { case 9600: case 19200: case 38400: case 57600: case 115200: brd = 3686400 / (16 * speed) - 1; /* * Stop the receive DMA. 
*/ if (IS_FIR(si)) sa1100_stop_dma(si->rxdma); local_irq_save(flags); Ser2UTCR3 = 0; Ser2HSCR0 = HSCR0_UART; Ser2UTCR1 = brd >> 8; Ser2UTCR2 = brd; /* * Clear status register */ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; if (si->pdata->set_speed) si->pdata->set_speed(si->dev, speed); si->speed = speed; local_irq_restore(flags); ret = 0; break; case 4000000: local_irq_save(flags); si->hscr0 = 0; Ser2HSSR0 = 0xff; Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; Ser2UTCR3 = 0; si->speed = speed; if (si->pdata->set_speed) si->pdata->set_speed(si->dev, speed); sa1100_irda_rx_alloc(si); sa1100_irda_rx_dma_start(si); local_irq_restore(flags); break; default: break; } return ret; } /* * Control the power state of the IrDA transmitter. * State: * 0 - off * 1 - short range, lowest power * 2 - medium range, medium power * 3 - maximum range, high power * * Currently, only assabet is known to support this. */ static int __sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state) { int ret = 0; if (si->pdata->set_power) ret = si->pdata->set_power(si->dev, state); return ret; } static inline int sa1100_set_power(struct sa1100_irda *si, unsigned int state) { int ret; ret = __sa1100_irda_set_power(si, state); if (ret == 0) si->power = state; return ret; } static int sa1100_irda_startup(struct sa1100_irda *si) { int ret; /* * Ensure that the ports for this device are setup correctly. */ if (si->pdata->startup) { ret = si->pdata->startup(si->dev); if (ret) return ret; } /* * Configure PPC for IRDA - we want to drive TXD2 low. * We also want to drive this pin low during sleep. */ PPSR &= ~PPC_TXD2; PSDR &= ~PPC_TXD2; PPDR |= PPC_TXD2; /* * Enable HP-SIR modulation, and ensure that the port is disabled. 
*/ Ser2UTCR3 = 0; Ser2HSCR0 = HSCR0_UART; Ser2UTCR4 = si->utcr4; Ser2UTCR0 = UTCR0_8BitData; Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL; /* * Clear status register */ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; ret = sa1100_irda_set_speed(si, si->speed = 9600); if (ret) { Ser2UTCR3 = 0; Ser2HSCR0 = 0; if (si->pdata->shutdown) si->pdata->shutdown(si->dev); } return ret; } static void sa1100_irda_shutdown(struct sa1100_irda *si) { /* * Stop all DMA activity. */ sa1100_stop_dma(si->rxdma); sa1100_stop_dma(si->txdma); /* Disable the port. */ Ser2UTCR3 = 0; Ser2HSCR0 = 0; if (si->pdata->shutdown) si->pdata->shutdown(si->dev); } #ifdef CONFIG_PM /* * Suspend the IrDA interface. */ static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state) { struct net_device *dev = platform_get_drvdata(pdev); struct sa1100_irda *si; if (!dev) return 0; si = netdev_priv(dev); if (si->open) { /* * Stop the transmit queue */ netif_device_detach(dev); disable_irq(dev->irq); sa1100_irda_shutdown(si); __sa1100_irda_set_power(si, 0); } return 0; } /* * Resume the IrDA interface. */ static int sa1100_irda_resume(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sa1100_irda *si; if (!dev) return 0; si = netdev_priv(dev); if (si->open) { /* * If we missed a speed change, initialise at the new speed * directly. It is debatable whether this is actually * required, but in the interests of continuing from where * we left off it is desirable. The converse argument is * that we should re-negotiate at 9600 baud again. */ if (si->newspeed) { si->speed = si->newspeed; si->newspeed = 0; } sa1100_irda_startup(si); __sa1100_irda_set_power(si, si->power); enable_irq(dev->irq); /* * This automatically wakes up the queue */ netif_device_attach(dev); } return 0; } #else #define sa1100_irda_suspend NULL #define sa1100_irda_resume NULL #endif /* * HP-SIR format interrupt service routines. 
*/ static void sa1100_irda_hpsir_irq(struct net_device *dev) { struct sa1100_irda *si = netdev_priv(dev); int status; status = Ser2UTSR0; /* * Deal with any receive errors first. The bytes in error may be * the only bytes in the receive FIFO, so we do this first. */ while (status & UTSR0_EIF) { int stat, data; stat = Ser2UTSR1; data = Ser2UTDR; if (stat & (UTSR1_FRE | UTSR1_ROR)) { dev->stats.rx_errors++; if (stat & UTSR1_FRE) dev->stats.rx_frame_errors++; if (stat & UTSR1_ROR) dev->stats.rx_fifo_errors++; } else async_unwrap_char(dev, &dev->stats, &si->rx_buff, data); status = Ser2UTSR0; } /* * We must clear certain bits. */ Ser2UTSR0 = status & (UTSR0_RID | UTSR0_RBB | UTSR0_REB); if (status & UTSR0_RFS) { /* * There are at least 4 bytes in the FIFO. Read 3 bytes * and leave the rest to the block below. */ async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR); async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR); async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR); } if (status & (UTSR0_RFS | UTSR0_RID)) { /* * Fifo contains more than 1 character. */ do { async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR); } while (Ser2UTSR1 & UTSR1_RNE); } if (status & UTSR0_TFS && si->tx_buff.len) { /* * Transmitter FIFO is not full */ do { Ser2UTDR = *si->tx_buff.data++; si->tx_buff.len -= 1; } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len); if (si->tx_buff.len == 0) { dev->stats.tx_packets++; dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head; /* * We need to ensure that the transmitter has * finished. */ do rmb(); while (Ser2UTSR1 & UTSR1_TBY); /* * Ok, we've finished transmitting. Now enable * the receiver. Sometimes we get a receive IRQ * immediately after a transmit... */ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID; Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE; if (si->newspeed) { sa1100_irda_set_speed(si, si->newspeed); si->newspeed = 0; } /* I'm hungry! 
*/ netif_wake_queue(dev); } } } static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev) { struct sk_buff *skb = si->rxskb; dma_addr_t dma_addr; unsigned int len, stat, data; if (!skb) { printk(KERN_ERR "sa1100_ir: SKB is NULL!\n"); return; } /* * Get the current data position. */ dma_addr = sa1100_get_dma_pos(si->rxdma); len = dma_addr - si->rxbuf_dma; if (len > HPSIR_MAX_RXLEN) len = HPSIR_MAX_RXLEN; dma_unmap_single(si->dev, si->rxbuf_dma, len, DMA_FROM_DEVICE); do { /* * Read Status, and then Data. */ stat = Ser2HSSR1; rmb(); data = Ser2HSDR; if (stat & (HSSR1_CRE | HSSR1_ROR)) { dev->stats.rx_errors++; if (stat & HSSR1_CRE) dev->stats.rx_crc_errors++; if (stat & HSSR1_ROR) dev->stats.rx_frame_errors++; } else skb->data[len++] = data; /* * If we hit the end of frame, there's * no point in continuing. */ if (stat & HSSR1_EOF) break; } while (Ser2HSSR0 & HSSR0_EIF); if (stat & HSSR1_EOF) { si->rxskb = NULL; skb_put(skb, len); skb->dev = dev; skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); dev->stats.rx_packets++; dev->stats.rx_bytes += len; /* * Before we pass the buffer up, allocate a new one. */ sa1100_irda_rx_alloc(si); netif_rx(skb); } else { /* * Remap the buffer. */ si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data, HPSIR_MAX_RXLEN, DMA_FROM_DEVICE); } } /* * FIR format interrupt service routine. We only have to * handle RX events; transmit events go via the TX DMA handler. * * No matter what, we disable RX, process, and the restart RX. */ static void sa1100_irda_fir_irq(struct net_device *dev) { struct sa1100_irda *si = netdev_priv(dev); /* * Stop RX DMA */ sa1100_stop_dma(si->rxdma); /* * Framing error - we throw away the packet completely. * Clearing RXE flushes the error conditions and data * from the fifo. */ if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) { dev->stats.rx_errors++; if (Ser2HSSR0 & HSSR0_FRE) dev->stats.rx_frame_errors++; /* * Clear out the DMA... 
*/ Ser2HSCR0 = si->hscr0 | HSCR0_HSSP; /* * Clear selected status bits now, so we * don't miss them next time around. */ Ser2HSSR0 = HSSR0_FRE | HSSR0_RAB; } /* * Deal with any receive errors. The any of the lowest * 8 bytes in the FIFO may contain an error. We must read * them one by one. The "error" could even be the end of * packet! */ if (Ser2HSSR0 & HSSR0_EIF) sa1100_irda_fir_error(si, dev); /* * No matter what happens, we must restart reception. */ sa1100_irda_rx_dma_start(si); } static irqreturn_t sa1100_irda_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; if (IS_FIR(((struct sa1100_irda *)netdev_priv(dev)))) sa1100_irda_fir_irq(dev); else sa1100_irda_hpsir_irq(dev); return IRQ_HANDLED; } /* * TX DMA completion handler. */ static void sa1100_irda_txdma_irq(void *id) { struct net_device *dev = id; struct sa1100_irda *si = netdev_priv(dev); struct sk_buff *skb = si->txskb; si->txskb = NULL; /* * Wait for the transmission to complete. Unfortunately, * the hardware doesn't give us an interrupt to indicate * "end of frame". */ do rmb(); while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY); /* * Clear the transmit underrun bit. */ Ser2HSSR0 = HSSR0_TUR; /* * Do we need to change speed? Note that we're lazy * here - we don't free the old rxskb. We don't need * to allocate a buffer either. */ if (si->newspeed) { sa1100_irda_set_speed(si, si->newspeed); si->newspeed = 0; } /* * Start reception. This disables the transmitter for * us. This will be using the existing RX buffer. */ sa1100_irda_rx_dma_start(si); /* * Account and free the packet. */ if (skb) { dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE); dev->stats.tx_packets ++; dev->stats.tx_bytes += skb->len; dev_kfree_skb_irq(skb); } /* * Make sure that the TX queue is available for sending * (for retries). TX has priority over RX at all times. 
*/ netif_wake_queue(dev); } static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) { struct sa1100_irda *si = netdev_priv(dev); int speed = irda_get_next_speed(skb); /* * Does this packet contain a request to change the interface * speed? If so, remember it until we complete the transmission * of this frame. */ if (speed != si->speed && speed != -1) si->newspeed = speed; /* * If this is an empty frame, we can bypass a lot. */ if (skb->len == 0) { if (si->newspeed) { si->newspeed = 0; sa1100_irda_set_speed(si, speed); } dev_kfree_skb(skb); return NETDEV_TX_OK; } if (!IS_FIR(si)) { netif_stop_queue(dev); si->tx_buff.data = si->tx_buff.head; si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize); /* * Set the transmit interrupt enable. This will fire * off an interrupt immediately. Note that we disable * the receiver so we won't get spurious characteres * received. */ Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE; dev_kfree_skb(skb); } else { int mtt = irda_get_mtt(skb); /* * We must not be transmitting... */ BUG_ON(si->txskb); netif_stop_queue(dev); si->txskb = skb; si->txbuf_dma = dma_map_single(si->dev, skb->data, skb->len, DMA_TO_DEVICE); sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len); /* * If we have a mean turn-around time, impose the specified * specified delay. We could shorten this by timing from * the point we received the packet. */ if (mtt) udelay(mtt); Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE; } return NETDEV_TX_OK; } static int sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) { struct if_irda_req *rq = (struct if_irda_req *)ifreq; struct sa1100_irda *si = netdev_priv(dev); int ret = -EOPNOTSUPP; switch (cmd) { case SIOCSBANDWIDTH: if (capable(CAP_NET_ADMIN)) { /* * We are unable to set the speed if the * device is not running. 
*/ if (si->open) { ret = sa1100_irda_set_speed(si, rq->ifr_baudrate); } else { printk("sa1100_irda_ioctl: SIOCSBANDWIDTH: !netif_running\n"); ret = 0; } } break; case SIOCSMEDIABUSY: ret = -EPERM; if (capable(CAP_NET_ADMIN)) { irda_device_set_media_busy(dev, TRUE); ret = 0; } break; case SIOCGRECEIVING: rq->ifr_receiving = IS_FIR(si) ? 0 : si->rx_buff.state != OUTSIDE_FRAME; break; default: break; } return ret; } static int sa1100_irda_start(struct net_device *dev) { struct sa1100_irda *si = netdev_priv(dev); int err; si->speed = 9600; err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev); if (err) goto err_irq; err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive", NULL, NULL, &si->rxdma); if (err) goto err_rx_dma; err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit", sa1100_irda_txdma_irq, dev, &si->txdma); if (err) goto err_tx_dma; /* * The interrupt must remain disabled for now. */ disable_irq(dev->irq); /* * Setup the serial port for the specified speed. */ err = sa1100_irda_startup(si); if (err) goto err_startup; /* * Open a new IrLAP layer instance. */ si->irlap = irlap_open(dev, &si->qos, "sa1100"); err = -ENOMEM; if (!si->irlap) goto err_irlap; /* * Now enable the interrupt and start the queue */ si->open = 1; sa1100_set_power(si, power_level); /* low power mode */ enable_irq(dev->irq); netif_start_queue(dev); return 0; err_irlap: si->open = 0; sa1100_irda_shutdown(si); err_startup: sa1100_free_dma(si->txdma); err_tx_dma: sa1100_free_dma(si->rxdma); err_rx_dma: free_irq(dev->irq, dev); err_irq: return err; } static int sa1100_irda_stop(struct net_device *dev) { struct sa1100_irda *si = netdev_priv(dev); disable_irq(dev->irq); sa1100_irda_shutdown(si); /* * If we have been doing DMA receive, make sure we * tidy that up cleanly. 
*/ if (si->rxskb) { dma_unmap_single(si->dev, si->rxbuf_dma, HPSIR_MAX_RXLEN, DMA_FROM_DEVICE); dev_kfree_skb(si->rxskb); si->rxskb = NULL; } /* Stop IrLAP */ if (si->irlap) { irlap_close(si->irlap); si->irlap = NULL; } netif_stop_queue(dev); si->open = 0; /* * Free resources */ sa1100_free_dma(si->txdma); sa1100_free_dma(si->rxdma); free_irq(dev->irq, dev); sa1100_set_power(si, 0); return 0; } static int sa1100_irda_init_iobuf(iobuff_t *io, int size) { io->head = kmalloc(size, GFP_KERNEL | GFP_DMA); if (io->head != NULL) { io->truesize = size; io->in_frame = FALSE; io->state = OUTSIDE_FRAME; io->data = io->head; } return io->head ? 0 : -ENOMEM; } static const struct net_device_ops sa1100_irda_netdev_ops = { .ndo_open = sa1100_irda_start, .ndo_stop = sa1100_irda_stop, .ndo_start_xmit = sa1100_irda_hard_xmit, .ndo_do_ioctl = sa1100_irda_ioctl, }; static int sa1100_irda_probe(struct platform_device *pdev) { struct net_device *dev; struct sa1100_irda *si; unsigned int baudrate_mask; int err; if (!pdev->dev.platform_data) return -EINVAL; err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY; if (err) goto err_mem_1; err = request_mem_region(__PREG(Ser2HSCR0), 0x1c, "IrDA") ? 0 : -EBUSY; if (err) goto err_mem_2; err = request_mem_region(__PREG(Ser2HSCR2), 0x04, "IrDA") ? 0 : -EBUSY; if (err) goto err_mem_3; dev = alloc_irdadev(sizeof(struct sa1100_irda)); if (!dev) goto err_mem_4; si = netdev_priv(dev); si->dev = &pdev->dev; si->pdata = pdev->dev.platform_data; /* * Initialise the HP-SIR buffers */ err = sa1100_irda_init_iobuf(&si->rx_buff, 14384); if (err) goto err_mem_5; err = sa1100_irda_init_iobuf(&si->tx_buff, 4000); if (err) goto err_mem_5; dev->netdev_ops = &sa1100_irda_netdev_ops; dev->irq = IRQ_Ser2ICP; irda_init_max_qos_capabilies(&si->qos); /* * We support original IRDA up to 115k2. (we don't currently * support 4Mbps). Min Turn Time set to 1ms or greater. 
 */
	baudrate_mask = IR_9600;

	/*
	 * Build the supported-rate mask.  The switch intentionally
	 * cascades: selecting a maximum rate also enables every
	 * slower SIR rate below it.
	 */
	switch (max_rate) {
	case 4000000:
		baudrate_mask |= IR_4000000 << 8;
		/* fall through */
	case 115200:
		baudrate_mask |= IR_115200;
		/* fall through */
	case 57600:
		baudrate_mask |= IR_57600;
		/* fall through */
	case 38400:
		baudrate_mask |= IR_38400;
		/* fall through */
	case 19200:
		baudrate_mask |= IR_19200;
	}

	si->qos.baud_rate.bits &= baudrate_mask;
	si->qos.min_turn_time.bits = 7;		/* 1ms or greater */

	irda_qos_bits_to_value(&si->qos);

	/* HP-SIR modulation; optionally 1.6us transmit pulses (low power) */
	si->utcr4 = UTCR4_HPSIR;
	if (tx_lpm)
		si->utcr4 |= UTCR4_Z1_6us;

	/*
	 * Initially enable HP-SIR modulation, and ensure that the port
	 * is disabled.
	 */
	Ser2UTCR3 = 0;
	Ser2UTCR4 = si->utcr4;
	Ser2HSCR0 = HSCR0_UART;

	err = register_netdev(dev);
	if (err == 0)
		platform_set_drvdata(pdev, dev);

	if (err) {
		/*
		 * Unwind in reverse order of acquisition.  These labels
		 * live inside the block so the earlier "goto err_mem_*"
		 * jumps share the same cleanup path.
		 */
 err_mem_5:
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		free_netdev(dev);
 err_mem_4:
		release_mem_region(__PREG(Ser2HSCR2), 0x04);
 err_mem_3:
		release_mem_region(__PREG(Ser2HSCR0), 0x1c);
 err_mem_2:
		release_mem_region(__PREG(Ser2UTCR0), 0x24);
	}
 err_mem_1:
	return err;
}

/*
 * Driver removal: unregister the net device, free the HP-SIR buffers
 * and release the I/O regions claimed in probe.
 */
static int sa1100_irda_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	if (dev) {
		struct sa1100_irda *si = netdev_priv(dev);
		unregister_netdev(dev);
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		free_netdev(dev);
	}

	release_mem_region(__PREG(Ser2HSCR2), 0x04);
	release_mem_region(__PREG(Ser2HSCR0), 0x1c);
	release_mem_region(__PREG(Ser2UTCR0), 0x24);

	return 0;
}

static struct platform_driver sa1100ir_driver = {
	.probe		= sa1100_irda_probe,
	.remove		= sa1100_irda_remove,
	.suspend	= sa1100_irda_suspend,
	.resume		= sa1100_irda_resume,
	.driver		= {
		.name	= "sa11x0-ir",
		.owner	= THIS_MODULE,
	},
};

static int __init sa1100_irda_init(void)
{
	/*
	 * Limit power level to a sensible range.
 */
	/* Clamp the module parameter to the supported 1..3 range */
	if (power_level < 1)
		power_level = 1;
	if (power_level > 3)
		power_level = 3;

	return platform_driver_register(&sa1100ir_driver);
}

/* Module unload: simply unregister the platform driver. */
static void __exit sa1100_irda_exit(void)
{
	platform_driver_unregister(&sa1100ir_driver);
}

module_init(sa1100_irda_init);
module_exit(sa1100_irda_exit);

/* Load-time tunables; perm 0 means they are not exposed via sysfs */
module_param(power_level, int, 0);
module_param(tx_lpm, int, 0);
module_param(max_rate, int, 0);

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("StrongARM SA1100 IrDA driver");
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(power_level, "IrDA power level, 1 (low) to 3 (high)");
MODULE_PARM_DESC(tx_lpm, "Enable transmitter low power (1.6us) mode");
MODULE_PARM_DESC(max_rate, "Maximum baud rate (4000000, 115200, 57600, 38400, 19200, 9600)");
MODULE_ALIAS("platform:sa11x0-ir");
gpl-2.0
cb22/tf300tg_jb_kernel
drivers/net/sonic.c
4238
22081
/* * sonic.c * * (C) 2005 Finn Thain * * Converted to DMA API, added zero-copy buffer handling, and * (from the mac68k project) introduced dhd's support for 16-bit cards. * * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) * * This driver is based on work from Andreas Busse, but most of * the code is rewritten. * * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) * * Core code included by system sonic drivers * * And... partially rewritten again by David Huggins-Daines in order * to cope with screwed up Macintosh NICs that may or may not use * 16-bit DMA. * * (C) 1999 David Huggins-Daines <dhd@debian.org> * */ /* * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook, * National Semiconductors data sheet for the DP83932B Sonic Ethernet * controller, and the files "8390.c" and "skeleton.c" in this directory. * * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also * the NetBSD file "sys/arch/mac68k/dev/if_sn.c". */ /* * Open/initialize the SONIC controller. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. 
*/ static int sonic_open(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); int i; if (sonic_debug > 2) printk("sonic_open: initializing sonic driver.\n"); for (i = 0; i < SONIC_NUM_RRS; i++) { struct sk_buff *skb = dev_alloc_skb(SONIC_RBSIZE + 2); if (skb == NULL) { while(i > 0) { /* free any that were allocated successfully */ i--; dev_kfree_skb(lp->rx_skb[i]); lp->rx_skb[i] = NULL; } printk(KERN_ERR "%s: couldn't allocate receive buffers\n", dev->name); return -ENOMEM; } /* align IP header unless DMA requires otherwise */ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) skb_reserve(skb, 2); lp->rx_skb[i] = skb; } for (i = 0; i < SONIC_NUM_RRS; i++) { dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), SONIC_RBSIZE, DMA_FROM_DEVICE); if (!laddr) { while(i > 0) { /* free any that were mapped successfully */ i--; dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); lp->rx_laddr[i] = (dma_addr_t)0; } for (i = 0; i < SONIC_NUM_RRS; i++) { dev_kfree_skb(lp->rx_skb[i]); lp->rx_skb[i] = NULL; } printk(KERN_ERR "%s: couldn't map rx DMA buffers\n", dev->name); return -ENOMEM; } lp->rx_laddr[i] = laddr; } /* * Initialize the SONIC */ sonic_init(dev); netif_start_queue(dev); if (sonic_debug > 2) printk("sonic_open: Initialization done.\n"); return 0; } /* * Close the SONIC device */ static int sonic_close(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); int i; if (sonic_debug > 2) printk("sonic_close\n"); netif_stop_queue(dev); /* * stop the SONIC, disable interrupts */ SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); /* unmap and free skbs that haven't been transmitted */ for (i = 0; i < SONIC_NUM_TDS; i++) { if(lp->tx_laddr[i]) { dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE); lp->tx_laddr[i] = (dma_addr_t)0; } if(lp->tx_skb[i]) { dev_kfree_skb(lp->tx_skb[i]); lp->tx_skb[i] = NULL; } } /* unmap and free the 
receive buffers */ for (i = 0; i < SONIC_NUM_RRS; i++) { if(lp->rx_laddr[i]) { dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); lp->rx_laddr[i] = (dma_addr_t)0; } if(lp->rx_skb[i]) { dev_kfree_skb(lp->rx_skb[i]); lp->rx_skb[i] = NULL; } } return 0; } static void sonic_tx_timeout(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); int i; /* * put the Sonic into software-reset mode and * disable all interrupts before releasing DMA buffers */ SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); /* We could resend the original skbs. Easier to re-initialise. */ for (i = 0; i < SONIC_NUM_TDS; i++) { if(lp->tx_laddr[i]) { dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE); lp->tx_laddr[i] = (dma_addr_t)0; } if(lp->tx_skb[i]) { dev_kfree_skb(lp->tx_skb[i]); lp->tx_skb[i] = NULL; } } /* Try to restart the adaptor. */ sonic_init(dev); lp->stats.tx_errors++; dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } /* * transmit packet * * Appends new TD during transmission thus avoiding any TX interrupts * until we run out of TDs. * This routine interacts closely with the ISR in that it may, * set tx_skb[i] * reset the status flags of the new TD * set and reset EOL flags * stop the tx queue * The ISR interacts with this routine in various ways. It may, * reset tx_skb[i] * test the EOL and status flags of the TDs * wake the tx queue * Concurrently with all of this, the SONIC is potentially writing to * the status flags of the TDs. * Until some mutual exclusion is added, this code will not work with SMP. However, * MIPS Jazz machines and m68k Macs were all uni-processor machines. 
*/ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); dma_addr_t laddr; int length; int entry = lp->next_tx; if (sonic_debug > 2) printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev); length = skb->len; if (length < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; length = ETH_ZLEN; } /* * Map the packet data into the logical DMA address space */ laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE); if (!laddr) { printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name); dev_kfree_skb(skb); return NETDEV_TX_BUSY; } sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff); sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16); sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length); sonic_tda_put(dev, entry, SONIC_TD_LINK, sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); /* * Must set tx_skb[entry] only after clearing status, and * before clearing EOL and before stopping queue */ wmb(); lp->tx_len[entry] = length; lp->tx_laddr[entry] = laddr; lp->tx_skb[entry] = skb; wmb(); sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL); lp->eol_tx = entry; lp->next_tx = (entry + 1) & SONIC_TDS_MASK; if (lp->tx_skb[lp->next_tx] != NULL) { /* The ring is full, the ISR has yet to process the next TD. */ if (sonic_debug > 3) printk("%s: stopping queue\n", dev->name); netif_stop_queue(dev); /* after this packet, wait for ISR to free up some TDAs */ } else netif_start_queue(dev); if (sonic_debug > 2) printk("sonic_send_packet: issuing Tx command\n"); SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); return NETDEV_TX_OK; } /* * The typical workload of the driver: * Handle the network interface interrupts. 
*/ static irqreturn_t sonic_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct sonic_local *lp = netdev_priv(dev); int status; if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)) return IRQ_NONE; do { if (status & SONIC_INT_PKTRX) { if (sonic_debug > 2) printk("%s: packet rx\n", dev->name); sonic_rx(dev); /* got packet(s) */ SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */ } if (status & SONIC_INT_TXDN) { int entry = lp->cur_tx; int td_status; int freed_some = 0; /* At this point, cur_tx is the index of a TD that is one of: * unallocated/freed (status set & tx_skb[entry] clear) * allocated and sent (status set & tx_skb[entry] set ) * allocated and not yet sent (status clear & tx_skb[entry] set ) * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) */ if (sonic_debug > 2) printk("%s: tx done\n", dev->name); while (lp->tx_skb[entry] != NULL) { if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) break; if (td_status & 0x0001) { lp->stats.tx_packets++; lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); } else { lp->stats.tx_errors++; if (td_status & 0x0642) lp->stats.tx_aborted_errors++; if (td_status & 0x0180) lp->stats.tx_carrier_errors++; if (td_status & 0x0020) lp->stats.tx_window_errors++; if (td_status & 0x0004) lp->stats.tx_fifo_errors++; } /* We must free the original skb */ dev_kfree_skb_irq(lp->tx_skb[entry]); lp->tx_skb[entry] = NULL; /* and unmap DMA buffer */ dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE); lp->tx_laddr[entry] = (dma_addr_t)0; freed_some = 1; if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) { entry = (entry + 1) & SONIC_TDS_MASK; break; } entry = (entry + 1) & SONIC_TDS_MASK; } if (freed_some || lp->tx_skb[entry] == NULL) netif_wake_queue(dev); /* The ring is no longer full */ lp->cur_tx = entry; SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */ } /* * check error 
conditions */ if (status & SONIC_INT_RFO) { if (sonic_debug > 1) printk("%s: rx fifo overrun\n", dev->name); lp->stats.rx_fifo_errors++; SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */ } if (status & SONIC_INT_RDE) { if (sonic_debug > 1) printk("%s: rx descriptors exhausted\n", dev->name); lp->stats.rx_dropped++; SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */ } if (status & SONIC_INT_RBAE) { if (sonic_debug > 1) printk("%s: rx buffer area exceeded\n", dev->name); lp->stats.rx_dropped++; SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */ } /* counter overruns; all counters are 16bit wide */ if (status & SONIC_INT_FAE) { lp->stats.rx_frame_errors += 65536; SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */ } if (status & SONIC_INT_CRC) { lp->stats.rx_crc_errors += 65536; SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */ } if (status & SONIC_INT_MP) { lp->stats.rx_missed_errors += 65536; SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */ } /* transmit error */ if (status & SONIC_INT_TXER) { if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2)) printk(KERN_ERR "%s: tx fifo underrun\n", dev->name); SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */ } /* bus retry */ if (status & SONIC_INT_BR) { printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n", dev->name); /* ... to help debug DMA problems causing endless interrupts. */ /* Bounce the eth interface to turn on the interrupt again. */ SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */ } /* load CAM done */ if (status & SONIC_INT_LCD) SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */ } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)); return IRQ_HANDLED; } /* * We have a good packet(s), pass it/them up the network stack. 
*/ static void sonic_rx(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); int status; int entry = lp->cur_rx; while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { struct sk_buff *used_skb; struct sk_buff *new_skb; dma_addr_t new_laddr; u16 bufadr_l; u16 bufadr_h; int pkt_len; status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); if (status & SONIC_RCR_PRX) { /* Malloc up new buffer. */ new_skb = dev_alloc_skb(SONIC_RBSIZE + 2); if (new_skb == NULL) { printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); lp->stats.rx_dropped++; break; } /* provide 16 byte IP header alignment unless DMA requires otherwise */ if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) skb_reserve(new_skb, 2); new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE), SONIC_RBSIZE, DMA_FROM_DEVICE); if (!new_laddr) { dev_kfree_skb(new_skb); printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name); lp->stats.rx_dropped++; break; } /* now we have a new skb to replace it, pass the used one up the stack */ dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE); used_skb = lp->rx_skb[entry]; pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); skb_trim(used_skb, pkt_len); used_skb->protocol = eth_type_trans(used_skb, dev); netif_rx(used_skb); lp->stats.rx_packets++; lp->stats.rx_bytes += pkt_len; /* and insert the new skb */ lp->rx_laddr[entry] = new_laddr; lp->rx_skb[entry] = new_skb; bufadr_l = (unsigned long)new_laddr & 0xffff; bufadr_h = (unsigned long)new_laddr >> 16; sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l); sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h); } else { /* This should only happen, if we enable accepting broken packets. 
*/ lp->stats.rx_errors++; if (status & SONIC_RCR_FAER) lp->stats.rx_frame_errors++; if (status & SONIC_RCR_CRCR) lp->stats.rx_crc_errors++; } if (status & SONIC_RCR_LPKT) { /* * this was the last packet out of the current receive buffer * give the buffer back to the SONIC */ lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff; SONIC_WRITE(SONIC_RWP, lp->cur_rwp); if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) { if (sonic_debug > 2) printk("%s: rx buffer exhausted\n", dev->name); SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */ } } else printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n", dev->name); /* * give back the descriptor */ sonic_rda_put(dev, entry, SONIC_RD_LINK, sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL); sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL); lp->eol_rx = entry; lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK; } /* * If any worth-while packets have been received, netif_rx() * has done a mark_bh(NET_BH) for us and will work on them * when we get to the bottom-half routine. */ } /* * Get the current statistics. * This may be called with the device open or closed. */ static struct net_device_stats *sonic_get_stats(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); /* read the tally counter from the SONIC and reset them */ lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT); SONIC_WRITE(SONIC_CRCT, 0xffff); lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET); SONIC_WRITE(SONIC_FAET, 0xffff); lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT); SONIC_WRITE(SONIC_MPT, 0xffff); return &lp->stats; } /* * Set or clear the multicast filter for this adaptor. 
*/ static void sonic_multicast_list(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); unsigned int rcr; struct netdev_hw_addr *ha; unsigned char *addr; int i; rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC); rcr |= SONIC_RCR_BRD; /* accept broadcast packets */ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ rcr |= SONIC_RCR_PRO; } else { if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 15)) { rcr |= SONIC_RCR_AMC; } else { if (sonic_debug > 2) printk("sonic_multicast_list: mc_count %d\n", netdev_mc_count(dev)); sonic_set_cam_enable(dev, 1); /* always enable our own address */ i = 1; netdev_for_each_mc_addr(ha, dev) { addr = ha->addr; sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]); sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]); sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]); sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i)); i++; } SONIC_WRITE(SONIC_CDC, 16); /* issue Load CAM command */ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); } } if (sonic_debug > 2) printk("sonic_multicast_list: setting RCR=%x\n", rcr); SONIC_WRITE(SONIC_RCR, rcr); } /* * Initialize the SONIC ethernet controller. 
*/ static int sonic_init(struct net_device *dev) { unsigned int cmd; struct sonic_local *lp = netdev_priv(dev); int i; /* * put the Sonic into software-reset mode and * disable all interrupts */ SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); /* * clear software reset flag, disable receiver, clear and * enable interrupts, then completely initialize the SONIC */ SONIC_WRITE(SONIC_CMD, 0); SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); /* * initialize the receive resource area */ if (sonic_debug > 2) printk("sonic_init: initialize receive resource area\n"); for (i = 0; i < SONIC_NUM_RRS; i++) { u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff; u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16; sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l); sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h); sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1); sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0); } /* initialize all RRA registers */ lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff); SONIC_WRITE(SONIC_REA, lp->rra_end); SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff); SONIC_WRITE(SONIC_RWP, lp->cur_rwp); SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1)); /* load the resource pointers */ if (sonic_debug > 3) printk("sonic_init: issuing RRRA command\n"); SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); i = 0; while (i++ < 100) { if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA) break; } if (sonic_debug > 2) printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i); /* * Initialize the receive descriptors so that they * become a circular linked list, ie. let the last * descriptor point to the first again. 
*/ if (sonic_debug > 2) printk("sonic_init: initialize receive descriptors\n"); for (i=0; i<SONIC_NUM_RDS; i++) { sonic_rda_put(dev, i, SONIC_RD_STATUS, 0); sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0); sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0); sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0); sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0); sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1); sonic_rda_put(dev, i, SONIC_RD_LINK, lp->rda_laddr + ((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode))); } /* fix last descriptor */ sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK, (lp->rda_laddr & 0xffff) | SONIC_EOL); lp->eol_rx = SONIC_NUM_RDS - 1; lp->cur_rx = 0; SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16); SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff); /* * initialize transmit descriptors */ if (sonic_debug > 2) printk("sonic_init: initialize transmit descriptors\n"); for (i = 0; i < SONIC_NUM_TDS; i++) { sonic_tda_put(dev, i, SONIC_TD_STATUS, 0); sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0); sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0); sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0); sonic_tda_put(dev, i, SONIC_TD_LINK, (lp->tda_laddr & 0xffff) + (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->tx_skb[i] = NULL; } /* fix last descriptor */ sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK, (lp->tda_laddr & 0xffff)); SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16); SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff); lp->cur_tx = lp->next_tx = 0; lp->eol_tx = SONIC_NUM_TDS - 1; /* * put our own address to CAM desc[0] */ sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]); sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]); sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]); sonic_set_cam_enable(dev, 1); for (i = 0; i < 16; i++) sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i); /* * initialize CAM registers */ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); SONIC_WRITE(SONIC_CDC, 16); 
/* * load the CAM */ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); i = 0; while (i++ < 100) { if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD) break; } if (sonic_debug > 2) { printk("sonic_init: CMD=%x, ISR=%x\n, i=%d", SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i); } /* * enable receiver, disable loopback * and enable all interrupts */ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP); SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT); SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT); cmd = SONIC_READ(SONIC_CMD); if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0) printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd); if (sonic_debug > 2) printk("sonic_init: new status=%x\n", SONIC_READ(SONIC_CMD)); return 0; } MODULE_LICENSE("GPL");
gpl-2.0
santod/nuk3rn3l_htc_msm8960-lollipop
arch/arm/mach-exynos/setup-spi.c
4750
1852
/* linux/arch/arm/mach-exynos4/setup-spi.c * * Copyright (C) 2011 Samsung Electronics Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/platform_device.h> #include <plat/gpio-cfg.h> #include <plat/s3c64xx-spi.h> #ifdef CONFIG_S3C64XX_DEV_SPI0 struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = { .fifo_lvl_mask = 0x1ff, .rx_lvl_offset = 15, .high_speed = 1, .clk_from_cmu = true, .tx_st_done = 25, }; int s3c64xx_spi0_cfg_gpio(struct platform_device *dev) { s3c_gpio_cfgpin(EXYNOS4_GPB(0), S3C_GPIO_SFN(2)); s3c_gpio_setpull(EXYNOS4_GPB(0), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS4_GPB(2), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); return 0; } #endif #ifdef CONFIG_S3C64XX_DEV_SPI1 struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 15, .high_speed = 1, .clk_from_cmu = true, .tx_st_done = 25, }; int s3c64xx_spi1_cfg_gpio(struct platform_device *dev) { s3c_gpio_cfgpin(EXYNOS4_GPB(4), S3C_GPIO_SFN(2)); s3c_gpio_setpull(EXYNOS4_GPB(4), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS4_GPB(6), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); return 0; } #endif #ifdef CONFIG_S3C64XX_DEV_SPI2 struct s3c64xx_spi_info s3c64xx_spi2_pdata __initdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 15, .high_speed = 1, .clk_from_cmu = true, .tx_st_done = 25, }; int s3c64xx_spi2_cfg_gpio(struct platform_device *dev) { s3c_gpio_cfgpin(EXYNOS4_GPC1(1), S3C_GPIO_SFN(5)); s3c_gpio_setpull(EXYNOS4_GPC1(1), S3C_GPIO_PULL_UP); s3c_gpio_cfgall_range(EXYNOS4_GPC1(3), 2, S3C_GPIO_SFN(5), S3C_GPIO_PULL_UP); return 0; } #endif
gpl-2.0
BuzzBumbleBee/kernel
arch/arm/plat-s5p/dev-uart.c
5006
2894
/* linux/arch/arm/plat-s5p/dev-uart.c
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com/
 *
 * Base S5P UART resource and device definitions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/map.h>

#include <plat/devs.h>

/* Serial port registrations */

/* Each UART gets one MMIO window of S5P_SZ_UART bytes and one IRQ. */
static struct resource s5p_uart0_resource[] = {
	[0] = {
		.start	= S5P_PA_UART0,
		.end	= S5P_PA_UART0 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART0,
		.end	= IRQ_UART0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct resource s5p_uart1_resource[] = {
	[0] = {
		.start	= S5P_PA_UART1,
		.end	= S5P_PA_UART1 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART1,
		.end	= IRQ_UART1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct resource s5p_uart2_resource[] = {
	[0] = {
		.start	= S5P_PA_UART2,
		.end	= S5P_PA_UART2 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART2,
		.end	= IRQ_UART2,
		.flags	= IORESOURCE_IRQ,
	},
};

/*
 * UARTs 3..5 only exist on some SoCs: when CONFIG_SERIAL_SAMSUNG_UARTS
 * does not cover a port, its resource array is deliberately left empty
 * (nr_resources becomes 0 below) so the device is never registered.
 */
static struct resource s5p_uart3_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
	[0] = {
		.start	= S5P_PA_UART3,
		.end	= S5P_PA_UART3 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART3,
		.end	= IRQ_UART3,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};

static struct resource s5p_uart4_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
	[0] = {
		.start	= S5P_PA_UART4,
		.end	= S5P_PA_UART4 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART4,
		.end	= IRQ_UART4,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};

static struct resource s5p_uart5_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
	[0] = {
		.start	= S5P_PA_UART5,
		.end	= S5P_PA_UART5 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART5,
		.end	= IRQ_UART5,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};

/* Table consumed by the samsung serial core; indexed by port number. */
struct s3c24xx_uart_resources s5p_uart_resources[] __initdata = {
	[0] = {
		.resources	= s5p_uart0_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart0_resource),
	},
	[1] = {
		.resources	= s5p_uart1_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart1_resource),
	},
	[2] = {
		.resources	= s5p_uart2_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart2_resource),
	},
	[3] = {
		.resources	= s5p_uart3_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart3_resource),
	},
	[4] = {
		.resources	= s5p_uart4_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart4_resource),
	},
	[5] = {
		.resources	= s5p_uart5_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart5_resource),
	},
};
gpl-2.0
friedrich420/N3-AEL-Kernel-Lollipop-Ed.
arch/arm/mach-orion5x/lsmini-setup.c
5006
7273
/* * arch/arm/mach-orion5x/lsmini-setup.c * * Maintainer: Alexey Kopytko <alexey@kopytko.ru> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/leds.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/ata_platform.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /***************************************************************************** * Linkstation Mini Info ****************************************************************************/ /* * 256K NOR flash Device bus boot chip select */ #define LSMINI_NOR_BOOT_BASE 0xf4000000 #define LSMINI_NOR_BOOT_SIZE SZ_256K /***************************************************************************** * 256KB NOR Flash on BOOT Device ****************************************************************************/ static struct physmap_flash_data lsmini_nor_flash_data = { .width = 1, }; static struct resource lsmini_nor_flash_resource = { .flags = IORESOURCE_MEM, .start = LSMINI_NOR_BOOT_BASE, .end = LSMINI_NOR_BOOT_BASE + LSMINI_NOR_BOOT_SIZE - 1, }; static struct platform_device lsmini_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &lsmini_nor_flash_data, }, .num_resources = 1, .resource = &lsmini_nor_flash_resource, }; /***************************************************************************** * Ethernet ****************************************************************************/ static struct mv643xx_eth_platform_data lsmini_eth_data = { .phy_addr = 8, }; /***************************************************************************** * RTC 5C372a on I2C bus 
 ****************************************************************************/

/* RTC: Ricoh RS5C372A at address 0x32 on I2C bus 0. */
static struct i2c_board_info __initdata lsmini_i2c_rtc = {
	I2C_BOARD_INFO("rs5c372a", 0x32),
};

/*****************************************************************************
 * LEDs attached to GPIO
 ****************************************************************************/

#define LSMINI_GPIO_LED_ALARM 2
#define LSMINI_GPIO_LED_INFO 3
#define LSMINI_GPIO_LED_FUNC 9
#define LSMINI_GPIO_LED_PWR 14

/*
 * All LEDs are driven active-low except the power LED
 * (see the missing .active_low on the last entry).
 */
static struct gpio_led lsmini_led_pins[] = {
	{
		.name = "alarm:red",
		.gpio = LSMINI_GPIO_LED_ALARM,
		.active_low = 1,
	}, {
		.name = "info:amber",
		.gpio = LSMINI_GPIO_LED_INFO,
		.active_low = 1,
	}, {
		.name = "func:blue:top",
		.gpio = LSMINI_GPIO_LED_FUNC,
		.active_low = 1,
	}, {
		.name = "power:blue:bottom",
		.gpio = LSMINI_GPIO_LED_PWR,
	},
};

static struct gpio_led_platform_data lsmini_led_data = {
	.leds = lsmini_led_pins,
	.num_leds = ARRAY_SIZE(lsmini_led_pins),
};

/* Bound to the generic "leds-gpio" driver. */
static struct platform_device lsmini_leds = {
	.name = "leds-gpio",
	.id = -1,
	.dev = {
		.platform_data = &lsmini_led_data,
	},
};

/****************************************************************************
 * GPIO Attached Keys
 ****************************************************************************/

#define LSMINI_GPIO_KEY_FUNC 15
#define LSMINI_GPIO_KEY_POWER 18
#define LSMINI_GPIO_KEY_AUTOPOWER 17

/* Switch codes reported to input core for the two EV_SW slide positions. */
#define LSMINI_SW_POWER 0x00
#define LSMINI_SW_AUTOPOWER 0x01

/*
 * One push button (FUNC) and a three-position power slide switch exposed
 * as two independent EV_SW switches (on / auto).  All active-low.
 */
static struct gpio_keys_button lsmini_buttons[] = {
	{
		.code = KEY_OPTION,
		.gpio = LSMINI_GPIO_KEY_FUNC,
		.desc = "Function Button",
		.active_low = 1,
	}, {
		.type = EV_SW,
		.code = LSMINI_SW_POWER,
		.gpio = LSMINI_GPIO_KEY_POWER,
		.desc = "Power-on Switch",
		.active_low = 1,
	}, {
		.type = EV_SW,
		.code = LSMINI_SW_AUTOPOWER,
		.gpio = LSMINI_GPIO_KEY_AUTOPOWER,
		.desc = "Power-auto Switch",
		.active_low = 1,
	},
};

static struct gpio_keys_platform_data lsmini_button_data = {
	.buttons = lsmini_buttons,
	.nbuttons = ARRAY_SIZE(lsmini_buttons),
};

/* Bound to the generic "gpio-keys" driver. */
static struct platform_device lsmini_button_device = {
	.name = "gpio-keys",
	.id = -1,
	.num_resources = 0,
	.dev = {
		.platform_data = &lsmini_button_data,
	},
};

/*****************************************************************************
 * SATA
 ****************************************************************************/
static struct mv_sata_platform_data lsmini_sata_data = {
	.n_ports = 2,
};


/*****************************************************************************
 * Linkstation Mini specific power off method: reboot
 ****************************************************************************/
/*
 * On the Linkstation Mini, the shutdown process is following:
 * - Userland monitors key events until the power switch goes to off position
 * - The board reboots
 * - U-boot starts and goes into an idle mode waiting for the user
 *   to move the switch to ON position
 */

static void lsmini_power_off(void)
{
	/* 'h' = halt-style restart; U-Boot then idles until switch-on. */
	orion5x_restart('h', NULL);
}


/*****************************************************************************
 * General Setup
 ****************************************************************************/
#define LSMINI_GPIO_USB_POWER	16
#define LSMINI_GPIO_AUTO_POWER	17
#define LSMINI_GPIO_POWER	18

#define LSMINI_GPIO_HDD_POWER0	1
#define LSMINI_GPIO_HDD_POWER1	19

/* Multi-purpose pin configuration; unused pins are left unconfigured. */
static unsigned int lsmini_mpp_modes[] __initdata = {
	MPP0_UNUSED, /* LED_RESERVE1 (unused) */
	MPP1_GPIO, /* HDD_PWR */
	MPP2_GPIO, /* LED_ALARM */
	MPP3_GPIO, /* LED_INFO */
	MPP4_UNUSED,
	MPP5_UNUSED,
	MPP6_UNUSED,
	MPP7_UNUSED,
	MPP8_UNUSED,
	MPP9_GPIO, /* LED_FUNC */
	MPP10_UNUSED,
	MPP11_UNUSED, /* LED_ETH (dummy) */
	MPP12_UNUSED,
	MPP13_UNUSED,
	MPP14_GPIO, /* LED_PWR */
	MPP15_GPIO, /* FUNC */
	MPP16_GPIO, /* USB_PWR */
	MPP17_GPIO, /* AUTO_POWER */
	MPP18_GPIO, /* POWER */
	MPP19_GPIO, /* HDD_PWR1 */
	0,	/* terminator */
};

/* Board init: MPP setup must precede any peripheral registration. */
static void __init lsmini_init(void)
{
	/*
	 * Setup basic Orion functions. Need to be called early.
	 */
	orion5x_init();

	orion5x_mpp_conf(lsmini_mpp_modes);

	/*
	 * Configure peripherals.
	 */
	orion5x_ehci0_init();
	orion5x_ehci1_init();
	orion5x_eth_init(&lsmini_eth_data);
	orion5x_i2c_init();
	orion5x_sata_init(&lsmini_sata_data);
	orion5x_uart0_init();
	orion5x_xor_init();

	orion5x_setup_dev_boot_win(LSMINI_NOR_BOOT_BASE,
				   LSMINI_NOR_BOOT_SIZE);
	platform_device_register(&lsmini_nor_flash);

	platform_device_register(&lsmini_button_device);

	platform_device_register(&lsmini_leds);

	i2c_register_board_info(0, &lsmini_i2c_rtc, 1);

	/* enable USB power */
	/* NOTE(review): GPIO is set without a prior gpio_request() —
	 * presumably relies on the line being free; confirm. */
	gpio_set_value(LSMINI_GPIO_USB_POWER, 1);

	/* register power-off method */
	pm_power_off = lsmini_power_off;

	pr_info("%s: finished\n", __func__);
}

#ifdef CONFIG_MACH_LINKSTATION_MINI
MACHINE_START(LINKSTATION_MINI, "Buffalo Linkstation Mini")
	/* Maintainer: Alexey Kopytko <alexey@kopytko.ru> */
	.atag_offset	= 0x100,
	.init_machine	= lsmini_init,
	.map_io		= orion5x_map_io,
	.init_early	= orion5x_init_early,
	.init_irq	= orion5x_init_irq,
	.timer		= &orion5x_timer,
	.fixup		= tag_fixup_mem32,
	.restart	= orion5x_restart,
MACHINE_END
#endif
gpl-2.0
Ultrax5/android_kernel_sony_msm8960t
drivers/gpu/drm/gma500/mdfld_device.c
5262
18972
/************************************************************************** * Copyright (c) 2011, Intel Corporation. * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * **************************************************************************/ #include "psb_drv.h" #include "mid_bios.h" #include "mdfld_output.h" #include "mdfld_dsi_output.h" #include "tc35876x-dsi-lvds.h" #include <asm/intel_scu_ipc.h> #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE #define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF #define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */ #define BLC_PWM_FREQ_CALC_CONSTANT 32 #define MHz 1000000 #define BRIGHTNESS_MIN_LEVEL 1 #define BRIGHTNESS_MAX_LEVEL 100 #define BRIGHTNESS_MASK 0xFF #define BLC_POLARITY_NORMAL 0 #define BLC_POLARITY_INVERSE 1 #define BLC_ADJUSTMENT_MAX 100 #define MDFLD_BLC_PWM_PRECISION_FACTOR 10 #define MDFLD_BLC_MAX_PWM_REG_FREQ 0xFFFE #define MDFLD_BLC_MIN_PWM_REG_FREQ 0x2 #define MDFLD_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE) #define MDFLD_BACKLIGHT_PWM_CTL_SHIFT (16) static struct backlight_device *mdfld_backlight_device; int mdfld_set_brightness(struct backlight_device *bd) { struct drm_device *dev = (struct drm_device *)bl_get_data(mdfld_backlight_device); struct drm_psb_private *dev_priv = dev->dev_private; int level = bd->props.brightness; DRM_DEBUG_DRIVER("backlight level set to %d\n", level); /* Perform value bounds checking */ 
if (level < BRIGHTNESS_MIN_LEVEL) level = BRIGHTNESS_MIN_LEVEL; if (gma_power_begin(dev, false)) { u32 adjusted_level = 0; /* * Adjust the backlight level with the percent in * dev_priv->blc_adj2 */ adjusted_level = level * dev_priv->blc_adj2; adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX; dev_priv->brightness_adjusted = adjusted_level; if (mdfld_get_panel_type(dev, 0) == TC35876X) { if (dev_priv->dpi_panel_on[0] || dev_priv->dpi_panel_on[2]) tc35876x_brightness_control(dev, dev_priv->brightness_adjusted); } else { if (dev_priv->dpi_panel_on[0]) mdfld_dsi_brightness_control(dev, 0, dev_priv->brightness_adjusted); } if (dev_priv->dpi_panel_on[2]) mdfld_dsi_brightness_control(dev, 2, dev_priv->brightness_adjusted); gma_power_end(dev); } /* cache the brightness for later use */ dev_priv->brightness = level; return 0; } static int mdfld_get_brightness(struct backlight_device *bd) { struct drm_device *dev = (struct drm_device *)bl_get_data(mdfld_backlight_device); struct drm_psb_private *dev_priv = dev->dev_private; DRM_DEBUG_DRIVER("brightness = 0x%x \n", dev_priv->brightness); /* return locally cached var instead of HW read (due to DPST etc.) 
*/ return dev_priv->brightness; } static const struct backlight_ops mdfld_ops = { .get_brightness = mdfld_get_brightness, .update_status = mdfld_set_brightness, }; static int device_backlight_init(struct drm_device *dev) { struct drm_psb_private *dev_priv = (struct drm_psb_private *) dev->dev_private; dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX; dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX; return 0; } static int mdfld_backlight_init(struct drm_device *dev) { struct backlight_properties props; int ret = 0; memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = BRIGHTNESS_MAX_LEVEL; props.type = BACKLIGHT_PLATFORM; mdfld_backlight_device = backlight_device_register("mdfld-bl", NULL, (void *)dev, &mdfld_ops, &props); if (IS_ERR(mdfld_backlight_device)) return PTR_ERR(mdfld_backlight_device); ret = device_backlight_init(dev); if (ret) return ret; mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL; mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL; backlight_update_status(mdfld_backlight_device); return 0; } #endif struct backlight_device *mdfld_get_backlight_device(void) { #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE return mdfld_backlight_device; #else return NULL; #endif } /* * mdfld_save_display_registers * * Description: We are going to suspend so save current display * register state. 
* * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio */ static int mdfld_save_display_registers(struct drm_device *dev, int pipe) { struct drm_psb_private *dev_priv = dev->dev_private; struct medfield_state *regs = &dev_priv->regs.mdfld; int i; /* register */ u32 dpll_reg = MRST_DPLL_A; u32 fp_reg = MRST_FPA0; u32 pipeconf_reg = PIPEACONF; u32 htot_reg = HTOTAL_A; u32 hblank_reg = HBLANK_A; u32 hsync_reg = HSYNC_A; u32 vtot_reg = VTOTAL_A; u32 vblank_reg = VBLANK_A; u32 vsync_reg = VSYNC_A; u32 pipesrc_reg = PIPEASRC; u32 dspstride_reg = DSPASTRIDE; u32 dsplinoff_reg = DSPALINOFF; u32 dsptileoff_reg = DSPATILEOFF; u32 dspsize_reg = DSPASIZE; u32 dsppos_reg = DSPAPOS; u32 dspsurf_reg = DSPASURF; u32 mipi_reg = MIPI; u32 dspcntr_reg = DSPACNTR; u32 dspstatus_reg = PIPEASTAT; u32 palette_reg = PALETTE_A; /* pointer to values */ u32 *dpll_val = &regs->saveDPLL_A; u32 *fp_val = &regs->saveFPA0; u32 *pipeconf_val = &regs->savePIPEACONF; u32 *htot_val = &regs->saveHTOTAL_A; u32 *hblank_val = &regs->saveHBLANK_A; u32 *hsync_val = &regs->saveHSYNC_A; u32 *vtot_val = &regs->saveVTOTAL_A; u32 *vblank_val = &regs->saveVBLANK_A; u32 *vsync_val = &regs->saveVSYNC_A; u32 *pipesrc_val = &regs->savePIPEASRC; u32 *dspstride_val = &regs->saveDSPASTRIDE; u32 *dsplinoff_val = &regs->saveDSPALINOFF; u32 *dsptileoff_val = &regs->saveDSPATILEOFF; u32 *dspsize_val = &regs->saveDSPASIZE; u32 *dsppos_val = &regs->saveDSPAPOS; u32 *dspsurf_val = &regs->saveDSPASURF; u32 *mipi_val = &regs->saveMIPI; u32 *dspcntr_val = &regs->saveDSPACNTR; u32 *dspstatus_val = &regs->saveDSPASTATUS; u32 *palette_val = regs->save_palette_a; switch (pipe) { case 0: break; case 1: /* regester */ dpll_reg = MDFLD_DPLL_B; fp_reg = MDFLD_DPLL_DIV0; pipeconf_reg = PIPEBCONF; htot_reg = HTOTAL_B; hblank_reg = HBLANK_B; hsync_reg = HSYNC_B; vtot_reg = VTOTAL_B; vblank_reg = VBLANK_B; vsync_reg = VSYNC_B; pipesrc_reg = PIPEBSRC; dspstride_reg = DSPBSTRIDE; dsplinoff_reg = DSPBLINOFF; dsptileoff_reg = 
DSPBTILEOFF; dspsize_reg = DSPBSIZE; dsppos_reg = DSPBPOS; dspsurf_reg = DSPBSURF; dspcntr_reg = DSPBCNTR; dspstatus_reg = PIPEBSTAT; palette_reg = PALETTE_B; /* values */ dpll_val = &regs->saveDPLL_B; fp_val = &regs->saveFPB0; pipeconf_val = &regs->savePIPEBCONF; htot_val = &regs->saveHTOTAL_B; hblank_val = &regs->saveHBLANK_B; hsync_val = &regs->saveHSYNC_B; vtot_val = &regs->saveVTOTAL_B; vblank_val = &regs->saveVBLANK_B; vsync_val = &regs->saveVSYNC_B; pipesrc_val = &regs->savePIPEBSRC; dspstride_val = &regs->saveDSPBSTRIDE; dsplinoff_val = &regs->saveDSPBLINOFF; dsptileoff_val = &regs->saveDSPBTILEOFF; dspsize_val = &regs->saveDSPBSIZE; dsppos_val = &regs->saveDSPBPOS; dspsurf_val = &regs->saveDSPBSURF; dspcntr_val = &regs->saveDSPBCNTR; dspstatus_val = &regs->saveDSPBSTATUS; palette_val = regs->save_palette_b; break; case 2: /* register */ pipeconf_reg = PIPECCONF; htot_reg = HTOTAL_C; hblank_reg = HBLANK_C; hsync_reg = HSYNC_C; vtot_reg = VTOTAL_C; vblank_reg = VBLANK_C; vsync_reg = VSYNC_C; pipesrc_reg = PIPECSRC; dspstride_reg = DSPCSTRIDE; dsplinoff_reg = DSPCLINOFF; dsptileoff_reg = DSPCTILEOFF; dspsize_reg = DSPCSIZE; dsppos_reg = DSPCPOS; dspsurf_reg = DSPCSURF; mipi_reg = MIPI_C; dspcntr_reg = DSPCCNTR; dspstatus_reg = PIPECSTAT; palette_reg = PALETTE_C; /* pointer to values */ pipeconf_val = &regs->savePIPECCONF; htot_val = &regs->saveHTOTAL_C; hblank_val = &regs->saveHBLANK_C; hsync_val = &regs->saveHSYNC_C; vtot_val = &regs->saveVTOTAL_C; vblank_val = &regs->saveVBLANK_C; vsync_val = &regs->saveVSYNC_C; pipesrc_val = &regs->savePIPECSRC; dspstride_val = &regs->saveDSPCSTRIDE; dsplinoff_val = &regs->saveDSPCLINOFF; dsptileoff_val = &regs->saveDSPCTILEOFF; dspsize_val = &regs->saveDSPCSIZE; dsppos_val = &regs->saveDSPCPOS; dspsurf_val = &regs->saveDSPCSURF; mipi_val = &regs->saveMIPI_C; dspcntr_val = &regs->saveDSPCCNTR; dspstatus_val = &regs->saveDSPCSTATUS; palette_val = regs->save_palette_c; break; default: DRM_ERROR("%s, invalid pipe number.\n", 
__func__); return -EINVAL; } /* Pipe & plane A info */ *dpll_val = PSB_RVDC32(dpll_reg); *fp_val = PSB_RVDC32(fp_reg); *pipeconf_val = PSB_RVDC32(pipeconf_reg); *htot_val = PSB_RVDC32(htot_reg); *hblank_val = PSB_RVDC32(hblank_reg); *hsync_val = PSB_RVDC32(hsync_reg); *vtot_val = PSB_RVDC32(vtot_reg); *vblank_val = PSB_RVDC32(vblank_reg); *vsync_val = PSB_RVDC32(vsync_reg); *pipesrc_val = PSB_RVDC32(pipesrc_reg); *dspstride_val = PSB_RVDC32(dspstride_reg); *dsplinoff_val = PSB_RVDC32(dsplinoff_reg); *dsptileoff_val = PSB_RVDC32(dsptileoff_reg); *dspsize_val = PSB_RVDC32(dspsize_reg); *dsppos_val = PSB_RVDC32(dsppos_reg); *dspsurf_val = PSB_RVDC32(dspsurf_reg); *dspcntr_val = PSB_RVDC32(dspcntr_reg); *dspstatus_val = PSB_RVDC32(dspstatus_reg); /*save palette (gamma) */ for (i = 0; i < 256; i++) palette_val[i] = PSB_RVDC32(palette_reg + (i << 2)); if (pipe == 1) { regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL); regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS); regs->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL); regs->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL); return 0; } *mipi_val = PSB_RVDC32(mipi_reg); return 0; } /* * mdfld_restore_display_registers * * Description: We are going to resume so restore display register state. * * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio */ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe) { /* To get panel out of ULPS mode. 
*/ u32 temp = 0; u32 device_ready_reg = DEVICE_READY_REG; struct drm_psb_private *dev_priv = dev->dev_private; struct mdfld_dsi_config *dsi_config = NULL; struct medfield_state *regs = &dev_priv->regs.mdfld; u32 i = 0; u32 dpll = 0; u32 timeout = 0; /* regester */ u32 dpll_reg = MRST_DPLL_A; u32 fp_reg = MRST_FPA0; u32 pipeconf_reg = PIPEACONF; u32 htot_reg = HTOTAL_A; u32 hblank_reg = HBLANK_A; u32 hsync_reg = HSYNC_A; u32 vtot_reg = VTOTAL_A; u32 vblank_reg = VBLANK_A; u32 vsync_reg = VSYNC_A; u32 pipesrc_reg = PIPEASRC; u32 dspstride_reg = DSPASTRIDE; u32 dsplinoff_reg = DSPALINOFF; u32 dsptileoff_reg = DSPATILEOFF; u32 dspsize_reg = DSPASIZE; u32 dsppos_reg = DSPAPOS; u32 dspsurf_reg = DSPASURF; u32 dspstatus_reg = PIPEASTAT; u32 mipi_reg = MIPI; u32 dspcntr_reg = DSPACNTR; u32 palette_reg = PALETTE_A; /* values */ u32 dpll_val = regs->saveDPLL_A & ~DPLL_VCO_ENABLE; u32 fp_val = regs->saveFPA0; u32 pipeconf_val = regs->savePIPEACONF; u32 htot_val = regs->saveHTOTAL_A; u32 hblank_val = regs->saveHBLANK_A; u32 hsync_val = regs->saveHSYNC_A; u32 vtot_val = regs->saveVTOTAL_A; u32 vblank_val = regs->saveVBLANK_A; u32 vsync_val = regs->saveVSYNC_A; u32 pipesrc_val = regs->savePIPEASRC; u32 dspstride_val = regs->saveDSPASTRIDE; u32 dsplinoff_val = regs->saveDSPALINOFF; u32 dsptileoff_val = regs->saveDSPATILEOFF; u32 dspsize_val = regs->saveDSPASIZE; u32 dsppos_val = regs->saveDSPAPOS; u32 dspsurf_val = regs->saveDSPASURF; u32 dspstatus_val = regs->saveDSPASTATUS; u32 mipi_val = regs->saveMIPI; u32 dspcntr_val = regs->saveDSPACNTR; u32 *palette_val = regs->save_palette_a; switch (pipe) { case 0: dsi_config = dev_priv->dsi_configs[0]; break; case 1: /* regester */ dpll_reg = MDFLD_DPLL_B; fp_reg = MDFLD_DPLL_DIV0; pipeconf_reg = PIPEBCONF; htot_reg = HTOTAL_B; hblank_reg = HBLANK_B; hsync_reg = HSYNC_B; vtot_reg = VTOTAL_B; vblank_reg = VBLANK_B; vsync_reg = VSYNC_B; pipesrc_reg = PIPEBSRC; dspstride_reg = DSPBSTRIDE; dsplinoff_reg = DSPBLINOFF; dsptileoff_reg = 
DSPBTILEOFF; dspsize_reg = DSPBSIZE; dsppos_reg = DSPBPOS; dspsurf_reg = DSPBSURF; dspcntr_reg = DSPBCNTR; dspstatus_reg = PIPEBSTAT; palette_reg = PALETTE_B; /* values */ dpll_val = regs->saveDPLL_B & ~DPLL_VCO_ENABLE; fp_val = regs->saveFPB0; pipeconf_val = regs->savePIPEBCONF; htot_val = regs->saveHTOTAL_B; hblank_val = regs->saveHBLANK_B; hsync_val = regs->saveHSYNC_B; vtot_val = regs->saveVTOTAL_B; vblank_val = regs->saveVBLANK_B; vsync_val = regs->saveVSYNC_B; pipesrc_val = regs->savePIPEBSRC; dspstride_val = regs->saveDSPBSTRIDE; dsplinoff_val = regs->saveDSPBLINOFF; dsptileoff_val = regs->saveDSPBTILEOFF; dspsize_val = regs->saveDSPBSIZE; dsppos_val = regs->saveDSPBPOS; dspsurf_val = regs->saveDSPBSURF; dspcntr_val = regs->saveDSPBCNTR; dspstatus_val = regs->saveDSPBSTATUS; palette_val = regs->save_palette_b; break; case 2: /* regester */ pipeconf_reg = PIPECCONF; htot_reg = HTOTAL_C; hblank_reg = HBLANK_C; hsync_reg = HSYNC_C; vtot_reg = VTOTAL_C; vblank_reg = VBLANK_C; vsync_reg = VSYNC_C; pipesrc_reg = PIPECSRC; dspstride_reg = DSPCSTRIDE; dsplinoff_reg = DSPCLINOFF; dsptileoff_reg = DSPCTILEOFF; dspsize_reg = DSPCSIZE; dsppos_reg = DSPCPOS; dspsurf_reg = DSPCSURF; mipi_reg = MIPI_C; dspcntr_reg = DSPCCNTR; dspstatus_reg = PIPECSTAT; palette_reg = PALETTE_C; /* values */ pipeconf_val = regs->savePIPECCONF; htot_val = regs->saveHTOTAL_C; hblank_val = regs->saveHBLANK_C; hsync_val = regs->saveHSYNC_C; vtot_val = regs->saveVTOTAL_C; vblank_val = regs->saveVBLANK_C; vsync_val = regs->saveVSYNC_C; pipesrc_val = regs->savePIPECSRC; dspstride_val = regs->saveDSPCSTRIDE; dsplinoff_val = regs->saveDSPCLINOFF; dsptileoff_val = regs->saveDSPCTILEOFF; dspsize_val = regs->saveDSPCSIZE; dsppos_val = regs->saveDSPCPOS; dspsurf_val = regs->saveDSPCSURF; mipi_val = regs->saveMIPI_C; dspcntr_val = regs->saveDSPCCNTR; dspstatus_val = regs->saveDSPCSTATUS; palette_val = regs->save_palette_c; dsi_config = dev_priv->dsi_configs[1]; break; default: DRM_ERROR("%s, invalid pipe 
number.\n", __func__); return -EINVAL; } /*make sure VGA plane is off. it initializes to on after reset!*/ PSB_WVDC32(0x80000000, VGACNTRL); if (pipe == 1) { PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg); PSB_RVDC32(dpll_reg); PSB_WVDC32(fp_val, fp_reg); } else { dpll = PSB_RVDC32(dpll_reg); if (!(dpll & DPLL_VCO_ENABLE)) { /* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */ if (dpll & MDFLD_PWR_GATE_EN) { dpll &= ~MDFLD_PWR_GATE_EN; PSB_WVDC32(dpll, dpll_reg); /* FIXME_MDFLD PO - change 500 to 1 after PO */ udelay(500); } PSB_WVDC32(fp_val, fp_reg); PSB_WVDC32(dpll_val, dpll_reg); /* FIXME_MDFLD PO - change 500 to 1 after PO */ udelay(500); dpll_val |= DPLL_VCO_ENABLE; PSB_WVDC32(dpll_val, dpll_reg); PSB_RVDC32(dpll_reg); /* wait for DSI PLL to lock */ while (timeout < 20000 && !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) { udelay(150); timeout++; } if (timeout == 20000) { DRM_ERROR("%s, can't lock DSIPLL.\n", __func__); return -EINVAL; } } } /* Restore mode */ PSB_WVDC32(htot_val, htot_reg); PSB_WVDC32(hblank_val, hblank_reg); PSB_WVDC32(hsync_val, hsync_reg); PSB_WVDC32(vtot_val, vtot_reg); PSB_WVDC32(vblank_val, vblank_reg); PSB_WVDC32(vsync_val, vsync_reg); PSB_WVDC32(pipesrc_val, pipesrc_reg); PSB_WVDC32(dspstatus_val, dspstatus_reg); /*set up the plane*/ PSB_WVDC32(dspstride_val, dspstride_reg); PSB_WVDC32(dsplinoff_val, dsplinoff_reg); PSB_WVDC32(dsptileoff_val, dsptileoff_reg); PSB_WVDC32(dspsize_val, dspsize_reg); PSB_WVDC32(dsppos_val, dsppos_reg); PSB_WVDC32(dspsurf_val, dspsurf_reg); if (pipe == 1) { /* restore palette (gamma) */ /*DRM_UDELAY(50000); */ for (i = 0; i < 256; i++) PSB_WVDC32(palette_val[i], palette_reg + (i << 2)); PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL); PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS); /*TODO: resume HDMI port */ /*TODO: resume pipe*/ /*enable the plane*/ PSB_WVDC32(dspcntr_val & ~DISPLAY_PLANE_ENABLE, dspcntr_reg); return 0; } /*set up pipe related registers*/ 
PSB_WVDC32(mipi_val, mipi_reg); /*setup MIPI adapter + MIPI IP registers*/ if (dsi_config) mdfld_dsi_controller_init(dsi_config, pipe); if (in_atomic() || in_interrupt()) mdelay(20); else msleep(20); /*enable the plane*/ PSB_WVDC32(dspcntr_val, dspcntr_reg); if (in_atomic() || in_interrupt()) mdelay(20); else msleep(20); /* LP Hold Release */ temp = REG_READ(mipi_reg); temp |= LP_OUTPUT_HOLD_RELEASE; REG_WRITE(mipi_reg, temp); mdelay(1); /* Set DSI host to exit from Utra Low Power State */ temp = REG_READ(device_ready_reg); temp &= ~ULPS_MASK; temp |= 0x3; temp |= EXIT_ULPS_DEV_READY; REG_WRITE(device_ready_reg, temp); mdelay(1); temp = REG_READ(device_ready_reg); temp &= ~ULPS_MASK; temp |= EXITING_ULPS; REG_WRITE(device_ready_reg, temp); mdelay(1); /*enable the pipe*/ PSB_WVDC32(pipeconf_val, pipeconf_reg); /* restore palette (gamma) */ /*DRM_UDELAY(50000); */ for (i = 0; i < 256; i++) PSB_WVDC32(palette_val[i], palette_reg + (i << 2)); return 0; } static int mdfld_save_registers(struct drm_device *dev) { /* mdfld_save_cursor_overlay_registers(dev); */ mdfld_save_display_registers(dev, 0); mdfld_save_display_registers(dev, 2); mdfld_disable_crtc(dev, 0); mdfld_disable_crtc(dev, 2); return 0; } static int mdfld_restore_registers(struct drm_device *dev) { mdfld_restore_display_registers(dev, 2); mdfld_restore_display_registers(dev, 0); /* mdfld_restore_cursor_overlay_registers(dev); */ return 0; } static int mdfld_power_down(struct drm_device *dev) { /* FIXME */ return 0; } static int mdfld_power_up(struct drm_device *dev) { /* FIXME */ return 0; } const struct psb_ops mdfld_chip_ops = { .name = "mdfld", .accel_2d = 0, .pipes = 3, .crtcs = 3, .sgx_offset = MRST_SGX_OFFSET, .chip_setup = mid_chip_setup, .crtc_helper = &mdfld_helper_funcs, .crtc_funcs = &psb_intel_crtc_funcs, .output_init = mdfld_output_init, #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE .backlight_init = mdfld_backlight_init, #endif .save_regs = mdfld_save_registers, .restore_regs = mdfld_restore_registers, 
.power_down = mdfld_power_down, .power_up = mdfld_power_up, };
gpl-2.0
bju2000/Sense7_Kernel_b2wlj
drivers/gpu/drm/gma500/cdv_device.c
5262
13084
/************************************************************************** * Copyright (c) 2011, Intel Corporation. * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * **************************************************************************/ #include <linux/backlight.h> #include <drm/drmP.h> #include <drm/drm.h> #include "gma_drm.h" #include "psb_drv.h" #include "psb_reg.h" #include "psb_intel_reg.h" #include "intel_bios.h" #include "cdv_device.h" #define VGA_SR_INDEX 0x3c4 #define VGA_SR_DATA 0x3c5 static void cdv_disable_vga(struct drm_device *dev) { u8 sr1; u32 vga_reg; vga_reg = VGACNTRL; outb(1, VGA_SR_INDEX); sr1 = inb(VGA_SR_DATA); outb(sr1 | 1<<5, VGA_SR_DATA); udelay(300); REG_WRITE(vga_reg, VGA_DISP_DISABLE); REG_READ(vga_reg); } static int cdv_output_init(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; cdv_disable_vga(dev); cdv_intel_crt_init(dev, &dev_priv->mode_dev); cdv_intel_lvds_init(dev, &dev_priv->mode_dev); /* These bits indicate HDMI not SDVO on CDV, but we don't yet support the HDMI interface */ if (REG_READ(SDVOB) & SDVO_DETECTED) cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB); if (REG_READ(SDVOC) & SDVO_DETECTED) cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC); return 0; } #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE /* * Poulsbo Backlight Interfaces */ #define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */ 
#define BLC_PWM_FREQ_CALC_CONSTANT 32 #define MHz 1000000 #define PSB_BLC_PWM_PRECISION_FACTOR 10 #define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE #define PSB_BLC_MIN_PWM_REG_FREQ 0x2 #define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE) #define PSB_BACKLIGHT_PWM_CTL_SHIFT (16) static int cdv_brightness; static struct backlight_device *cdv_backlight_device; static int cdv_get_brightness(struct backlight_device *bd) { /* return locally cached var instead of HW read (due to DPST etc.) */ /* FIXME: ideally return actual value in case firmware fiddled with it */ return cdv_brightness; } static int cdv_backlight_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; unsigned long core_clock; /* u32 bl_max_freq; */ /* unsigned long value; */ u16 bl_max_freq; uint32_t value; uint32_t blc_pwm_precision_factor; /* get bl_max_freq and pol from dev_priv*/ if (!dev_priv->lvds_bl) { dev_err(dev->dev, "Has no valid LVDS backlight info\n"); return -ENOENT; } bl_max_freq = dev_priv->lvds_bl->freq; blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR; core_clock = dev_priv->core_freq; value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT; value *= blc_pwm_precision_factor; value /= bl_max_freq; value /= blc_pwm_precision_factor; if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ || value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ) return -ERANGE; else { /* FIXME */ } return 0; } static int cdv_set_brightness(struct backlight_device *bd) { int level = bd->props.brightness; /* Percentage 1-100% being valid */ if (level < 1) level = 1; /*cdv_intel_lvds_set_brightness(dev, level); FIXME */ cdv_brightness = level; return 0; } static const struct backlight_ops cdv_ops = { .get_brightness = cdv_get_brightness, .update_status = cdv_set_brightness, }; static int cdv_backlight_init(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; int ret; struct backlight_properties props; memset(&props, 0, sizeof(struct 
backlight_properties)); props.max_brightness = 100; props.type = BACKLIGHT_PLATFORM; cdv_backlight_device = backlight_device_register("psb-bl", NULL, (void *)dev, &cdv_ops, &props); if (IS_ERR(cdv_backlight_device)) return PTR_ERR(cdv_backlight_device); ret = cdv_backlight_setup(dev); if (ret < 0) { backlight_device_unregister(cdv_backlight_device); cdv_backlight_device = NULL; return ret; } cdv_backlight_device->props.brightness = 100; cdv_backlight_device->props.max_brightness = 100; backlight_update_status(cdv_backlight_device); dev_priv->backlight_device = cdv_backlight_device; return 0; } #endif /* * Provide the Cedarview specific chip logic and low level methods * for power management * * FIXME: we need to implement the apm/ospm base management bits * for this and the MID devices. */ static inline u32 CDV_MSG_READ32(uint port, uint offset) { int mcr = (0x10<<24) | (port << 16) | (offset << 8); uint32_t ret_val = 0; struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); pci_write_config_dword(pci_root, 0xD0, mcr); pci_read_config_dword(pci_root, 0xD4, &ret_val); pci_dev_put(pci_root); return ret_val; } static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value) { int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0; struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); pci_write_config_dword(pci_root, 0xD4, value); pci_write_config_dword(pci_root, 0xD0, mcr); pci_dev_put(pci_root); } #define PSB_PM_SSC 0x20 #define PSB_PM_SSS 0x30 #define PSB_PWRGT_GFX_ON 0x02 #define PSB_PWRGT_GFX_OFF 0x01 #define PSB_PWRGT_GFX_D0 0x00 #define PSB_PWRGT_GFX_D3 0x03 static void cdv_init_pm(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; u32 pwr_cnt; int i; dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT, PSB_APMBA) & 0xFFFF; dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT, PSB_OSPMBA) & 0xFFFF; /* Power status */ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD); /* Enable the GPU */ pwr_cnt &= ~PSB_PWRGT_GFX_MASK; pwr_cnt 
|= PSB_PWRGT_GFX_ON; outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD); /* Wait for the GPU power */ for (i = 0; i < 5; i++) { u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0) return; udelay(10); } dev_err(dev->dev, "GPU: power management timed out.\n"); } /** * cdv_save_display_registers - save registers lost on suspend * @dev: our DRM device * * Save the state we need in order to be able to restore the interface * upon resume from suspend */ static int cdv_save_display_registers(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct psb_save_area *regs = &dev_priv->regs; struct drm_connector *connector; dev_info(dev->dev, "Saving GPU registers.\n"); pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB); regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D); regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D); regs->cdv.saveDSPARB = REG_READ(DSPARB); regs->cdv.saveDSPFW[0] = REG_READ(DSPFW1); regs->cdv.saveDSPFW[1] = REG_READ(DSPFW2); regs->cdv.saveDSPFW[2] = REG_READ(DSPFW3); regs->cdv.saveDSPFW[3] = REG_READ(DSPFW4); regs->cdv.saveDSPFW[4] = REG_READ(DSPFW5); regs->cdv.saveDSPFW[5] = REG_READ(DSPFW6); regs->cdv.saveADPA = REG_READ(ADPA); regs->cdv.savePP_CONTROL = REG_READ(PP_CONTROL); regs->cdv.savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS); regs->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); regs->saveBLC_PWM_CTL2 = REG_READ(BLC_PWM_CTL2); regs->cdv.saveLVDS = REG_READ(LVDS); regs->cdv.savePFIT_CONTROL = REG_READ(PFIT_CONTROL); regs->cdv.savePP_ON_DELAYS = REG_READ(PP_ON_DELAYS); regs->cdv.savePP_OFF_DELAYS = REG_READ(PP_OFF_DELAYS); regs->cdv.savePP_CYCLE = REG_READ(PP_CYCLE); regs->cdv.saveVGACNTRL = REG_READ(VGACNTRL); regs->cdv.saveIER = REG_READ(PSB_INT_ENABLE_R); regs->cdv.saveIMR = REG_READ(PSB_INT_MASK_R); list_for_each_entry(connector, &dev->mode_config.connector_list, head) connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); return 0; } /** * cdv_restore_display_registers - 
restore lost register state * @dev: our DRM device * * Restore register state that was lost during suspend and resume. * * FIXME: review */ static int cdv_restore_display_registers(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct psb_save_area *regs = &dev_priv->regs; struct drm_connector *connector; u32 temp; pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB); REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D); REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D); /* BIOS does below anyway */ REG_WRITE(DPIO_CFG, 0); REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N); temp = REG_READ(DPLL_A); if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) { REG_WRITE(DPLL_A, temp | DPLL_SYNCLOCK_ENABLE); REG_READ(DPLL_A); } temp = REG_READ(DPLL_B); if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) { REG_WRITE(DPLL_B, temp | DPLL_SYNCLOCK_ENABLE); REG_READ(DPLL_B); } udelay(500); REG_WRITE(DSPFW1, regs->cdv.saveDSPFW[0]); REG_WRITE(DSPFW2, regs->cdv.saveDSPFW[1]); REG_WRITE(DSPFW3, regs->cdv.saveDSPFW[2]); REG_WRITE(DSPFW4, regs->cdv.saveDSPFW[3]); REG_WRITE(DSPFW5, regs->cdv.saveDSPFW[4]); REG_WRITE(DSPFW6, regs->cdv.saveDSPFW[5]); REG_WRITE(DSPARB, regs->cdv.saveDSPARB); REG_WRITE(ADPA, regs->cdv.saveADPA); REG_WRITE(BLC_PWM_CTL2, regs->saveBLC_PWM_CTL2); REG_WRITE(LVDS, regs->cdv.saveLVDS); REG_WRITE(PFIT_CONTROL, regs->cdv.savePFIT_CONTROL); REG_WRITE(PFIT_PGM_RATIOS, regs->cdv.savePFIT_PGM_RATIOS); REG_WRITE(BLC_PWM_CTL, regs->saveBLC_PWM_CTL); REG_WRITE(PP_ON_DELAYS, regs->cdv.savePP_ON_DELAYS); REG_WRITE(PP_OFF_DELAYS, regs->cdv.savePP_OFF_DELAYS); REG_WRITE(PP_CYCLE, regs->cdv.savePP_CYCLE); REG_WRITE(PP_CONTROL, regs->cdv.savePP_CONTROL); REG_WRITE(VGACNTRL, regs->cdv.saveVGACNTRL); REG_WRITE(PSB_INT_ENABLE_R, regs->cdv.saveIER); REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR); /* Fix arbitration bug */ CDV_MSG_WRITE32(3, 0x30, 0x08027108); drm_mode_config_reset(dev); list_for_each_entry(connector, &dev->mode_config.connector_list, head) 
connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); /* Resume the modeset for every activated CRTC */ drm_helper_resume_force_mode(dev); return 0; } static int cdv_power_down(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; u32 pwr_cnt, pwr_mask, pwr_sts; int tries = 5; pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD); pwr_cnt &= ~PSB_PWRGT_GFX_MASK; pwr_cnt |= PSB_PWRGT_GFX_OFF; pwr_mask = PSB_PWRGT_GFX_MASK; outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD); while (tries--) { pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D3) return 0; udelay(10); } return 0; } static int cdv_power_up(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; u32 pwr_cnt, pwr_mask, pwr_sts; int tries = 5; pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD); pwr_cnt &= ~PSB_PWRGT_GFX_MASK; pwr_cnt |= PSB_PWRGT_GFX_ON; pwr_mask = PSB_PWRGT_GFX_MASK; outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD); while (tries--) { pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS); if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D0) return 0; udelay(10); } return 0; } /* FIXME ? 
- shared with Poulsbo */ static void cdv_get_core_freq(struct drm_device *dev) { uint32_t clock; struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0); struct drm_psb_private *dev_priv = dev->dev_private; pci_write_config_dword(pci_root, 0xD0, 0xD0050300); pci_read_config_dword(pci_root, 0xD4, &clock); pci_dev_put(pci_root); switch (clock & 0x07) { case 0: dev_priv->core_freq = 100; break; case 1: dev_priv->core_freq = 133; break; case 2: dev_priv->core_freq = 150; break; case 3: dev_priv->core_freq = 178; break; case 4: dev_priv->core_freq = 200; break; case 5: case 6: case 7: dev_priv->core_freq = 266; default: dev_priv->core_freq = 0; } } static int cdv_chip_setup(struct drm_device *dev) { cdv_get_core_freq(dev); gma_intel_opregion_init(dev); psb_intel_init_bios(dev); REG_WRITE(PORT_HOTPLUG_EN, 0); REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT)); return 0; } /* CDV is much like Poulsbo but has MID like SGX offsets and PM */ const struct psb_ops cdv_chip_ops = { .name = "GMA3600/3650", .accel_2d = 0, .pipes = 2, .crtcs = 2, .sgx_offset = MRST_SGX_OFFSET, .chip_setup = cdv_chip_setup, .crtc_helper = &cdv_intel_helper_funcs, .crtc_funcs = &cdv_intel_crtc_funcs, .output_init = cdv_output_init, #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE .backlight_init = cdv_backlight_init, #endif .init_pm = cdv_init_pm, .save_regs = cdv_save_display_registers, .restore_regs = cdv_restore_display_registers, .power_down = cdv_power_down, .power_up = cdv_power_up, };
gpl-2.0
fire855/android_kernel_wiko_l5510
arch/powerpc/boot/ps3.c
11918
4030
/* * PS3 bootwrapper support. * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" extern int lv1_panic(u64 in_1); extern int lv1_get_logical_partition_id(u64 *out_1); extern int lv1_get_logical_ppe_id(u64 *out_1); extern int lv1_get_repository_node_value(u64 in_1, u64 in_2, u64 in_3, u64 in_4, u64 in_5, u64 *out_1, u64 *out_2); #ifdef DEBUG #define DBG(fmt...) printf(fmt) #else static inline int __attribute__ ((format (printf, 1, 2))) DBG( const char *fmt, ...) {return 0;} #endif BSS_STACK(4096); /* A buffer that may be edited by tools operating on a zImage binary so as to * edit the command line passed to vmlinux (by setting /chosen/bootargs). * The buffer is put in it's own section so that tools may locate it easier. 
*/ static char cmdline[COMMAND_LINE_SIZE] __attribute__((__section__("__builtin_cmdline"))); static void prep_cmdline(void *chosen) { if (cmdline[0] == '\0') getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1); else setprop_str(chosen, "bootargs", cmdline); printf("cmdline: '%s'\n", cmdline); } static void ps3_console_write(const char *buf, int len) { } static void ps3_exit(void) { printf("ps3_exit\n"); /* lv1_panic will shutdown the lpar. */ lv1_panic(0); /* zero = do not reboot */ while (1); } static int ps3_repository_read_rm_size(u64 *rm_size) { int result; u64 lpar_id; u64 ppe_id; u64 v2; result = lv1_get_logical_partition_id(&lpar_id); if (result) return -1; result = lv1_get_logical_ppe_id(&ppe_id); if (result) return -1; /* * n1: 0000000062690000 : ....bi.. * n2: 7075000000000000 : pu...... * n3: 0000000000000001 : ........ * n4: 726d5f73697a6500 : rm_size. */ result = lv1_get_repository_node_value(lpar_id, 0x0000000062690000ULL, 0x7075000000000000ULL, ppe_id, 0x726d5f73697a6500ULL, rm_size, &v2); printf("%s:%d: ppe_id %lu \n", __func__, __LINE__, (unsigned long)ppe_id); printf("%s:%d: lpar_id %lu \n", __func__, __LINE__, (unsigned long)lpar_id); printf("%s:%d: rm_size %llxh \n", __func__, __LINE__, *rm_size); return result ? 
-1 : 0; } void ps3_copy_vectors(void) { extern char __system_reset_kernel[]; memcpy((void *)0x100, __system_reset_kernel, 512); flush_cache((void *)0x100, 512); } void platform_init(unsigned long null_check) { const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */ void *chosen; unsigned long ft_addr; u64 rm_size; unsigned long val; console_ops.write = ps3_console_write; platform_ops.exit = ps3_exit; printf("\n-- PS3 bootwrapper --\n"); simple_alloc_init(_end, heapsize, 32, 64); fdt_init(_dtb_start); chosen = finddevice("/chosen"); ps3_repository_read_rm_size(&rm_size); dt_fixup_memory(0, rm_size); if (_initrd_end > _initrd_start) { setprop_val(chosen, "linux,initrd-start", (u32)(_initrd_start)); setprop_val(chosen, "linux,initrd-end", (u32)(_initrd_end)); } prep_cmdline(chosen); ft_addr = dt_ops.finalize(); ps3_copy_vectors(); printf(" flat tree at 0x%lx\n\r", ft_addr); val = *(unsigned long *)0; if (val != null_check) printf("null check failed: %lx != %lx\n\r", val, null_check); ((kernel_entry_t)0)(ft_addr, 0, NULL); ps3_exit(); }
gpl-2.0
Clumsy-Kernel-Development/M8_Kernel
sound/core/seq/seq_fifo.c
12686
6036
/*
 * ALSA sequencer FIFO
 * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/core.h>
#include <linux/slab.h>
#include "seq_fifo.h"
#include "seq_lock.h"

/* FIFO */

/* create new fifo.
 * Allocates the fifo structure and a cell pool of 'poolsize' cells,
 * initialises the lock, use-lock, reader waitqueue and counters.
 * Returns NULL on any allocation/initialisation failure (partial
 * allocations are rolled back). */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (f == NULL) {
		snd_printd("malloc failed for snd_seq_fifo_new() \n");
		return NULL;
	}

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL) {
		kfree(f);
		return NULL;
	}
	if (snd_seq_pool_init(f->pool) < 0) {
		/* pool object exists but its cells could not be set up */
		snd_seq_pool_delete(&f->pool);
		kfree(f);
		return NULL;
	}

	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	/* empty list: head/tail NULL, no cells queued */
	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;
}

/* Destroy a fifo: detach it from the caller's pointer first (so no new
 * users can reach it), drain all cells, wake any sleeping readers, then
 * release the pool and the fifo itself. */
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release resources...*/
	/*....................*/

	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue: drop every queued cell back to the pool.
 * Waits for in-flight users (use_lock) before draining under f->lock. */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;

	/* clear overflow flag */
	atomic_set(&f->overflow, 0);

	snd_use_lock_sync(&f->use_lock);
	spin_lock_irqsave(&f->lock, flags);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL) {
		snd_seq_cell_free(cell);
	}
	spin_unlock_irqrestore(&f->lock, flags);
}

/* enqueue event to fifo.
 * Copies 'event' into a cell from the fifo's pool (never blocking;
 * -ENOMEM bumps the overflow counter), appends the cell to the tail,
 * and wakes one sleeping reader.  Returns 0 on success or the negative
 * error from the cell duplication. */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	snd_use_lock_use(&f->use_lock);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
	if (err < 0) {
		if (err == -ENOMEM)
			atomic_inc(&f->overflow);
		snd_use_lock_free(&f->use_lock);
		return err;
	}

	/* append new cells to fifo */
	spin_lock_irqsave(&f->lock, flags);
	if (f->tail != NULL)
		f->tail->next = cell;
	f->tail = cell;
	if (f->head == NULL)
		f->head = cell;
	f->cells++;
	spin_unlock_irqrestore(&f->lock, flags);

	/* wakeup client */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	snd_use_lock_free(&f->use_lock);

	return 0; /* success */
}

/* dequeue cell from fifo.
 * Caller must hold f->lock.  Returns NULL when the fifo is empty. */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	if ((cell = f->head) != NULL) {
		f->head = cell->next;

		/* reset tail if this was the last element */
		if (f->tail == cell)
			f->tail = NULL;

		cell->next = NULL;
		f->cells--;
	}

	return cell;
}

/* dequeue cell from fifo and copy on user space.
 * Blocks (interruptibly) until a cell is available unless 'nonblock'
 * is set, in which case -EAGAIN is returned on an empty fifo.
 * NOTE(review): the sleep path releases/retakes the lock with
 * spin_unlock_irq/spin_lock_irq while 'flags' was saved with
 * spin_lock_irqsave — presumably callers always run with interrupts
 * enabled; confirm before restyling this locking. */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		spin_unlock_irq(&f->lock);
		schedule();
		spin_lock_irq(&f->lock);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}

/* Push a cell back onto the head of the fifo (undo of a dequeue),
 * e.g. when the consumer could not process it. */
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
			       struct snd_seq_event_cell *cell)
{
	unsigned long flags;

	if (cell) {
		spin_lock_irqsave(&f->lock, flags);
		cell->next = f->head;
		f->head = cell;
		f->cells++;
		spin_unlock_irqrestore(&f->lock, flags);
	}
}

/* polling; return non-zero if queue is available */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
	return (f->cells > 0);
}

/* change the size of pool; all old events are removed.
 * A fresh pool is swapped in under the lock (resetting head/tail and
 * the cell count), then the old cells and pool are released outside
 * the lock.  The overflow counter is deliberately preserved. */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	unsigned long flags;
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	spin_lock_irqsave(&f->lock, flags);
	/* remember old pool */
	oldpool = f->pool;
	oldhead = f->head;
	/* exchange pools */
	f->pool = newpool;
	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;
	/* NOTE: overflow flag is not cleared */
	spin_unlock_irqrestore(&f->lock, flags);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}
gpl-2.0
ElKowak/android_kernel_motorola_msm8610
drivers/media/video/ivtv/ivtv-queue.c
14222
9002
/* buffer queues.
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"

/* Append up to 'copybytes' bytes of user data to 'buf', clamped to the
 * space remaining in the buffer.  Returns the number of bytes actually
 * copied, or -EFAULT if the user copy faults. */
int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf,
			    const char __user *src, int copybytes)
{
	if (s->buf_size - buf->bytesused < copybytes)
		copybytes = s->buf_size - buf->bytesused;
	if (copy_from_user(buf->buf + buf->bytesused, src, copybytes)) {
		return -EFAULT;
	}
	buf->bytesused += copybytes;
	return copybytes;
}

/* Byte-swap the used portion of the buffer in 32-bit words.
 * NOTE(review): assumes bytesused is a multiple of 4 — confirm callers. */
void ivtv_buf_swap(struct ivtv_buffer *buf)
{
	int i;

	for (i = 0; i < buf->bytesused; i += 4)
		swab32s((u32 *)(buf->buf + i));
}

/* Initialise a queue to the empty state (no buffers, zero accounting). */
void ivtv_queue_init(struct ivtv_queue *q)
{
	INIT_LIST_HEAD(&q->list);
	q->buffers = 0;
	q->length = 0;
	q->bytesused = 0;
}

/* Append 'buf' to queue 'q' under the stream's qlock, updating the
 * buffer/length/bytesused accounting.  Buffers destined for the free
 * queue are reset first. */
void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf,
		  struct ivtv_queue *q)
{
	unsigned long flags;

	/* clear the buffer if it is going to be enqueued to the free queue */
	if (q == &s->q_free) {
		buf->bytesused = 0;
		buf->readpos = 0;
		buf->b_flags = 0;
		buf->dma_xfer_cnt = 0;
	}
	spin_lock_irqsave(&s->qlock, flags);
	list_add_tail(&buf->list, &q->list);
	q->buffers++;
	q->length += s->buf_size;
	q->bytesused += buf->bytesused - buf->readpos;
	spin_unlock_irqrestore(&s->qlock, flags);
}

/* Remove and return the buffer at the head of 'q', or NULL if the queue
 * is empty.  Accounting is decremented to mirror ivtv_enqueue(). */
struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
{
	struct ivtv_buffer *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&s->qlock, flags);
	if (!list_empty(&q->list)) {
		buf = list_entry(q->list.next, struct ivtv_buffer, list);
		list_del_init(q->list.next);
		q->buffers--;
		q->length -= s->buf_size;
		q->bytesused -= buf->bytesused - buf->readpos;
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return buf;
}

/* Move the head buffer of 'from' to the tail of 'to', keeping both
 * queues' accounting consistent.  Caller must hold s->qlock.  When
 * 'clear' is set the buffer state is reset (free-queue semantics);
 * note the reset happens before 'to' accounting, so a cleared buffer
 * contributes 0 to to->bytesused. */
static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
				struct ivtv_queue *to, int clear)
{
	struct ivtv_buffer *buf =
		list_entry(from->list.next, struct ivtv_buffer, list);

	list_move_tail(from->list.next, &to->list);
	from->buffers--;
	from->length -= s->buf_size;
	from->bytesused -= buf->bytesused - buf->readpos;

	/* special handling for q_free */
	if (clear)
		buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;

	to->buffers++;
	to->length += s->buf_size;
	to->bytesused += buf->bytesused - buf->readpos;
}

/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
   If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
   If 'steal' != NULL, then buffers may also taken from that queue if
   needed, but only if 'from' is the free queue.

   The buffer is automatically cleared if it goes to the free queue. It is
   also cleared if buffers need to be taken from the 'steal' queue and
   the 'from' queue is the free queue.

   When 'from' is q_free, then needed_bytes is compared to the total
   available buffer length, otherwise needed_bytes is compared to the
   bytesused value. For the 'steal' queue the total available buffer
   length is always used.

   -ENOMEM is returned if the buffers could not be obtained, 0 if all
   buffers where obtained from the 'from' list and if non-zero then
   the number of stolen buffers is returned. */
int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from,
		    struct ivtv_queue *steal, struct ivtv_queue *to,
		    int needed_bytes)
{
	unsigned long flags;
	int rc = 0;
	int from_free = from == &s->q_free;
	int to_free = to == &s->q_free;
	int bytes_available, bytes_steal;

	spin_lock_irqsave(&s->qlock, flags);
	if (needed_bytes == 0) {
		from_free = 1;
		needed_bytes = from->length;
	}

	bytes_available = from_free ? from->length : from->bytesused;
	bytes_steal = (from_free && steal) ? steal->length : 0;

	if (bytes_available + bytes_steal < needed_bytes) {
		spin_unlock_irqrestore(&s->qlock, flags);
		return -ENOMEM;
	}
	while (bytes_available < needed_bytes) {
		struct ivtv_buffer *buf =
			list_entry(steal->list.prev, struct ivtv_buffer, list);
		u16 dma_xfer_cnt = buf->dma_xfer_cnt;

		/* move buffers from the tail of the 'steal' queue to the
		   tail of the 'from' queue. Always copy all the buffers
		   with the same dma_xfer_cnt value, this ensures that you
		   do not end up with partial frame data if one frame is
		   stored in multiple buffers. */
		while (dma_xfer_cnt == buf->dma_xfer_cnt) {
			list_move_tail(steal->list.prev, &from->list);
			rc++;
			steal->buffers--;
			steal->length -= s->buf_size;
			steal->bytesused -= buf->bytesused - buf->readpos;
			buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
			from->buffers++;
			from->length += s->buf_size;
			bytes_available += s->buf_size;
			if (list_empty(&steal->list))
				break;
			buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
		}
	}
	if (from_free) {
		u32 old_length = to->length;

		/* free-queue source: count whole buffers by capacity */
		while (to->length - old_length < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, 1);
		}
	} else {
		u32 old_bytesused = to->bytesused;

		/* data-carrying source: count actual payload bytes */
		while (to->bytesused - old_bytesused < needed_bytes) {
			ivtv_queue_move_buf(s, from, to, to_free);
		}
	}
	spin_unlock_irqrestore(&s->qlock, flags);
	return rc;
}

/* Return every buffer from the working queues to q_free. */
void ivtv_flush_queues(struct ivtv_stream *s)
{
	ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
	ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
}

/* Allocate all per-stream resources: the SG pending/processing arrays,
 * the SG DMA element (mapped for DMA if the stream may use it), and
 * s->buffers data buffers, each placed on q_free.  On any failure
 * everything already allocated is torn down and -ENOMEM is returned. */
int ivtv_stream_alloc(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	int SGsize = sizeof(struct ivtv_sg_host_element) * s->buffers;
	int i;

	if (s->buffers == 0)
		return 0;

	IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
		s->dma != PCI_DMA_NONE ? "DMA " : "",
		s->name, s->buffers, s->buf_size,
		s->buffers * s->buf_size / 1024);

	s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_pending == NULL) {
		IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
		return -ENOMEM;
	}
	s->sg_pending_size = 0;

	s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_processing == NULL) {
		IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		return -ENOMEM;
	}
	s->sg_processing_size = 0;

	s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
			GFP_KERNEL|__GFP_NOWARN);
	if (s->sg_dma == NULL) {
		IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
		kfree(s->sg_pending);
		s->sg_pending = NULL;
		kfree(s->sg_processing);
		s->sg_processing = NULL;
		return -ENOMEM;
	}
	if (ivtv_might_use_dma(s)) {
		s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
				sizeof(struct ivtv_sg_element),
				PCI_DMA_TODEVICE);
		ivtv_stream_sync_for_cpu(s);
	}

	/* allocate stream buffers. Initially all buffers are in q_free. */
	for (i = 0; i < s->buffers; i++) {
		struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
						GFP_KERNEL|__GFP_NOWARN);

		if (buf == NULL)
			break;
		/* +256: extra space — presumably DMA alignment/overrun
		   slack; mirrored in ivtv_stream_free() unmap below */
		buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
		if (buf->buf == NULL) {
			kfree(buf);
			break;
		}
		INIT_LIST_HEAD(&buf->list);
		if (ivtv_might_use_dma(s)) {
			buf->dma_handle = pci_map_single(s->itv->pdev,
				buf->buf, s->buf_size + 256, s->dma);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_enqueue(s, buf, &s->q_free);
	}
	if (i == s->buffers)
		return 0;
	IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	ivtv_stream_free(s);
	return -ENOMEM;
}

/* Release everything ivtv_stream_alloc() acquired: drain all queues to
 * q_free, free (and unmap) each data buffer, then free the SG arrays. */
void ivtv_stream_free(struct ivtv_stream *s)
{
	struct ivtv_buffer *buf;

	/* move all buffers to q_free */
	ivtv_flush_queues(s);

	/* empty q_free */
	while ((buf = ivtv_dequeue(s, &s->q_free))) {
		if (ivtv_might_use_dma(s))
			pci_unmap_single(s->itv->pdev, buf->dma_handle,
				s->buf_size + 256, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}

	/* Free SG Array/Lists */
	if (s->sg_dma != NULL) {
		if (s->sg_handle != IVTV_DMA_UNMAPPED) {
			pci_unmap_single(s->itv->pdev, s->sg_handle,
				 sizeof(struct ivtv_sg_element),
				 PCI_DMA_TODEVICE);
			s->sg_handle = IVTV_DMA_UNMAPPED;
		}
		kfree(s->sg_pending);
		kfree(s->sg_processing);
		kfree(s->sg_dma);

		s->sg_pending = NULL;
		s->sg_processing = NULL;
		s->sg_dma = NULL;
		s->sg_pending_size = 0;
		s->sg_processing_size = 0;
	}
}
gpl-2.0
zhaoleidd/btrfs
drivers/media/usb/cx231xx/cx231xx-cards.c
143
43624
/* cx231xx-cards.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on em28xx driver This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "cx231xx.h" #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/i2c.h> #include <media/tuner.h> #include <media/tveeprom.h> #include <media/v4l2-common.h> #include <media/cx25840.h> #include "dvb-usb-ids.h" #include "xc5000.h" #include "tda18271.h" static int tuner = -1; module_param(tuner, int, 0444); MODULE_PARM_DESC(tuner, "tuner type"); static int transfer_mode = 1; module_param(transfer_mode, int, 0444); MODULE_PARM_DESC(transfer_mode, "transfer mode (1-ISO or 0-BULK)"); static unsigned int disable_ir; module_param(disable_ir, int, 0444); MODULE_PARM_DESC(disable_ir, "disable infrared remote support"); /* Bitmask marking allocated devices from 0 to CX231XX_MAXBOARDS */ static unsigned long cx231xx_devused; /* * Reset sequences for analog/digital modes */ static struct cx231xx_reg_seq RDE250_XCV_TUNER[] = { {0x03, 0x01, 10}, {0x03, 0x00, 30}, {0x03, 0x01, 10}, {-1, -1, -1}, }; /* * Board definitions */ struct cx231xx_board cx231xx_boards[] = { [CX231XX_BOARD_UNKNOWN] = { .name = "Unknown CX231xx video grabber", .tuner_type = TUNER_ABSENT, .input = {{ .type = CX231XX_VMUX_TELEVISION, 
.vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_CARRAERA] = { .name = "Conexant Hybrid TV - CARRAERA", .tuner_type = TUNER_XC5000, .tuner_addr = 0x61, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x02, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_SHELBY] = { .name = "Conexant Hybrid TV - SHELBY", .tuner_type = TUNER_XC5000, .tuner_addr = 0x61, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x32, .norm = V4L2_STD_NTSC, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { 
.type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_RDE_253S] = { .name = "Conexant Hybrid TV - RDE253S", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x1c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x02, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_RDU_253S] = { .name = "Conexant Hybrid TV - RDU253S", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x1c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x02, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, 
[CX231XX_BOARD_CNXT_VIDEO_GRABBER] = { .name = "Conexant VIDEO GRABBER", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x1c, .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_PAL, .no_alt_vanc = 1, .external_av = 1, /* Actually, it has a 417, but it isn't working correctly. * So set to 0 for now until someone can manage to get this * to work reliably. */ .has_417 = 0, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_RDE_250] = { .name = "Conexant Hybrid TV - rde 250", .tuner_type = TUNER_XC5000, .tuner_addr = 0x61, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x02, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_RDU_250] = { .name = "Conexant Hybrid TV - RDU 250", .tuner_type = TUNER_XC5000, .tuner_addr = 0x61, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x32, .norm = V4L2_STD_NTSC, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = 
CX231XX_VIN_2_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_EXETER] = { .name = "Hauppauge EXETER", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_1, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_NTSC, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_USBLIVE2] = { .name = "Hauppauge USB Live 2", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_NTSC, .no_alt_vanc = 1, .external_av = 1, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_KWORLD_UB430_USB_HYBRID] = { .name = "Kworld UB430 USB Hybrid", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x11, /* According with PV cxPolaris.inf file */ .tuner_sif_gpio = -1, .tuner_scl_gpio = -1, .tuner_sda_gpio = -1, 
.gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_2, .demod_i2c_master = I2C_1_MUX_3, .ir_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x10, .norm = V4L2_STD_PAL_M, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_KWORLD_UB445_USB_HYBRID] = { .name = "Kworld UB445 USB Hybrid", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x11, /* According with PV cxPolaris.inf file */ .tuner_sif_gpio = -1, .tuner_scl_gpio = -1, .tuner_sda_gpio = -1, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_2, .demod_i2c_master = I2C_1_MUX_3, .ir_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x10, .norm = V4L2_STD_NTSC_M, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_PV_PLAYTV_USB_HYBRID] = { .name = "Pixelview PlayTV USB Hybrid", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */ .tuner_sif_gpio = -1, .tuner_scl_gpio = -1, .tuner_sda_gpio = -1, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_2, .demod_i2c_master = 
I2C_1_MUX_3, .ir_i2c_master = I2C_2, .rc_map_name = RC_MAP_PIXELVIEW_002T, .has_dvb = 1, .demod_addr = 0x10, .norm = V4L2_STD_PAL_M, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_PV_XCAPTURE_USB] = { .name = "Pixelview Xcapture USB", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_NTSC, .no_alt_vanc = 1, .external_av = 1, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_ICONBIT_U100] = { .name = "Iconbit Analog Stick U100 FM", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x1C, .gpio_pin_status_mask = 0x4001000, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL] = { .name = "Hauppauge WinTV USB2 FM (PAL)", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, 
.ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC] = { .name = "Hauppauge WinTV USB2 FM (NTSC)", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .norm = V4L2_STD_NTSC, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2] = { .name = "Elgato Video Capture V2", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_NTSC, .no_alt_vanc = 1, .external_av = 1, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = 
NULL, } }, }, [CX231XX_BOARD_OTG102] = { .name = "Geniatech OTG102", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, /* According with PV CxPlrCAP.inf file */ .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_NTSC, .no_alt_vanc = 1, .external_av = 1, /*.has_417 = 1, */ /* This board is believed to have a hardware encoding chip * supporting mpeg1/2/4, but as the 417 is apparently not * working for the reference board it is not here either. */ .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx] = { .name = "Hauppauge WinTV 930C-HD (1113xx) / HVR-900H (111xxx) / PCTV QuatroStick 521e", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx] = { .name = "Hauppauge WinTV 930C-HD (1114xx) / HVR-901H (1114xx) / PCTV QuatroStick 522e", .tuner_type = TUNER_ABSENT, .tuner_addr = 0x60, 
.tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, }; const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards); /* table of devices that work with this driver */ struct usb_device_id cx231xx_id_table[] = { {USB_DEVICE(0x1D19, 0x6109), .driver_info = CX231XX_BOARD_PV_XCAPTURE_USB}, {USB_DEVICE(0x0572, 0x5A3C), .driver_info = CX231XX_BOARD_UNKNOWN}, {USB_DEVICE(0x0572, 0x58A2), .driver_info = CX231XX_BOARD_CNXT_CARRAERA}, {USB_DEVICE(0x0572, 0x58A1), .driver_info = CX231XX_BOARD_CNXT_SHELBY}, {USB_DEVICE(0x0572, 0x58A4), .driver_info = CX231XX_BOARD_CNXT_RDE_253S}, {USB_DEVICE(0x0572, 0x58A5), .driver_info = CX231XX_BOARD_CNXT_RDU_253S}, {USB_DEVICE(0x0572, 0x58A6), .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER}, {USB_DEVICE(0x0572, 0x589E), .driver_info = CX231XX_BOARD_CNXT_RDE_250}, {USB_DEVICE(0x0572, 0x58A0), .driver_info = CX231XX_BOARD_CNXT_RDU_250}, {USB_DEVICE(0x2040, 0xb110), .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL}, {USB_DEVICE(0x2040, 0xb111), .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC}, {USB_DEVICE(0x2040, 0xb120), .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER}, {USB_DEVICE(0x2040, 0xb130), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx}, {USB_DEVICE(0x2040, 0xb131), .driver_info = 
CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx}, /* Hauppauge WinTV-HVR-900-H */ {USB_DEVICE(0x2040, 0xb138), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx}, /* Hauppauge WinTV-HVR-901-H */ {USB_DEVICE(0x2040, 0xb139), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx}, {USB_DEVICE(0x2040, 0xb140), .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER}, {USB_DEVICE(0x2040, 0xc200), .driver_info = CX231XX_BOARD_HAUPPAUGE_USBLIVE2}, /* PCTV QuatroStick 521e */ {USB_DEVICE(0x2013, 0x0259), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx}, /* PCTV QuatroStick 522e */ {USB_DEVICE(0x2013, 0x025e), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx}, {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000, 0x4001), .driver_info = CX231XX_BOARD_PV_PLAYTV_USB_HYBRID}, {USB_DEVICE(USB_VID_PIXELVIEW, 0x5014), .driver_info = CX231XX_BOARD_PV_XCAPTURE_USB}, {USB_DEVICE(0x1b80, 0xe424), .driver_info = CX231XX_BOARD_KWORLD_UB430_USB_HYBRID}, {USB_DEVICE(0x1b80, 0xe421), .driver_info = CX231XX_BOARD_KWORLD_UB445_USB_HYBRID}, {USB_DEVICE(0x1f4d, 0x0237), .driver_info = CX231XX_BOARD_ICONBIT_U100}, {USB_DEVICE(0x0fd9, 0x0037), .driver_info = CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2}, {USB_DEVICE(0x1f4d, 0x0102), .driver_info = CX231XX_BOARD_OTG102}, {}, }; MODULE_DEVICE_TABLE(usb, cx231xx_id_table); /* cx231xx_tuner_callback * will be used to reset XC5000 tuner using GPIO pin */ int cx231xx_tuner_callback(void *ptr, int component, int command, int arg) { int rc = 0; struct cx231xx *dev = ptr; if (dev->tuner_type == TUNER_XC5000) { if (command == XC5000_TUNER_RESET) { dev_dbg(dev->dev, "Tuner CB: RESET: cmd %d : tuner type %d\n", command, dev->tuner_type); cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); msleep(10); cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 0); msleep(330); cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); msleep(10); } } else if (dev->tuner_type == TUNER_NXP_TDA18271) { switch (command) { case 
TDA18271_CALLBACK_CMD_AGC_ENABLE: if (dev->model == CX231XX_BOARD_PV_PLAYTV_USB_HYBRID) rc = cx231xx_set_agc_analog_digital_mux_select(dev, arg); break; default: rc = -EINVAL; break; } } return rc; } EXPORT_SYMBOL_GPL(cx231xx_tuner_callback); static void cx231xx_reset_out(struct cx231xx *dev) { cx231xx_set_gpio_value(dev, CX23417_RESET, 1); msleep(200); cx231xx_set_gpio_value(dev, CX23417_RESET, 0); msleep(200); cx231xx_set_gpio_value(dev, CX23417_RESET, 1); } static void cx231xx_enable_OSC(struct cx231xx *dev) { cx231xx_set_gpio_value(dev, CX23417_OSC_EN, 1); } static void cx231xx_sleep_s5h1432(struct cx231xx *dev) { cx231xx_set_gpio_value(dev, SLEEP_S5H1432, 0); } static inline void cx231xx_set_model(struct cx231xx *dev) { dev->board = cx231xx_boards[dev->model]; } /* Since cx231xx_pre_card_setup() requires a proper dev->model, * this won't work for boards with generic PCI IDs */ void cx231xx_pre_card_setup(struct cx231xx *dev) { cx231xx_set_model(dev); dev_info(dev->dev, "Identified as %s (card=%d)\n", dev->board.name, dev->model); /* set the direction for GPIO pins */ if (dev->board.tuner_gpio) { cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1); cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); } if (dev->board.tuner_sif_gpio >= 0) cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1); /* request some modules if any required */ /* set the mode to Analog mode initially */ cx231xx_set_mode(dev, CX231XX_ANALOG_MODE); /* Unlock device */ /* cx231xx_set_mode(dev, CX231XX_SUSPEND); */ } static void cx231xx_config_tuner(struct cx231xx *dev) { struct tuner_setup tun_setup; struct v4l2_frequency f; if (dev->tuner_type == TUNER_ABSENT) return; tun_setup.mode_mask = T_ANALOG_TV | T_RADIO; tun_setup.type = dev->tuner_type; tun_setup.addr = dev->tuner_addr; tun_setup.tuner_callback = cx231xx_tuner_callback; tuner_call(dev, tuner, s_type_addr, &tun_setup); #if 0 if (tun_setup.type == TUNER_XC5000) { static struct xc2028_ctrl ctrl = { .fname = 
XC5000_DEFAULT_FIRMWARE, .max_len = 64, .demod = 0; }; struct v4l2_priv_tun_config cfg = { .tuner = dev->tuner_type, .priv = &ctrl, }; tuner_call(dev, tuner, s_config, &cfg); } #endif /* configure tuner */ f.tuner = 0; f.type = V4L2_TUNER_ANALOG_TV; f.frequency = 9076; /* just a magic number */ dev->ctl_freq = f.frequency; call_all(dev, tuner, s_frequency, &f); } static int read_eeprom(struct cx231xx *dev, struct i2c_client *client, u8 *eedata, int len) { int ret = 0; u8 start_offset = 0; int len_todo = len; u8 *eedata_cur = eedata; int i; struct i2c_msg msg_write = { .addr = client->addr, .flags = 0, .buf = &start_offset, .len = 1 }; struct i2c_msg msg_read = { .addr = client->addr, .flags = I2C_M_RD }; /* start reading at offset 0 */ ret = i2c_transfer(client->adapter, &msg_write, 1); if (ret < 0) { dev_err(dev->dev, "Can't read eeprom\n"); return ret; } while (len_todo > 0) { msg_read.len = (len_todo > 64) ? 64 : len_todo; msg_read.buf = eedata_cur; ret = i2c_transfer(client->adapter, &msg_read, 1); if (ret < 0) { dev_err(dev->dev, "Can't read eeprom\n"); return ret; } eedata_cur += msg_read.len; len_todo -= msg_read.len; } for (i = 0; i + 15 < len; i += 16) dev_dbg(dev->dev, "i2c eeprom %02x: %*ph\n", i, 16, &eedata[i]); return 0; } void cx231xx_card_setup(struct cx231xx *dev) { cx231xx_set_model(dev); dev->tuner_type = cx231xx_boards[dev->model].tuner_type; if (cx231xx_boards[dev->model].tuner_addr) dev->tuner_addr = cx231xx_boards[dev->model].tuner_addr; /* request some modules */ if (dev->board.decoder == CX231XX_AVDECODER) { dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, cx231xx_get_i2c_adap(dev, I2C_0), "cx25840", 0x88 >> 1, NULL); if (dev->sd_cx25840 == NULL) dev_err(dev->dev, "cx25840 subdev registration failure\n"); cx25840_call(dev, core, load_fw); } /* Initialize the tuner */ if (dev->board.tuner_type != TUNER_ABSENT) { struct i2c_adapter *tuner_i2c = cx231xx_get_i2c_adap(dev, dev->board.tuner_i2c_master); dev->sd_tuner = 
v4l2_i2c_new_subdev(&dev->v4l2_dev, tuner_i2c, "tuner", dev->tuner_addr, NULL); if (dev->sd_tuner == NULL) dev_err(dev->dev, "tuner subdev registration failure\n"); else cx231xx_config_tuner(dev); } switch (dev->model) { case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx: case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx: { struct tveeprom tvee; static u8 eeprom[256]; struct i2c_client client; memset(&client, 0, sizeof(client)); client.adapter = cx231xx_get_i2c_adap(dev, I2C_1_MUX_1); client.addr = 0xa0 >> 1; read_eeprom(dev, &client, eeprom, sizeof(eeprom)); tveeprom_hauppauge_analog(&client, &tvee, eeprom + 0xc0); break; } } } /* * cx231xx_config() * inits registers with sane defaults */ int cx231xx_config(struct cx231xx *dev) { /* TBD need to add cx231xx specific code */ return 0; } /* * cx231xx_config_i2c() * configure i2c attached devices */ void cx231xx_config_i2c(struct cx231xx *dev) { /* u32 input = INPUT(dev->video_input)->vmux; */ call_all(dev, video, s_stream, 1); } /* * cx231xx_realease_resources() * unregisters the v4l2,i2c and usb devices * called when the device gets disconected or at module unload */ void cx231xx_release_resources(struct cx231xx *dev) { cx231xx_release_analog_resources(dev); cx231xx_remove_from_devlist(dev); cx231xx_ir_exit(dev); /* Release I2C buses */ cx231xx_dev_uninit(dev); /* delete v4l2 device */ v4l2_device_unregister(&dev->v4l2_dev); usb_put_dev(dev->udev); /* Mark device as unused */ clear_bit(dev->devno, &cx231xx_devused); } /* * cx231xx_init_dev() * allocates and inits the device structs, registers i2c bus and v4l device */ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, int minor) { int retval = -ENOMEM; unsigned int maxh, maxw; dev->udev = udev; mutex_init(&dev->lock); mutex_init(&dev->ctrl_urb_lock); mutex_init(&dev->gpio_i2c_lock); mutex_init(&dev->i2c_lock); spin_lock_init(&dev->video_mode.slock); spin_lock_init(&dev->vbi_mode.slock); spin_lock_init(&dev->sliced_cc_mode.slock); 
init_waitqueue_head(&dev->open); init_waitqueue_head(&dev->wait_frame); init_waitqueue_head(&dev->wait_stream); dev->cx231xx_read_ctrl_reg = cx231xx_read_ctrl_reg; dev->cx231xx_write_ctrl_reg = cx231xx_write_ctrl_reg; dev->cx231xx_send_usb_command = cx231xx_send_usb_command; dev->cx231xx_gpio_i2c_read = cx231xx_gpio_i2c_read; dev->cx231xx_gpio_i2c_write = cx231xx_gpio_i2c_write; /* Query cx231xx to find what pcb config it is related to */ retval = initialize_cx231xx(dev); if (retval < 0) { dev_err(dev->dev, "Failed to read PCB config\n"); return retval; } /*To workaround error number=-71 on EP0 for VideoGrabber, need set alt here.*/ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER || dev->model == CX231XX_BOARD_HAUPPAUGE_USBLIVE2) { cx231xx_set_alt_setting(dev, INDEX_VIDEO, 3); cx231xx_set_alt_setting(dev, INDEX_VANC, 1); } /* Cx231xx pre card setup */ cx231xx_pre_card_setup(dev); retval = cx231xx_config(dev); if (retval) { dev_err(dev->dev, "error configuring device\n"); return -ENOMEM; } /* set default norm */ dev->norm = dev->board.norm; /* register i2c bus */ retval = cx231xx_dev_init(dev); if (retval) { dev_err(dev->dev, "%s: cx231xx_i2c_register - errCode [%d]!\n", __func__, retval); goto err_dev_init; } /* Do board specific init */ cx231xx_card_setup(dev); /* configure the device */ cx231xx_config_i2c(dev); maxw = norm_maxw(dev); maxh = norm_maxh(dev); /* set default image size */ dev->width = maxw; dev->height = maxh; dev->interlaced = 0; dev->video_input = 0; retval = cx231xx_config(dev); if (retval) { dev_err(dev->dev, "%s: cx231xx_config - errCode [%d]!\n", __func__, retval); goto err_dev_init; } /* init video dma queues */ INIT_LIST_HEAD(&dev->video_mode.vidq.active); INIT_LIST_HEAD(&dev->video_mode.vidq.queued); /* init vbi dma queues */ INIT_LIST_HEAD(&dev->vbi_mode.vidq.active); INIT_LIST_HEAD(&dev->vbi_mode.vidq.queued); /* Reset other chips required if they are tied up with GPIO pins */ cx231xx_add_into_devlist(dev); if (dev->board.has_417) { 
dev_info(dev->dev, "attach 417 %d\n", dev->model); if (cx231xx_417_register(dev) < 0) { dev_err(dev->dev, "%s() Failed to register 417 on VID_B\n", __func__); } } retval = cx231xx_register_analog_devices(dev); if (retval) { cx231xx_release_analog_resources(dev); goto err_analog; } cx231xx_ir_init(dev); cx231xx_init_extension(dev); return 0; err_analog: cx231xx_remove_from_devlist(dev); err_dev_init: cx231xx_dev_uninit(dev); return retval; } #if defined(CONFIG_MODULES) && defined(MODULE) static void request_module_async(struct work_struct *work) { struct cx231xx *dev = container_of(work, struct cx231xx, request_module_wk); if (dev->has_alsa_audio) request_module("cx231xx-alsa"); if (dev->board.has_dvb) request_module("cx231xx-dvb"); } static void request_modules(struct cx231xx *dev) { INIT_WORK(&dev->request_module_wk, request_module_async); schedule_work(&dev->request_module_wk); } static void flush_request_modules(struct cx231xx *dev) { flush_work(&dev->request_module_wk); } #else #define request_modules(dev) #define flush_request_modules(dev) #endif /* CONFIG_MODULES */ static int cx231xx_init_v4l2(struct cx231xx *dev, struct usb_device *udev, struct usb_interface *interface, int isoc_pipe) { struct usb_interface *uif; int i, idx; /* Video Init */ /* compute alternate max packet sizes for video */ idx = dev->current_pcb_config.hs_config_info[0].interface_info.video_index + 1; if (idx >= dev->max_iad_interface_count) { dev_err(dev->dev, "Video PCB interface #%d doesn't exist\n", idx); return -ENODEV; } uif = udev->actconfig->interface[idx]; dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress; dev->video_mode.num_alt = uif->num_altsetting; dev_info(dev->dev, "video EndPoint Addr 0x%x, Alternate settings: %i\n", dev->video_mode.end_point_addr, dev->video_mode.num_alt); dev->video_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->video_mode.num_alt, GFP_KERNEL); if (dev->video_mode.alt_max_pkt_size == NULL) 
return -ENOMEM; for (i = 0; i < dev->video_mode.num_alt; i++) { u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize); dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); dev_dbg(dev->dev, "Alternate setting %i, max size= %i\n", i, dev->video_mode.alt_max_pkt_size[i]); } /* VBI Init */ idx = dev->current_pcb_config.hs_config_info[0].interface_info.vanc_index + 1; if (idx >= dev->max_iad_interface_count) { dev_err(dev->dev, "VBI PCB interface #%d doesn't exist\n", idx); return -ENODEV; } uif = udev->actconfig->interface[idx]; dev->vbi_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc. bEndpointAddress; dev->vbi_mode.num_alt = uif->num_altsetting; dev_info(dev->dev, "VBI EndPoint Addr 0x%x, Alternate settings: %i\n", dev->vbi_mode.end_point_addr, dev->vbi_mode.num_alt); /* compute alternate max packet sizes for vbi */ dev->vbi_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->vbi_mode.num_alt, GFP_KERNEL); if (dev->vbi_mode.alt_max_pkt_size == NULL) return -ENOMEM; for (i = 0; i < dev->vbi_mode.num_alt; i++) { u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. desc.wMaxPacketSize); dev->vbi_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); dev_dbg(dev->dev, "Alternate setting %i, max size= %i\n", i, dev->vbi_mode.alt_max_pkt_size[i]); } /* Sliced CC VBI init */ /* compute alternate max packet sizes for sliced CC */ idx = dev->current_pcb_config.hs_config_info[0].interface_info.hanc_index + 1; if (idx >= dev->max_iad_interface_count) { dev_err(dev->dev, "Sliced CC PCB interface #%d doesn't exist\n", idx); return -ENODEV; } uif = udev->actconfig->interface[idx]; dev->sliced_cc_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc. 
bEndpointAddress; dev->sliced_cc_mode.num_alt = uif->num_altsetting; dev_info(dev->dev, "sliced CC EndPoint Addr 0x%x, Alternate settings: %i\n", dev->sliced_cc_mode.end_point_addr, dev->sliced_cc_mode.num_alt); dev->sliced_cc_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->sliced_cc_mode.num_alt, GFP_KERNEL); if (dev->sliced_cc_mode.alt_max_pkt_size == NULL) return -ENOMEM; for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) { u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. desc.wMaxPacketSize); dev->sliced_cc_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); dev_dbg(dev->dev, "Alternate setting %i, max size= %i\n", i, dev->sliced_cc_mode.alt_max_pkt_size[i]); } return 0; } /* * cx231xx_usb_probe() * checks for supported devices */ static int cx231xx_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev; struct device *d = &interface->dev; struct usb_interface *uif; struct cx231xx *dev = NULL; int retval = -ENODEV; int nr = 0, ifnum; int i, isoc_pipe = 0; char *speed; u8 idx; struct usb_interface_assoc_descriptor *assoc_desc; ifnum = interface->altsetting[0].desc.bInterfaceNumber; /* * Interface number 0 - IR interface (handled by mceusb driver) * Interface number 1 - AV interface (handled by this driver) */ if (ifnum != 1) return -ENODEV; /* Check to see next free device and mark as used */ do { nr = find_first_zero_bit(&cx231xx_devused, CX231XX_MAXBOARDS); if (nr >= CX231XX_MAXBOARDS) { /* No free device slots */ dev_err(d, "Supports only %i devices.\n", CX231XX_MAXBOARDS); return -ENOMEM; } } while (test_and_set_bit(nr, &cx231xx_devused)); udev = usb_get_dev(interface_to_usbdev(interface)); /* allocate memory for our device state and initialize it */ dev = devm_kzalloc(&udev->dev, sizeof(*dev), GFP_KERNEL); if (dev == NULL) { retval = -ENOMEM; goto err_if; } snprintf(dev->name, 29, "cx231xx #%d", nr); dev->devno = nr; dev->model = id->driver_info; 
dev->video_mode.alt = -1; dev->dev = d; dev->interface_count++; /* reset gpio dir and value */ dev->gpio_dir = 0; dev->gpio_val = 0; dev->xc_fw_load_done = 0; dev->has_alsa_audio = 1; dev->power_mode = -1; atomic_set(&dev->devlist_count, 0); /* 0 - vbi ; 1 -sliced cc mode */ dev->vbi_or_sliced_cc_mode = 0; /* get maximum no.of IAD interfaces */ dev->max_iad_interface_count = udev->config->desc.bNumInterfaces; /* init CIR module TBD */ /*mode_tv: digital=1 or analog=0*/ dev->mode_tv = 0; dev->USE_ISO = transfer_mode; switch (udev->speed) { case USB_SPEED_LOW: speed = "1.5"; break; case USB_SPEED_UNKNOWN: case USB_SPEED_FULL: speed = "12"; break; case USB_SPEED_HIGH: speed = "480"; break; default: speed = "unknown"; } dev_info(d, "New device %s %s @ %s Mbps (%04x:%04x) with %d interfaces\n", udev->manufacturer ? udev->manufacturer : "", udev->product ? udev->product : "", speed, le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), dev->max_iad_interface_count); /* increment interface count */ dev->interface_count++; /* get device number */ nr = dev->devno; assoc_desc = udev->actconfig->intf_assoc[0]; if (assoc_desc->bFirstInterface != ifnum) { dev_err(d, "Not found matching IAD interface\n"); retval = -ENODEV; goto err_if; } dev_dbg(d, "registering interface %d\n", ifnum); /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); /* Create v4l2 device */ retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev); if (retval) { dev_err(d, "v4l2_device_register failed\n"); goto err_v4l2; } /* allocate device struct */ retval = cx231xx_init_dev(dev, udev, nr); if (retval) goto err_init; retval = cx231xx_init_v4l2(dev, udev, interface, isoc_pipe); if (retval) goto err_init; if (dev->current_pcb_config.ts1_source != 0xff) { /* compute alternate max packet sizes for TS1 */ idx = dev->current_pcb_config.hs_config_info[0].interface_info.ts1_index + 1; if (idx >= dev->max_iad_interface_count) { dev_err(d, "TS1 PCB 
interface #%d doesn't exist\n", idx); retval = -ENODEV; goto err_video_alt; } uif = udev->actconfig->interface[idx]; dev->ts1_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe]. desc.bEndpointAddress; dev->ts1_mode.num_alt = uif->num_altsetting; dev_info(d, "TS EndPoint Addr 0x%x, Alternate settings: %i\n", dev->ts1_mode.end_point_addr, dev->ts1_mode.num_alt); dev->ts1_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->ts1_mode.num_alt, GFP_KERNEL); if (dev->ts1_mode.alt_max_pkt_size == NULL) { retval = -ENOMEM; goto err_video_alt; } for (i = 0; i < dev->ts1_mode.num_alt; i++) { u16 tmp = le16_to_cpu(uif->altsetting[i]. endpoint[isoc_pipe].desc. wMaxPacketSize); dev->ts1_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); dev_dbg(d, "Alternate setting %i, max size= %i\n", i, dev->ts1_mode.alt_max_pkt_size[i]); } } if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER) { cx231xx_enable_OSC(dev); cx231xx_reset_out(dev); cx231xx_set_alt_setting(dev, INDEX_VIDEO, 3); } if (dev->model == CX231XX_BOARD_CNXT_RDE_253S) cx231xx_sleep_s5h1432(dev); /* load other modules required */ request_modules(dev); return 0; err_video_alt: /* cx231xx_uninit_dev: */ cx231xx_close_extension(dev); cx231xx_ir_exit(dev); cx231xx_release_analog_resources(dev); cx231xx_417_unregister(dev); cx231xx_remove_from_devlist(dev); cx231xx_dev_uninit(dev); err_init: v4l2_device_unregister(&dev->v4l2_dev); err_v4l2: usb_set_intfdata(interface, NULL); err_if: usb_put_dev(udev); clear_bit(nr, &cx231xx_devused); return retval; } /* * cx231xx_usb_disconnect() * called when the device gets diconencted * video device will be unregistered on v4l2_close in case it is still open */ static void cx231xx_usb_disconnect(struct usb_interface *interface) { struct cx231xx *dev; dev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); if (!dev) return; if (!dev->udev) return; dev->state |= DEV_DISCONNECTED; flush_request_modules(dev); /* wait until all current v4l2 
io is finished then deallocate resources */ mutex_lock(&dev->lock); wake_up_interruptible_all(&dev->open); if (dev->users) { dev_warn(dev->dev, "device %s is open! Deregistration and memory deallocation are deferred on close.\n", video_device_node_name(dev->vdev)); /* Even having users, it is safe to remove the RC i2c driver */ cx231xx_ir_exit(dev); if (dev->USE_ISO) cx231xx_uninit_isoc(dev); else cx231xx_uninit_bulk(dev); wake_up_interruptible(&dev->wait_frame); wake_up_interruptible(&dev->wait_stream); } else { } cx231xx_close_extension(dev); mutex_unlock(&dev->lock); if (!dev->users) cx231xx_release_resources(dev); } static struct usb_driver cx231xx_usb_driver = { .name = "cx231xx", .probe = cx231xx_usb_probe, .disconnect = cx231xx_usb_disconnect, .id_table = cx231xx_id_table, }; module_usb_driver(cx231xx_usb_driver);
gpl-2.0
psyke83/android_kernel_huawei_msm7x25
net/ipv4/sysctl_net_ipv4.c
399
21497
/* * sysctl_net_ipv4.c: sysctl interface to net IPV4 subsystem. * * Begun April 1, 1996, Mike Shaver. * Added /proc/sys/net/ipv4 directory entry (empty =) ). [MS] */ #include <linux/mm.h> #include <linux/module.h> #include <linux/sysctl.h> #include <linux/igmp.h> #include <linux/inetdevice.h> #include <linux/seqlock.h> #include <linux/init.h> #include <net/snmp.h> #include <net/icmp.h> #include <net/ip.h> #include <net/route.h> #include <net/tcp.h> #include <net/udp.h> #include <net/cipso_ipv4.h> #include <net/inet_frag.h> static int zero; static int tcp_retr1_max = 255; static int ip_local_port_range_min[] = { 1, 1 }; static int ip_local_port_range_max[] = { 65535, 65535 }; /* Update system visible IP port range */ static void set_local_port_range(int range[2]) { write_seqlock(&sysctl_local_ports.lock); sysctl_local_ports.range[0] = range[0]; sysctl_local_ports.range[1] = range[1]; write_sequnlock(&sysctl_local_ports.lock); } /* Validate changes from /proc interface. */ static int ipv4_local_port_range(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; int range[2]; ctl_table tmp = { .data = &range, .maxlen = sizeof(range), .mode = table->mode, .extra1 = &ip_local_port_range_min, .extra2 = &ip_local_port_range_max, }; inet_get_local_port_range(range, range + 1); ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); if (write && ret == 0) { if (range[1] < range[0]) ret = -EINVAL; else set_local_port_range(range); } return ret; } /* Validate changes from sysctl interface. 
*/ static int ipv4_sysctl_local_port_range(ctl_table *table, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { int ret; int range[2]; ctl_table tmp = { .data = &range, .maxlen = sizeof(range), .mode = table->mode, .extra1 = &ip_local_port_range_min, .extra2 = &ip_local_port_range_max, }; inet_get_local_port_range(range, range + 1); ret = sysctl_intvec(&tmp, oldval, oldlenp, newval, newlen); if (ret == 0 && newval && newlen) { if (range[1] < range[0]) ret = -EINVAL; else set_local_port_range(range); } return ret; } static int proc_tcp_congestion_control(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { char val[TCP_CA_NAME_MAX]; ctl_table tbl = { .data = val, .maxlen = TCP_CA_NAME_MAX, }; int ret; tcp_get_default_congestion_control(val); ret = proc_dostring(&tbl, write, buffer, lenp, ppos); if (write && ret == 0) ret = tcp_set_default_congestion_control(val); return ret; } static int sysctl_tcp_congestion_control(ctl_table *table, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { char val[TCP_CA_NAME_MAX]; ctl_table tbl = { .data = val, .maxlen = TCP_CA_NAME_MAX, }; int ret; tcp_get_default_congestion_control(val); ret = sysctl_string(&tbl, oldval, oldlenp, newval, newlen); if (ret == 1 && newval && newlen) ret = tcp_set_default_congestion_control(val); return ret; } static int proc_tcp_available_congestion_control(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, }; int ret; tbl.data = kmalloc(tbl.maxlen, GFP_USER); if (!tbl.data) return -ENOMEM; tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX); ret = proc_dostring(&tbl, write, buffer, lenp, ppos); kfree(tbl.data); return ret; } static int proc_allowed_congestion_control(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX }; int ret; tbl.data = kmalloc(tbl.maxlen, 
GFP_USER); if (!tbl.data) return -ENOMEM; tcp_get_allowed_congestion_control(tbl.data, tbl.maxlen); ret = proc_dostring(&tbl, write, buffer, lenp, ppos); if (write && ret == 0) ret = tcp_set_allowed_congestion_control(tbl.data); kfree(tbl.data); return ret; } static int strategy_allowed_congestion_control(ctl_table *table, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX }; int ret; tbl.data = kmalloc(tbl.maxlen, GFP_USER); if (!tbl.data) return -ENOMEM; tcp_get_available_congestion_control(tbl.data, tbl.maxlen); ret = sysctl_string(&tbl, oldval, oldlenp, newval, newlen); if (ret == 1 && newval && newlen) ret = tcp_set_allowed_congestion_control(tbl.data); kfree(tbl.data); return ret; } static struct ctl_table ipv4_table[] = { { .ctl_name = NET_IPV4_TCP_TIMESTAMPS, .procname = "tcp_timestamps", .data = &sysctl_tcp_timestamps, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_WINDOW_SCALING, .procname = "tcp_window_scaling", .data = &sysctl_tcp_window_scaling, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_SACK, .procname = "tcp_sack", .data = &sysctl_tcp_sack, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_RETRANS_COLLAPSE, .procname = "tcp_retrans_collapse", .data = &sysctl_tcp_retrans_collapse, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_DEFAULT_TTL, .procname = "ip_default_ttl", .data = &sysctl_ip_default_ttl, .maxlen = sizeof(int), .mode = 0644, .proc_handler = ipv4_doint_and_flush, .strategy = ipv4_doint_and_flush_strategy, .extra2 = &init_net, }, { .ctl_name = NET_IPV4_NO_PMTU_DISC, .procname = "ip_no_pmtu_disc", .data = &ipv4_config.no_pmtu_disc, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_NONLOCAL_BIND, .procname = "ip_nonlocal_bind", .data 
= &sysctl_ip_nonlocal_bind, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_SYN_RETRIES, .procname = "tcp_syn_retries", .data = &sysctl_tcp_syn_retries, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_SYNACK_RETRIES, .procname = "tcp_synack_retries", .data = &sysctl_tcp_synack_retries, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_MAX_ORPHANS, .procname = "tcp_max_orphans", .data = &sysctl_tcp_max_orphans, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_MAX_TW_BUCKETS, .procname = "tcp_max_tw_buckets", .data = &tcp_death_row.sysctl_max_tw_buckets, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_DYNADDR, .procname = "ip_dynaddr", .data = &sysctl_ip_dynaddr, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_KEEPALIVE_TIME, .procname = "tcp_keepalive_time", .data = &sysctl_tcp_keepalive_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, .strategy = sysctl_jiffies }, { .ctl_name = NET_IPV4_TCP_KEEPALIVE_PROBES, .procname = "tcp_keepalive_probes", .data = &sysctl_tcp_keepalive_probes, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_TCP_KEEPALIVE_INTVL, .procname = "tcp_keepalive_intvl", .data = &sysctl_tcp_keepalive_intvl, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, .strategy = sysctl_jiffies }, { .ctl_name = NET_IPV4_TCP_RETRIES1, .procname = "tcp_retries1", .data = &sysctl_tcp_retries1, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra2 = &tcp_retr1_max }, { .ctl_name = NET_IPV4_TCP_RETRIES2, .procname = "tcp_retries2", .data = &sysctl_tcp_retries2, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = 
NET_IPV4_TCP_FIN_TIMEOUT, .procname = "tcp_fin_timeout", .data = &sysctl_tcp_fin_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, .strategy = sysctl_jiffies }, #ifdef CONFIG_SYN_COOKIES { .ctl_name = NET_TCP_SYNCOOKIES, .procname = "tcp_syncookies", .data = &sysctl_tcp_syncookies, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif { .ctl_name = NET_TCP_TW_RECYCLE, .procname = "tcp_tw_recycle", .data = &tcp_death_row.sysctl_tw_recycle, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_ABORT_ON_OVERFLOW, .procname = "tcp_abort_on_overflow", .data = &sysctl_tcp_abort_on_overflow, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_STDURG, .procname = "tcp_stdurg", .data = &sysctl_tcp_stdurg, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_RFC1337, .procname = "tcp_rfc1337", .data = &sysctl_tcp_rfc1337, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_MAX_SYN_BACKLOG, .procname = "tcp_max_syn_backlog", .data = &sysctl_max_syn_backlog, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_LOCAL_PORT_RANGE, .procname = "ip_local_port_range", .data = &sysctl_local_ports.range, .maxlen = sizeof(sysctl_local_ports.range), .mode = 0644, .proc_handler = ipv4_local_port_range, .strategy = ipv4_sysctl_local_port_range, }, #ifdef CONFIG_IP_MULTICAST { .ctl_name = NET_IPV4_IGMP_MAX_MEMBERSHIPS, .procname = "igmp_max_memberships", .data = &sysctl_igmp_max_memberships, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif { .ctl_name = NET_IPV4_IGMP_MAX_MSF, .procname = "igmp_max_msf", .data = &sysctl_igmp_max_msf, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_INET_PEER_THRESHOLD, .procname = "inet_peer_threshold", .data = &inet_peer_threshold, 
.maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_INET_PEER_MINTTL, .procname = "inet_peer_minttl", .data = &inet_peer_minttl, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, .strategy = sysctl_jiffies }, { .ctl_name = NET_IPV4_INET_PEER_MAXTTL, .procname = "inet_peer_maxttl", .data = &inet_peer_maxttl, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, .strategy = sysctl_jiffies }, { .ctl_name = NET_IPV4_INET_PEER_GC_MINTIME, .procname = "inet_peer_gc_mintime", .data = &inet_peer_gc_mintime, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, .strategy = sysctl_jiffies }, { .ctl_name = NET_IPV4_INET_PEER_GC_MAXTIME, .procname = "inet_peer_gc_maxtime", .data = &inet_peer_gc_maxtime, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, .strategy = sysctl_jiffies }, { .ctl_name = NET_TCP_ORPHAN_RETRIES, .procname = "tcp_orphan_retries", .data = &sysctl_tcp_orphan_retries, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_FACK, .procname = "tcp_fack", .data = &sysctl_tcp_fack, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_REORDERING, .procname = "tcp_reordering", .data = &sysctl_tcp_reordering, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_ECN, .procname = "tcp_ecn", .data = &sysctl_tcp_ecn, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_DSACK, .procname = "tcp_dsack", .data = &sysctl_tcp_dsack, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_MEM, .procname = "tcp_mem", .data = &sysctl_tcp_mem, .maxlen = sizeof(sysctl_tcp_mem), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_WMEM, .procname = "tcp_wmem", .data = &sysctl_tcp_wmem, .maxlen = sizeof(sysctl_tcp_wmem), .mode = 0644, 
.proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_RMEM, .procname = "tcp_rmem", .data = &sysctl_tcp_rmem, .maxlen = sizeof(sysctl_tcp_rmem), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_APP_WIN, .procname = "tcp_app_win", .data = &sysctl_tcp_app_win, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_ADV_WIN_SCALE, .procname = "tcp_adv_win_scale", .data = &sysctl_tcp_adv_win_scale, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_TW_REUSE, .procname = "tcp_tw_reuse", .data = &sysctl_tcp_tw_reuse, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_FRTO, .procname = "tcp_frto", .data = &sysctl_tcp_frto, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_FRTO_RESPONSE, .procname = "tcp_frto_response", .data = &sysctl_tcp_frto_response, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_LOW_LATENCY, .procname = "tcp_low_latency", .data = &sysctl_tcp_low_latency, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_TCP_NO_METRICS_SAVE, .procname = "tcp_no_metrics_save", .data = &sysctl_tcp_nometrics_save, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .ctl_name = NET_TCP_MODERATE_RCVBUF, .procname = "tcp_moderate_rcvbuf", .data = &sysctl_tcp_moderate_rcvbuf, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .ctl_name = NET_TCP_TSO_WIN_DIVISOR, .procname = "tcp_tso_win_divisor", .data = &sysctl_tcp_tso_win_divisor, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .ctl_name = NET_TCP_CONG_CONTROL, .procname = "tcp_congestion_control", .mode = 0644, .maxlen = TCP_CA_NAME_MAX, .proc_handler = proc_tcp_congestion_control, .strategy = sysctl_tcp_congestion_control, }, { .ctl_name = NET_TCP_ABC, .procname = "tcp_abc", .data = 
&sysctl_tcp_abc, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .ctl_name = NET_TCP_MTU_PROBING, .procname = "tcp_mtu_probing", .data = &sysctl_tcp_mtu_probing, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .ctl_name = NET_TCP_BASE_MSS, .procname = "tcp_base_mss", .data = &sysctl_tcp_base_mss, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .ctl_name = NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, .procname = "tcp_workaround_signed_windows", .data = &sysctl_tcp_workaround_signed_windows, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #ifdef CONFIG_NET_DMA { .ctl_name = NET_TCP_DMA_COPYBREAK, .procname = "tcp_dma_copybreak", .data = &sysctl_tcp_dma_copybreak, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif { .ctl_name = NET_TCP_SLOW_START_AFTER_IDLE, .procname = "tcp_slow_start_after_idle", .data = &sysctl_tcp_slow_start_after_idle, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #ifdef CONFIG_NETLABEL { .ctl_name = NET_CIPSOV4_CACHE_ENABLE, .procname = "cipso_cache_enable", .data = &cipso_v4_cache_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .ctl_name = NET_CIPSOV4_CACHE_BUCKET_SIZE, .procname = "cipso_cache_bucket_size", .data = &cipso_v4_cache_bucketsize, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .ctl_name = NET_CIPSOV4_RBM_OPTFMT, .procname = "cipso_rbm_optfmt", .data = &cipso_v4_rbm_optfmt, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .ctl_name = NET_CIPSOV4_RBM_STRICTVALID, .procname = "cipso_rbm_strictvalid", .data = &cipso_v4_rbm_strictvalid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif /* CONFIG_NETLABEL */ { .procname = "tcp_available_congestion_control", .maxlen = TCP_CA_BUF_MAX, .mode = 0444, .proc_handler = proc_tcp_available_congestion_control, }, { .ctl_name = 
NET_TCP_ALLOWED_CONG_CONTROL, .procname = "tcp_allowed_congestion_control", .maxlen = TCP_CA_BUF_MAX, .mode = 0644, .proc_handler = proc_allowed_congestion_control, .strategy = strategy_allowed_congestion_control, }, { .ctl_name = NET_TCP_MAX_SSTHRESH, .procname = "tcp_max_ssthresh", .data = &sysctl_tcp_max_ssthresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .ctl_name = CTL_UNNUMBERED, .procname = "udp_mem", .data = &sysctl_udp_mem, .maxlen = sizeof(sysctl_udp_mem), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &zero }, { .ctl_name = CTL_UNNUMBERED, .procname = "udp_rmem_min", .data = &sysctl_udp_rmem_min, .maxlen = sizeof(sysctl_udp_rmem_min), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &zero }, { .ctl_name = CTL_UNNUMBERED, .procname = "udp_wmem_min", .data = &sysctl_udp_wmem_min, .maxlen = sizeof(sysctl_udp_wmem_min), .mode = 0644, .proc_handler = proc_dointvec_minmax, .strategy = sysctl_intvec, .extra1 = &zero }, { .ctl_name = 0 } }; static struct ctl_table ipv4_net_table[] = { { .ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_ALL, .procname = "icmp_echo_ignore_all", .data = &init_net.ipv4.sysctl_icmp_echo_ignore_all, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, .procname = "icmp_echo_ignore_broadcasts", .data = &init_net.ipv4.sysctl_icmp_echo_ignore_broadcasts, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, .procname = "icmp_ignore_bogus_error_responses", .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, .procname = "icmp_errors_use_inbound_ifaddr", .data = &init_net.ipv4.sysctl_icmp_errors_use_inbound_ifaddr, .maxlen = sizeof(int), .mode = 0644, .proc_handler 
= proc_dointvec }, { .ctl_name = NET_IPV4_ICMP_RATELIMIT, .procname = "icmp_ratelimit", .data = &init_net.ipv4.sysctl_icmp_ratelimit, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, .strategy = sysctl_ms_jiffies }, { .ctl_name = NET_IPV4_ICMP_RATEMASK, .procname = "icmp_ratemask", .data = &init_net.ipv4.sysctl_icmp_ratemask, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .ctl_name = CTL_UNNUMBERED, .procname = "rt_cache_rebuild_count", .data = &init_net.ipv4.sysctl_rt_cache_rebuild_count, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { } }; struct ctl_path net_ipv4_ctl_path[] = { { .procname = "net", .ctl_name = CTL_NET, }, { .procname = "ipv4", .ctl_name = NET_IPV4, }, { }, }; EXPORT_SYMBOL_GPL(net_ipv4_ctl_path); static __net_init int ipv4_sysctl_init_net(struct net *net) { struct ctl_table *table; table = ipv4_net_table; if (net != &init_net) { table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL); if (table == NULL) goto err_alloc; table[0].data = &net->ipv4.sysctl_icmp_echo_ignore_all; table[1].data = &net->ipv4.sysctl_icmp_echo_ignore_broadcasts; table[2].data = &net->ipv4.sysctl_icmp_ignore_bogus_error_responses; table[3].data = &net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr; table[4].data = &net->ipv4.sysctl_icmp_ratelimit; table[5].data = &net->ipv4.sysctl_icmp_ratemask; table[6].data = &net->ipv4.sysctl_rt_cache_rebuild_count; } net->ipv4.sysctl_rt_cache_rebuild_count = 4; net->ipv4.ipv4_hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); if (net->ipv4.ipv4_hdr == NULL) goto err_reg; return 0; err_reg: if (net != &init_net) kfree(table); err_alloc: return -ENOMEM; } static __net_exit void ipv4_sysctl_exit_net(struct net *net) { struct ctl_table *table; table = net->ipv4.ipv4_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.ipv4_hdr); kfree(table); } static __net_initdata struct pernet_operations ipv4_sysctl_ops = { .init = ipv4_sysctl_init_net, 
.exit = ipv4_sysctl_exit_net, }; static __init int sysctl_ipv4_init(void) { struct ctl_table_header *hdr; hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table); if (hdr == NULL) return -ENOMEM; if (register_pernet_subsys(&ipv4_sysctl_ops)) { unregister_sysctl_table(hdr); return -ENOMEM; } return 0; } __initcall(sysctl_ipv4_init);
gpl-2.0
henrix/rpi-linux
drivers/irqchip/irq-bcm7038-l1.c
399
8384
/* * Broadcom BCM7038 style Level 1 interrupt controller driver * * Copyright (C) 2014 Broadcom Corporation * Author: Kevin Cernekee * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitops.h> #include <linux/kconfig.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/types.h> #include <linux/irqchip/chained_irq.h> #include "irqchip.h" #define IRQS_PER_WORD 32 #define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4) #define MAX_WORDS 8 struct bcm7038_l1_cpu; struct bcm7038_l1_chip { raw_spinlock_t lock; unsigned int n_words; struct irq_domain *domain; struct bcm7038_l1_cpu *cpus[NR_CPUS]; u8 affinity[MAX_WORDS * IRQS_PER_WORD]; }; struct bcm7038_l1_cpu { void __iomem *map_base; u32 mask_cache[0]; }; /* * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another: * * 7038: * 0x1000_1400: W0_STATUS * 0x1000_1404: W1_STATUS * 0x1000_1408: W0_MASK_STATUS * 0x1000_140c: W1_MASK_STATUS * 0x1000_1410: W0_MASK_SET * 0x1000_1414: W1_MASK_SET * 0x1000_1418: W0_MASK_CLEAR * 0x1000_141c: W1_MASK_CLEAR * * 7445: * 0xf03e_1500: W0_STATUS * 0xf03e_1504: W1_STATUS * 0xf03e_1508: W2_STATUS * 0xf03e_150c: W3_STATUS * 0xf03e_1510: W4_STATUS * 0xf03e_1514: W0_MASK_STATUS * 0xf03e_1518: W1_MASK_STATUS * [...] 
*/ static inline unsigned int reg_status(struct bcm7038_l1_chip *intc, unsigned int word) { return (0 * intc->n_words + word) * sizeof(u32); } static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc, unsigned int word) { return (1 * intc->n_words + word) * sizeof(u32); } static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc, unsigned int word) { return (2 * intc->n_words + word) * sizeof(u32); } static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip *intc, unsigned int word) { return (3 * intc->n_words + word) * sizeof(u32); } static inline u32 l1_readl(void __iomem *reg) { if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) return ioread32be(reg); else return readl(reg); } static inline void l1_writel(u32 val, void __iomem *reg) { if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) iowrite32be(val, reg); else writel(val, reg); } static void bcm7038_l1_irq_handle(unsigned int irq, struct irq_desc *desc) { struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc); struct bcm7038_l1_cpu *cpu; struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int idx; #ifdef CONFIG_SMP cpu = intc->cpus[cpu_logical_map(smp_processor_id())]; #else cpu = intc->cpus[0]; #endif chained_irq_enter(chip, desc); for (idx = 0; idx < intc->n_words; idx++) { int base = idx * IRQS_PER_WORD; unsigned long pending, flags; int hwirq; raw_spin_lock_irqsave(&intc->lock, flags); pending = l1_readl(cpu->map_base + reg_status(intc, idx)) & ~cpu->mask_cache[idx]; raw_spin_unlock_irqrestore(&intc->lock, flags); for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { generic_handle_irq(irq_find_mapping(intc->domain, base + hwirq)); } } chained_irq_exit(chip, desc); } static void __bcm7038_l1_unmask(struct irq_data *d, unsigned int cpu_idx) { struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); u32 word = d->hwirq / IRQS_PER_WORD; u32 mask = BIT(d->hwirq % IRQS_PER_WORD); intc->cpus[cpu_idx]->mask_cache[word] &= ~mask; 
l1_writel(mask, intc->cpus[cpu_idx]->map_base + reg_mask_clr(intc, word)); } static void __bcm7038_l1_mask(struct irq_data *d, unsigned int cpu_idx) { struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); u32 word = d->hwirq / IRQS_PER_WORD; u32 mask = BIT(d->hwirq % IRQS_PER_WORD); intc->cpus[cpu_idx]->mask_cache[word] |= mask; l1_writel(mask, intc->cpus[cpu_idx]->map_base + reg_mask_set(intc, word)); } static void bcm7038_l1_unmask(struct irq_data *d) { struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); unsigned long flags; raw_spin_lock_irqsave(&intc->lock, flags); __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]); raw_spin_unlock_irqrestore(&intc->lock, flags); } static void bcm7038_l1_mask(struct irq_data *d) { struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); unsigned long flags; raw_spin_lock_irqsave(&intc->lock, flags); __bcm7038_l1_mask(d, intc->affinity[d->hwirq]); raw_spin_unlock_irqrestore(&intc->lock, flags); } static int bcm7038_l1_set_affinity(struct irq_data *d, const struct cpumask *dest, bool force) { struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); unsigned long flags; irq_hw_number_t hw = d->hwirq; u32 word = hw / IRQS_PER_WORD; u32 mask = BIT(hw % IRQS_PER_WORD); unsigned int first_cpu = cpumask_any_and(dest, cpu_online_mask); bool was_disabled; raw_spin_lock_irqsave(&intc->lock, flags); was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] & mask); __bcm7038_l1_mask(d, intc->affinity[hw]); intc->affinity[hw] = first_cpu; if (!was_disabled) __bcm7038_l1_unmask(d, first_cpu); raw_spin_unlock_irqrestore(&intc->lock, flags); return 0; } static int __init bcm7038_l1_init_one(struct device_node *dn, unsigned int idx, struct bcm7038_l1_chip *intc) { struct resource res; resource_size_t sz; struct bcm7038_l1_cpu *cpu; unsigned int i, n_words, parent_irq; if (of_address_to_resource(dn, idx, &res)) return -EINVAL; sz = resource_size(&res); n_words = sz / REG_BYTES_PER_IRQ_WORD; if (n_words > 
MAX_WORDS) return -EINVAL; else if (!intc->n_words) intc->n_words = n_words; else if (intc->n_words != n_words) return -EINVAL; cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), GFP_KERNEL); if (!cpu) return -ENOMEM; cpu->map_base = ioremap(res.start, sz); if (!cpu->map_base) return -ENOMEM; for (i = 0; i < n_words; i++) { l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i)); cpu->mask_cache[i] = 0xffffffff; } parent_irq = irq_of_parse_and_map(dn, idx); if (!parent_irq) { pr_err("failed to map parent interrupt %d\n", parent_irq); return -EINVAL; } irq_set_handler_data(parent_irq, intc); irq_set_chained_handler(parent_irq, bcm7038_l1_irq_handle); return 0; } static struct irq_chip bcm7038_l1_irq_chip = { .name = "bcm7038-l1", .irq_mask = bcm7038_l1_mask, .irq_unmask = bcm7038_l1_unmask, .irq_set_affinity = bcm7038_l1_set_affinity, }; static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw_irq) { irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq); irq_set_chip_data(virq, d->host_data); return 0; } static const struct irq_domain_ops bcm7038_l1_domain_ops = { .xlate = irq_domain_xlate_onecell, .map = bcm7038_l1_map, }; int __init bcm7038_l1_of_init(struct device_node *dn, struct device_node *parent) { struct bcm7038_l1_chip *intc; int idx, ret; intc = kzalloc(sizeof(*intc), GFP_KERNEL); if (!intc) return -ENOMEM; raw_spin_lock_init(&intc->lock); for_each_possible_cpu(idx) { ret = bcm7038_l1_init_one(dn, idx, intc); if (ret < 0) { if (idx) break; pr_err("failed to remap intc L1 registers\n"); goto out_free; } } intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words, &bcm7038_l1_domain_ops, intc); if (!intc->domain) { ret = -ENOMEM; goto out_unmap; } pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n", intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words); return 0; out_unmap: for_each_possible_cpu(idx) { struct bcm7038_l1_cpu *cpu = intc->cpus[idx]; if (cpu) { if 
(cpu->map_base) iounmap(cpu->map_base); kfree(cpu); } } out_free: kfree(intc); return ret; } IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init);
gpl-2.0
hpzhong/linux-stable
drivers/i2c/busses/i2c-st.c
655
22165
/* * Copyright (C) 2013 STMicroelectronics * * I2C master mode controller driver, used in STMicroelectronics devices. * * Author: Maxime Coquelin <maxime.coquelin@st.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, as * published by the Free Software Foundation. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> /* SSC registers */ #define SSC_BRG 0x000 #define SSC_TBUF 0x004 #define SSC_RBUF 0x008 #define SSC_CTL 0x00C #define SSC_IEN 0x010 #define SSC_STA 0x014 #define SSC_I2C 0x018 #define SSC_SLAD 0x01C #define SSC_REP_START_HOLD 0x020 #define SSC_START_HOLD 0x024 #define SSC_REP_START_SETUP 0x028 #define SSC_DATA_SETUP 0x02C #define SSC_STOP_SETUP 0x030 #define SSC_BUS_FREE 0x034 #define SSC_TX_FSTAT 0x038 #define SSC_RX_FSTAT 0x03C #define SSC_PRE_SCALER_BRG 0x040 #define SSC_CLR 0x080 #define SSC_NOISE_SUPP_WIDTH 0x100 #define SSC_PRSCALER 0x104 #define SSC_NOISE_SUPP_WIDTH_DATAOUT 0x108 #define SSC_PRSCALER_DATAOUT 0x10c /* SSC Control */ #define SSC_CTL_DATA_WIDTH_9 0x8 #define SSC_CTL_DATA_WIDTH_MSK 0xf #define SSC_CTL_BM 0xf #define SSC_CTL_HB BIT(4) #define SSC_CTL_PH BIT(5) #define SSC_CTL_PO BIT(6) #define SSC_CTL_SR BIT(7) #define SSC_CTL_MS BIT(8) #define SSC_CTL_EN BIT(9) #define SSC_CTL_LPB BIT(10) #define SSC_CTL_EN_TX_FIFO BIT(11) #define SSC_CTL_EN_RX_FIFO BIT(12) #define SSC_CTL_EN_CLST_RX BIT(13) /* SSC Interrupt Enable */ #define SSC_IEN_RIEN BIT(0) #define SSC_IEN_TIEN BIT(1) #define SSC_IEN_TEEN BIT(2) #define SSC_IEN_REEN BIT(3) #define SSC_IEN_PEEN BIT(4) #define SSC_IEN_AASEN BIT(6) #define SSC_IEN_STOPEN BIT(7) #define SSC_IEN_ARBLEN BIT(8) #define SSC_IEN_NACKEN BIT(10) 
#define SSC_IEN_REPSTRTEN BIT(11) #define SSC_IEN_TX_FIFO_HALF BIT(12) #define SSC_IEN_RX_FIFO_HALF_FULL BIT(14) /* SSC Status */ #define SSC_STA_RIR BIT(0) #define SSC_STA_TIR BIT(1) #define SSC_STA_TE BIT(2) #define SSC_STA_RE BIT(3) #define SSC_STA_PE BIT(4) #define SSC_STA_CLST BIT(5) #define SSC_STA_AAS BIT(6) #define SSC_STA_STOP BIT(7) #define SSC_STA_ARBL BIT(8) #define SSC_STA_BUSY BIT(9) #define SSC_STA_NACK BIT(10) #define SSC_STA_REPSTRT BIT(11) #define SSC_STA_TX_FIFO_HALF BIT(12) #define SSC_STA_TX_FIFO_FULL BIT(13) #define SSC_STA_RX_FIFO_HALF BIT(14) /* SSC I2C Control */ #define SSC_I2C_I2CM BIT(0) #define SSC_I2C_STRTG BIT(1) #define SSC_I2C_STOPG BIT(2) #define SSC_I2C_ACKG BIT(3) #define SSC_I2C_AD10 BIT(4) #define SSC_I2C_TXENB BIT(5) #define SSC_I2C_REPSTRTG BIT(11) #define SSC_I2C_SLAVE_DISABLE BIT(12) /* SSC Tx FIFO Status */ #define SSC_TX_FSTAT_STATUS 0x07 /* SSC Rx FIFO Status */ #define SSC_RX_FSTAT_STATUS 0x07 /* SSC Clear bit operation */ #define SSC_CLR_SSCAAS BIT(6) #define SSC_CLR_SSCSTOP BIT(7) #define SSC_CLR_SSCARBL BIT(8) #define SSC_CLR_NACK BIT(10) #define SSC_CLR_REPSTRT BIT(11) /* SSC Clock Prescaler */ #define SSC_PRSC_VALUE 0x0f #define SSC_TXFIFO_SIZE 0x8 #define SSC_RXFIFO_SIZE 0x8 enum st_i2c_mode { I2C_MODE_STANDARD, I2C_MODE_FAST, I2C_MODE_END, }; /** * struct st_i2c_timings - per-Mode tuning parameters * @rate: I2C bus rate * @rep_start_hold: I2C repeated start hold time requirement * @rep_start_setup: I2C repeated start set up time requirement * @start_hold: I2C start hold time requirement * @data_setup_time: I2C data set up time requirement * @stop_setup_time: I2C stop set up time requirement * @bus_free_time: I2C bus free time requirement * @sda_pulse_min_limit: I2C SDA pulse mini width limit */ struct st_i2c_timings { u32 rate; u32 rep_start_hold; u32 rep_start_setup; u32 start_hold; u32 data_setup_time; u32 stop_setup_time; u32 bus_free_time; u32 sda_pulse_min_limit; }; /** * struct st_i2c_client - client 
specific data * @addr: 8-bit slave addr, including r/w bit * @count: number of bytes to be transfered * @xfered: number of bytes already transferred * @buf: data buffer * @result: result of the transfer * @stop: last I2C msg to be sent, i.e. STOP to be generated */ struct st_i2c_client { u8 addr; u32 count; u32 xfered; u8 *buf; int result; bool stop; }; /** * struct st_i2c_dev - private data of the controller * @adap: I2C adapter for this controller * @dev: device for this controller * @base: virtual memory area * @complete: completion of I2C message * @irq: interrupt line for th controller * @clk: hw ssc block clock * @mode: I2C mode of the controller. Standard or Fast only supported * @scl_min_width_us: SCL line minimum pulse width in us * @sda_min_width_us: SDA line minimum pulse width in us * @client: I2C transfert information * @busy: I2C transfer on-going */ struct st_i2c_dev { struct i2c_adapter adap; struct device *dev; void __iomem *base; struct completion complete; int irq; struct clk *clk; int mode; u32 scl_min_width_us; u32 sda_min_width_us; struct st_i2c_client client; bool busy; }; static inline void st_i2c_set_bits(void __iomem *reg, u32 mask) { writel_relaxed(readl_relaxed(reg) | mask, reg); } static inline void st_i2c_clr_bits(void __iomem *reg, u32 mask) { writel_relaxed(readl_relaxed(reg) & ~mask, reg); } /* * From I2C Specifications v0.5. * * All the values below have +10% margin added to be * compatible with some out-of-spec devices, * like HDMI link of the Toshiba 19AV600 TV. 
*/ static struct st_i2c_timings i2c_timings[] = { [I2C_MODE_STANDARD] = { .rate = 100000, .rep_start_hold = 4400, .rep_start_setup = 5170, .start_hold = 4400, .data_setup_time = 275, .stop_setup_time = 4400, .bus_free_time = 5170, }, [I2C_MODE_FAST] = { .rate = 400000, .rep_start_hold = 660, .rep_start_setup = 660, .start_hold = 660, .data_setup_time = 110, .stop_setup_time = 660, .bus_free_time = 1430, }, }; static void st_i2c_flush_rx_fifo(struct st_i2c_dev *i2c_dev) { int count, i; /* * Counter only counts up to 7 but fifo size is 8... * When fifo is full, counter is 0 and RIR bit of status register is * set */ if (readl_relaxed(i2c_dev->base + SSC_STA) & SSC_STA_RIR) count = SSC_RXFIFO_SIZE; else count = readl_relaxed(i2c_dev->base + SSC_RX_FSTAT) & SSC_RX_FSTAT_STATUS; for (i = 0; i < count; i++) readl_relaxed(i2c_dev->base + SSC_RBUF); } static void st_i2c_soft_reset(struct st_i2c_dev *i2c_dev) { /* * FIFO needs to be emptied before reseting the IP, * else the controller raises a BUSY error. 
*/ st_i2c_flush_rx_fifo(i2c_dev); st_i2c_set_bits(i2c_dev->base + SSC_CTL, SSC_CTL_SR); st_i2c_clr_bits(i2c_dev->base + SSC_CTL, SSC_CTL_SR); } /** * st_i2c_hw_config() - Prepare SSC block, calculate and apply tuning timings * @i2c_dev: Controller's private data */ static void st_i2c_hw_config(struct st_i2c_dev *i2c_dev) { unsigned long rate; u32 val, ns_per_clk; struct st_i2c_timings *t = &i2c_timings[i2c_dev->mode]; st_i2c_soft_reset(i2c_dev); val = SSC_CLR_REPSTRT | SSC_CLR_NACK | SSC_CLR_SSCARBL | SSC_CLR_SSCAAS | SSC_CLR_SSCSTOP; writel_relaxed(val, i2c_dev->base + SSC_CLR); /* SSC Control register setup */ val = SSC_CTL_PO | SSC_CTL_PH | SSC_CTL_HB | SSC_CTL_DATA_WIDTH_9; writel_relaxed(val, i2c_dev->base + SSC_CTL); rate = clk_get_rate(i2c_dev->clk); ns_per_clk = 1000000000 / rate; /* Baudrate */ val = rate / (2 * t->rate); writel_relaxed(val, i2c_dev->base + SSC_BRG); /* Pre-scaler baudrate */ writel_relaxed(1, i2c_dev->base + SSC_PRE_SCALER_BRG); /* Enable I2C mode */ writel_relaxed(SSC_I2C_I2CM, i2c_dev->base + SSC_I2C); /* Repeated start hold time */ val = t->rep_start_hold / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_REP_START_HOLD); /* Repeated start set up time */ val = t->rep_start_setup / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_REP_START_SETUP); /* Start hold time */ val = t->start_hold / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_START_HOLD); /* Data set up time */ val = t->data_setup_time / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_DATA_SETUP); /* Stop set up time */ val = t->stop_setup_time / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_STOP_SETUP); /* Bus free time */ val = t->bus_free_time / ns_per_clk; writel_relaxed(val, i2c_dev->base + SSC_BUS_FREE); /* Prescalers set up */ val = rate / 10000000; writel_relaxed(val, i2c_dev->base + SSC_PRSCALER); writel_relaxed(val, i2c_dev->base + SSC_PRSCALER_DATAOUT); /* Noise suppression witdh */ val = i2c_dev->scl_min_width_us * rate / 100000000; 
writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH);

	/*
	 * NOTE(review): the two statements above/below are the tail of a
	 * function whose opening lies before this excerpt (presumably the
	 * hw-config routine that programs the deglitch filters) — confirm
	 * against the full file.
	 */
	/* Noise suppression max output data delay width */
	val = i2c_dev->sda_min_width_us * rate / 100000000;
	writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH_DATAOUT);
}

/**
 * st_i2c_wait_free_bus() - Wait until the bus is released
 * @i2c_dev: Controller's private data
 *
 * Polls the status register up to 10 times (2-4ms apart) until the BUSY
 * flag clears.  Returns 0 when the bus is free, -EBUSY on timeout.
 */
static int st_i2c_wait_free_bus(struct st_i2c_dev *i2c_dev)
{
	u32 sta;
	int i;

	for (i = 0; i < 10; i++) {
		sta = readl_relaxed(i2c_dev->base + SSC_STA);
		if (!(sta & SSC_STA_BUSY))
			return 0;

		usleep_range(2000, 4000);
	}

	dev_err(i2c_dev->dev, "bus not free (status = 0x%08x)\n", sta);

	return -EBUSY;
}

/**
 * st_i2c_write_tx_fifo() - Write a byte in the Tx FIFO
 * @i2c_dev: Controller's private data
 * @byte: Data to write in the Tx FIFO
 *
 * The data byte occupies bits [8:1] of the Tx buffer register; bit 0 is
 * forced to 1 (presumably the released-ACK slot — confirm against the SSC
 * datasheet).
 */
static inline void st_i2c_write_tx_fifo(struct st_i2c_dev *i2c_dev, u8 byte)
{
	u16 tbuf = byte << 1;

	writel_relaxed(tbuf | 1, i2c_dev->base + SSC_TBUF);
}

/**
 * st_i2c_wr_fill_tx_fifo() - Fill the Tx FIFO in write mode
 * @i2c_dev: Controller's private data
 *
 * This function fills the Tx FIFO from the I2C transfer buffer when
 * in write mode.  At most the free FIFO space (SSC_TXFIFO_SIZE minus the
 * current fill level) or the remaining byte count is queued, whichever
 * is smaller.
 */
static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev)
{
	struct st_i2c_client *c = &i2c_dev->client;
	u32 tx_fstat, sta;
	int i;

	sta = readl_relaxed(i2c_dev->base + SSC_STA);
	if (sta & SSC_STA_TX_FIFO_FULL)
		return;

	tx_fstat = readl_relaxed(i2c_dev->base + SSC_TX_FSTAT);
	tx_fstat &= SSC_TX_FSTAT_STATUS;

	if (c->count < (SSC_TXFIFO_SIZE - tx_fstat))
		i = c->count;
	else
		i = SSC_TXFIFO_SIZE - tx_fstat;

	/* Consume the client buffer as bytes are queued to the FIFO */
	for (; i > 0; i--, c->count--, c->buf++)
		st_i2c_write_tx_fifo(i2c_dev, *c->buf);
}

/**
 * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode
 * @i2c_dev: Controller's private data
 * @max: Maximum number of dummy bytes to queue
 *
 * This function fills the Tx FIFO with a fixed pattern (0xff) when in
 * read mode, to trigger clock generation for the incoming bytes.  Only
 * c->xfered is advanced — the client buffer is untouched.
 */
static void st_i2c_rd_fill_tx_fifo(struct st_i2c_dev *i2c_dev, int max)
{
	struct st_i2c_client *c = &i2c_dev->client;
	u32 tx_fstat, sta;
	int i;

	sta = readl_relaxed(i2c_dev->base + SSC_STA);
	if (sta & SSC_STA_TX_FIFO_FULL)
		return;

	tx_fstat = readl_relaxed(i2c_dev->base + SSC_TX_FSTAT);
	tx_fstat &= SSC_TX_FSTAT_STATUS;

	if (max < (SSC_TXFIFO_SIZE - tx_fstat))
		i = max;
	else
		i = SSC_TXFIFO_SIZE - tx_fstat;

	for (; i > 0; i--, c->xfered++)
		st_i2c_write_tx_fifo(i2c_dev, 0xff);
}

/**
 * st_i2c_read_rx_fifo() - Drain received bytes into the client buffer
 * @i2c_dev: Controller's private data
 *
 * On Rx overrun (SSC_STA_RIR) the whole FIFO is assumed full; otherwise
 * the fill level is read from SSC_RX_FSTAT.  Any bytes left over after
 * c->count is exhausted are reported and flushed.
 */
static void st_i2c_read_rx_fifo(struct st_i2c_dev *i2c_dev)
{
	struct st_i2c_client *c = &i2c_dev->client;
	u32 i, sta;
	u16 rbuf;

	sta = readl_relaxed(i2c_dev->base + SSC_STA);
	if (sta & SSC_STA_RIR) {
		i = SSC_RXFIFO_SIZE;
	} else {
		i = readl_relaxed(i2c_dev->base + SSC_RX_FSTAT);
		i &= SSC_RX_FSTAT_STATUS;
	}

	for (; (i > 0) && (c->count > 0); i--, c->count--) {
		/* Data sits in bits [8:1] of the Rx buffer register */
		rbuf = readl_relaxed(i2c_dev->base + SSC_RBUF) >> 1;
		*c->buf++ = (u8)rbuf & 0xff;
	}

	if (i) {
		dev_err(i2c_dev->dev, "Unexpected %d bytes in rx fifo\n", i);
		st_i2c_flush_rx_fifo(i2c_dev);
	}
}

/**
 * st_i2c_terminate_xfer() - Send either STOP or REPSTART condition
 * @i2c_dev: Controller's private data
 *
 * Disables the Tx-empty interrupt and the start generator, then arms
 * either the STOP or the repeated-START generator (and its matching
 * interrupt) depending on whether this is the last message.
 */
static void st_i2c_terminate_xfer(struct st_i2c_dev *i2c_dev)
{
	struct st_i2c_client *c = &i2c_dev->client;

	st_i2c_clr_bits(i2c_dev->base + SSC_IEN, SSC_IEN_TEEN);
	st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG);

	if (c->stop) {
		st_i2c_set_bits(i2c_dev->base + SSC_IEN, SSC_IEN_STOPEN);
		st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG);
	} else {
		st_i2c_set_bits(i2c_dev->base + SSC_IEN, SSC_IEN_REPSTRTEN);
		st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_REPSTRTG);
	}
}

/**
 * st_i2c_handle_write() - Handle FIFO empty interrupt in case of write
 * @i2c_dev: Controller's private data
 */
static void st_i2c_handle_write(struct st_i2c_dev *i2c_dev)
{
	struct st_i2c_client *c = &i2c_dev->client;

	st_i2c_flush_rx_fifo(i2c_dev);

	if (!c->count)
		/* End of xfer, send stop or repstart */
		st_i2c_terminate_xfer(i2c_dev);
	else
		st_i2c_wr_fill_tx_fifo(i2c_dev);
}

/**
 * st_i2c_handle_read() - Handle FIFO empty interrupt in case of read
 * @i2c_dev: Controller's private data
 *
 * NOTE(review): the original kernel-doc was mis-titled
 * "st_i2c_handle_write()"; corrected to match the function it documents.
 */
static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev)
{
	struct st_i2c_client *c = &i2c_dev->client;
	u32 ien;

	/* Trash the address read back */
	if (!c->xfered) {
		readl_relaxed(i2c_dev->base + SSC_RBUF);
		st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_TXENB);
	} else {
		st_i2c_read_rx_fifo(i2c_dev);
	}

	if (!c->count) {
		/* End of xfer, send stop or repstart */
		st_i2c_terminate_xfer(i2c_dev);
	} else if (c->count == 1) {
		/* Penultimate byte to xfer, disable ACK gen. */
		st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_ACKG);

		/* Last received byte is to be handled by NACK interrupt */
		ien = SSC_IEN_NACKEN | SSC_IEN_ARBLEN;
		writel_relaxed(ien, i2c_dev->base + SSC_IEN);

		st_i2c_rd_fill_tx_fifo(i2c_dev, c->count);
	} else {
		st_i2c_rd_fill_tx_fifo(i2c_dev, c->count - 1);
	}
}

/**
 * st_i2c_isr_thread() - Interrupt routine
 * @irq: interrupt number
 * @data: Controller's private data
 *
 * Dispatches on the highest pending *enabled* status bit.  On NACK the
 * result is -EIO (unless it is the expected last-byte NACK of a read);
 * on arbitration loss it is -EAGAIN; in both cases a STOP is generated
 * and its interrupt awaited before completing.
 */
static irqreturn_t st_i2c_isr_thread(int irq, void *data)
{
	struct st_i2c_dev *i2c_dev = data;
	struct st_i2c_client *c = &i2c_dev->client;
	u32 sta, ien;
	int it;

	ien = readl_relaxed(i2c_dev->base + SSC_IEN);
	sta = readl_relaxed(i2c_dev->base + SSC_STA);

	/* Use __fls() to check error bits first */
	it = __fls(sta & ien);
	if (it < 0) {
		dev_dbg(i2c_dev->dev, "spurious it (sta=0x%04x, ien=0x%04x)\n",
				sta, ien);
		return IRQ_NONE;
	}

	switch (1 << it) {
	case SSC_STA_TE:
		if (c->addr & I2C_M_RD)
			st_i2c_handle_read(i2c_dev);
		else
			st_i2c_handle_write(i2c_dev);
		break;

	case SSC_STA_STOP:
	case SSC_STA_REPSTRT:
		/* Transfer finished: mask everything and wake the waiter */
		writel_relaxed(0, i2c_dev->base + SSC_IEN);
		complete(&i2c_dev->complete);
		break;

	case SSC_STA_NACK:
		writel_relaxed(SSC_CLR_NACK, i2c_dev->base + SSC_CLR);

		/* Last received byte handled by NACK interrupt */
		if ((c->addr & I2C_M_RD) && (c->count == 1) && (c->xfered)) {
			st_i2c_handle_read(i2c_dev);
			break;
		}

		it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN;
		writel_relaxed(it, i2c_dev->base + SSC_IEN);

		st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG);
		c->result = -EIO;
		break;

	case SSC_STA_ARBL:
		writel_relaxed(SSC_CLR_SSCARBL, i2c_dev->base + SSC_CLR);

		it = SSC_IEN_STOPEN | SSC_IEN_ARBLEN;
		writel_relaxed(it, i2c_dev->base + SSC_IEN);

		st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STOPG);
		c->result = -EAGAIN;
		break;

	default:
		dev_err(i2c_dev->dev,
				"it %d unhandled (sta=0x%04x)\n", it, sta);
	}

	/*
	 * Read IEN register to ensure interrupt mask write is effective
	 * before re-enabling interrupt at GIC level, and thus avoid spurious
	 * interrupts.
	 */
	readl(i2c_dev->base + SSC_IEN);

	return IRQ_HANDLED;
}

/**
 * st_i2c_xfer_msg() - Transfer a single I2C message
 * @i2c_dev: Controller's private data
 * @msg: I2C message to transfer
 * @is_first: first message of the sequence
 * @is_last: last message of the sequence
 *
 * Returns c->result (0 / -EIO / -EAGAIN), -ETIMEDOUT on completion
 * timeout, or the st_i2c_wait_free_bus() error for the first message.
 */
static int st_i2c_xfer_msg(struct st_i2c_dev *i2c_dev, struct i2c_msg *msg,
			    bool is_first, bool is_last)
{
	struct st_i2c_client *c = &i2c_dev->client;
	u32 ctl, i2c, it;
	unsigned long timeout;
	int ret;

	/* 8-bit address byte: 7-bit address plus R/W flag in bit 0 */
	c->addr = (u8)(msg->addr << 1);
	c->addr |= (msg->flags & I2C_M_RD);
	c->buf = msg->buf;
	c->count = msg->len;
	c->xfered = 0;
	c->result = 0;
	c->stop = is_last;

	reinit_completion(&i2c_dev->complete);

	ctl = SSC_CTL_EN | SSC_CTL_MS | SSC_CTL_EN_RX_FIFO | SSC_CTL_EN_TX_FIFO;
	st_i2c_set_bits(i2c_dev->base + SSC_CTL, ctl);

	i2c = SSC_I2C_TXENB;
	if (c->addr & I2C_M_RD)
		i2c |= SSC_I2C_ACKG;
	st_i2c_set_bits(i2c_dev->base + SSC_I2C, i2c);

	/* Write slave address */
	st_i2c_write_tx_fifo(i2c_dev, c->addr);

	/* Pre-fill Tx fifo with data in case of write */
	if (!(c->addr & I2C_M_RD))
		st_i2c_wr_fill_tx_fifo(i2c_dev);

	it = SSC_IEN_NACKEN | SSC_IEN_TEEN | SSC_IEN_ARBLEN;
	writel_relaxed(it, i2c_dev->base + SSC_IEN);

	if (is_first) {
		ret = st_i2c_wait_free_bus(i2c_dev);
		if (ret)
			return ret;

		/* Generate the START condition */
		st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_STRTG);
	}

	timeout = wait_for_completion_timeout(&i2c_dev->complete,
			i2c_dev->adap.timeout);
	ret = c->result;

	if (!timeout) {
		dev_err(i2c_dev->dev, "Write to slave 0x%x timed out\n",
				c->addr);
		ret = -ETIMEDOUT;
	}

	/* Disarm the stop/repstart generators and clear their status */
	i2c = SSC_I2C_STOPG | SSC_I2C_REPSTRTG;
	st_i2c_clr_bits(i2c_dev->base + SSC_I2C, i2c);

	writel_relaxed(SSC_CLR_SSCSTOP | SSC_CLR_REPSTRT,
			i2c_dev->base + SSC_CLR);

	return ret;
}

/**
 * st_i2c_xfer() - Transfer a sequence of I2C messages
 * @i2c_adap: Adapter pointer to the controller
 * @msgs: Pointer to data to be written.
 * @num: Number of messages to be executed
 *
 * Returns the number of messages transferred, or a negative error code.
 * The clock and pinctrl state are held only for the duration of the
 * transfer; i2c_dev->busy gates suspend (see st_i2c_suspend()).
 */
static int st_i2c_xfer(struct i2c_adapter *i2c_adap,
			struct i2c_msg msgs[], int num)
{
	struct st_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
	int ret, i;

	i2c_dev->busy = true;

	ret = clk_prepare_enable(i2c_dev->clk);
	if (ret) {
		dev_err(i2c_dev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	pinctrl_pm_select_default_state(i2c_dev->dev);

	st_i2c_hw_config(i2c_dev);

	for (i = 0; (i < num) && !ret; i++)
		ret = st_i2c_xfer_msg(i2c_dev, &msgs[i], i == 0, i == num - 1);

	pinctrl_pm_select_idle_state(i2c_dev->dev);

	clk_disable_unprepare(i2c_dev->clk);

	i2c_dev->busy = false;

	return (ret < 0) ? ret : i;
}

#ifdef CONFIG_PM_SLEEP
/* Refuse to suspend while a transfer is in flight. */
static int st_i2c_suspend(struct device *dev)
{
	struct platform_device *pdev =
		container_of(dev, struct platform_device, dev);
	struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev);

	if (i2c_dev->busy)
		return -EBUSY;

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int st_i2c_resume(struct device *dev)
{
	pinctrl_pm_select_default_state(dev);
	/* Go in idle state if available */
	pinctrl_pm_select_idle_state(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(st_i2c_pm, st_i2c_suspend, st_i2c_resume);
#define ST_I2C_PM	(&st_i2c_pm)
#else
#define ST_I2C_PM	NULL
#endif

static u32 st_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static struct i2c_algorithm st_i2c_algo = {
	.master_xfer = st_i2c_xfer,
	.functionality = st_i2c_func,
};

/*
 * Read the optional deglitch (minimum pulse width) properties from the
 * device tree.  A missing property is not an error — only a malformed
 * one (-ENODATA / -EOVERFLOW) is.
 */
static int st_i2c_of_get_deglitch(struct device_node *np,
		struct st_i2c_dev *i2c_dev)
{
	int ret;

	ret = of_property_read_u32(np, "st,i2c-min-scl-pulse-width-us",
			&i2c_dev->scl_min_width_us);
	if ((ret == -ENODATA) || (ret == -EOVERFLOW)) {
		dev_err(i2c_dev->dev, "st,i2c-min-scl-pulse-width-us invalid\n");
		return ret;
	}

	ret = of_property_read_u32(np, "st,i2c-min-sda-pulse-width-us",
			&i2c_dev->sda_min_width_us);
	if ((ret == -ENODATA) || (ret == -EOVERFLOW)) {
		dev_err(i2c_dev->dev, "st,i2c-min-sda-pulse-width-us invalid\n");
		return ret;
	}

	return 0;
}

/*
 * Probe: map registers, grab IRQ and clock, parse DT (bus speed,
 * deglitch widths), register the threaded ISR, then add the adapter.
 * All resources are devm-managed except the clock (of_clk_get_by_name).
 */
static int st_i2c_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct st_i2c_dev *i2c_dev;
	struct resource *res;
	u32 clk_rate;
	struct i2c_adapter *adap;
	int ret;

	i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
	if (!i2c_dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	i2c_dev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(i2c_dev->base))
		return PTR_ERR(i2c_dev->base);

	i2c_dev->irq = irq_of_parse_and_map(np, 0);
	if (!i2c_dev->irq) {
		dev_err(&pdev->dev, "IRQ missing or invalid\n");
		return -EINVAL;
	}

	i2c_dev->clk = of_clk_get_by_name(np, "ssc");
	if (IS_ERR(i2c_dev->clk)) {
		dev_err(&pdev->dev, "Unable to request clock\n");
		return PTR_ERR(i2c_dev->clk);
	}

	/* Fast mode (400 kHz) only when the DT explicitly asks for it */
	i2c_dev->mode = I2C_MODE_STANDARD;
	ret = of_property_read_u32(np, "clock-frequency", &clk_rate);
	if ((!ret) && (clk_rate == 400000))
		i2c_dev->mode = I2C_MODE_FAST;

	i2c_dev->dev = &pdev->dev;

	ret = devm_request_threaded_irq(&pdev->dev, i2c_dev->irq,
			NULL, st_i2c_isr_thread,
			IRQF_ONESHOT, pdev->name, i2c_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
		return ret;
	}

	pinctrl_pm_select_default_state(i2c_dev->dev);
	/* In case idle state available, select it */
	pinctrl_pm_select_idle_state(i2c_dev->dev);

	ret = st_i2c_of_get_deglitch(np, i2c_dev);
	if (ret)
		return ret;

	adap = &i2c_dev->adap;
	i2c_set_adapdata(adap, i2c_dev);
	snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start);
	adap->owner = THIS_MODULE;
	adap->timeout = 2 * HZ;
	adap->retries = 0;
	adap->algo = &st_i2c_algo;
	adap->dev.parent = &pdev->dev;
	adap->dev.of_node = pdev->dev.of_node;

	init_completion(&i2c_dev->complete);

	ret = i2c_add_adapter(adap);
	if (ret) {
		dev_err(&pdev->dev, "Failed to add adapter\n");
		return ret;
	}

	platform_set_drvdata(pdev, i2c_dev);

	dev_info(i2c_dev->dev, "%s initialized\n", adap->name);

	return 0;
}

static int st_i2c_remove(struct platform_device *pdev)
{
	struct st_i2c_dev *i2c_dev = platform_get_drvdata(pdev);

	i2c_del_adapter(&i2c_dev->adap);

	return 0;
}

static const struct of_device_id st_i2c_match[] = {
	{ .compatible = "st,comms-ssc-i2c", },
	{ .compatible = "st,comms-ssc4-i2c", },
	{},
};
MODULE_DEVICE_TABLE(of, st_i2c_match);

static struct platform_driver st_i2c_driver = {
	.driver = {
		.name = "st-i2c",
		.of_match_table = st_i2c_match,
		.pm = ST_I2C_PM,
	},
	.probe = st_i2c_probe,
	.remove = st_i2c_remove,
};

module_platform_driver(st_i2c_driver);

MODULE_AUTHOR("Maxime Coquelin <maxime.coquelin@st.com>");
MODULE_DESCRIPTION("STMicroelectronics I2C driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
sembre/kernel_totoro_update3
common/arch/arm/mach-aaec2000/aaed2000.c
1679
2298
/*
 *  linux/arch/arm/mach-aaec2000/aaed2000.c
 *
 *  Support for the Agilent AAED-2000 Development Platform.
 *
 *  Copyright (c) 2005 Nicolas Bellido Y Ortega
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/major.h>
#include <linux/interrupt.h>

#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/aaed2000.h>

#include "core.h"

/* Gate LCD panel power via the board's external GPIO register. */
static void aaed2000_clcd_disable(struct clcd_fb *fb)
{
	AAED_EXT_GPIO &= ~AAED_EGPIO_LCD_PWR_EN;
}

static void aaed2000_clcd_enable(struct clcd_fb *fb)
{
	AAED_EXT_GPIO |= AAED_EGPIO_LCD_PWR_EN;
}

/*
 * CLCD platform data: 640x480 Sharp TFT panel timings.
 * Non-static on purpose — handed to the SoC core via
 * aaec2000_set_clcd_plat_data() below.
 */
struct aaec2000_clcd_info clcd_info = {
	.enable = aaed2000_clcd_enable,
	.disable = aaed2000_clcd_disable,
	.panel = {
		.mode = {
			.name = "Sharp",
			.refresh = 60,
			.xres = 640,
			.yres = 480,
			.pixclock = 39721,
			.left_margin = 20,
			.right_margin = 44,
			.upper_margin = 21,
			.lower_margin = 34,
			.hsync_len = 96,
			.vsync_len = 2,
			.sync = 0,
			.vmode = FB_VMODE_NONINTERLACED,
		},
		.width = -1,
		.height = -1,
		.tim2 = TIM2_IVS | TIM2_IHS,
		.cntl = CNTL_LCDTFT,
		.bpp = 16,
	},
};

/* Board IRQ init just defers to the AAEC-2000 SoC core. */
static void __init aaed2000_init_irq(void)
{
	aaec2000_init_irq();
}

static void __init aaed2000_init(void)
{
	aaec2000_set_clcd_plat_data(&clcd_info);
}

/* Static mapping for the board's external GPIO register block. */
static struct map_desc aaed2000_io_desc[] __initdata = {
	{
		.virtual = EXT_GPIO_VBASE,
		.pfn = __phys_to_pfn(EXT_GPIO_PBASE),
		.length = EXT_GPIO_LENGTH,
		.type = MT_DEVICE
	},
};

static void __init aaed2000_map_io(void)
{
	aaec2000_map_io();
	iotable_init(aaed2000_io_desc, ARRAY_SIZE(aaed2000_io_desc));
}

MACHINE_START(AAED2000, "Agilent AAED-2000 Development Platform")
	/* Maintainer: Nicolas Bellido Y Ortega */
	.phys_io = PIO_BASE,
	.io_pg_offst = ((VIO_BASE) >> 18) & 0xfffc,
	.map_io = aaed2000_map_io,
	.init_irq = aaed2000_init_irq,
	.timer = &aaec2000_timer,
	.init_machine = aaed2000_init,
MACHINE_END
gpl-2.0
Fusion-Devices/android_kernel_motorola_msm8916
arch/arm/mach-kirkwood/t5325-setup.c
2703
4737
/*
 *
 * HP t5325 Thin Client setup
 *
 * Copyright (C) 2010 Martin Michlmayr <tbm@cyrius.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
#include <linux/mv643xx_eth.h>
#include <linux/ata_platform.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <sound/alc5623.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/kirkwood.h>
#include "common.h"
#include "mpp.h"

/*
 * SPI flash layout.  NOTE(review): entries are not in offset order —
 * the u-boot image actually sits at offset 0; the ordering here only
 * fixes the mtd partition numbering.
 */
static struct mtd_partition hp_t5325_partitions[] = {
	{
		.name = "u-boot env",
		.size = SZ_64K,
		.offset = SZ_512K + SZ_256K,
	},
	{
		.name = "permanent u-boot env",
		.size = SZ_64K,
		.offset = MTDPART_OFS_APPEND,
		.mask_flags = MTD_WRITEABLE,
	},
	{
		.name = "HP env",
		.size = SZ_64K,
		.offset = MTDPART_OFS_APPEND,
	},
	{
		.name = "u-boot",
		.size = SZ_512K,
		.offset = 0,
		.mask_flags = MTD_WRITEABLE,
	},
	{
		.name = "SSD firmware",
		.size = SZ_256K,
		.offset = SZ_512K,
	},
};

static const struct flash_platform_data hp_t5325_flash = {
	.type = "mx25l8005",
	.name = "spi_flash",
	.parts = hp_t5325_partitions,
	.nr_parts = ARRAY_SIZE(hp_t5325_partitions),
};

static struct spi_board_info __initdata hp_t5325_spi_slave_info[] = {
	{
		.modalias = "m25p80",
		.platform_data = &hp_t5325_flash,
		.irq = -1,
	},
};

/* Gigabit Ethernet PHY sits at MDIO address 8. */
static struct mv643xx_eth_platform_data hp_t5325_ge00_data = {
	.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};

static struct mv_sata_platform_data hp_t5325_sata_data = {
	.n_ports = 2,
};

/* Front-panel power button on GPIO 45, active low. */
static struct gpio_keys_button hp_t5325_buttons[] = {
	{
		.code = KEY_POWER,
		.gpio = 45,
		.desc = "Power",
		.active_low = 1,
	},
};

static struct gpio_keys_platform_data hp_t5325_button_data = {
	.buttons = hp_t5325_buttons,
	.nbuttons = ARRAY_SIZE(hp_t5325_buttons),
};

static struct platform_device hp_t5325_button_device = {
	.name = "gpio-keys",
	.id = -1,
	.num_resources = 0,
	.dev = {
		.platform_data = &hp_t5325_button_data,
	}
};

static struct platform_device hp_t5325_audio_device = {
	.name = "t5325-audio",
	.id = -1,
};

/* Multi-purpose pin configuration for this board. */
static unsigned int hp_t5325_mpp_config[] __initdata = {
	MPP0_NF_IO2,
	MPP1_SPI_MOSI,
	MPP2_SPI_SCK,
	MPP3_SPI_MISO,
	MPP4_NF_IO6,
	MPP5_NF_IO7,
	MPP6_SYSRST_OUTn,
	MPP7_SPI_SCn,
	MPP8_TW0_SDA,
	MPP9_TW0_SCK,
	MPP10_UART0_TXD,
	MPP11_UART0_RXD,
	MPP12_SD_CLK,
	MPP13_GPIO,
	MPP14_GPIO,
	MPP15_GPIO,
	MPP16_GPIO,
	MPP17_GPIO,
	MPP18_NF_IO0,
	MPP19_NF_IO1,
	MPP20_GPIO,
	MPP21_GPIO,
	MPP22_GPIO,
	MPP23_GPIO,
	MPP32_GPIO,
	MPP33_GE1_TXCTL,
	MPP39_AU_I2SBCLK,
	MPP40_AU_I2SDO,
	MPP43_AU_I2SDI,
	MPP41_AU_I2SLRCLK,
	MPP42_AU_I2SMCLK,
	MPP45_GPIO,		/* Power button */
	MPP48_GPIO,		/* Board power off */
	0
};

/* ALC5621 codec register tweaks (values per codec datasheet). */
static struct alc5623_platform_data alc5621_data = {
	.add_ctrl = 0x3700,
	.jack_det_ctrl = 0x4810,
};

static struct i2c_board_info i2c_board_info[] __initdata = {
	{
		I2C_BOARD_INFO("alc5621", 0x1a),
		.platform_data = &alc5621_data,
	},
};

#define HP_T5325_GPIO_POWER_OFF		48

/* Asserting GPIO 48 cuts board power. */
static void hp_t5325_power_off(void)
{
	gpio_set_value(HP_T5325_GPIO_POWER_OFF, 1);
}

static void __init hp_t5325_init(void)
{
	/*
	 * Basic setup. Needs to be called early.
	 */
	kirkwood_init();
	kirkwood_mpp_conf(hp_t5325_mpp_config);

	kirkwood_uart0_init();
	spi_register_board_info(hp_t5325_spi_slave_info,
				ARRAY_SIZE(hp_t5325_spi_slave_info));
	kirkwood_spi_init();
	kirkwood_i2c_init();
	kirkwood_ge00_init(&hp_t5325_ge00_data);
	kirkwood_sata_init(&hp_t5325_sata_data);
	kirkwood_ehci_init();
	platform_device_register(&hp_t5325_button_device);
	platform_device_register(&hp_t5325_audio_device);

	i2c_register_board_info(0, i2c_board_info, ARRAY_SIZE(i2c_board_info));
	kirkwood_audio_init();

	/* Hook the power-off GPIO into pm_power_off if we can claim it */
	if (gpio_request(HP_T5325_GPIO_POWER_OFF, "power-off") == 0 &&
	    gpio_direction_output(HP_T5325_GPIO_POWER_OFF, 0) == 0)
		pm_power_off = hp_t5325_power_off;
	else
		pr_err("t5325: failed to configure power-off GPIO\n");
}

/* PCIe must be brought up at subsys time, after the machine init. */
static int __init hp_t5325_pci_init(void)
{
	if (machine_is_t5325())
		kirkwood_pcie_init(KW_PCIE0);

	return 0;
}
subsys_initcall(hp_t5325_pci_init);

MACHINE_START(T5325, "HP t5325 Thin Client")
	/* Maintainer: Martin Michlmayr <tbm@cyrius.com> */
	.atag_offset = 0x100,
	.init_machine = hp_t5325_init,
	.map_io = kirkwood_map_io,
	.init_early = kirkwood_init_early,
	.init_irq = kirkwood_init_irq,
	.init_time = kirkwood_timer_init,
	.restart = kirkwood_restart,
MACHINE_END
gpl-2.0
kimjh-sane/imx6sane-linux-3.14.28
drivers/staging/speakup/speakup_spkout.c
3471
5182
/*
 * originally written by: Kirk Reiser <kirk@braille.uwo.ca>
 * this version considerably modified by David Borowski, david575@rogers.com
 *
 * Copyright (C) 1998-99  Kirk Reiser.
 * Copyright (C) 2003 David Borowski.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * specifically written as a driver for the speakup screen review
 * package; it is not a general device driver.
 */
#include "spk_priv.h"
#include "speakup.h"
#include "serialio.h"

#define DRV_VERSION "2.11"
#define SYNTH_CLEAR 0x18	/* control byte that aborts speech in progress */
#define PROCSPEECH '\r'

static void synth_flush(struct spk_synth *synth);

/*
 * Synth variables: each entry maps a speakup var to the serial escape
 * sequence the Speak Out hardware understands, plus default/min/max.
 */
static struct var_t vars[] = {
	{ CAPS_START, .u.s = {"\x05P+" } },
	{ CAPS_STOP, .u.s = {"\x05P-" } },
	{ RATE, .u.n = {"\x05R%d", 7, 0, 9, 0, 0, NULL } },
	{ PITCH, .u.n = {"\x05P%d", 3, 0, 9, 0, 0, NULL } },
	{ VOL, .u.n = {"\x05V%d", 9, 0, 9, 0, 0, NULL } },
	{ TONE, .u.n = {"\x05T%c", 8, 0, 25, 65, 0, NULL } },
	{ PUNCT, .u.n = {"\x05M%c", 0, 0, 3, 0, 0, "nsma" } },
	{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
	V_LAST_VAR
};

/*
 * These attributes will appear in /sys/accessibility/speakup/spkout.
 */
static struct kobj_attribute caps_start_attribute =
	__ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
	__ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
	__ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
	__ATTR(punct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
	__ATTR(rate, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
	__ATTR(tone, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
	__ATTR(vol, USER_RW, spk_var_show, spk_var_store);

static struct kobj_attribute delay_time_attribute =
	__ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
	__ATTR(direct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
	__ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
	__ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
	__ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);

/*
 * Create a group of attributes so that we can create and destroy them all
 * at once.
 */
static struct attribute *synth_attrs[] = {
	&caps_start_attribute.attr,
	&caps_stop_attribute.attr,
	&pitch_attribute.attr,
	&punct_attribute.attr,
	&rate_attribute.attr,
	&tone_attribute.attr,
	&vol_attribute.attr,
	&delay_time_attribute.attr,
	&direct_attribute.attr,
	&full_time_attribute.attr,
	&jiffy_delta_attribute.attr,
	&trigger_time_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};

/* Synth descriptor: generic serial-synth callbacks plus our flush. */
static struct spk_synth synth_spkout = {
	.name = "spkout",
	.version = DRV_VERSION,
	.long_name = "Speakout",
	.init = "\005W1\005I2\005C3",
	.procspeech = PROCSPEECH,
	.clear = SYNTH_CLEAR,
	.delay = 500,
	.trigger = 50,
	.jiffies = 50,
	.full = 40000,
	.startup = SYNTH_START,
	.checkval = SYNTH_CHECK,
	.vars = vars,
	.probe = spk_serial_synth_probe,
	.release = spk_serial_release,
	.synth_immediate = spk_synth_immediate,
	.catch_up = spk_do_catch_up,
	.flush = synth_flush,
	.is_alive = spk_synth_is_alive_restart,
	.synth_adjust = NULL,
	.read_buff_add = NULL,
	.get_index = spk_serial_in_nowait,
	.indexing = {
		.command = "\x05[%c",
		.lowindex = 1,
		.highindex = 5,
		.currindex = 1,
	},
	.attributes = {
		.attrs = synth_attrs,
		.name = "spkout",
	},
};

/*
 * Wait (bounded busy-wait) for the transmitter to go idle, then send
 * the clear byte directly on the port to abort speech in progress.
 */
static void synth_flush(struct spk_synth *synth)
{
	int timeout = SPK_XMITR_TIMEOUT;

	while (spk_serial_tx_busy()) {
		if (!--timeout)
			break;
		udelay(1);
	}
	outb(SYNTH_CLEAR, speakup_info.port_tts);
}

module_param_named(ser, synth_spkout.ser, int, S_IRUGO);
module_param_named(start, synth_spkout.startup, short, S_IRUGO);

MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");

static int __init spkout_init(void)
{
	return synth_add(&synth_spkout);
}

static void __exit spkout_exit(void)
{
	synth_remove(&synth_spkout);
}

module_init(spkout_init);
module_exit(spkout_exit);
MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Speak Out synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
gpl-2.0
drgroovestarr/kernel_samsung_manta
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
4495
36230
/* * SH7724 Setup * * Copyright (C) 2009 Renesas Solutions Corp. * * Kuninori Morimoto <morimoto.kuninori@renesas.com> * * Based on SH7723 Setup * Copyright (C) 2008 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/mm.h> #include <linux/serial_sci.h> #include <linux/uio_driver.h> #include <linux/sh_dma.h> #include <linux/sh_timer.h> #include <linux/io.h> #include <linux/notifier.h> #include <asm/suspend.h> #include <asm/clock.h> #include <asm/mmzone.h> #include <cpu/dma-register.h> #include <cpu/sh7724.h> /* DMA */ static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = { { .slave_id = SHDMA_SLAVE_SCIF0_TX, .addr = 0xffe0000c, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x21, }, { .slave_id = SHDMA_SLAVE_SCIF0_RX, .addr = 0xffe00014, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x22, }, { .slave_id = SHDMA_SLAVE_SCIF1_TX, .addr = 0xffe1000c, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x25, }, { .slave_id = SHDMA_SLAVE_SCIF1_RX, .addr = 0xffe10014, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x26, }, { .slave_id = SHDMA_SLAVE_SCIF2_TX, .addr = 0xffe2000c, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x29, }, { .slave_id = SHDMA_SLAVE_SCIF2_RX, .addr = 0xffe20014, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x2a, }, { .slave_id = SHDMA_SLAVE_SCIF3_TX, .addr = 0xa4e30020, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x2d, }, { .slave_id = SHDMA_SLAVE_SCIF3_RX, .addr = 0xa4e30024, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x2e, }, { .slave_id = SHDMA_SLAVE_SCIF4_TX, .addr = 0xa4e40020, .chcr = DM_FIX 
| SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x31, }, { .slave_id = SHDMA_SLAVE_SCIF4_RX, .addr = 0xa4e40024, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x32, }, { .slave_id = SHDMA_SLAVE_SCIF5_TX, .addr = 0xa4e50020, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x35, }, { .slave_id = SHDMA_SLAVE_SCIF5_RX, .addr = 0xa4e50024, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x36, }, { .slave_id = SHDMA_SLAVE_USB0D0_TX, .addr = 0xA4D80100, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0x73, }, { .slave_id = SHDMA_SLAVE_USB0D0_RX, .addr = 0xA4D80100, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0x73, }, { .slave_id = SHDMA_SLAVE_USB0D1_TX, .addr = 0xA4D80120, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0x77, }, { .slave_id = SHDMA_SLAVE_USB0D1_RX, .addr = 0xA4D80120, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0x77, }, { .slave_id = SHDMA_SLAVE_USB1D0_TX, .addr = 0xA4D90100, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0xab, }, { .slave_id = SHDMA_SLAVE_USB1D0_RX, .addr = 0xA4D90100, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0xab, }, { .slave_id = SHDMA_SLAVE_USB1D1_TX, .addr = 0xA4D90120, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0xaf, }, { .slave_id = SHDMA_SLAVE_USB1D1_RX, .addr = 0xA4D90120, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), .mid_rid = 0xaf, }, { .slave_id = SHDMA_SLAVE_SDHI0_TX, .addr = 0x04ce0030, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), .mid_rid = 0xc1, }, { .slave_id = SHDMA_SLAVE_SDHI0_RX, .addr = 0x04ce0030, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), .mid_rid = 0xc2, }, { .slave_id = SHDMA_SLAVE_SDHI1_TX, .addr = 0x04cf0030, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), 
.mid_rid = 0xc9, }, { .slave_id = SHDMA_SLAVE_SDHI1_RX, .addr = 0x04cf0030, .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), .mid_rid = 0xca, }, }; static const struct sh_dmae_channel sh7724_dmae_channels[] = { { .offset = 0, .dmars = 0, .dmars_bit = 0, }, { .offset = 0x10, .dmars = 0, .dmars_bit = 8, }, { .offset = 0x20, .dmars = 4, .dmars_bit = 0, }, { .offset = 0x30, .dmars = 4, .dmars_bit = 8, }, { .offset = 0x50, .dmars = 8, .dmars_bit = 0, }, { .offset = 0x60, .dmars = 8, .dmars_bit = 8, } }; static const unsigned int ts_shift[] = TS_SHIFT; static struct sh_dmae_pdata dma_platform_data = { .slave = sh7724_dmae_slaves, .slave_num = ARRAY_SIZE(sh7724_dmae_slaves), .channel = sh7724_dmae_channels, .channel_num = ARRAY_SIZE(sh7724_dmae_channels), .ts_low_shift = CHCR_TS_LOW_SHIFT, .ts_low_mask = CHCR_TS_LOW_MASK, .ts_high_shift = CHCR_TS_HIGH_SHIFT, .ts_high_mask = CHCR_TS_HIGH_MASK, .ts_shift = ts_shift, .ts_shift_num = ARRAY_SIZE(ts_shift), .dmaor_init = DMAOR_INIT, }; /* Resource order important! */ static struct resource sh7724_dmae0_resources[] = { { /* Channel registers and DMAOR */ .start = 0xfe008020, .end = 0xfe00808f, .flags = IORESOURCE_MEM, }, { /* DMARSx */ .start = 0xfe009000, .end = 0xfe00900b, .flags = IORESOURCE_MEM, }, { .name = "error_irq", .start = 78, .end = 78, .flags = IORESOURCE_IRQ, }, { /* IRQ for channels 0-3 */ .start = 48, .end = 51, .flags = IORESOURCE_IRQ, }, { /* IRQ for channels 4-5 */ .start = 76, .end = 77, .flags = IORESOURCE_IRQ, }, }; /* Resource order important! 
*/ static struct resource sh7724_dmae1_resources[] = { { /* Channel registers and DMAOR */ .start = 0xfdc08020, .end = 0xfdc0808f, .flags = IORESOURCE_MEM, }, { /* DMARSx */ .start = 0xfdc09000, .end = 0xfdc0900b, .flags = IORESOURCE_MEM, }, { .name = "error_irq", .start = 74, .end = 74, .flags = IORESOURCE_IRQ, }, { /* IRQ for channels 0-3 */ .start = 40, .end = 43, .flags = IORESOURCE_IRQ, }, { /* IRQ for channels 4-5 */ .start = 72, .end = 73, .flags = IORESOURCE_IRQ, }, }; static struct platform_device dma0_device = { .name = "sh-dma-engine", .id = 0, .resource = sh7724_dmae0_resources, .num_resources = ARRAY_SIZE(sh7724_dmae0_resources), .dev = { .platform_data = &dma_platform_data, }, }; static struct platform_device dma1_device = { .name = "sh-dma-engine", .id = 1, .resource = sh7724_dmae1_resources, .num_resources = ARRAY_SIZE(sh7724_dmae1_resources), .dev = { .platform_data = &dma_platform_data, }, }; /* Serial */ static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 80, 80, 80, 80 }, .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct plat_sci_port scif1_platform_data = { .mapbase = 0xffe10000, .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 81, 81, 81, 81 }, .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; static struct plat_sci_port scif2_platform_data = { .mapbase = 0xffe20000, .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = 
SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 82, 82, 82, 82 }, .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, }; static struct platform_device scif2_device = { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; static struct plat_sci_port scif3_platform_data = { .mapbase = 0xa4e30000, .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 56, 56, 56, 56 }, }; static struct platform_device scif3_device = { .name = "sh-sci", .id = 3, .dev = { .platform_data = &scif3_platform_data, }, }; static struct plat_sci_port scif4_platform_data = { .mapbase = 0xa4e40000, .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 88, 88, 88, 88 }, }; static struct platform_device scif4_device = { .name = "sh-sci", .id = 4, .dev = { .platform_data = &scif4_platform_data, }, }; static struct plat_sci_port scif5_platform_data = { .mapbase = 0xa4e50000, .port_reg = SCIx_NOT_SUPPORTED, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_3, .type = PORT_SCIFA, .irqs = { 109, 109, 109, 109 }, }; static struct platform_device scif5_device = { .name = "sh-sci", .id = 5, .dev = { .platform_data = &scif5_platform_data, }, }; /* RTC */ static struct resource rtc_resources[] = { [0] = { .start = 0xa465fec0, .end = 0xa465fec0 + 0x58 - 1, .flags = IORESOURCE_IO, }, [1] = { /* Period IRQ */ .start = 69, .flags = IORESOURCE_IRQ, }, [2] = { /* Carry IRQ */ .start = 70, .flags = IORESOURCE_IRQ, }, [3] = { /* Alarm IRQ */ .start = 68, .flags = IORESOURCE_IRQ, }, }; static struct platform_device rtc_device = { .name = "sh-rtc", .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, }; /* I2C0 */ static struct resource iic0_resources[] = { [0] = { .name = "IIC0", .start = 0x04470000, .end = 0x04470018 - 1, .flags = IORESOURCE_MEM, 
}, [1] = { .start = 96, .end = 99, .flags = IORESOURCE_IRQ, }, }; static struct platform_device iic0_device = { .name = "i2c-sh_mobile", .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic0_resources), .resource = iic0_resources, }; /* I2C1 */ static struct resource iic1_resources[] = { [0] = { .name = "IIC1", .start = 0x04750000, .end = 0x04750018 - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = 92, .end = 95, .flags = IORESOURCE_IRQ, }, }; static struct platform_device iic1_device = { .name = "i2c-sh_mobile", .id = 1, /* "i2c1" clock */ .num_resources = ARRAY_SIZE(iic1_resources), .resource = iic1_resources, }; /* VPU */ static struct uio_info vpu_platform_data = { .name = "VPU5F", .version = "0", .irq = 60, }; static struct resource vpu_resources[] = { [0] = { .name = "VPU", .start = 0xfe900000, .end = 0xfe902807, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device vpu_device = { .name = "uio_pdrv_genirq", .id = 0, .dev = { .platform_data = &vpu_platform_data, }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), }; /* VEU0 */ static struct uio_info veu0_platform_data = { .name = "VEU3F0", .version = "0", .irq = 83, }; static struct resource veu0_resources[] = { [0] = { .name = "VEU3F0", .start = 0xfe920000, .end = 0xfe9200cb, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device veu0_device = { .name = "uio_pdrv_genirq", .id = 1, .dev = { .platform_data = &veu0_platform_data, }, .resource = veu0_resources, .num_resources = ARRAY_SIZE(veu0_resources), }; /* VEU1 */ static struct uio_info veu1_platform_data = { .name = "VEU3F1", .version = "0", .irq = 54, }; static struct resource veu1_resources[] = { [0] = { .name = "VEU3F1", .start = 0xfe924000, .end = 0xfe9240cb, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device veu1_device = { .name = 
"uio_pdrv_genirq", .id = 2, .dev = { .platform_data = &veu1_platform_data, }, .resource = veu1_resources, .num_resources = ARRAY_SIZE(veu1_resources), }; /* BEU0 */ static struct uio_info beu0_platform_data = { .name = "BEU0", .version = "0", .irq = evt2irq(0x8A0), }; static struct resource beu0_resources[] = { [0] = { .name = "BEU0", .start = 0xfe930000, .end = 0xfe933400, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device beu0_device = { .name = "uio_pdrv_genirq", .id = 6, .dev = { .platform_data = &beu0_platform_data, }, .resource = beu0_resources, .num_resources = ARRAY_SIZE(beu0_resources), }; /* BEU1 */ static struct uio_info beu1_platform_data = { .name = "BEU1", .version = "0", .irq = evt2irq(0xA00), }; static struct resource beu1_resources[] = { [0] = { .name = "BEU1", .start = 0xfe940000, .end = 0xfe943400, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device beu1_device = { .name = "uio_pdrv_genirq", .id = 7, .dev = { .platform_data = &beu1_platform_data, }, .resource = beu1_resources, .num_resources = ARRAY_SIZE(beu1_resources), }; static struct sh_timer_config cmt_platform_data = { .channel_offset = 0x60, .timer_bit = 5, .clockevent_rating = 125, .clocksource_rating = 200, }; static struct resource cmt_resources[] = { [0] = { .start = 0x044a0060, .end = 0x044a006b, .flags = IORESOURCE_MEM, }, [1] = { .start = 104, .flags = IORESOURCE_IRQ, }, }; static struct platform_device cmt_device = { .name = "sh_cmt", .id = 0, .dev = { .platform_data = &cmt_platform_data, }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), }; static struct sh_timer_config tmu0_platform_data = { .channel_offset = 0x04, .timer_bit = 0, .clockevent_rating = 200, }; static struct resource tmu0_resources[] = { [0] = { .start = 0xffd80008, .end = 0xffd80013, .flags = IORESOURCE_MEM, }, [1] = { .start = 16, .flags = IORESOURCE_IRQ, }, }; static 
struct platform_device tmu0_device = { .name = "sh_tmu", .id = 0, .dev = { .platform_data = &tmu0_platform_data, }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), }; static struct sh_timer_config tmu1_platform_data = { .channel_offset = 0x10, .timer_bit = 1, .clocksource_rating = 200, }; static struct resource tmu1_resources[] = { [0] = { .start = 0xffd80014, .end = 0xffd8001f, .flags = IORESOURCE_MEM, }, [1] = { .start = 17, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu1_device = { .name = "sh_tmu", .id = 1, .dev = { .platform_data = &tmu1_platform_data, }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), }; static struct sh_timer_config tmu2_platform_data = { .channel_offset = 0x1c, .timer_bit = 2, }; static struct resource tmu2_resources[] = { [0] = { .start = 0xffd80020, .end = 0xffd8002b, .flags = IORESOURCE_MEM, }, [1] = { .start = 18, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu2_device = { .name = "sh_tmu", .id = 2, .dev = { .platform_data = &tmu2_platform_data, }, .resource = tmu2_resources, .num_resources = ARRAY_SIZE(tmu2_resources), }; static struct sh_timer_config tmu3_platform_data = { .channel_offset = 0x04, .timer_bit = 0, }; static struct resource tmu3_resources[] = { [0] = { .start = 0xffd90008, .end = 0xffd90013, .flags = IORESOURCE_MEM, }, [1] = { .start = 57, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu3_device = { .name = "sh_tmu", .id = 3, .dev = { .platform_data = &tmu3_platform_data, }, .resource = tmu3_resources, .num_resources = ARRAY_SIZE(tmu3_resources), }; static struct sh_timer_config tmu4_platform_data = { .channel_offset = 0x10, .timer_bit = 1, }; static struct resource tmu4_resources[] = { [0] = { .start = 0xffd90014, .end = 0xffd9001f, .flags = IORESOURCE_MEM, }, [1] = { .start = 58, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu4_device = { .name = "sh_tmu", .id = 4, .dev = { .platform_data = 
&tmu4_platform_data, }, .resource = tmu4_resources, .num_resources = ARRAY_SIZE(tmu4_resources), }; static struct sh_timer_config tmu5_platform_data = { .channel_offset = 0x1c, .timer_bit = 2, }; static struct resource tmu5_resources[] = { [0] = { .start = 0xffd90020, .end = 0xffd9002b, .flags = IORESOURCE_MEM, }, [1] = { .start = 57, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu5_device = { .name = "sh_tmu", .id = 5, .dev = { .platform_data = &tmu5_platform_data, }, .resource = tmu5_resources, .num_resources = ARRAY_SIZE(tmu5_resources), }; /* JPU */ static struct uio_info jpu_platform_data = { .name = "JPU", .version = "0", .irq = 27, }; static struct resource jpu_resources[] = { [0] = { .name = "JPU", .start = 0xfe980000, .end = 0xfe9902d3, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device jpu_device = { .name = "uio_pdrv_genirq", .id = 3, .dev = { .platform_data = &jpu_platform_data, }, .resource = jpu_resources, .num_resources = ARRAY_SIZE(jpu_resources), }; /* SPU2DSP0 */ static struct uio_info spu0_platform_data = { .name = "SPU2DSP0", .version = "0", .irq = 86, }; static struct resource spu0_resources[] = { [0] = { .name = "SPU2DSP0", .start = 0xFE200000, .end = 0xFE2FFFFF, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device spu0_device = { .name = "uio_pdrv_genirq", .id = 4, .dev = { .platform_data = &spu0_platform_data, }, .resource = spu0_resources, .num_resources = ARRAY_SIZE(spu0_resources), }; /* SPU2DSP1 */ static struct uio_info spu1_platform_data = { .name = "SPU2DSP1", .version = "0", .irq = 87, }; static struct resource spu1_resources[] = { [0] = { .name = "SPU2DSP1", .start = 0xFE300000, .end = 0xFE3FFFFF, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device spu1_device = { .name = "uio_pdrv_genirq", .id = 5, .dev = { .platform_data = 
&spu1_platform_data, }, .resource = spu1_resources, .num_resources = ARRAY_SIZE(spu1_resources), }; static struct platform_device *sh7724_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &scif4_device, &scif5_device, &cmt_device, &tmu0_device, &tmu1_device, &tmu2_device, &tmu3_device, &tmu4_device, &tmu5_device, &dma0_device, &dma1_device, &rtc_device, &iic0_device, &iic1_device, &vpu_device, &veu0_device, &veu1_device, &beu0_device, &beu1_device, &jpu_device, &spu0_device, &spu1_device, }; static int __init sh7724_devices_setup(void) { platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20); platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20); platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20); return platform_add_devices(sh7724_devices, ARRAY_SIZE(sh7724_devices)); } arch_initcall(sh7724_devices_setup); static struct platform_device *sh7724_early_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &scif4_device, &scif5_device, &cmt_device, &tmu0_device, &tmu1_device, &tmu2_device, &tmu3_device, &tmu4_device, &tmu5_device, }; void __init plat_early_device_setup(void) { early_platform_add_devices(sh7724_early_devices, ARRAY_SIZE(sh7724_early_devices)); } #define RAMCR_CACHE_L2FC 0x0002 #define RAMCR_CACHE_L2E 0x0001 #define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC) void l2_cache_init(void) { /* Enable L2 cache */ __raw_writel(L2_CACHE_ENABLE, RAMCR); } enum { UNUSED = 0, ENABLED, DISABLED, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, HUDI, DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3, _2DG_TRI, _2DG_INI, _2DG_CEI, DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3, VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU, SCIFA3, VPU, TPU, CEU1, BEU1, USB0, USB1, 
ATAPI, RTC_ATI, RTC_PRI, RTC_CUI, DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR, KEYSC, SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2, VEU0, MSIOF_MSIOFI0, MSIOF_MSIOFI1, SPU_SPUI0, SPU_SPUI1, SCIFA4, ICB, ETHI, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI, CMT, TSIF, FSI, SCIFA5, TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, IRDA, JPU, _2DDMAC, MMC_MMC2I, MMC_MMC3I, LCDC, TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2, /* interrupt groups */ DMAC1A, _2DG, DMAC0A, VIO, USB, RTC, DMAC1B, DMAC0B, I2C0, I2C1, SDHI0, SDHI1, SPU, MMCIF, }; static struct intc_vect vectors[] __initdata = { INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0), INTC_VECT(DMAC1A_DEI0, 0x700), INTC_VECT(DMAC1A_DEI1, 0x720), INTC_VECT(DMAC1A_DEI2, 0x740), INTC_VECT(DMAC1A_DEI3, 0x760), INTC_VECT(_2DG_TRI, 0x780), INTC_VECT(_2DG_INI, 0x7A0), INTC_VECT(_2DG_CEI, 0x7C0), INTC_VECT(DMAC0A_DEI0, 0x800), INTC_VECT(DMAC0A_DEI1, 0x820), INTC_VECT(DMAC0A_DEI2, 0x840), INTC_VECT(DMAC0A_DEI3, 0x860), INTC_VECT(VIO_CEU0, 0x880), INTC_VECT(VIO_BEU0, 0x8A0), INTC_VECT(VIO_VEU1, 0x8C0), INTC_VECT(VIO_VOU, 0x8E0), INTC_VECT(SCIFA3, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(TPU, 0x9A0), INTC_VECT(CEU1, 0x9E0), INTC_VECT(BEU1, 0xA00), INTC_VECT(USB0, 0xA20), INTC_VECT(USB1, 0xA40), INTC_VECT(ATAPI, 0xA60), INTC_VECT(RTC_ATI, 0xA80), INTC_VECT(RTC_PRI, 0xAA0), INTC_VECT(RTC_CUI, 0xAC0), INTC_VECT(DMAC1B_DEI4, 0xB00), INTC_VECT(DMAC1B_DEI5, 0xB20), INTC_VECT(DMAC1B_DADERR, 0xB40), INTC_VECT(DMAC0B_DEI4, 0xB80), INTC_VECT(DMAC0B_DEI5, 0xBA0), INTC_VECT(DMAC0B_DADERR, 0xBC0), INTC_VECT(KEYSC, 0xBE0), INTC_VECT(SCIF_SCIF0, 0xC00), INTC_VECT(SCIF_SCIF1, 0xC20), INTC_VECT(SCIF_SCIF2, 0xC40), INTC_VECT(VEU0, 0xC60), INTC_VECT(MSIOF_MSIOFI0, 0xC80), INTC_VECT(MSIOF_MSIOFI1, 0xCA0), INTC_VECT(SPU_SPUI0, 0xCC0), INTC_VECT(SPU_SPUI1, 0xCE0), 
INTC_VECT(SCIFA4, 0xD00), INTC_VECT(ICB, 0xD20), INTC_VECT(ETHI, 0xD60), INTC_VECT(I2C1_ALI, 0xD80), INTC_VECT(I2C1_TACKI, 0xDA0), INTC_VECT(I2C1_WAITI, 0xDC0), INTC_VECT(I2C1_DTEI, 0xDE0), INTC_VECT(I2C0_ALI, 0xE00), INTC_VECT(I2C0_TACKI, 0xE20), INTC_VECT(I2C0_WAITI, 0xE40), INTC_VECT(I2C0_DTEI, 0xE60), INTC_VECT(SDHI0, 0xE80), INTC_VECT(SDHI0, 0xEA0), INTC_VECT(SDHI0, 0xEC0), INTC_VECT(SDHI0, 0xEE0), INTC_VECT(CMT, 0xF00), INTC_VECT(TSIF, 0xF20), INTC_VECT(FSI, 0xF80), INTC_VECT(SCIFA5, 0xFA0), INTC_VECT(TMU0_TUNI0, 0x400), INTC_VECT(TMU0_TUNI1, 0x420), INTC_VECT(TMU0_TUNI2, 0x440), INTC_VECT(IRDA, 0x480), INTC_VECT(SDHI1, 0x4E0), INTC_VECT(SDHI1, 0x500), INTC_VECT(SDHI1, 0x520), INTC_VECT(JPU, 0x560), INTC_VECT(_2DDMAC, 0x4A0), INTC_VECT(MMC_MMC2I, 0x5A0), INTC_VECT(MMC_MMC3I, 0x5C0), INTC_VECT(LCDC, 0xF40), INTC_VECT(TMU1_TUNI0, 0x920), INTC_VECT(TMU1_TUNI1, 0x940), INTC_VECT(TMU1_TUNI2, 0x960), }; static struct intc_group groups[] __initdata = { INTC_GROUP(DMAC1A, DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3), INTC_GROUP(_2DG, _2DG_TRI, _2DG_INI, _2DG_CEI), INTC_GROUP(DMAC0A, DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3), INTC_GROUP(VIO, VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU), INTC_GROUP(USB, USB0, USB1), INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), INTC_GROUP(DMAC1B, DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR), INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR), INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI), INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI), INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1), INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I), }; static struct intc_mask_reg mask_registers[] __initdata = { { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0, 0, ENABLED, ENABLED, ENABLED } }, { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0, DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } }, { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ { 0, 
0, 0, VPU, ATAPI, ETHI, 0, SCIFA3 } }, { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */ { DMAC1A_DEI3, DMAC1A_DEI2, DMAC1A_DEI1, DMAC1A_DEI0, SPU_SPUI1, SPU_SPUI0, BEU1, IRDA } }, { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */ { 0, TMU0_TUNI2, TMU0_TUNI1, TMU0_TUNI0, JPU, 0, 0, LCDC } }, { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */ { KEYSC, DMAC0B_DADERR, DMAC0B_DEI5, DMAC0B_DEI4, VEU0, SCIF_SCIF2, SCIF_SCIF1, SCIF_SCIF0 } }, { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */ { 0, 0, ICB, SCIFA4, CEU1, 0, MSIOF_MSIOFI1, MSIOF_MSIOFI0 } }, { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */ { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI, I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } }, { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, SCIFA5, FSI } }, { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ { 0, 0, 0, CMT, 0, USB1, USB0, 0 } }, { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ { 0, DMAC1B_DADERR, DMAC1B_DEI5, DMAC1B_DEI4, 0, RTC_CUI, RTC_PRI, RTC_ATI } }, { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */ { 0, _2DG_CEI, _2DG_INI, _2DG_TRI, 0, TPU, 0, TSIF } }, { 0xa40800b0, 0xa40800f0, 8, /* IMR12 / IMCR12 */ { 0, 0, MMC_MMC3I, MMC_MMC2I, 0, 0, 0, _2DDMAC } }, { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, IRDA } }, { 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, DMAC1A, BEU1 } }, { 0xa4080008, 0, 16, 4, /* IPRC */ { TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2, SPU } }, { 0xa408000c, 0, 16, 4, /* IPRD */ { 0, MMCIF, 0, ATAPI } }, { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0A, VIO, SCIFA3, VPU } }, { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC0B, USB, CMT } }, { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2, VEU0 } }, { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF_MSIOFI0, MSIOF_MSIOFI1, I2C1, I2C0 } }, { 
0xa4080020, 0, 16, 4, /* IPRI */ { SCIFA4, ICB, TSIF, _2DG } }, { 0xa4080024, 0, 16, 4, /* IPRJ */ { CEU1, ETHI, FSI, SDHI1 } }, { 0xa4080028, 0, 16, 4, /* IPRK */ { RTC, DMAC1B, 0, SDHI0 } }, { 0xa408002c, 0, 16, 4, /* IPRL */ { SCIFA5, 0, TPU, _2DDMAC } }, { 0xa4140010, 0, 32, 4, /* INTPRI00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_sense_reg sense_registers[] __initdata = { { 0xa414001c, 16, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_mask_reg ack_registers[] __initdata = { { 0xa4140024, 0, 8, /* INTREQ00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_desc intc_desc __initdata = { .name = "sh7724", .force_enable = ENABLED, .force_disable = DISABLED, .hw = INTC_HW_DESC(vectors, groups, mask_registers, prio_registers, sense_registers, ack_registers), }; void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); } static struct { /* BSC */ unsigned long mmselr; unsigned long cs0bcr; unsigned long cs4bcr; unsigned long cs5abcr; unsigned long cs5bbcr; unsigned long cs6abcr; unsigned long cs6bbcr; unsigned long cs4wcr; unsigned long cs5awcr; unsigned long cs5bwcr; unsigned long cs6awcr; unsigned long cs6bwcr; /* INTC */ unsigned short ipra; unsigned short iprb; unsigned short iprc; unsigned short iprd; unsigned short ipre; unsigned short iprf; unsigned short iprg; unsigned short iprh; unsigned short ipri; unsigned short iprj; unsigned short iprk; unsigned short iprl; unsigned char imr0; unsigned char imr1; unsigned char imr2; unsigned char imr3; unsigned char imr4; unsigned char imr5; unsigned char imr6; unsigned char imr7; unsigned char imr8; unsigned char imr9; unsigned char imr10; unsigned char imr11; unsigned char imr12; /* RWDT */ unsigned short rwtcnt; unsigned short rwtcsr; /* CPG */ unsigned long irdaclk; unsigned long spuclk; } sh7724_rstandby_state; static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb, unsigned long 
flags, void *unused) { if (!(flags & SUSP_SH_RSTANDBY)) return NOTIFY_DONE; /* BCR */ sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */ sh7724_rstandby_state.mmselr |= 0xa5a50000; sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */ sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */ sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */ sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */ sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */ sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */ sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */ sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */ sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */ sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */ sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */ /* INTC */ sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */ sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */ sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */ sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */ sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */ sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */ sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */ sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */ sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */ sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */ sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */ sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */ sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */ sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */ sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */ sh7724_rstandby_state.imr3 = 
__raw_readb(0xa408008c); /* IMR3 */ sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */ sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */ sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */ sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */ sh7724_rstandby_state.imr8 = __raw_readb(0xa40800a0); /* IMR8 */ sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */ sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */ sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */ sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */ /* RWDT */ sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */ sh7724_rstandby_state.rwtcnt |= 0x5a00; sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */ sh7724_rstandby_state.rwtcsr |= 0xa500; __raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004); /* CPG */ sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */ sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */ return NOTIFY_DONE; } static int sh7724_post_sleep_notifier_call(struct notifier_block *nb, unsigned long flags, void *unused) { if (!(flags & SUSP_SH_RSTANDBY)) return NOTIFY_DONE; /* BCR */ __raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */ __raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */ __raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */ __raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */ __raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */ __raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */ __raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */ __raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */ __raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */ __raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */ 
__raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */ __raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */ /* INTC */ __raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */ __raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */ __raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */ __raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */ __raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */ __raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */ __raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */ __raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */ __raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */ __raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */ __raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */ __raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */ __raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */ __raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */ __raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */ __raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */ __raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */ __raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */ __raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */ __raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */ __raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */ __raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */ __raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */ __raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */ __raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */ /* RWDT */ __raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */ __raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */ /* CPG */ __raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); 
/* IRDACLKCR */ __raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */ return NOTIFY_DONE; } static struct notifier_block sh7724_pre_sleep_notifier = { .notifier_call = sh7724_pre_sleep_notifier_call, .priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU), }; static struct notifier_block sh7724_post_sleep_notifier = { .notifier_call = sh7724_post_sleep_notifier_call, .priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU), }; static int __init sh7724_sleep_setup(void) { atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list, &sh7724_pre_sleep_notifier); atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list, &sh7724_post_sleep_notifier); return 0; } arch_initcall(sh7724_sleep_setup);
gpl-2.0
tako0910/android_kernel_htc_valentewx
drivers/watchdog/iop_wdt.c
4751
6130
/* * drivers/char/watchdog/iop_wdt.c * * WDT driver for Intel I/O Processors * Copyright (C) 2005, Intel Corporation. * * Based on ixp4xx driver, Copyright 2004 (c) MontaVista, Software, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Curt E Bruns <curt.e.bruns@intel.com> * Peter Milne <peter.milne@d-tacq.com> * Dan Williams <dan.j.williams@intel.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/uaccess.h> #include <mach/hardware.h> static int nowayout = WATCHDOG_NOWAYOUT; static unsigned long wdt_status; static unsigned long boot_status; static spinlock_t wdt_lock; #define WDT_IN_USE 0 #define WDT_OK_TO_CLOSE 1 #define WDT_ENABLED 2 static unsigned long iop_watchdog_timeout(void) { return (0xffffffffUL / get_iop_tick_rate()); } /** * wdt_supports_disable - determine if we are accessing a iop13xx watchdog * or iop3xx by whether it has a disable command */ static int wdt_supports_disable(void) { int can_disable; if (IOP_WDTCR_EN_ARM != IOP_WDTCR_DIS_ARM) can_disable = 1; else can_disable = 0; return can_disable; } static void wdt_enable(void) { /* Arm and enable the Timer to starting counting down from 0xFFFF.FFFF * Takes approx. 
10.7s to timeout */ spin_lock(&wdt_lock); write_wdtcr(IOP_WDTCR_EN_ARM); write_wdtcr(IOP_WDTCR_EN); spin_unlock(&wdt_lock); } /* returns 0 if the timer was successfully disabled */ static int wdt_disable(void) { /* Stop Counting */ if (wdt_supports_disable()) { spin_lock(&wdt_lock); write_wdtcr(IOP_WDTCR_DIS_ARM); write_wdtcr(IOP_WDTCR_DIS); clear_bit(WDT_ENABLED, &wdt_status); spin_unlock(&wdt_lock); printk(KERN_INFO "WATCHDOG: Disabled\n"); return 0; } else return 1; } static int iop_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(WDT_IN_USE, &wdt_status)) return -EBUSY; clear_bit(WDT_OK_TO_CLOSE, &wdt_status); wdt_enable(); set_bit(WDT_ENABLED, &wdt_status); return nonseekable_open(inode, file); } static ssize_t iop_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) { if (len) { if (!nowayout) { size_t i; clear_bit(WDT_OK_TO_CLOSE, &wdt_status); for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') set_bit(WDT_OK_TO_CLOSE, &wdt_status); } } wdt_enable(); } return len; } static const struct watchdog_info ident = { .options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "iop watchdog", }; static long iop_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int options; int ret = -ENOTTY; int __user *argp = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user(argp, &ident, sizeof(ident))) ret = -EFAULT; else ret = 0; break; case WDIOC_GETSTATUS: ret = put_user(0, argp); break; case WDIOC_GETBOOTSTATUS: ret = put_user(boot_status, argp); break; case WDIOC_SETOPTIONS: if (get_user(options, (int *)arg)) return -EFAULT; if (options & WDIOS_DISABLECARD) { if (!nowayout) { if (wdt_disable() == 0) { set_bit(WDT_OK_TO_CLOSE, &wdt_status); ret = 0; } else ret = -ENXIO; } else ret = 0; } if (options & WDIOS_ENABLECARD) { wdt_enable(); ret = 0; } break; case WDIOC_KEEPALIVE: wdt_enable(); ret = 0; break; case WDIOC_GETTIMEOUT: ret 
= put_user(iop_watchdog_timeout(), argp); break; } return ret; } static int iop_wdt_release(struct inode *inode, struct file *file) { int state = 1; if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) if (test_bit(WDT_ENABLED, &wdt_status)) state = wdt_disable(); /* if the timer is not disabled reload and notify that we are still * going down */ if (state != 0) { wdt_enable(); printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " "reset in %lu seconds\n", iop_watchdog_timeout()); } clear_bit(WDT_IN_USE, &wdt_status); clear_bit(WDT_OK_TO_CLOSE, &wdt_status); return 0; } static const struct file_operations iop_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = iop_wdt_write, .unlocked_ioctl = iop_wdt_ioctl, .open = iop_wdt_open, .release = iop_wdt_release, }; static struct miscdevice iop_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &iop_wdt_fops, }; static int __init iop_wdt_init(void) { int ret; spin_lock_init(&wdt_lock); /* check if the reset was caused by the watchdog timer */ boot_status = (read_rcsr() & IOP_RCSR_WDT) ? WDIOF_CARDRESET : 0; /* Configure Watchdog Timeout to cause an Internal Bus (IB) Reset * NOTE: An IB Reset will Reset both cores in the IOP342 */ write_wdtsr(IOP13XX_WDTCR_IB_RESET); /* Register after we have the device set up so we cannot race with an open */ ret = misc_register(&iop_wdt_miscdev); if (ret == 0) printk(KERN_INFO "iop watchdog timer: timeout %lu sec\n", iop_watchdog_timeout()); return ret; } static void __exit iop_wdt_exit(void) { misc_deregister(&iop_wdt_miscdev); } module_init(iop_wdt_init); module_exit(iop_wdt_exit); module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); MODULE_AUTHOR("Curt E Bruns <curt.e.bruns@intel.com>"); MODULE_DESCRIPTION("iop watchdog timer driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
htc-m8-caf/android_kernel_htc_msm8974
fs/btrfs/dir-item.c
5007
11931
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include "ctree.h" #include "disk-io.h" #include "hash.h" #include "transaction.h" /* * insert a name into a directory, doing overflow properly if there is a hash * collision. data_size indicates how big the item inserted should be. On * success a struct btrfs_dir_item pointer is returned, otherwise it is * an ERR_PTR. * * The name is not copied into the dir item, you have to do that yourself. 
*/ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *cpu_key, u32 data_size, const char *name, int name_len) { int ret; char *ptr; struct btrfs_item *item; struct extent_buffer *leaf; ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); if (ret == -EEXIST) { struct btrfs_dir_item *di; di = btrfs_match_dir_item_name(root, path, name, name_len); if (di) return ERR_PTR(-EEXIST); btrfs_extend_item(trans, root, path, data_size); } else if (ret < 0) return ERR_PTR(ret); WARN_ON(ret > 0); leaf = path->nodes[0]; item = btrfs_item_nr(leaf, path->slots[0]); ptr = btrfs_item_ptr(leaf, path->slots[0], char); BUG_ON(data_size > btrfs_item_size(leaf, item)); ptr += btrfs_item_size(leaf, item) - data_size; return (struct btrfs_dir_item *)ptr; } /* * xattrs work a lot like directories, this inserts an xattr item * into the tree */ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, const char *name, u16 name_len, const void *data, u16 data_len) { int ret = 0; struct btrfs_dir_item *dir_item; unsigned long name_ptr, data_ptr; struct btrfs_key key, location; struct btrfs_disk_key disk_key; struct extent_buffer *leaf; u32 data_size; BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)); key.objectid = objectid; btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); data_size = sizeof(*dir_item) + name_len + data_len; dir_item = insert_with_overflow(trans, root, path, &key, data_size, name, name_len); if (IS_ERR(dir_item)) return PTR_ERR(dir_item); memset(&location, 0, sizeof(location)); leaf = path->nodes[0]; btrfs_cpu_key_to_disk(&disk_key, &location); btrfs_set_dir_item_key(leaf, dir_item, &disk_key); btrfs_set_dir_type(leaf, dir_item, BTRFS_FT_XATTR); btrfs_set_dir_name_len(leaf, dir_item, name_len); btrfs_set_dir_transid(leaf, dir_item, 
trans->transid); btrfs_set_dir_data_len(leaf, dir_item, data_len); name_ptr = (unsigned long)(dir_item + 1); data_ptr = (unsigned long)((char *)name_ptr + name_len); write_extent_buffer(leaf, name, name_ptr, name_len); write_extent_buffer(leaf, data, data_ptr, data_len); btrfs_mark_buffer_dirty(path->nodes[0]); return ret; } /* * insert a directory item in the tree, doing all the magic for * both indexes. 'dir' indicates which objectid to insert it into, * 'location' is the key to stuff into the directory item, 'type' is the * type of the inode we're pointing to, and 'index' is the sequence number * to use for the second index (if one is created). * Will return 0 or -ENOMEM */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, struct inode *dir, struct btrfs_key *location, u8 type, u64 index) { int ret = 0; int ret2 = 0; struct btrfs_path *path; struct btrfs_dir_item *dir_item; struct extent_buffer *leaf; unsigned long name_ptr; struct btrfs_key key; struct btrfs_disk_key disk_key; u32 data_size; key.objectid = btrfs_ino(dir); btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->leave_spinning = 1; btrfs_cpu_key_to_disk(&disk_key, location); data_size = sizeof(*dir_item) + name_len; dir_item = insert_with_overflow(trans, root, path, &key, data_size, name, name_len); if (IS_ERR(dir_item)) { ret = PTR_ERR(dir_item); if (ret == -EEXIST) goto second_insert; goto out_free; } leaf = path->nodes[0]; btrfs_set_dir_item_key(leaf, dir_item, &disk_key); btrfs_set_dir_type(leaf, dir_item, type); btrfs_set_dir_data_len(leaf, dir_item, 0); btrfs_set_dir_name_len(leaf, dir_item, name_len); btrfs_set_dir_transid(leaf, dir_item, trans->transid); name_ptr = (unsigned long)(dir_item + 1); write_extent_buffer(leaf, name, name_ptr, name_len); btrfs_mark_buffer_dirty(leaf); second_insert: /* FIXME, use some real flag for 
selecting the extra index */ if (root == root->fs_info->tree_root) { ret = 0; goto out_free; } btrfs_release_path(path); ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir, &disk_key, type, index); out_free: btrfs_free_path(path); if (ret) return ret; if (ret2) return ret2; return 0; } /* * lookup a directory item based on name. 'dir' is the objectid * we're searching in, and 'mod' tells us if you plan on deleting the * item (use mod < 0) or changing the options (use mod > 0) */ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, const char *name, int name_len, int mod) { int ret; struct btrfs_key key; int ins_len = mod < 0 ? -1 : 0; int cow = mod != 0; key.objectid = dir; btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); if (ret < 0) return ERR_PTR(ret); if (ret > 0) return NULL; return btrfs_match_dir_item_name(root, path, name, name_len); } /* * lookup a directory item based on index. 'dir' is the objectid * we're searching in, and 'mod' tells us if you plan on deleting the * item (use mod < 0) or changing the options (use mod > 0) * * The name is used to make sure the index really points to the name you were * looking for. */ struct btrfs_dir_item * btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, u64 objectid, const char *name, int name_len, int mod) { int ret; struct btrfs_key key; int ins_len = mod < 0 ? 
-1 : 0; int cow = mod != 0; key.objectid = dir; btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); key.offset = objectid; ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); if (ret < 0) return ERR_PTR(ret); if (ret > 0) return ERR_PTR(-ENOENT); return btrfs_match_dir_item_name(root, path, name, name_len); } struct btrfs_dir_item * btrfs_search_dir_index_item(struct btrfs_root *root, struct btrfs_path *path, u64 dirid, const char *name, int name_len) { struct extent_buffer *leaf; struct btrfs_dir_item *di; struct btrfs_key key; u32 nritems; int ret; key.objectid = dirid; key.type = BTRFS_DIR_INDEX_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ERR_PTR(ret); leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); while (1) { if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); if (ret < 0) return ERR_PTR(ret); if (ret > 0) break; leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); continue; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY) break; di = btrfs_match_dir_item_name(root, path, name, name_len); if (di) return di; path->slots[0]++; } return NULL; } struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, const char *name, u16 name_len, int mod) { int ret; struct btrfs_key key; int ins_len = mod < 0 ? -1 : 0; int cow = mod != 0; key.objectid = dir; btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); if (ret < 0) return ERR_PTR(ret); if (ret > 0) return NULL; return btrfs_match_dir_item_name(root, path, name, name_len); } /* * helper function to look at the directory item pointed to by 'path' * this walks through all the entries in a dir item and finds one * for a specific name. 
*/ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, struct btrfs_path *path, const char *name, int name_len) { struct btrfs_dir_item *dir_item; unsigned long name_ptr; u32 total_len; u32 cur = 0; u32 this_len; struct extent_buffer *leaf; leaf = path->nodes[0]; dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); if (verify_dir_item(root, leaf, dir_item)) return NULL; total_len = btrfs_item_size_nr(leaf, path->slots[0]); while (cur < total_len) { this_len = sizeof(*dir_item) + btrfs_dir_name_len(leaf, dir_item) + btrfs_dir_data_len(leaf, dir_item); name_ptr = (unsigned long)(dir_item + 1); if (btrfs_dir_name_len(leaf, dir_item) == name_len && memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) return dir_item; cur += this_len; dir_item = (struct btrfs_dir_item *)((char *)dir_item + this_len); } return NULL; } /* * given a pointer into a directory item, delete it. This * handles items that have more than one entry in them. */ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_dir_item *di) { struct extent_buffer *leaf; u32 sub_item_len; u32 item_len; int ret = 0; leaf = path->nodes[0]; sub_item_len = sizeof(*di) + btrfs_dir_name_len(leaf, di) + btrfs_dir_data_len(leaf, di); item_len = btrfs_item_size_nr(leaf, path->slots[0]); if (sub_item_len == item_len) { ret = btrfs_del_item(trans, root, path); } else { /* MARKER */ unsigned long ptr = (unsigned long)di; unsigned long start; start = btrfs_item_ptr_offset(leaf, path->slots[0]); memmove_extent_buffer(leaf, ptr, ptr + sub_item_len, item_len - (ptr + sub_item_len - start)); btrfs_truncate_item(trans, root, path, item_len - sub_item_len, 1); } return ret; } int verify_dir_item(struct btrfs_root *root, struct extent_buffer *leaf, struct btrfs_dir_item *dir_item) { u16 namelen = BTRFS_NAME_LEN; u8 type = btrfs_dir_type(leaf, dir_item); if (type >= BTRFS_FT_MAX) { printk(KERN_CRIT "btrfs: invalid 
dir item type: %d\n", (int)type); return 1; } if (type == BTRFS_FT_XATTR) namelen = XATTR_NAME_MAX; if (btrfs_dir_name_len(leaf, dir_item) > namelen) { printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n", (unsigned)btrfs_dir_data_len(leaf, dir_item)); return 1; } /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */ if (btrfs_dir_data_len(leaf, dir_item) > BTRFS_MAX_XATTR_SIZE(root)) { printk(KERN_CRIT "btrfs: invalid dir item data len: %u\n", (unsigned)btrfs_dir_data_len(leaf, dir_item)); return 1; } return 0; }
gpl-2.0
NoelMacwan/SXDHuashan
drivers/net/wireless/b43/tables_phy_lcn.c
5519
25194
/* Broadcom B43 wireless driver IEEE 802.11n LCN-PHY data tables Copyright (c) 2011 Rafał Miłecki <zajec5@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "tables_phy_lcn.h" #include "phy_common.h" #include "phy_lcn.h" struct b43_lcntab_tx_gain_tbl_entry { u8 gm; u8 pga; u8 pad; u8 dac; u8 bb_mult; }; /************************************************** * Static tables. 
**************************************************/ static const u16 b43_lcntab_0x02[] = { 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, 0x014d, }; static const u16 b43_lcntab_0x01[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const u32 b43_lcntab_0x0b[] = { 0x000141f8, 0x000021f8, 0x000021fb, 0x000041fb, 0x0001fedb, 0x0000217b, 0x00002133, 0x000040eb, 0x0001fea3, 0x0000024b, }; static const u32 b43_lcntab_0x0c[] = { 0x00100001, 0x00200010, 0x00300001, 0x00400010, 0x00500022, 0x00600122, 0x00700222, 0x00800322, 0x00900422, 0x00a00522, 0x00b00622, 0x00c00722, 0x00d00822, 0x00f00922, 0x00100a22, 0x00200b22, 0x00300c22, 0x00400d22, 0x00500e22, 0x00600f22, }; static const u32 b43_lcntab_0x0d[] = { 0x00000000, 0x00000000, 0x10000000, 0x00000000, 0x20000000, 0x00000000, 0x30000000, 0x00000000, 0x40000000, 0x00000000, 0x50000000, 0x00000000, 0x60000000, 0x00000000, 0x70000000, 0x00000000, 0x80000000, 0x00000000, 0x90000000, 0x00000008, 0xa0000000, 0x00000008, 0xb0000000, 0x00000008, 0xc0000000, 0x00000008, 0xd0000000, 0x00000008, 0xe0000000, 
0x00000008, 0xf0000000, 0x00000008, 0x00000000, 0x00000009, 0x10000000, 0x00000009, 0x20000000, 0x00000019, 0x30000000, 0x00000019, 0x40000000, 0x00000019, 0x50000000, 0x00000019, 0x60000000, 0x00000019, 0x70000000, 0x00000019, 0x80000000, 0x00000019, 0x90000000, 0x00000019, 0xa0000000, 0x00000019, 0xb0000000, 0x00000019, 0xc0000000, 0x00000019, 0xd0000000, 0x00000019, 0xe0000000, 0x00000019, 0xf0000000, 0x00000019, 0x00000000, 0x0000001a, 0x10000000, 0x0000001a, 0x20000000, 0x0000001a, 0x30000000, 0x0000001a, 0x40000000, 0x0000001a, 0x50000000, 0x00000002, 0x60000000, 0x00000002, 0x70000000, 0x00000002, 0x80000000, 0x00000002, 0x90000000, 0x00000002, 0xa0000000, 0x00000002, 0xb0000000, 0x00000002, 0xc0000000, 0x0000000a, 0xd0000000, 0x0000000a, 0xe0000000, 0x0000000a, 0xf0000000, 0x0000000a, 0x00000000, 0x0000000b, 0x10000000, 0x0000000b, 0x20000000, 0x0000000b, 0x30000000, 0x0000000b, 0x40000000, 0x0000000b, 0x50000000, 0x0000001b, 0x60000000, 0x0000001b, 0x70000000, 0x0000001b, 0x80000000, 0x0000001b, 0x90000000, 0x0000001b, 0xa0000000, 0x0000001b, 0xb0000000, 0x0000001b, 0xc0000000, 0x0000001b, 0xd0000000, 0x0000001b, 0xe0000000, 0x0000001b, 0xf0000000, 0x0000001b, 0x00000000, 0x0000001c, 0x10000000, 0x0000001c, 0x20000000, 0x0000001c, 0x30000000, 0x0000001c, 0x40000000, 0x0000001c, 0x50000000, 0x0000001c, 0x60000000, 0x0000001c, 0x70000000, 0x0000001c, 0x80000000, 0x0000001c, 0x90000000, 0x0000001c, }; static const u16 b43_lcntab_0x0e[] = { 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0406, 0x0407, 0x0408, 0x0409, 0x040a, 0x058b, 0x058c, 0x058d, 0x058e, 0x058f, 0x0090, 0x0091, 0x0092, 0x0193, 0x0194, 0x0195, 0x0196, 0x0197, 0x0198, 0x0199, 0x019a, 0x019b, 0x019c, 0x019d, 0x019e, 0x019f, 0x01a0, 0x01a1, 0x01a2, 0x01a3, 0x01a4, 0x01a5, 0x0000, }; static const u16 b43_lcntab_0x0f[] = { 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 
0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, 0x000a, 0x0009, 0x0006, 0x0005, }; static const u16 b43_lcntab_0x10[] = { 0x005f, 0x0036, 0x0029, 0x001f, 0x005f, 0x0036, 0x0029, 0x001f, 0x005f, 0x0036, 0x0029, 0x001f, 0x005f, 0x0036, 0x0029, 0x001f, }; static const u16 b43_lcntab_0x11[] = { 0x0009, 0x000f, 0x0014, 0x0018, 0x00fe, 0x0007, 0x000b, 0x000f, 0x00fb, 0x00fe, 0x0001, 0x0005, 0x0008, 0x000b, 0x000e, 0x0011, 0x0014, 0x0017, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0003, 0x0006, 0x0009, 0x000c, 0x000f, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0003, 0x0006, 0x0009, 0x000c, 0x000f, 0x0012, 0x0015, 0x0018, 0x001b, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0003, 0x00eb, 0x0000, 0x0000, }; static const u32 b43_lcntab_0x12[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000000, 0x00000004, 0x00000008, 0x00000001, 0x00000005, 0x00000009, 0x0000000d, 0x0000004d, 0x0000008d, 0x0000000d, 0x0000004d, 0x0000008d, 0x000000cd, 0x0000004f, 0x0000008f, 0x000000cf, 0x000000d3, 0x00000113, 0x00000513, 0x00000913, 0x00000953, 0x00000d53, 0x00001153, 0x00001193, 0x00005193, 0x00009193, 0x0000d193, 0x00011193, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000000, 0x00000004, 0x00000008, 0x00000001, 0x00000005, 0x00000009, 0x0000000d, 0x0000004d, 0x0000008d, 0x0000000d, 0x0000004d, 0x0000008d, 0x000000cd, 0x0000004f, 0x0000008f, 0x000000cf, 0x000000d3, 0x00000113, 0x00000513, 0x00000913, 0x00000953, 0x00000d53, 0x00001153, 0x00005153, 0x00009153, 0x0000d153, 0x00011153, 0x00015153, 0x00019153, 0x0001d153, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; static const u16 b43_lcntab_0x14[] = { 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0002, 0x0003, 0x0001, 0x0003, 0x0002, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0002, 0x0003, 0x0001, 0x0003, 0x0002, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, }; static const u16 b43_lcntab_0x17[] = { 0x001a, 0x0034, 0x004e, 0x0068, 0x009c, 0x00d0, 0x00ea, 0x0104, 0x0034, 0x0068, 0x009c, 0x00d0, 0x0138, 0x01a0, 0x01d4, 0x0208, 0x004e, 0x009c, 0x00ea, 0x0138, 0x01d4, 0x0270, 0x02be, 0x030c, 0x0068, 0x00d0, 0x0138, 0x01a0, 0x0270, 0x0340, 0x03a8, 0x0410, 0x0018, 0x009c, 0x00d0, 0x0104, 0x00ea, 0x0138, 0x0186, 0x00d0, 0x0104, 0x0104, 0x0138, 0x016c, 0x016c, 0x01a0, 0x0138, 0x0186, 0x0186, 0x01d4, 0x0222, 0x0222, 0x0270, 0x0104, 0x0138, 0x016c, 0x0138, 0x016c, 0x01a0, 0x01d4, 0x01a0, 0x01d4, 0x0208, 0x0208, 0x023c, 0x0186, 0x01d4, 0x0222, 0x01d4, 0x0222, 0x0270, 0x02be, 0x0270, 0x02be, 0x030c, 0x030c, 0x035a, 0x0036, 0x006c, 0x00a2, 0x00d8, 0x0144, 0x01b0, 0x01e6, 
0x021c, 0x006c, 0x00d8, 0x0144, 0x01b0, 0x0288, 0x0360, 0x03cc, 0x0438, 0x00a2, 0x0144, 0x01e6, 0x0288, 0x03cc, 0x0510, 0x05b2, 0x0654, 0x00d8, 0x01b0, 0x0288, 0x0360, 0x0510, 0x06c0, 0x0798, 0x0870, 0x0018, 0x0144, 0x01b0, 0x021c, 0x01e6, 0x0288, 0x032a, 0x01b0, 0x021c, 0x021c, 0x0288, 0x02f4, 0x02f4, 0x0360, 0x0288, 0x032a, 0x032a, 0x03cc, 0x046e, 0x046e, 0x0510, 0x021c, 0x0288, 0x02f4, 0x0288, 0x02f4, 0x0360, 0x03cc, 0x0360, 0x03cc, 0x0438, 0x0438, 0x04a4, 0x032a, 0x03cc, 0x046e, 0x03cc, 0x046e, 0x0510, 0x05b2, 0x0510, 0x05b2, 0x0654, 0x0654, 0x06f6, }; static const u16 b43_lcntab_0x00[] = { 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const u32 b43_lcntab_0x18[] = { 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 
0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000, }; /************************************************** * TX gain. 
**************************************************/ const struct b43_lcntab_tx_gain_tbl_entry b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0[B43_LCNTAB_TX_GAIN_SIZE] = { { 0x03, 0x00, 0x1f, 0x0, 0x48 }, { 0x03, 0x00, 0x1f, 0x0, 0x46 }, { 0x03, 0x00, 0x1f, 0x0, 0x44 }, { 0x03, 0x00, 0x1e, 0x0, 0x43 }, { 0x03, 0x00, 0x1d, 0x0, 0x44 }, { 0x03, 0x00, 0x1c, 0x0, 0x44 }, { 0x03, 0x00, 0x1b, 0x0, 0x45 }, { 0x03, 0x00, 0x1a, 0x0, 0x46 }, { 0x03, 0x00, 0x19, 0x0, 0x46 }, { 0x03, 0x00, 0x18, 0x0, 0x47 }, { 0x03, 0x00, 0x17, 0x0, 0x48 }, { 0x03, 0x00, 0x17, 0x0, 0x46 }, { 0x03, 0x00, 0x16, 0x0, 0x47 }, { 0x03, 0x00, 0x15, 0x0, 0x48 }, { 0x03, 0x00, 0x15, 0x0, 0x46 }, { 0x03, 0x00, 0x15, 0x0, 0x44 }, { 0x03, 0x00, 0x15, 0x0, 0x42 }, { 0x03, 0x00, 0x15, 0x0, 0x40 }, { 0x03, 0x00, 0x15, 0x0, 0x3f }, { 0x03, 0x00, 0x14, 0x0, 0x40 }, { 0x03, 0x00, 0x13, 0x0, 0x41 }, { 0x03, 0x00, 0x13, 0x0, 0x40 }, { 0x03, 0x00, 0x12, 0x0, 0x41 }, { 0x03, 0x00, 0x12, 0x0, 0x40 }, { 0x03, 0x00, 0x11, 0x0, 0x41 }, { 0x03, 0x00, 0x11, 0x0, 0x40 }, { 0x03, 0x00, 0x10, 0x0, 0x41 }, { 0x03, 0x00, 0x10, 0x0, 0x40 }, { 0x03, 0x00, 0x10, 0x0, 0x3e }, { 0x03, 0x00, 0x10, 0x0, 0x3c }, { 0x03, 0x00, 0x10, 0x0, 0x3a }, { 0x03, 0x00, 0x0f, 0x0, 0x3d }, { 0x03, 0x00, 0x0f, 0x0, 0x3b }, { 0x03, 0x00, 0x0e, 0x0, 0x3d }, { 0x03, 0x00, 0x0e, 0x0, 0x3c }, { 0x03, 0x00, 0x0e, 0x0, 0x3a }, { 0x03, 0x00, 0x0d, 0x0, 0x3c }, { 0x03, 0x00, 0x0d, 0x0, 0x3b }, { 0x03, 0x00, 0x0c, 0x0, 0x3e }, { 0x03, 0x00, 0x0c, 0x0, 0x3c }, { 0x03, 0x00, 0x0c, 0x0, 0x3a }, { 0x03, 0x00, 0x0b, 0x0, 0x3e }, { 0x03, 0x00, 0x0b, 0x0, 0x3c }, { 0x03, 0x00, 0x0b, 0x0, 0x3b }, { 0x03, 0x00, 0x0b, 0x0, 0x39 }, { 0x03, 0x00, 0x0a, 0x0, 0x3d }, { 0x03, 0x00, 0x0a, 0x0, 0x3b }, { 0x03, 0x00, 0x0a, 0x0, 0x39 }, { 0x03, 0x00, 0x09, 0x0, 0x3e }, { 0x03, 0x00, 0x09, 0x0, 0x3c }, { 0x03, 0x00, 0x09, 0x0, 0x3a }, { 0x03, 0x00, 0x09, 0x0, 0x39 }, { 0x03, 0x00, 0x08, 0x0, 0x3e }, { 0x03, 0x00, 0x08, 0x0, 0x3c }, { 0x03, 0x00, 0x08, 0x0, 0x3a }, { 0x03, 0x00, 0x08, 
0x0, 0x39 }, { 0x03, 0x00, 0x08, 0x0, 0x37 }, { 0x03, 0x00, 0x07, 0x0, 0x3d }, { 0x03, 0x00, 0x07, 0x0, 0x3c }, { 0x03, 0x00, 0x07, 0x0, 0x3a }, { 0x03, 0x00, 0x07, 0x0, 0x38 }, { 0x03, 0x00, 0x07, 0x0, 0x37 }, { 0x03, 0x00, 0x06, 0x0, 0x3e }, { 0x03, 0x00, 0x06, 0x0, 0x3c }, { 0x03, 0x00, 0x06, 0x0, 0x3a }, { 0x03, 0x00, 0x06, 0x0, 0x39 }, { 0x03, 0x00, 0x06, 0x0, 0x37 }, { 0x03, 0x00, 0x06, 0x0, 0x36 }, { 0x03, 0x00, 0x06, 0x0, 0x34 }, { 0x03, 0x00, 0x05, 0x0, 0x3d }, { 0x03, 0x00, 0x05, 0x0, 0x3b }, { 0x03, 0x00, 0x05, 0x0, 0x39 }, { 0x03, 0x00, 0x05, 0x0, 0x38 }, { 0x03, 0x00, 0x05, 0x0, 0x36 }, { 0x03, 0x00, 0x05, 0x0, 0x35 }, { 0x03, 0x00, 0x05, 0x0, 0x33 }, { 0x03, 0x00, 0x04, 0x0, 0x3e }, { 0x03, 0x00, 0x04, 0x0, 0x3c }, { 0x03, 0x00, 0x04, 0x0, 0x3a }, { 0x03, 0x00, 0x04, 0x0, 0x39 }, { 0x03, 0x00, 0x04, 0x0, 0x37 }, { 0x03, 0x00, 0x04, 0x0, 0x36 }, { 0x03, 0x00, 0x04, 0x0, 0x34 }, { 0x03, 0x00, 0x04, 0x0, 0x33 }, { 0x03, 0x00, 0x04, 0x0, 0x31 }, { 0x03, 0x00, 0x04, 0x0, 0x30 }, { 0x03, 0x00, 0x04, 0x0, 0x2e }, { 0x03, 0x00, 0x03, 0x0, 0x3c }, { 0x03, 0x00, 0x03, 0x0, 0x3a }, { 0x03, 0x00, 0x03, 0x0, 0x39 }, { 0x03, 0x00, 0x03, 0x0, 0x37 }, { 0x03, 0x00, 0x03, 0x0, 0x36 }, { 0x03, 0x00, 0x03, 0x0, 0x34 }, { 0x03, 0x00, 0x03, 0x0, 0x33 }, { 0x03, 0x00, 0x03, 0x0, 0x31 }, { 0x03, 0x00, 0x03, 0x0, 0x30 }, { 0x03, 0x00, 0x03, 0x0, 0x2e }, { 0x03, 0x00, 0x03, 0x0, 0x2d }, { 0x03, 0x00, 0x03, 0x0, 0x2c }, { 0x03, 0x00, 0x03, 0x0, 0x2b }, { 0x03, 0x00, 0x03, 0x0, 0x29 }, { 0x03, 0x00, 0x02, 0x0, 0x3d }, { 0x03, 0x00, 0x02, 0x0, 0x3b }, { 0x03, 0x00, 0x02, 0x0, 0x39 }, { 0x03, 0x00, 0x02, 0x0, 0x38 }, { 0x03, 0x00, 0x02, 0x0, 0x36 }, { 0x03, 0x00, 0x02, 0x0, 0x35 }, { 0x03, 0x00, 0x02, 0x0, 0x33 }, { 0x03, 0x00, 0x02, 0x0, 0x32 }, { 0x03, 0x00, 0x02, 0x0, 0x30 }, { 0x03, 0x00, 0x02, 0x0, 0x2f }, { 0x03, 0x00, 0x02, 0x0, 0x2e }, { 0x03, 0x00, 0x02, 0x0, 0x2c }, { 0x03, 0x00, 0x02, 0x0, 0x2b }, { 0x03, 0x00, 0x02, 0x0, 0x2a }, { 0x03, 0x00, 0x02, 0x0, 0x29 }, { 
0x03, 0x00, 0x02, 0x0, 0x27 }, { 0x03, 0x00, 0x02, 0x0, 0x26 }, { 0x03, 0x00, 0x02, 0x0, 0x25 }, { 0x03, 0x00, 0x02, 0x0, 0x24 }, { 0x03, 0x00, 0x02, 0x0, 0x23 }, { 0x03, 0x00, 0x02, 0x0, 0x22 }, { 0x03, 0x00, 0x02, 0x0, 0x21 }, { 0x03, 0x00, 0x02, 0x0, 0x20 }, { 0x03, 0x00, 0x01, 0x0, 0x3f }, { 0x03, 0x00, 0x01, 0x0, 0x3d }, { 0x03, 0x00, 0x01, 0x0, 0x3b }, { 0x03, 0x00, 0x01, 0x0, 0x39 }, }; /************************************************** * SW control. **************************************************/ const u16 b43_lcntab_sw_ctl_4313_epa_rev0[] = { 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, 0x0002, 0x0008, 0x0004, 0x0001, }; /************************************************** * R/W ops. 
**************************************************/ u32 b43_lcntab_read(struct b43_wldev *dev, u32 offset) { u32 type, value; type = offset & B43_LCNTAB_TYPEMASK; offset &= ~B43_LCNTAB_TYPEMASK; B43_WARN_ON(offset > 0xFFFF); switch (type) { case B43_LCNTAB_8BIT: b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset); value = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO) & 0xFF; break; case B43_LCNTAB_16BIT: b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset); value = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO); break; case B43_LCNTAB_32BIT: b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset); value = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO); value |= (b43_phy_read(dev, B43_PHY_LCN_TABLE_DATAHI) << 16); break; default: B43_WARN_ON(1); value = 0; } return value; } void b43_lcntab_read_bulk(struct b43_wldev *dev, u32 offset, unsigned int nr_elements, void *_data) { u32 type; u8 *data = _data; unsigned int i; type = offset & B43_LCNTAB_TYPEMASK; offset &= ~B43_LCNTAB_TYPEMASK; B43_WARN_ON(offset > 0xFFFF); b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset); for (i = 0; i < nr_elements; i++) { switch (type) { case B43_LCNTAB_8BIT: *data = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO) & 0xFF; data++; break; case B43_LCNTAB_16BIT: *((u16 *)data) = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO); data += 2; break; case B43_LCNTAB_32BIT: *((u32 *)data) = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO); *((u32 *)data) |= (b43_phy_read(dev, B43_PHY_LCN_TABLE_DATAHI) << 16); data += 4; break; default: B43_WARN_ON(1); } } } void b43_lcntab_write(struct b43_wldev *dev, u32 offset, u32 value) { u32 type; type = offset & B43_LCNTAB_TYPEMASK; offset &= 0xFFFF; switch (type) { case B43_LCNTAB_8BIT: B43_WARN_ON(value & ~0xFF); b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset); b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value); break; case B43_LCNTAB_16BIT: B43_WARN_ON(value & ~0xFFFF); b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset); b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value); break; 
case B43_LCNTAB_32BIT: b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset); b43_phy_write(dev, B43_PHY_LCN_TABLE_DATAHI, value >> 16); b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value & 0xFFFF); break; default: B43_WARN_ON(1); } return; } void b43_lcntab_write_bulk(struct b43_wldev *dev, u32 offset, unsigned int nr_elements, const void *_data) { u32 type, value; const u8 *data = _data; unsigned int i; type = offset & B43_LCNTAB_TYPEMASK; offset &= ~B43_LCNTAB_TYPEMASK; B43_WARN_ON(offset > 0xFFFF); b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset); for (i = 0; i < nr_elements; i++) { switch (type) { case B43_LCNTAB_8BIT: value = *data; data++; B43_WARN_ON(value & ~0xFF); b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value); break; case B43_LCNTAB_16BIT: value = *((u16 *)data); data += 2; B43_WARN_ON(value & ~0xFFFF); b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value); break; case B43_LCNTAB_32BIT: value = *((u32 *)data); data += 4; b43_phy_write(dev, B43_PHY_LCN_TABLE_DATAHI, value >> 16); b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value & 0xFFFF); break; default: B43_WARN_ON(1); } } } /************************************************** * Tables ops. 
**************************************************/ #define lcntab_upload(dev, offset, data) do { \ b43_lcntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \ } while (0) static void b43_phy_lcn_upload_static_tables(struct b43_wldev *dev) { lcntab_upload(dev, B43_LCNTAB16(0x02, 0), b43_lcntab_0x02); lcntab_upload(dev, B43_LCNTAB16(0x01, 0), b43_lcntab_0x01); lcntab_upload(dev, B43_LCNTAB32(0x0b, 0), b43_lcntab_0x0b); lcntab_upload(dev, B43_LCNTAB32(0x0c, 0), b43_lcntab_0x0c); lcntab_upload(dev, B43_LCNTAB32(0x0d, 0), b43_lcntab_0x0d); lcntab_upload(dev, B43_LCNTAB16(0x0e, 0), b43_lcntab_0x0e); lcntab_upload(dev, B43_LCNTAB16(0x0f, 0), b43_lcntab_0x0f); lcntab_upload(dev, B43_LCNTAB16(0x10, 0), b43_lcntab_0x10); lcntab_upload(dev, B43_LCNTAB16(0x11, 0), b43_lcntab_0x11); lcntab_upload(dev, B43_LCNTAB32(0x12, 0), b43_lcntab_0x12); lcntab_upload(dev, B43_LCNTAB16(0x14, 0), b43_lcntab_0x14); lcntab_upload(dev, B43_LCNTAB16(0x17, 0), b43_lcntab_0x17); lcntab_upload(dev, B43_LCNTAB16(0x00, 0), b43_lcntab_0x00); lcntab_upload(dev, B43_LCNTAB32(0x18, 0), b43_lcntab_0x18); } void b43_phy_lcn_load_tx_gain_tab(struct b43_wldev *dev, const struct b43_lcntab_tx_gain_tbl_entry *gain_table) { u32 i; u32 val; u16 pa_gain = 0x70; if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_FEM) pa_gain = 0x10; for (i = 0; i < B43_LCNTAB_TX_GAIN_SIZE; i++) { val = ((pa_gain << 24) | (gain_table[i].pad << 16) | (gain_table[i].pga << 8) | gain_table[i].gm); b43_lcntab_write(dev, B43_LCNTAB32(0x7, 0xc0 + i), val); /* brcmsmac doesn't maskset, we follow newer wl here */ val = b43_lcntab_read(dev, B43_LCNTAB32(0x7, 0x140 + i)); val &= 0x000fffff; val |= ((gain_table[i].dac << 28) | (gain_table[i].bb_mult << 20)); b43_lcntab_write(dev, B43_LCNTAB32(0x7, 0x140 + i), val); } } /* wlc_lcnphy_load_rfpower */ static void b43_phy_lcn_load_rfpower(struct b43_wldev *dev) { u32 bbmult, rfgain; u8 i; for (i = 0; i < 128; i++) { bbmult = b43_lcntab_read(dev, B43_LCNTAB32(0x7, 0x140 + i)); bbmult >>= 20; 
rfgain = b43_lcntab_read(dev, B43_LCNTAB32(0x7, 0xc0 + i)); /* TODO: calculate value for 0x240 + i table offset * b43_lcntab_write(dev, B43_LCNTAB32(0x7, 0x240 + i), val); */ } } /* Not implemented in brcmsmac, noticed in wl in MMIO dump */ static void b43_phy_lcn_rewrite_rfpower_table(struct b43_wldev *dev) { int i; u32 tmp; for (i = 0; i < 128; i++) { tmp = b43_lcntab_read(dev, B43_LCNTAB32(0x7, 0x240 + i)); b43_lcntab_write(dev, B43_LCNTAB32(0x7, 0x240 + i), tmp); } } /* wlc_lcnphy_clear_papd_comptable */ static void b43_phy_lcn_clean_papd_comp_table(struct b43_wldev *dev) { u8 i; for (i = 0; i < 0x80; i++) b43_lcntab_write(dev, B43_LCNTAB32(0x18, i), 0x80000); } /* wlc_lcnphy_tbl_init */ void b43_phy_lcn_tables_init(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; b43_phy_lcn_upload_static_tables(dev); if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { if (sprom->boardflags_lo & B43_BFL_FEM) b43_phy_lcn_load_tx_gain_tab(dev, b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0); else b43err(dev->wl, "TX gain table unknown for this card\n"); } if (sprom->boardflags_lo & B43_BFL_FEM && !(sprom->boardflags_hi & B43_BFH_FEM_BT)) b43_lcntab_write_bulk(dev, B43_LCNTAB16(0xf, 0), ARRAY_SIZE(b43_lcntab_sw_ctl_4313_epa_rev0), b43_lcntab_sw_ctl_4313_epa_rev0); else b43err(dev->wl, "SW ctl table is unknown for this card\n"); b43_phy_lcn_load_rfpower(dev); b43_phy_lcn_rewrite_rfpower_table(dev); b43_phy_lcn_clean_papd_comp_table(dev); }
gpl-2.0
mythos234/NamelessN910F-LL
fs/ext4/xattr_user.c
8591
1604
/* * linux/fs/ext4/xattr_user.c * Handler for extended user attributes. * * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org> */ #include <linux/string.h> #include <linux/fs.h> #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" static size_t ext4_xattr_user_list(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int type) { const size_t prefix_len = XATTR_USER_PREFIX_LEN; const size_t total_len = prefix_len + name_len + 1; if (!test_opt(dentry->d_sb, XATTR_USER)) return 0; if (list && total_len <= list_size) { memcpy(list, XATTR_USER_PREFIX, prefix_len); memcpy(list+prefix_len, name, name_len); list[prefix_len + name_len] = '\0'; } return total_len; } static int ext4_xattr_user_get(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { if (strcmp(name, "") == 0) return -EINVAL; if (!test_opt(dentry->d_sb, XATTR_USER)) return -EOPNOTSUPP; return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_USER, name, buffer, size); } static int ext4_xattr_user_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { if (strcmp(name, "") == 0) return -EINVAL; if (!test_opt(dentry->d_sb, XATTR_USER)) return -EOPNOTSUPP; return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_USER, name, value, size, flags); } const struct xattr_handler ext4_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .list = ext4_xattr_user_list, .get = ext4_xattr_user_get, .set = ext4_xattr_user_set, };
gpl-2.0
AlmightyMegadeth00/kernel_minnow
fs/ext4/xattr_trusted.c
8591
1523
/* * linux/fs/ext4/xattr_trusted.c * Handler for trusted extended attributes. * * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org> */ #include <linux/string.h> #include <linux/capability.h> #include <linux/fs.h> #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" static size_t ext4_xattr_trusted_list(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int type) { const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN; const size_t total_len = prefix_len + name_len + 1; if (!capable(CAP_SYS_ADMIN)) return 0; if (list && total_len <= list_size) { memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len); memcpy(list+prefix_len, name, name_len); list[prefix_len + name_len] = '\0'; } return total_len; } static int ext4_xattr_trusted_get(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { if (strcmp(name, "") == 0) return -EINVAL; return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_TRUSTED, name, buffer, size); } static int ext4_xattr_trusted_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { if (strcmp(name, "") == 0) return -EINVAL; return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_TRUSTED, name, value, size, flags); } const struct xattr_handler ext4_xattr_trusted_handler = { .prefix = XATTR_TRUSTED_PREFIX, .list = ext4_xattr_trusted_list, .get = ext4_xattr_trusted_get, .set = ext4_xattr_trusted_set, };
gpl-2.0
blastagator/LGG2_TWRP_Kernel
fs/ext4/xattr_user.c
8591
1604
/* * linux/fs/ext4/xattr_user.c * Handler for extended user attributes. * * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org> */ #include <linux/string.h> #include <linux/fs.h> #include "ext4_jbd2.h" #include "ext4.h" #include "xattr.h" static size_t ext4_xattr_user_list(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int type) { const size_t prefix_len = XATTR_USER_PREFIX_LEN; const size_t total_len = prefix_len + name_len + 1; if (!test_opt(dentry->d_sb, XATTR_USER)) return 0; if (list && total_len <= list_size) { memcpy(list, XATTR_USER_PREFIX, prefix_len); memcpy(list+prefix_len, name, name_len); list[prefix_len + name_len] = '\0'; } return total_len; } static int ext4_xattr_user_get(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { if (strcmp(name, "") == 0) return -EINVAL; if (!test_opt(dentry->d_sb, XATTR_USER)) return -EOPNOTSUPP; return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_USER, name, buffer, size); } static int ext4_xattr_user_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { if (strcmp(name, "") == 0) return -EINVAL; if (!test_opt(dentry->d_sb, XATTR_USER)) return -EOPNOTSUPP; return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_USER, name, value, size, flags); } const struct xattr_handler ext4_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .list = ext4_xattr_user_list, .get = ext4_xattr_user_get, .set = ext4_xattr_user_set, };
gpl-2.0
lukier/linux-hi3518
lib/mpi/generic_mpih-mul2.c
9871
1976
/* mpihelp-mul_2.c - MPI helper functions
 * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 *	 The GNU MP Library itself is published under the LGPL;
 *	 however I decided to publish this code under the plain GPL.
 */

#include "mpi-internal.h"
#include "longlong.h"

/*
 * Multiply-accumulate: res_ptr[0..s1_size-1] += s1_ptr[0..s1_size-1] * s2_limb.
 * Returns the limb carried out of the most-significant position.
 *
 * The index runs from -s1_size up to 0 against pre-biased base pointers;
 * this lets the loop terminate on a free test-against-zero.  As in the
 * original GMP-derived code, the do/while assumes s1_size >= 1.
 */
mpi_limb_t
mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
		 mpi_size_t s1_size, mpi_limb_t s2_limb)
{
	mpi_size_t idx = -s1_size;
	mpi_limb_t carry = 0;
	mpi_limb_t hi, lo, old;

	/* Bias the bases so that idx == -s1_size addresses element 0. */
	res_ptr -= idx;
	s1_ptr -= idx;

	do {
		umul_ppmm(hi, lo, s1_ptr[idx], s2_limb);
		lo += carry;
		carry = (lo < carry) + hi;	/* ripple overflow into high part */
		old = res_ptr[idx];
		lo += old;			/* accumulate into destination */
		carry += (lo < old);
		res_ptr[idx] = lo;
	} while (++idx);

	return carry;
}
gpl-2.0
Radium-Devices/Radium_shamu
lib/mpi/generic_mpih-add1.c
9871
2027
/* mpihelp-add_1.c - MPI helper functions
 * Copyright (C) 1994, 1996, 1997, 1998,
 *               2000 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 *	 The GNU MP Library itself is published under the LGPL;
 *	 however I decided to publish this code under the plain GPL.
 */

#include "mpi-internal.h"
#include "longlong.h"

/*
 * Limb-vector addition: res_ptr[i] = s1_ptr[i] + s2_ptr[i] for
 * i in [0, size); returns the final carry (0 or 1).
 *
 * Uses the classic negative-index trick: the index climbs from -size
 * to 0 over pre-biased base pointers so the loop condition is a free
 * test against zero.  The do/while assumes size >= 1, as the original
 * GMP-derived code does.
 */
mpi_limb_t
mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
	      mpi_ptr_t s2_ptr, mpi_size_t size)
{
	mpi_size_t idx = -size;
	mpi_limb_t carry = 0;
	mpi_limb_t a, b;

	/* Bias the bases to compensate for the negative indices. */
	s1_ptr -= idx;
	s2_ptr -= idx;
	res_ptr -= idx;

	do {
		b = s2_ptr[idx];
		a = s1_ptr[idx];
		b += carry;		/* fold in carry from previous limb */
		carry = (b < carry);	/* carry out of that addition */
		b += a;			/* add the other addend */
		carry += (b < a);	/* combine carries (at most 1 total) */
		res_ptr[idx] = b;
	} while (++idx);

	return carry;
}
gpl-2.0
keily90/tf101-nv-linux
sound/core/rawmidi_compat.c
13967
3570
/*
 * 32bit -> 64bit ioctl wrapper for raw MIDI API
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

/* This file included from rawmidi.c */

#include <linux/compat.h>

/* 32-bit layout of struct snd_rawmidi_params (compat ABI) */
struct snd_rawmidi_params32 {
	s32 stream;
	u32 buffer_size;
	u32 avail_min;
	unsigned int no_active_sensing; /* avoid bit-field */
	unsigned char reserved[16];
} __attribute__((packed));

/*
 * Compat handler for SNDRV_RAWMIDI_IOCTL_PARAMS.
 *
 * BUGFIX: the original checked only rfile->output before dispatching,
 * then dereferenced rfile->input for the INPUT stream case — a NULL
 * pointer dereference when the device was opened for input only.
 * The substream check now matches the requested stream.
 */
static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
					   struct snd_rawmidi_params32 __user *src)
{
	struct snd_rawmidi_params params;
	unsigned int val;

	if (get_user(params.stream, &src->stream) ||
	    get_user(params.buffer_size, &src->buffer_size) ||
	    get_user(params.avail_min, &src->avail_min) ||
	    get_user(val, &src->no_active_sensing))
		return -EFAULT;
	params.no_active_sensing = val;
	switch (params.stream) {
	case SNDRV_RAWMIDI_STREAM_OUTPUT:
		if (!rfile->output)
			return -EINVAL;
		return snd_rawmidi_output_params(rfile->output, &params);
	case SNDRV_RAWMIDI_STREAM_INPUT:
		if (!rfile->input)
			return -EINVAL;
		return snd_rawmidi_input_params(rfile->input, &params);
	}
	return -EINVAL;
}

/* 32-bit layout of struct snd_rawmidi_status (compat ABI) */
struct snd_rawmidi_status32 {
	s32 stream;
	struct compat_timespec tstamp;
	u32 avail;
	u32 xruns;
	unsigned char reserved[16];
} __attribute__((packed));

/*
 * Compat handler for SNDRV_RAWMIDI_IOCTL_STATUS.
 * Same NULL-substream fix as snd_rawmidi_ioctl_params_compat(): check
 * the substream that the caller's stream field actually selects.
 */
static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
					   struct snd_rawmidi_status32 __user *src)
{
	int err;
	struct snd_rawmidi_status status;

	if (get_user(status.stream, &src->stream))
		return -EFAULT;

	switch (status.stream) {
	case SNDRV_RAWMIDI_STREAM_OUTPUT:
		if (!rfile->output)
			return -EINVAL;
		err = snd_rawmidi_output_status(rfile->output, &status);
		break;
	case SNDRV_RAWMIDI_STREAM_INPUT:
		if (!rfile->input)
			return -EINVAL;
		err = snd_rawmidi_input_status(rfile->input, &status);
		break;
	default:
		return -EINVAL;
	}
	if (err < 0)
		return err;

	if (put_user(status.tstamp.tv_sec, &src->tstamp.tv_sec) ||
	    put_user(status.tstamp.tv_nsec, &src->tstamp.tv_nsec) ||
	    put_user(status.avail, &src->avail) ||
	    put_user(status.xruns, &src->xruns))
		return -EFAULT;

	return 0;
}

enum {
	SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
	SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
};

/*
 * 32-bit ioctl entry point: pass size-invariant commands straight
 * through, translate the two layout-sensitive ones above.
 */
static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd,
				     unsigned long arg)
{
	struct snd_rawmidi_file *rfile;
	void __user *argp = compat_ptr(arg);

	rfile = file->private_data;
	switch (cmd) {
	case SNDRV_RAWMIDI_IOCTL_PVERSION:
	case SNDRV_RAWMIDI_IOCTL_INFO:
	case SNDRV_RAWMIDI_IOCTL_DROP:
	case SNDRV_RAWMIDI_IOCTL_DRAIN:
		return snd_rawmidi_ioctl(file, cmd, (unsigned long)argp);
	case SNDRV_RAWMIDI_IOCTL_PARAMS32:
		return snd_rawmidi_ioctl_params_compat(rfile, argp);
	case SNDRV_RAWMIDI_IOCTL_STATUS32:
		return snd_rawmidi_ioctl_status_compat(rfile, argp);
	}
	return -ENOIOCTLCMD;
}
gpl-2.0
Euphoria-OS-Devices/android_kernel_motorola_msm8226
sound/core/rawmidi_compat.c
13967
3570
/*
 * 32bit -> 64bit ioctl wrapper for raw MIDI API
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

/* This file included from rawmidi.c */

#include <linux/compat.h>

/* 32-bit layout of struct snd_rawmidi_params (compat ABI) */
struct snd_rawmidi_params32 {
	s32 stream;
	u32 buffer_size;
	u32 avail_min;
	unsigned int no_active_sensing; /* avoid bit-field */
	unsigned char reserved[16];
} __attribute__((packed));

/*
 * Compat handler for SNDRV_RAWMIDI_IOCTL_PARAMS.
 *
 * BUGFIX: the original checked only rfile->output before dispatching,
 * then dereferenced rfile->input for the INPUT stream case — a NULL
 * pointer dereference when the device was opened for input only.
 * The substream check now matches the requested stream.
 */
static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
					   struct snd_rawmidi_params32 __user *src)
{
	struct snd_rawmidi_params params;
	unsigned int val;

	if (get_user(params.stream, &src->stream) ||
	    get_user(params.buffer_size, &src->buffer_size) ||
	    get_user(params.avail_min, &src->avail_min) ||
	    get_user(val, &src->no_active_sensing))
		return -EFAULT;
	params.no_active_sensing = val;
	switch (params.stream) {
	case SNDRV_RAWMIDI_STREAM_OUTPUT:
		if (!rfile->output)
			return -EINVAL;
		return snd_rawmidi_output_params(rfile->output, &params);
	case SNDRV_RAWMIDI_STREAM_INPUT:
		if (!rfile->input)
			return -EINVAL;
		return snd_rawmidi_input_params(rfile->input, &params);
	}
	return -EINVAL;
}

/* 32-bit layout of struct snd_rawmidi_status (compat ABI) */
struct snd_rawmidi_status32 {
	s32 stream;
	struct compat_timespec tstamp;
	u32 avail;
	u32 xruns;
	unsigned char reserved[16];
} __attribute__((packed));

/*
 * Compat handler for SNDRV_RAWMIDI_IOCTL_STATUS.
 * Same NULL-substream fix as snd_rawmidi_ioctl_params_compat(): check
 * the substream that the caller's stream field actually selects.
 */
static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
					   struct snd_rawmidi_status32 __user *src)
{
	int err;
	struct snd_rawmidi_status status;

	if (get_user(status.stream, &src->stream))
		return -EFAULT;

	switch (status.stream) {
	case SNDRV_RAWMIDI_STREAM_OUTPUT:
		if (!rfile->output)
			return -EINVAL;
		err = snd_rawmidi_output_status(rfile->output, &status);
		break;
	case SNDRV_RAWMIDI_STREAM_INPUT:
		if (!rfile->input)
			return -EINVAL;
		err = snd_rawmidi_input_status(rfile->input, &status);
		break;
	default:
		return -EINVAL;
	}
	if (err < 0)
		return err;

	if (put_user(status.tstamp.tv_sec, &src->tstamp.tv_sec) ||
	    put_user(status.tstamp.tv_nsec, &src->tstamp.tv_nsec) ||
	    put_user(status.avail, &src->avail) ||
	    put_user(status.xruns, &src->xruns))
		return -EFAULT;

	return 0;
}

enum {
	SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
	SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
};

/*
 * 32-bit ioctl entry point: pass size-invariant commands straight
 * through, translate the two layout-sensitive ones above.
 */
static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd,
				     unsigned long arg)
{
	struct snd_rawmidi_file *rfile;
	void __user *argp = compat_ptr(arg);

	rfile = file->private_data;
	switch (cmd) {
	case SNDRV_RAWMIDI_IOCTL_PVERSION:
	case SNDRV_RAWMIDI_IOCTL_INFO:
	case SNDRV_RAWMIDI_IOCTL_DROP:
	case SNDRV_RAWMIDI_IOCTL_DRAIN:
		return snd_rawmidi_ioctl(file, cmd, (unsigned long)argp);
	case SNDRV_RAWMIDI_IOCTL_PARAMS32:
		return snd_rawmidi_ioctl_params_compat(rfile, argp);
	case SNDRV_RAWMIDI_IOCTL_STATUS32:
		return snd_rawmidi_ioctl_status_compat(rfile, argp);
	}
	return -ENOIOCTLCMD;
}
gpl-2.0
KylinUI/android_kernel_oppo_n1
arch/arm/mach-msm/smd.c
400
91980
/* arch/arm/mach-msm/smd.c * * Copyright (C) 2007 Google, Inc. * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/platform_device.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/wait.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/termios.h> #include <linux/ctype.h> #include <linux/remote_spinlock.h> #include <linux/uaccess.h> #include <linux/kfifo.h> #include <linux/wakelock.h> #include <linux/notifier.h> #include <linux/sort.h> #include <linux/suspend.h> #include <mach/msm_smd.h> #include <mach/msm_iomap.h> #include <mach/system.h> #include <mach/subsystem_notif.h> #include <mach/socinfo.h> #include <mach/proc_comm.h> #include <asm/cacheflush.h> #include "smd_private.h" #include "modem_notifier.h" #if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \ || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \ || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064) #define CONFIG_QDSP6 1 #endif #if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \ || defined(CONFIG_ARCH_APQ8064) #define CONFIG_DSPS 1 #endif #if defined(CONFIG_ARCH_MSM8960) \ || defined(CONFIG_ARCH_APQ8064) #define CONFIG_WCNSS 1 #define CONFIG_DSPS_SMSM 1 #endif #define MODULE_NAME "msm_smd" #define SMEM_VERSION 0x000B #define SMD_VERSION 0x00020000 
#define SMSM_SNAPSHOT_CNT 64 #define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4) uint32_t SMSM_NUM_ENTRIES = 8; uint32_t SMSM_NUM_HOSTS = 3; /* Legacy SMSM interrupt notifications */ #define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \ | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD) enum { MSM_SMD_DEBUG = 1U << 0, MSM_SMSM_DEBUG = 1U << 1, MSM_SMD_INFO = 1U << 2, MSM_SMSM_INFO = 1U << 3, MSM_SMx_POWER_INFO = 1U << 4, }; struct smsm_shared_info { uint32_t *state; uint32_t *intr_mask; uint32_t *intr_mux; }; static struct smsm_shared_info smsm_info; static struct kfifo smsm_snapshot_fifo; static struct wake_lock smsm_snapshot_wakelock; static int smsm_snapshot_count; static DEFINE_SPINLOCK(smsm_snapshot_count_lock); struct smsm_size_info_type { uint32_t num_hosts; uint32_t num_entries; uint32_t reserved0; uint32_t reserved1; }; struct smsm_state_cb_info { struct list_head cb_list; uint32_t mask; void *data; void (*notify)(void *data, uint32_t old_state, uint32_t new_state); }; struct smsm_state_info { struct list_head callbacks; uint32_t last_value; uint32_t intr_mask_set; uint32_t intr_mask_clear; }; struct interrupt_config_item { /* must be initialized */ irqreturn_t (*irq_handler)(int req, void *data); /* outgoing interrupt config (set from platform data) */ uint32_t out_bit_pos; void __iomem *out_base; uint32_t out_offset; int irq_id; }; struct interrupt_config { struct interrupt_config_item smd; struct interrupt_config_item smsm; }; static irqreturn_t smd_modem_irq_handler(int irq, void *data); static irqreturn_t smsm_modem_irq_handler(int irq, void *data); static irqreturn_t smd_dsp_irq_handler(int irq, void *data); static irqreturn_t smsm_dsp_irq_handler(int irq, void *data); static irqreturn_t smd_dsps_irq_handler(int irq, void *data); static irqreturn_t smsm_dsps_irq_handler(int irq, void *data); static irqreturn_t smd_wcnss_irq_handler(int irq, void *data); static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data); static irqreturn_t 
smd_rpm_irq_handler(int irq, void *data); static irqreturn_t smsm_irq_handler(int irq, void *data); static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = { [SMD_MODEM] = { .smd.irq_handler = smd_modem_irq_handler, .smsm.irq_handler = smsm_modem_irq_handler, }, [SMD_Q6] = { .smd.irq_handler = smd_dsp_irq_handler, .smsm.irq_handler = smsm_dsp_irq_handler, }, [SMD_DSPS] = { .smd.irq_handler = smd_dsps_irq_handler, .smsm.irq_handler = smsm_dsps_irq_handler, }, [SMD_WCNSS] = { .smd.irq_handler = smd_wcnss_irq_handler, .smsm.irq_handler = smsm_wcnss_irq_handler, }, [SMD_RPM] = { .smd.irq_handler = smd_rpm_irq_handler, .smsm.irq_handler = NULL, /* does not support smsm */ }, }; struct smem_area { void *phys_addr; unsigned size; void __iomem *virt_addr; }; static uint32_t num_smem_areas; static struct smem_area *smem_areas; static void *smem_range_check(void *base, unsigned offset); struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS]; #define SMSM_STATE_ADDR(entry) (smsm_info.state + entry) #define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \ entry * SMSM_NUM_HOSTS + host) #define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry) /* Internal definitions which are not exported in some targets */ enum { SMSM_APPS_DEM_I = 3, }; static int msm_smd_debug_mask; module_param_named(debug_mask, msm_smd_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #if defined(CONFIG_MSM_SMD_DEBUG) #define SMD_DBG(x...) do { \ if (msm_smd_debug_mask & MSM_SMD_DEBUG) \ printk(KERN_DEBUG x); \ } while (0) #define SMSM_DBG(x...) do { \ if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \ printk(KERN_DEBUG x); \ } while (0) #define SMD_INFO(x...) do { \ if (msm_smd_debug_mask & MSM_SMD_INFO) \ printk(KERN_INFO x); \ } while (0) #define SMSM_INFO(x...) do { \ if (msm_smd_debug_mask & MSM_SMSM_INFO) \ printk(KERN_INFO x); \ } while (0) #define SMx_POWER_INFO(x...) 
do { \ if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \ printk(KERN_INFO x); \ } while (0) #else #define SMD_DBG(x...) do { } while (0) #define SMSM_DBG(x...) do { } while (0) #define SMD_INFO(x...) do { } while (0) #define SMSM_INFO(x...) do { } while (0) #define SMx_POWER_INFO(x...) do { } while (0) #endif static unsigned last_heap_free = 0xffffffff; static inline void smd_write_intr(unsigned int val, const void __iomem *addr); #if defined(CONFIG_ARCH_MSM7X30) #define MSM_TRIG_A2M_SMD_INT \ (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8)) #define MSM_TRIG_A2Q6_SMD_INT \ (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8)) #define MSM_TRIG_A2M_SMSM_INT \ (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8)) #define MSM_TRIG_A2Q6_SMSM_INT \ (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8)) #define MSM_TRIG_A2DSPS_SMD_INT #define MSM_TRIG_A2DSPS_SMSM_INT #define MSM_TRIG_A2WCNSS_SMD_INT #define MSM_TRIG_A2WCNSS_SMSM_INT #elif defined(CONFIG_ARCH_MSM8X60) #define MSM_TRIG_A2M_SMD_INT \ (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8)) #define MSM_TRIG_A2Q6_SMD_INT \ (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8)) #define MSM_TRIG_A2M_SMSM_INT \ (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8)) #define MSM_TRIG_A2Q6_SMSM_INT \ (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8)) #define MSM_TRIG_A2DSPS_SMD_INT \ (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080)) #define MSM_TRIG_A2DSPS_SMSM_INT #define MSM_TRIG_A2WCNSS_SMD_INT #define MSM_TRIG_A2WCNSS_SMSM_INT #elif defined(CONFIG_ARCH_MSM9615) #define MSM_TRIG_A2M_SMD_INT \ (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8)) #define MSM_TRIG_A2Q6_SMD_INT \ (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8)) #define MSM_TRIG_A2M_SMSM_INT \ (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8)) #define MSM_TRIG_A2Q6_SMSM_INT \ (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8)) #define MSM_TRIG_A2DSPS_SMD_INT #define MSM_TRIG_A2DSPS_SMSM_INT #define MSM_TRIG_A2WCNSS_SMD_INT #define MSM_TRIG_A2WCNSS_SMSM_INT #elif defined(CONFIG_ARCH_FSM9XXX) #define 
MSM_TRIG_A2Q6_SMD_INT \ (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8)) #define MSM_TRIG_A2Q6_SMSM_INT \ (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8)) #define MSM_TRIG_A2M_SMD_INT \ (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8)) #define MSM_TRIG_A2M_SMSM_INT \ (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8)) #define MSM_TRIG_A2DSPS_SMD_INT #define MSM_TRIG_A2DSPS_SMSM_INT #define MSM_TRIG_A2WCNSS_SMD_INT #define MSM_TRIG_A2WCNSS_SMSM_INT #elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25) #define MSM_TRIG_A2M_SMD_INT \ (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4)) #define MSM_TRIG_A2Q6_SMD_INT #define MSM_TRIG_A2M_SMSM_INT \ (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4)) #define MSM_TRIG_A2Q6_SMSM_INT #define MSM_TRIG_A2DSPS_SMD_INT #define MSM_TRIG_A2DSPS_SMSM_INT #define MSM_TRIG_A2WCNSS_SMD_INT #define MSM_TRIG_A2WCNSS_SMSM_INT #elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A) #define MSM_TRIG_A2M_SMD_INT \ (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4)) #define MSM_TRIG_A2Q6_SMD_INT #define MSM_TRIG_A2M_SMSM_INT \ (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4)) #define MSM_TRIG_A2Q6_SMSM_INT #define MSM_TRIG_A2DSPS_SMD_INT #define MSM_TRIG_A2DSPS_SMSM_INT #define MSM_TRIG_A2WCNSS_SMD_INT #define MSM_TRIG_A2WCNSS_SMSM_INT #else /* use platform device / device tree configuration */ #define MSM_TRIG_A2M_SMD_INT #define MSM_TRIG_A2Q6_SMD_INT #define MSM_TRIG_A2M_SMSM_INT #define MSM_TRIG_A2Q6_SMSM_INT #define MSM_TRIG_A2DSPS_SMD_INT #define MSM_TRIG_A2DSPS_SMSM_INT #define MSM_TRIG_A2WCNSS_SMD_INT #define MSM_TRIG_A2WCNSS_SMSM_INT #endif /* * stub out legacy macros if they are not being used so that the legacy * code compiles even though it is not used * * these definitions should not be used in active code and will cause * an early failure */ #ifndef INT_A9_M2A_0 #define INT_A9_M2A_0 -1 #endif #ifndef INT_A9_M2A_5 #define INT_A9_M2A_5 -1 #endif #ifndef INT_ADSP_A11 #define INT_ADSP_A11 -1 #endif #ifndef 
INT_ADSP_A11_SMSM #define INT_ADSP_A11_SMSM -1 #endif #ifndef INT_DSPS_A11 #define INT_DSPS_A11 -1 #endif #ifndef INT_DSPS_A11_SMSM #define INT_DSPS_A11_SMSM -1 #endif #ifndef INT_WCNSS_A11 #define INT_WCNSS_A11 -1 #endif #ifndef INT_WCNSS_A11_SMSM #define INT_WCNSS_A11_SMSM -1 #endif #define SMD_LOOPBACK_CID 100 #define SMEM_SPINLOCK_SMEM_ALLOC "S:3" static remote_spinlock_t remote_spinlock; static LIST_HEAD(smd_ch_list_loopback); static void smd_fake_irq_handler(unsigned long arg); static void smsm_cb_snapshot(uint32_t use_wakelock); static struct workqueue_struct *smsm_cb_wq; static void notify_smsm_cb_clients_worker(struct work_struct *work); static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker); static DEFINE_MUTEX(smsm_lock); static struct smsm_state_info *smsm_states; static int spinlocks_initialized; /** * Variables to indicate smd module initialization. * Dependents to register for smd module init notifier. */ static int smd_module_inited; static RAW_NOTIFIER_HEAD(smd_module_init_notifier_list); static DEFINE_MUTEX(smd_module_init_notifier_lock); static void smd_module_init_notify(uint32_t state, void *data); static inline void smd_write_intr(unsigned int val, const void __iomem *addr) { wmb(); __raw_writel(val, addr); } static inline void notify_modem_smd(void) { static const struct interrupt_config_item *intr = &private_intr_config[SMD_MODEM].smd; if (intr->out_base) { ++interrupt_stats[SMD_MODEM].smd_out_config_count; smd_write_intr(intr->out_bit_pos, intr->out_base + intr->out_offset); } else { ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count; MSM_TRIG_A2M_SMD_INT; } } static inline void notify_dsp_smd(void) { static const struct interrupt_config_item *intr = &private_intr_config[SMD_Q6].smd; if (intr->out_base) { ++interrupt_stats[SMD_Q6].smd_out_config_count; smd_write_intr(intr->out_bit_pos, intr->out_base + intr->out_offset); } else { ++interrupt_stats[SMD_Q6].smd_out_hardcode_count; MSM_TRIG_A2Q6_SMD_INT; } } static inline void 
notify_dsps_smd(void) { static const struct interrupt_config_item *intr = &private_intr_config[SMD_DSPS].smd; if (intr->out_base) { ++interrupt_stats[SMD_DSPS].smd_out_config_count; smd_write_intr(intr->out_bit_pos, intr->out_base + intr->out_offset); } else { ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count; MSM_TRIG_A2DSPS_SMD_INT; } } static inline void notify_wcnss_smd(void) { static const struct interrupt_config_item *intr = &private_intr_config[SMD_WCNSS].smd; if (intr->out_base) { ++interrupt_stats[SMD_WCNSS].smd_out_config_count; smd_write_intr(intr->out_bit_pos, intr->out_base + intr->out_offset); } else { ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count; MSM_TRIG_A2WCNSS_SMD_INT; } } static inline void notify_rpm_smd(void) { static const struct interrupt_config_item *intr = &private_intr_config[SMD_RPM].smd; if (intr->out_base) { ++interrupt_stats[SMD_RPM].smd_out_config_count; smd_write_intr(intr->out_bit_pos, intr->out_base + intr->out_offset); } } static inline void notify_modem_smsm(void) { static const struct interrupt_config_item *intr = &private_intr_config[SMD_MODEM].smsm; if (intr->out_base) { ++interrupt_stats[SMD_MODEM].smsm_out_config_count; smd_write_intr(intr->out_bit_pos, intr->out_base + intr->out_offset); } else { ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count; MSM_TRIG_A2M_SMSM_INT; } } static inline void notify_dsp_smsm(void) { static const struct interrupt_config_item *intr = &private_intr_config[SMD_Q6].smsm; if (intr->out_base) { ++interrupt_stats[SMD_Q6].smsm_out_config_count; smd_write_intr(intr->out_bit_pos, intr->out_base + intr->out_offset); } else { ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count; MSM_TRIG_A2Q6_SMSM_INT; } } static inline void notify_dsps_smsm(void) { static const struct interrupt_config_item *intr = &private_intr_config[SMD_DSPS].smsm; if (intr->out_base) { ++interrupt_stats[SMD_DSPS].smsm_out_config_count; smd_write_intr(intr->out_bit_pos, intr->out_base + intr->out_offset); } else { 
++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count; MSM_TRIG_A2DSPS_SMSM_INT; } } static inline void notify_wcnss_smsm(void) { static const struct interrupt_config_item *intr = &private_intr_config[SMD_WCNSS].smsm; if (intr->out_base) { ++interrupt_stats[SMD_WCNSS].smsm_out_config_count; smd_write_intr(intr->out_bit_pos, intr->out_base + intr->out_offset); } else { ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count; MSM_TRIG_A2WCNSS_SMSM_INT; } } static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask) { /* older protocol don't use smsm_intr_mask, but still communicates with modem */ if (!smsm_info.intr_mask || (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM)) & notify_mask)) notify_modem_smsm(); if (smsm_info.intr_mask && (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6)) & notify_mask)) { uint32_t mux_val; if (cpu_is_qsd8x50() && smsm_info.intr_mux) { mux_val = __raw_readl( SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM)); mux_val++; __raw_writel(mux_val, SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM)); } notify_dsp_smsm(); } if (smsm_info.intr_mask && (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS)) & notify_mask)) { notify_wcnss_smsm(); } if (smsm_info.intr_mask && (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS)) & notify_mask)) { notify_dsps_smsm(); } /* * Notify local SMSM callback clients without wakelock since this * code is used by power management during power-down/-up sequencing * on DEM-based targets. Grabbing a wakelock in this case will * abort the power-down sequencing. 
*/ if (smsm_info.intr_mask && (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS)) & notify_mask)) { smsm_cb_snapshot(0); } } static int smsm_pm_notifier(struct notifier_block *nb, unsigned long event, void *unused) { switch (event) { case PM_SUSPEND_PREPARE: smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0); break; case PM_POST_SUSPEND: smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE); break; } return NOTIFY_DONE; } static struct notifier_block smsm_pm_nb = { .notifier_call = smsm_pm_notifier, .priority = 0, }; void smd_diag(void) { char *x; int size; x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG); if (x != 0) { x[SZ_DIAG_ERR_MSG - 1] = 0; SMD_INFO("smem: DIAG '%s'\n", x); } x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size); if (x != 0) { x[size - 1] = 0; pr_err("smem: CRASH LOG\n'%s'\n", x); } } static void handle_modem_crash(void) { pr_err("MODEM/AMSS has CRASHED\n"); smd_diag(); /* hard reboot if possible FIXME if (msm_reset_hook) msm_reset_hook(); */ /* in this case the modem or watchdog should reboot us */ for (;;) ; } int smsm_check_for_modem_crash(void) { /* if the modem's not ready yet, we have to hope for the best */ if (!smsm_info.state) return 0; if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) { handle_modem_crash(); return -1; } return 0; } EXPORT_SYMBOL(smsm_check_for_modem_crash); /* the spinlock is used to synchronize between the * irq handler and code that mutates the channel * list or fiddles with channel state */ static DEFINE_SPINLOCK(smd_lock); DEFINE_SPINLOCK(smem_lock); /* the mutex is used during open() and close() * operations to avoid races while creating or * destroying smd_channel structures */ static DEFINE_MUTEX(smd_creation_mutex); static int smd_initialized; struct smd_shared_v1 { struct smd_half_channel ch0; unsigned char data0[SMD_BUF_SIZE]; struct smd_half_channel ch1; unsigned char data1[SMD_BUF_SIZE]; }; struct smd_shared_v2 { struct smd_half_channel ch0; struct smd_half_channel ch1; }; struct 
smd_shared_v2_word_access { struct smd_half_channel_word_access ch0; struct smd_half_channel_word_access ch1; }; struct smd_channel { volatile void *send; /* some variant of smd_half_channel */ volatile void *recv; /* some variant of smd_half_channel */ unsigned char *send_data; unsigned char *recv_data; unsigned fifo_size; unsigned fifo_mask; struct list_head ch_list; unsigned current_packet; unsigned n; void *priv; void (*notify)(void *priv, unsigned flags); int (*read)(smd_channel_t *ch, void *data, int len, int user_buf); int (*write)(smd_channel_t *ch, const void *data, int len, int user_buf); int (*read_avail)(smd_channel_t *ch); int (*write_avail)(smd_channel_t *ch); int (*read_from_cb)(smd_channel_t *ch, void *data, int len, int user_buf); void (*update_state)(smd_channel_t *ch); unsigned last_state; void (*notify_other_cpu)(void); char name[20]; struct platform_device pdev; unsigned type; int pending_pkt_sz; char is_pkt_ch; /* * private internal functions to access *send and *recv. * never to be exported outside of smd */ struct smd_half_channel_access *half_ch; }; struct edge_to_pid { uint32_t local_pid; uint32_t remote_pid; char subsys_name[SMD_MAX_CH_NAME_LEN]; }; /** * Maps edge type to local and remote processor ID's. 
*/ static struct edge_to_pid edge_to_pids[] = { [SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"}, [SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"}, [SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6}, [SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"}, [SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS}, [SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS}, [SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"}, [SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS}, [SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS}, [SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS}, [SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW}, [SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW}, [SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW}, [SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW}, [SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW}, [SMD_APPS_RPM] = {SMD_APPS, SMD_RPM}, [SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM}, [SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM}, [SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM}, }; struct restart_notifier_block { unsigned processor; char *name; struct notifier_block nb; }; static int disable_smsm_reset_handshake; static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"}; static LIST_HEAD(smd_ch_closed_list); static LIST_HEAD(smd_ch_closing_list); static LIST_HEAD(smd_ch_to_close_list); static LIST_HEAD(smd_ch_list_modem); static LIST_HEAD(smd_ch_list_dsp); static LIST_HEAD(smd_ch_list_dsps); static LIST_HEAD(smd_ch_list_wcnss); static LIST_HEAD(smd_ch_list_rpm); static unsigned char smd_ch_allocated[64]; static struct work_struct probe_work; static void finalize_channel_close_fn(struct work_struct *work); static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn); static struct workqueue_struct *channel_close_wq; static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm); /* on smp systems, the probe might get called from multiple cores, hence use a lock */ static DEFINE_MUTEX(smd_probe_lock); static void smd_channel_probe_worker(struct work_struct *work) { struct smd_alloc_elm *shared; unsigned n; uint32_t type; shared = 
smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);

	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	mutex_lock(&smd_probe_lock);
	for (n = 0; n < 64; n++) {
		if (smd_ch_allocated[n])
			continue;

		/* channel should be allocated only if APPS
		   processor is involved */
		type = SMD_CHANNEL_TYPE(shared[n].type);
		if (type >= ARRAY_SIZE(edge_to_pids) ||
				edge_to_pids[type].local_pid != SMD_APPS)
			continue;
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		if (!smd_alloc_channel(&shared[n]))
			smd_ch_allocated[n] = 1;
		else
			SMD_INFO("Probe skipping ch %d, not allocated\n", n);
	}
	mutex_unlock(&smd_probe_lock);
}

/**
 * Lookup processor ID and determine if it belongs to the proved edge
 * type.
 *
 * @shared2:   Pointer to v2 shared channel structure
 * @type:      Edge type
 * @pid:       Processor ID of processor on edge
 * @local_ch:  Channel that belongs to processor @pid
 * @remote_ch: Other side of edge contained @pid
 * @is_word_access_ch: Bool, is this a word aligned access channel
 *
 * Returns 0 for not on edge, 1 for found on edge
 */
static int pid_is_on_edge(void *shared2,
		uint32_t type,
		uint32_t pid,
		void **local_ch,
		void **remote_ch,
		int is_word_access_ch
		)
{
	int ret = 0;
	struct edge_to_pid *edge;
	void *ch0;
	void *ch1;

	*local_ch = 0;
	*remote_ch = 0;

	if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
		return 0;

	/* select the correct half-channel layout for this edge */
	if (is_word_access_ch) {
		ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
		ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
	} else {
		ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
		ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
	}

	edge = &edge_to_pids[type];
	if (edge->local_pid != edge->remote_pid) {
		if (pid == edge->local_pid) {
			*local_ch = ch0;
			*remote_ch = ch1;
			ret = 1;
		} else if (pid == edge->remote_pid) {
			*local_ch = ch1;
			*remote_ch = ch0;
			ret = 1;
		}
	}

	return ret;
}

/*
 * Returns a pointer to the subsystem name or NULL if no
 * subsystem name is available.
 *
 * @type - Edge definition
 */
const char *smd_edge_to_subsystem(uint32_t type)
{
	const char *subsys = NULL;

	if (type < ARRAY_SIZE(edge_to_pids)) {
		subsys = edge_to_pids[type].subsys_name;
		if (subsys[0] == 0x0)
			subsys = NULL;
	}
	return subsys;
}
EXPORT_SYMBOL(smd_edge_to_subsystem);

/*
 * Returns a pointer to the subsystem name given the
 * remote processor ID.
 *
 * @pid     Remote processor ID
 * @returns Pointer to subsystem name or NULL if not found
 */
const char *smd_pid_to_subsystem(uint32_t pid)
{
	const char *subsys = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
		if (pid == edge_to_pids[i].remote_pid &&
			edge_to_pids[i].subsys_name[0] != 0x0
			) {
			subsys = edge_to_pids[i].subsys_name;
			break;
		}
	}

	return subsys;
}
EXPORT_SYMBOL(smd_pid_to_subsystem);

/* Force one half-channel into @new_state and clear its handshake
 * flags (fDSR/fCTS/fCD); sets fSTATE so the change is noticed. */
static void smd_reset_edge(void *void_ch, unsigned new_state,
				int is_word_access_ch)
{
	if (is_word_access_ch) {
		struct smd_half_channel_word_access *ch =
			(struct smd_half_channel_word_access *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	} else {
		struct smd_half_channel *ch =
			(struct smd_half_channel *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	}
}

/* Walk every allocated channel and reset the half-channels owned by
 * @pid to @new_state. */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *local_ch;
	void *remote_ch;
	int is_word_access;

	for (n = 0; n < SMD_CHANNELS; n++) {
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		if (is_word_access)
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2_word_access));
		else
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
							is_word_access))
			smd_reset_edge(local_ch, new_state,
is_word_access);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch, is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);
	}
}

/* Reset all SMD/SMSM state associated with a restarting remote
 * processor: release its spinlocks, clear its SMSM entry and drive
 * every edge it owns through CLOSING and then CLOSED. */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}

/* how many bytes are available for reading */
static int smd_stream_read_avail(struct smd_channel *ch)
{
	return
(ch->half_ch->get_head(ch->recv) -
			ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
}

/* how many bytes we are free to write */
static int smd_stream_write_avail(struct smd_channel *ch)
{
	return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
			ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
}

/* bytes readable from the current packet; 0 between packets */
static int smd_packet_read_avail(struct smd_channel *ch)
{
	if (ch->current_packet) {
		int n = smd_stream_read_avail(ch);
		if (n > ch->current_packet)
			n = ch->current_packet;
		return n;
	} else {
		return 0;
	}
}

/* stream space minus the room reserved for a packet header */
static int smd_packet_write_avail(struct smd_channel *ch)
{
	int n = smd_stream_write_avail(ch);
	return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
}

static int ch_is_open(struct smd_channel *ch)
{
	return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
		ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
		&& (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
}

/* provide a pointer and length to readable data in the fifo */
static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->recv);
	unsigned tail = ch->half_ch->get_tail(ch->recv);
	*ptr = (void *) (ch->recv_data + tail);

	if (tail <= head)
		return head - tail;
	else
		return ch->fifo_size - tail;
}

static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}

/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	wmb();
	ch->half_ch->set_fTAIL(ch->send, 1);
}

/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		n =
ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}

static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}

/* Latch the size of the next incoming packet from its header once the
 * full header is available in the FIFO. */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		ch->current_packet = hdr[0];
	}
}

/* provide a pointer and length to next free space in the fifo */
static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->send);
	unsigned tail = ch->half_ch->get_tail(ch->send);
	*ptr = (void *) (ch->send_data + head);

	if (head < tail) {
		return tail - head - 1;
	} else {
		if (tail == 0)
			return ch->fifo_size - head - 1;
		else
			return ch->fifo_size - head;
	}
}

/* advace the fifo write pointer after freespace
 * from ch_write_buffer is filled */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}

/* Advertise our half-channel state (and modem-control flags) to the
 * remote side, then ring it. */
static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	} else {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
	}
	ch->half_ch->set_state(ch->send, n);
	ch->half_ch->set_fSTATE(ch->send, 1);
	ch->notify_other_cpu();
}

/* Kick the probe worker if the shared heap changed; a remote processor
 * may have allocated new channels. */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}

/* Drive the local half of the state machine in response to a remote
 * state transition, delivering OPEN/CLOSE events to the client. */
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}

/* Process state transitions for channels still waiting to finish
 * closing. */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}

/* Common interrupt path: ack the remote event flags and dispatch
 * data/state notifications for every channel on @list. */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* collect and clear the remote's event flags:
			 * 1 = new data (fHEAD), 2 = space freed (fTAIL),
			 * 4 = state change (fSTATE) */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}

static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int RPM->Apps\n");
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}

/* Poll every edge as if its interrupt had fired; run from tasklet
 * context and from the channel-reset path. */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);

/* Returns non-zero if the channel has an unserviced event pending. */
static inline int smd_need_int(struct smd_channel *ch)
{
	if (ch_is_open(ch)) {
		if (ch->half_ch->get_fHEAD(ch->recv) ||
			ch->half_ch->get_fTAIL(ch->recv) ||
			ch->half_ch->get_fSTATE(ch->recv))
			return 1;

		if (ch->half_ch->get_state(ch->recv) != ch->last_state)
			return 1;
	}
	return 0;
}

/* Re-check all channels after sleep exit; events may have arrived
 * while interrupts could not be serviced. */
void smd_sleep_exit(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	int need_int = 0;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
		if (smd_need_int(ch)) {
			need_int = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();

	if (need_int) {
		SMD_DBG("smd_sleep_exit need interrupt\n");
		tasklet_schedule(&smd_fake_irq_tasklet);
	}
}
EXPORT_SYMBOL(smd_sleep_exit);

/* Decide stream vs. packet mode from the allocation-table entry. */
static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
{
	if (SMD_XFER_TYPE(alloc_elm->type) == 1)
		return 0;
	else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
		return 1;

	/* for cases where xfer type is 0 */
	if
(!strncmp(alloc_elm->name, "DAL", 3))
		return 0;

	/* for cases where xfer type is 0 */
	if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
		return 0;

	if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
		return 1;
	else
		return 0;
}

/* Copy up to @len bytes into the TX FIFO; returns the number of bytes
 * actually queued, which may be short if the FIFO fills. */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
				int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}

/* Write a complete packet (header then payload); fails with -ENOMEM
 * rather than writing a partial packet. */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
				int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;

	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}

	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}

static int smd_stream_read(smd_channel_t *ch, void *data, int len,
				int user_buf)
{
	int r;

	if (len < 0)
		return -EINVAL;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	return r;
}

/* Read from the current packet only, then update packet bookkeeping
 * under smd_lock. */
static int smd_packet_read(smd_channel_t *ch, void *data, int len,
				int user_buf)
{
	unsigned long flags;
	int r;

	if (len < 0)
		return -EINVAL;

	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	spin_lock_irqsave(&smd_lock, flags);
	ch->current_packet -= r;
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return r;
}

/* Like smd_packet_read() but without taking smd_lock; for use from
 * the notify callback path. */
static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
					int user_buf)
{
	int r;

	if (len < 0)
		return -EINVAL;

	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	ch->current_packet -= r;
	update_packet_state(ch);

	return r;
}

#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
/* Map the v2 shared structures and split the shared FIFO region into
 * TX and RX halves of equal, power-of-two size. */
static int smd_alloc_v2(struct smd_channel *ch)
{
	void *buffer;
	unsigned buffer_sz;

	if (is_word_access_ch(ch->type)) {
		struct smd_shared_v2_word_access *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	} else {
		struct smd_shared_v2 *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	}
	ch->half_ch = get_half_ch_funcs(ch->type);

	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	buffer_sz /= 2;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;

	return 0;
}

static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}

#else /* define v1 for older targets */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}

static int
smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	ch->half_ch = get_half_ch_funcs(ch->type);

	return 0;
}
#endif

/* Create, initialize and register one channel described by an
 * allocation-table entry; returns 0 on success, -1 on failure. */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* try the v2 layout first, fall back to v1 */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarentees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* install stream- or packet-mode operations */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		 ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}

/* Deliver a data event to every open loopback channel. */
static inline void notify_loopback_smd(void)
{
	unsigned long flags;
	struct smd_channel *ch;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
		ch->notify(ch->priv, SMD_EVENT_DATA);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}

/* Create the local loopback channel; TX and RX share one control
 * block and one data buffer. */
static int smd_alloc_loopback_channel(void)
{
	static struct smd_half_channel smd_loopback_ctl;
	static char smd_loopback_data[SMD_BUF_SIZE];
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("%s: out of memory\n", __func__);
		return -1;
	}
	ch->n = SMD_LOOPBACK_CID;

	ch->send = &smd_loopback_ctl;
	ch->recv = &smd_loopback_ctl;
	ch->send_data = smd_loopback_data;
	ch->recv_data = smd_loopback_data;
	ch->fifo_size = SMD_BUF_SIZE;

	ch->fifo_mask = ch->fifo_size - 1;
	ch->type = SMD_LOOPBACK_TYPE;
	ch->notify_other_cpu = notify_loopback_smd;

	ch->read = smd_stream_read;
	ch->write = smd_stream_write;
	ch->read_avail = smd_stream_read_avail;
	ch->write_avail = smd_stream_write_avail;
	ch->update_state = update_stream_state;
	ch->read_from_cb = smd_stream_read;

	memset(ch->name, 0, 20);
	memcpy(ch->name, "local_loopback", 14);

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	return 0;
}

static void do_nothing_notify(void *priv, unsigned flags)
{
}

/* Workqueue function: move fully-closed channels back onto the
 * closed list and tell clients they may reopen. */
static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct
smd_channel *index;

	mutex_lock(&smd_creation_mutex);
	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_creation_mutex);
}

/* Pop a closed channel matching @name/@type off the closed list, or
 * return NULL if none exists. */
struct smd_channel *smd_get_channel(const char *name, uint32_t type)
{
	struct smd_channel *ch;

	mutex_lock(&smd_creation_mutex);
	list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
		if (!strcmp(name, ch->name) &&
			(type == ch->type)) {
			list_del(&ch->ch_list);
			mutex_unlock(&smd_creation_mutex);
			return ch;
		}
	}
	mutex_unlock(&smd_creation_mutex);

	return NULL;
}

/* Open channel @name on @edge.  Returns -EAGAIN while the port is
 * still being closed and -ENODEV if it does not exist. */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;
	/* loopback has no remote side: mark it open immediately */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}
	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);

int smd_open(const char *name, smd_channel_t **_ch,
	     void *priv, void (*notify)(void *, unsigned))
{
	return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
				      notify);
}
EXPORT_SYMBOL(smd_open);

int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	/* wait for the remote side if it is still open */
	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);

/* Begin a segmented packet write: reserve @len payload bytes and emit
 * the packet header now; payload follows via smd_write_segment(). */
int smd_write_start(smd_channel_t *ch, int len)
{
	int ret;
	unsigned hdr[5];

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (!ch->is_pkt_ch) {
		pr_err("%s: non-packet channel specified\n", __func__);
		return -EACCES;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (ch->pending_pkt_sz) {
		pr_err("%s: packet of size: %d in progress\n", __func__,
			ch->pending_pkt_sz);
		return -EBUSY;
	}
	ch->pending_pkt_sz = len;

	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
		ch->pending_pkt_sz = 0;
		SMD_DBG("%s: no space to write packet header\n", __func__);
		return -EAGAIN;
	}

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;

	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		ch->pending_pkt_sz = 0;
		pr_err("%s: packet header failed to write\n", __func__);
		return -EPERM;
	}
	return 0;
}
EXPORT_SYMBOL(smd_write_start);

/* Write one payload segment of a packet begun with smd_write_start();
 * returns the number of bytes queued. */
int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
{
	int bytes_written;

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (!ch->pending_pkt_sz) {
		pr_err("%s: no transaction in progress\n", __func__);
		return -ENOEXEC;
	}
	if (ch->pending_pkt_sz - len < 0) {
		pr_err("%s: segment of size: %d will make packet go over "
			"length\n", __func__, len);
		return -EINVAL;
	}

	bytes_written = smd_stream_write(ch, data, len, user_buf);

	ch->pending_pkt_sz -= bytes_written;

	return bytes_written;
}
EXPORT_SYMBOL(smd_write_segment);

/* Verify the segmented packet was completely written. */
int smd_write_end(smd_channel_t *ch)
{

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (ch->pending_pkt_sz) {
		pr_err("%s: current packet not completely written\n",
			__func__);
		return -E2BIG;
	}

	return 0;
}
EXPORT_SYMBOL(smd_write_end);

int smd_read(smd_channel_t *ch, void *data, int len)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->read(ch, data, len, 0);
}
EXPORT_SYMBOL(smd_read);

int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->read(ch, data, len, 1);
}
EXPORT_SYMBOL(smd_read_user_buffer);

int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->read_from_cb(ch, data, len, 0);
}
EXPORT_SYMBOL(smd_read_from_cb);

int smd_write(smd_channel_t *ch, const void *data, int len)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	/* refuse while a segmented packet write is in progress */
	return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
}
EXPORT_SYMBOL(smd_write);

int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
}
EXPORT_SYMBOL(smd_write_user_buffer);

int smd_read_avail(smd_channel_t *ch)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->read_avail(ch);
}
EXPORT_SYMBOL(smd_read_avail);

int smd_write_avail(smd_channel_t *ch)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->write_avail(ch);
}
EXPORT_SYMBOL(smd_write_avail);

void smd_enable_read_intr(smd_channel_t *ch)
{
	if (ch)
		ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
}
EXPORT_SYMBOL(smd_enable_read_intr);

void smd_disable_read_intr(smd_channel_t *ch)
{
	if (ch)
		ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
}
EXPORT_SYMBOL(smd_disable_read_intr);

/**
 * Enable/disable receive interrupts for the remote processor used by a
 * particular channel.
* @ch:      open channel handle to use for the edge
 * @mask:    1 = mask interrupts; 0 = unmask interrupts
 * @returns: 0 for success; < 0 for failure
 *
 * Note that this enables/disables all interrupts from the remote subsystem for
 * all channels.  As such, it should be used with care and only for specific
 * use cases such as power-collapse sequencing.
 */
int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
{
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;
	struct interrupt_config_item *int_cfg;

	if (!ch)
		return -EINVAL;

	if (ch->type >= ARRAY_SIZE(edge_to_pids))
		return -ENODEV;

	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;

	if (int_cfg->irq_id < 0)
		return -ENODEV;

	irq_chip = irq_get_chip(int_cfg->irq_id);
	if (!irq_chip)
		return -ENODEV;

	irq_data = irq_get_irq_data(int_cfg->irq_id);
	if (!irq_data)
		return -ENODEV;

	if (mask) {
		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_mask(irq_data);
	} else {
		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_unmask(irq_data);
	}

	return 0;
}
EXPORT_SYMBOL(smd_mask_receive_interrupt);

/* blocking waits are not implemented */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}

int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}

int smd_cur_packet_size(smd_channel_t *ch)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->current_packet;
}
EXPORT_SYMBOL(smd_cur_packet_size);

/* Report modem-control line state as TIOCM_* bits; remote flags map
 * to DSR/CTS/CD/RI, our own flags map to RTS/DTR. */
int smd_tiocmget(smd_channel_t *ch)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return  (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
		(ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
		(ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
		(ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
		(ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
		(ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
}
EXPORT_SYMBOL(smd_tiocmget);

/* this api will be called while holding smd_lock */
int
smd_tiocmset_from_cb(smd_channel_t *ch,
			unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	ch->half_ch->set_fSTATE(ch->send, 1);
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);

int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	unsigned long flags;

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	spin_lock_irqsave(&smd_lock, flags);
	smd_tiocmset_from_cb(ch, set, clear);
	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset);

/* Returns 1 once a complete packet header has been received. */
int smd_is_pkt_avail(smd_channel_t *ch)
{
	unsigned long flags;

	if (!ch || !ch->is_pkt_ch)
		return -EINVAL;

	if (ch->current_packet)
		return 1;

	spin_lock_irqsave(&smd_lock, flags);
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return ch->current_packet ? 1 : 0;
}
EXPORT_SYMBOL(smd_is_pkt_avail);


/* -------------------------------------------------------------------------- */

/*
 * Shared Memory Range Check
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the aux smem regions.  If so, returns the
 * corresponding virtual address.  Otherwise returns NULL.  Expects the array
 * of smem regions to be in ascending physical address order.
* * @base: physical base address to check * @offset: offset from the base to get the final address */ static void *smem_range_check(void *base, unsigned offset) { int i; void *phys_addr; unsigned size; for (i = 0; i < num_smem_areas; ++i) { phys_addr = smem_areas[i].phys_addr; size = smem_areas[i].size; if (base < phys_addr) return NULL; if (base > phys_addr + size) continue; if (base >= phys_addr && base + offset < phys_addr + size) return smem_areas[i].virt_addr + offset; } return NULL; } /* smem_alloc returns the pointer to smem item if it is already allocated. * Otherwise, it returns NULL. */ void *smem_alloc(unsigned id, unsigned size) { return smem_find(id, size); } EXPORT_SYMBOL(smem_alloc); /* smem_alloc2 returns the pointer to smem item. If it is not allocated, * it allocates it and then returns the pointer to it. */ void *smem_alloc2(unsigned id, unsigned size_in) { struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE; struct smem_heap_entry *toc = shared->heap_toc; unsigned long flags; void *ret = NULL; if (!shared->heap_info.initialized) { pr_err("%s: smem heap info not initialized\n", __func__); return NULL; } if (id >= SMEM_NUM_ITEMS) return NULL; size_in = ALIGN(size_in, 8); remote_spin_lock_irqsave(&remote_spinlock, flags); if (toc[id].allocated) { SMD_DBG("%s: %u already allocated\n", __func__, id); if (size_in != toc[id].size) pr_err("%s: wrong size %u (expected %u)\n", __func__, toc[id].size, size_in); else ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset); } else if (id > SMEM_FIXED_ITEM_LAST) { SMD_DBG("%s: allocating %u\n", __func__, id); if (shared->heap_info.heap_remaining >= size_in) { toc[id].offset = shared->heap_info.free_offset; toc[id].size = size_in; wmb(); toc[id].allocated = 1; shared->heap_info.free_offset += size_in; shared->heap_info.heap_remaining -= size_in; ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset); } else pr_err("%s: not enough memory %u (required %u)\n", __func__, shared->heap_info.heap_remaining, 
size_in); } wmb(); remote_spin_unlock_irqrestore(&remote_spinlock, flags); return ret; } EXPORT_SYMBOL(smem_alloc2); void *smem_get_entry(unsigned id, unsigned *size) { struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE; struct smem_heap_entry *toc = shared->heap_toc; int use_spinlocks = spinlocks_initialized; void *ret = 0; unsigned long flags = 0; if (id >= SMEM_NUM_ITEMS) return ret; if (use_spinlocks) remote_spin_lock_irqsave(&remote_spinlock, flags); /* toc is in device memory and cannot be speculatively accessed */ if (toc[id].allocated) { *size = toc[id].size; barrier(); if (!(toc[id].reserved & BASE_ADDR_MASK)) ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset); else ret = smem_range_check( (void *)(toc[id].reserved & BASE_ADDR_MASK), toc[id].offset); } else { *size = 0; } if (use_spinlocks) remote_spin_unlock_irqrestore(&remote_spinlock, flags); return ret; } EXPORT_SYMBOL(smem_get_entry); void *smem_find(unsigned id, unsigned size_in) { unsigned size; void *ptr; ptr = smem_get_entry(id, &size); if (!ptr) return 0; size_in = ALIGN(size_in, 8); if (size_in != size) { pr_err("smem_find(%d, %d): wrong size %d\n", id, size_in, size); return 0; } return ptr; } EXPORT_SYMBOL(smem_find); static int smsm_cb_init(void) { struct smsm_state_info *state_info; int n; int ret = 0; smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES, GFP_KERNEL); if (!smsm_states) { pr_err("%s: SMSM init failed\n", __func__); return -ENOMEM; } smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq"); if (!smsm_cb_wq) { pr_err("%s: smsm_cb_wq creation failed\n", __func__); kfree(smsm_states); return -EFAULT; } mutex_lock(&smsm_lock); for (n = 0; n < SMSM_NUM_ENTRIES; n++) { state_info = &smsm_states[n]; state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n)); state_info->intr_mask_set = 0x0; state_info->intr_mask_clear = 0x0; INIT_LIST_HEAD(&state_info->callbacks); } mutex_unlock(&smsm_lock); return ret; } static int smsm_init(void) { struct smem_shared 
*shared = (void *) MSM_SHARED_RAM_BASE; int i; struct smsm_size_info_type *smsm_size_info; smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO, sizeof(struct smsm_size_info_type)); if (smsm_size_info) { SMSM_NUM_ENTRIES = smsm_size_info->num_entries; SMSM_NUM_HOSTS = smsm_size_info->num_hosts; } i = kfifo_alloc(&smsm_snapshot_fifo, sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT, GFP_KERNEL); if (i) { pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i); return i; } wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND, "smsm_snapshot"); if (!smsm_info.state) { smsm_info.state = smem_alloc2(ID_SHARED_STATE, SMSM_NUM_ENTRIES * sizeof(uint32_t)); if (smsm_info.state) { __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE)); if ((shared->version[VERSION_MODEM] >> 16) >= 0xB) __raw_writel(0, \ SMSM_STATE_ADDR(SMSM_APPS_DEM_I)); } } if (!smsm_info.intr_mask) { smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK, SMSM_NUM_ENTRIES * SMSM_NUM_HOSTS * sizeof(uint32_t)); if (smsm_info.intr_mask) { for (i = 0; i < SMSM_NUM_ENTRIES; i++) __raw_writel(0x0, SMSM_INTR_MASK_ADDR(i, SMSM_APPS)); /* Configure legacy modem bits */ __raw_writel(LEGACY_MODEM_SMSM_MASK, SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE, SMSM_APPS)); } } if (!smsm_info.intr_mux) smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX, SMSM_NUM_INTR_MUX * sizeof(uint32_t)); i = smsm_cb_init(); if (i) return i; wmb(); smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL); i = register_pm_notifier(&smsm_pm_nb); if (i) pr_err("%s: power state notif error %d\n", __func__, i); return 0; } void smsm_reset_modem(unsigned mode) { if (mode == SMSM_SYSTEM_DOWNLOAD) { mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD; } else if (mode == SMSM_MODEM_WAIT) { mode = SMSM_RESET | SMSM_MODEM_WAIT; } else { /* reset_mode is SMSM_RESET or default */ mode = SMSM_RESET; } smsm_change_state(SMSM_APPS_STATE, mode, mode); } EXPORT_SYMBOL(smsm_reset_modem); void smsm_reset_modem_cont(void) { unsigned long flags; uint32_t state; if 
(!smsm_info.state) return; spin_lock_irqsave(&smem_lock, flags); state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \ & ~SMSM_MODEM_WAIT; __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE)); wmb(); spin_unlock_irqrestore(&smem_lock, flags); } EXPORT_SYMBOL(smsm_reset_modem_cont); static void smsm_cb_snapshot(uint32_t use_wakelock) { int n; uint32_t new_state; unsigned long flags; int ret; ret = kfifo_avail(&smsm_snapshot_fifo); if (ret < SMSM_SNAPSHOT_SIZE) { pr_err("%s: SMSM snapshot full %d\n", __func__, ret); return; } /* * To avoid a race condition with notify_smsm_cb_clients_worker, the * following sequence must be followed: * 1) increment snapshot count * 2) insert data into FIFO * * Potentially in parallel, the worker: * a) verifies >= 1 snapshots are in FIFO * b) processes snapshot * c) decrements reference count * * This order ensures that 1 will always occur before abc. */ if (use_wakelock) { spin_lock_irqsave(&smsm_snapshot_count_lock, flags); if (smsm_snapshot_count == 0) { SMx_POWER_INFO("SMSM snapshot wake lock\n"); wake_lock(&smsm_snapshot_wakelock); } ++smsm_snapshot_count; spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags); } /* queue state entries */ for (n = 0; n < SMSM_NUM_ENTRIES; n++) { new_state = __raw_readl(SMSM_STATE_ADDR(n)); ret = kfifo_in(&smsm_snapshot_fifo, &new_state, sizeof(new_state)); if (ret != sizeof(new_state)) { pr_err("%s: SMSM snapshot failure %d\n", __func__, ret); goto restore_snapshot_count; } } /* queue wakelock usage flag */ ret = kfifo_in(&smsm_snapshot_fifo, &use_wakelock, sizeof(use_wakelock)); if (ret != sizeof(use_wakelock)) { pr_err("%s: SMSM snapshot failure %d\n", __func__, ret); goto restore_snapshot_count; } queue_work(smsm_cb_wq, &smsm_cb_work); return; restore_snapshot_count: if (use_wakelock) { spin_lock_irqsave(&smsm_snapshot_count_lock, flags); if (smsm_snapshot_count) { --smsm_snapshot_count; if (smsm_snapshot_count == 0) { SMx_POWER_INFO("SMSM snapshot wake unlock\n"); 
wake_unlock(&smsm_snapshot_wakelock); } } else { pr_err("%s: invalid snapshot count\n", __func__); } spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags); } } static irqreturn_t smsm_irq_handler(int irq, void *data) { unsigned long flags; if (irq == INT_ADSP_A11_SMSM) { uint32_t mux_val; static uint32_t prev_smem_q6_apps_smsm; if (smsm_info.intr_mux && cpu_is_qsd8x50()) { mux_val = __raw_readl( SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM)); if (mux_val != prev_smem_q6_apps_smsm) prev_smem_q6_apps_smsm = mux_val; } spin_lock_irqsave(&smem_lock, flags); smsm_cb_snapshot(1); spin_unlock_irqrestore(&smem_lock, flags); return IRQ_HANDLED; } spin_lock_irqsave(&smem_lock, flags); if (!smsm_info.state) { SMSM_INFO("<SM NO STATE>\n"); } else { unsigned old_apps, apps; unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)); old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)); SMSM_DBG("<SM %08x %08x>\n", apps, modm); if (apps & SMSM_RESET) { /* If we get an interrupt and the apps SMSM_RESET bit is already set, the modem is acking the app's reset ack. 
*/ if (!disable_smsm_reset_handshake) apps &= ~SMSM_RESET; /* Issue a fake irq to handle any * smd state changes during reset */ smd_fake_irq_handler(0); /* queue modem restart notify chain */ modem_queue_start_reset_notify(); } else if (modm & SMSM_RESET) { pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET."); if (!disable_smsm_reset_handshake) { apps |= SMSM_RESET; flush_cache_all(); outer_flush_all(); } modem_queue_start_reset_notify(); } else if (modm & SMSM_INIT) { if (!(apps & SMSM_INIT)) { apps |= SMSM_INIT; modem_queue_smsm_init_notify(); } if (modm & SMSM_SMDINIT) apps |= SMSM_SMDINIT; if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) == (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) apps |= SMSM_RUN; } else if (modm & SMSM_SYSTEM_DOWNLOAD) { pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD."); modem_queue_start_reset_notify(); } if (old_apps != apps) { SMSM_DBG("<SM %08x NOTIFY>\n", apps); __raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE)); do_smd_probe(); notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps)); } smsm_cb_snapshot(1); } spin_unlock_irqrestore(&smem_lock, flags); return IRQ_HANDLED; } static irqreturn_t smsm_modem_irq_handler(int irq, void *data) { SMx_POWER_INFO("SMSM Int Modem->Apps\n"); ++interrupt_stats[SMD_MODEM].smsm_in_count; return smsm_irq_handler(irq, data); } static irqreturn_t smsm_dsp_irq_handler(int irq, void *data) { SMx_POWER_INFO("SMSM Int LPASS->Apps\n"); ++interrupt_stats[SMD_Q6].smsm_in_count; return smsm_irq_handler(irq, data); } static irqreturn_t smsm_dsps_irq_handler(int irq, void *data) { SMx_POWER_INFO("SMSM Int DSPS->Apps\n"); ++interrupt_stats[SMD_DSPS].smsm_in_count; return smsm_irq_handler(irq, data); } static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data) { SMx_POWER_INFO("SMSM Int WCNSS->Apps\n"); ++interrupt_stats[SMD_WCNSS].smsm_in_count; return smsm_irq_handler(irq, data); } /* * Changes the global interrupt mask. 
The set and clear masks are re-applied * every time the global interrupt mask is updated for callback registration * and de-registration. * * The clear mask is applied first, so if a bit is set to 1 in both the clear * mask and the set mask, the result will be that the interrupt is set. * * @smsm_entry SMSM entry to change * @clear_mask 1 = clear bit, 0 = no-op * @set_mask 1 = set bit, 0 = no-op * * @returns 0 for success, < 0 for error */ int smsm_change_intr_mask(uint32_t smsm_entry, uint32_t clear_mask, uint32_t set_mask) { uint32_t old_mask, new_mask; unsigned long flags; if (smsm_entry >= SMSM_NUM_ENTRIES) { pr_err("smsm_change_state: Invalid entry %d\n", smsm_entry); return -EINVAL; } if (!smsm_info.intr_mask) { pr_err("smsm_change_intr_mask <SM NO STATE>\n"); return -EIO; } spin_lock_irqsave(&smem_lock, flags); smsm_states[smsm_entry].intr_mask_clear = clear_mask; smsm_states[smsm_entry].intr_mask_set = set_mask; old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS)); new_mask = (old_mask & ~clear_mask) | set_mask; __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS)); wmb(); spin_unlock_irqrestore(&smem_lock, flags); return 0; } EXPORT_SYMBOL(smsm_change_intr_mask); int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask) { if (smsm_entry >= SMSM_NUM_ENTRIES) { pr_err("smsm_change_state: Invalid entry %d\n", smsm_entry); return -EINVAL; } if (!smsm_info.intr_mask) { pr_err("smsm_change_intr_mask <SM NO STATE>\n"); return -EIO; } *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS)); return 0; } EXPORT_SYMBOL(smsm_get_intr_mask); int smsm_change_state(uint32_t smsm_entry, uint32_t clear_mask, uint32_t set_mask) { unsigned long flags; uint32_t old_state, new_state; if (smsm_entry >= SMSM_NUM_ENTRIES) { pr_err("smsm_change_state: Invalid entry %d", smsm_entry); return -EINVAL; } if (!smsm_info.state) { pr_err("smsm_change_state <SM NO STATE>\n"); return -EIO; } spin_lock_irqsave(&smem_lock, flags); old_state = 
__raw_readl(SMSM_STATE_ADDR(smsm_entry)); new_state = (old_state & ~clear_mask) | set_mask; __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry)); SMSM_DBG("smsm_change_state %x\n", new_state); notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state)); spin_unlock_irqrestore(&smem_lock, flags); return 0; } EXPORT_SYMBOL(smsm_change_state); uint32_t smsm_get_state(uint32_t smsm_entry) { uint32_t rv = 0; /* needs interface change to return error code */ if (smsm_entry >= SMSM_NUM_ENTRIES) { pr_err("smsm_change_state: Invalid entry %d", smsm_entry); return 0; } if (!smsm_info.state) { pr_err("smsm_get_state <SM NO STATE>\n"); } else { rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry)); } return rv; } EXPORT_SYMBOL(smsm_get_state); /** * Performs SMSM callback client notifiction. */ void notify_smsm_cb_clients_worker(struct work_struct *work) { struct smsm_state_cb_info *cb_info; struct smsm_state_info *state_info; int n; uint32_t new_state; uint32_t state_changes; uint32_t use_wakelock; int ret; unsigned long flags; if (!smd_initialized) return; while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) { mutex_lock(&smsm_lock); for (n = 0; n < SMSM_NUM_ENTRIES; n++) { state_info = &smsm_states[n]; ret = kfifo_out(&smsm_snapshot_fifo, &new_state, sizeof(new_state)); if (ret != sizeof(new_state)) { pr_err("%s: snapshot underflow %d\n", __func__, ret); mutex_unlock(&smsm_lock); return; } state_changes = state_info->last_value ^ new_state; if (state_changes) { SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n", n, state_info->last_value, new_state); list_for_each_entry(cb_info, &state_info->callbacks, cb_list) { if (cb_info->mask & state_changes) cb_info->notify(cb_info->data, state_info->last_value, new_state); } state_info->last_value = new_state; } } /* read wakelock flag */ ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock, sizeof(use_wakelock)); if (ret != sizeof(use_wakelock)) { pr_err("%s: snapshot underflow %d\n", __func__, ret); mutex_unlock(&smsm_lock); return; } 
mutex_unlock(&smsm_lock); if (use_wakelock) { spin_lock_irqsave(&smsm_snapshot_count_lock, flags); if (smsm_snapshot_count) { --smsm_snapshot_count; if (smsm_snapshot_count == 0) { SMx_POWER_INFO("SMSM snapshot" " wake unlock\n"); wake_unlock(&smsm_snapshot_wakelock); } } else { pr_err("%s: invalid snapshot count\n", __func__); } spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags); } } } /** * Registers callback for SMSM state notifications when the specified * bits change. * * @smsm_entry Processor entry to deregister * @mask Bits to deregister (if result is 0, callback is removed) * @notify Notification function to deregister * @data Opaque data passed in to callback * * @returns Status code * <0 error code * 0 inserted new entry * 1 updated mask of existing entry */ int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask, void (*notify)(void *, uint32_t, uint32_t), void *data) { struct smsm_state_info *state; struct smsm_state_cb_info *cb_info; struct smsm_state_cb_info *cb_found = 0; uint32_t new_mask = 0; int ret = 0; if (smsm_entry >= SMSM_NUM_ENTRIES) return -EINVAL; mutex_lock(&smsm_lock); if (!smsm_states) { /* smsm not yet initialized */ ret = -ENODEV; goto cleanup; } state = &smsm_states[smsm_entry]; list_for_each_entry(cb_info, &state->callbacks, cb_list) { if (!ret && (cb_info->notify == notify) && (cb_info->data == data)) { cb_info->mask |= mask; cb_found = cb_info; ret = 1; } new_mask |= cb_info->mask; } if (!cb_found) { cb_info = kmalloc(sizeof(struct smsm_state_cb_info), GFP_ATOMIC); if (!cb_info) { ret = -ENOMEM; goto cleanup; } cb_info->mask = mask; cb_info->notify = notify; cb_info->data = data; INIT_LIST_HEAD(&cb_info->cb_list); list_add_tail(&cb_info->cb_list, &state->callbacks); new_mask |= mask; } /* update interrupt notification mask */ if (smsm_entry == SMSM_MODEM_STATE) new_mask |= LEGACY_MODEM_SMSM_MASK; if (smsm_info.intr_mask) { unsigned long flags; spin_lock_irqsave(&smem_lock, flags); new_mask = (new_mask & 
~state->intr_mask_clear) | state->intr_mask_set; __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS)); wmb(); spin_unlock_irqrestore(&smem_lock, flags); } cleanup: mutex_unlock(&smsm_lock); return ret; } EXPORT_SYMBOL(smsm_state_cb_register); /** * Deregisters for SMSM state notifications for the specified bits. * * @smsm_entry Processor entry to deregister * @mask Bits to deregister (if result is 0, callback is removed) * @notify Notification function to deregister * @data Opaque data passed in to callback * * @returns Status code * <0 error code * 0 not found * 1 updated mask * 2 removed callback */ int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask, void (*notify)(void *, uint32_t, uint32_t), void *data) { struct smsm_state_cb_info *cb_info; struct smsm_state_cb_info *cb_tmp; struct smsm_state_info *state; uint32_t new_mask = 0; int ret = 0; if (smsm_entry >= SMSM_NUM_ENTRIES) return -EINVAL; mutex_lock(&smsm_lock); if (!smsm_states) { /* smsm not yet initialized */ mutex_unlock(&smsm_lock); return -ENODEV; } state = &smsm_states[smsm_entry]; list_for_each_entry_safe(cb_info, cb_tmp, &state->callbacks, cb_list) { if (!ret && (cb_info->notify == notify) && (cb_info->data == data)) { cb_info->mask &= ~mask; ret = 1; if (!cb_info->mask) { /* no mask bits set, remove callback */ list_del(&cb_info->cb_list); kfree(cb_info); ret = 2; continue; } } new_mask |= cb_info->mask; } /* update interrupt notification mask */ if (smsm_entry == SMSM_MODEM_STATE) new_mask |= LEGACY_MODEM_SMSM_MASK; if (smsm_info.intr_mask) { unsigned long flags; spin_lock_irqsave(&smem_lock, flags); new_mask = (new_mask & ~state->intr_mask_clear) | state->intr_mask_set; __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS)); wmb(); spin_unlock_irqrestore(&smem_lock, flags); } mutex_unlock(&smsm_lock); return ret; } EXPORT_SYMBOL(smsm_state_cb_deregister); int smd_module_init_notifier_register(struct notifier_block *nb) { int ret; if (!nb) return -EINVAL; 
mutex_lock(&smd_module_init_notifier_lock); ret = raw_notifier_chain_register(&smd_module_init_notifier_list, nb); if (smd_module_inited) nb->notifier_call(nb, 0, NULL); mutex_unlock(&smd_module_init_notifier_lock); return ret; } EXPORT_SYMBOL(smd_module_init_notifier_register); int smd_module_init_notifier_unregister(struct notifier_block *nb) { int ret; if (!nb) return -EINVAL; mutex_lock(&smd_module_init_notifier_lock); ret = raw_notifier_chain_unregister(&smd_module_init_notifier_list, nb); mutex_unlock(&smd_module_init_notifier_lock); return ret; } EXPORT_SYMBOL(smd_module_init_notifier_unregister); static void smd_module_init_notify(uint32_t state, void *data) { mutex_lock(&smd_module_init_notifier_lock); smd_module_inited = 1; raw_notifier_call_chain(&smd_module_init_notifier_list, state, data); mutex_unlock(&smd_module_init_notifier_lock); } int smd_core_init(void) { int r; unsigned long flags = IRQF_TRIGGER_RISING; SMD_INFO("smd_core_init()\n"); r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler, flags, "smd_dev", 0); if (r < 0) return r; interrupt_stats[SMD_MODEM].smd_interrupt_id = INT_A9_M2A_0; r = enable_irq_wake(INT_A9_M2A_0); if (r < 0) pr_err("smd_core_init: " "enable_irq_wake failed for INT_A9_M2A_0\n"); r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler, flags, "smsm_dev", 0); if (r < 0) { free_irq(INT_A9_M2A_0, 0); return r; } interrupt_stats[SMD_MODEM].smsm_interrupt_id = INT_A9_M2A_5; r = enable_irq_wake(INT_A9_M2A_5); if (r < 0) pr_err("smd_core_init: " "enable_irq_wake failed for INT_A9_M2A_5\n"); #if defined(CONFIG_QDSP6) #if (INT_ADSP_A11 == INT_ADSP_A11_SMSM) flags |= IRQF_SHARED; #endif r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler, flags, "smd_dev", smd_dsp_irq_handler); if (r < 0) { free_irq(INT_A9_M2A_0, 0); free_irq(INT_A9_M2A_5, 0); return r; } interrupt_stats[SMD_Q6].smd_interrupt_id = INT_ADSP_A11; r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler, flags, "smsm_dev", smsm_dsp_irq_handler); if (r < 0) { 
free_irq(INT_A9_M2A_0, 0); free_irq(INT_A9_M2A_5, 0); free_irq(INT_ADSP_A11, smd_dsp_irq_handler); return r; } interrupt_stats[SMD_Q6].smsm_interrupt_id = INT_ADSP_A11_SMSM; r = enable_irq_wake(INT_ADSP_A11); if (r < 0) pr_err("smd_core_init: " "enable_irq_wake failed for INT_ADSP_A11\n"); #if (INT_ADSP_A11 != INT_ADSP_A11_SMSM) r = enable_irq_wake(INT_ADSP_A11_SMSM); if (r < 0) pr_err("smd_core_init: enable_irq_wake " "failed for INT_ADSP_A11_SMSM\n"); #endif flags &= ~IRQF_SHARED; #endif #if defined(CONFIG_DSPS) r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler, flags, "smd_dev", smd_dsps_irq_handler); if (r < 0) { free_irq(INT_A9_M2A_0, 0); free_irq(INT_A9_M2A_5, 0); free_irq(INT_ADSP_A11, smd_dsp_irq_handler); free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler); return r; } interrupt_stats[SMD_DSPS].smd_interrupt_id = INT_DSPS_A11; r = enable_irq_wake(INT_DSPS_A11); if (r < 0) pr_err("smd_core_init: " "enable_irq_wake failed for INT_ADSP_A11\n"); #endif #if defined(CONFIG_WCNSS) r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler, flags, "smd_dev", smd_wcnss_irq_handler); if (r < 0) { free_irq(INT_A9_M2A_0, 0); free_irq(INT_A9_M2A_5, 0); free_irq(INT_ADSP_A11, smd_dsp_irq_handler); free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler); free_irq(INT_DSPS_A11, smd_dsps_irq_handler); return r; } interrupt_stats[SMD_WCNSS].smd_interrupt_id = INT_WCNSS_A11; r = enable_irq_wake(INT_WCNSS_A11); if (r < 0) pr_err("smd_core_init: " "enable_irq_wake failed for INT_WCNSS_A11\n"); r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler, flags, "smsm_dev", smsm_wcnss_irq_handler); if (r < 0) { free_irq(INT_A9_M2A_0, 0); free_irq(INT_A9_M2A_5, 0); free_irq(INT_ADSP_A11, smd_dsp_irq_handler); free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler); free_irq(INT_DSPS_A11, smd_dsps_irq_handler); free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler); return r; } interrupt_stats[SMD_WCNSS].smsm_interrupt_id = INT_WCNSS_A11_SMSM; r = enable_irq_wake(INT_WCNSS_A11_SMSM); if (r < 0) 
pr_err("smd_core_init: " "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n"); #endif #if defined(CONFIG_DSPS_SMSM) r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler, flags, "smsm_dev", smsm_dsps_irq_handler); if (r < 0) { free_irq(INT_A9_M2A_0, 0); free_irq(INT_A9_M2A_5, 0); free_irq(INT_ADSP_A11, smd_dsp_irq_handler); free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler); free_irq(INT_DSPS_A11, smd_dsps_irq_handler); free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler); free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler); return r; } interrupt_stats[SMD_DSPS].smsm_interrupt_id = INT_DSPS_A11_SMSM; r = enable_irq_wake(INT_DSPS_A11_SMSM); if (r < 0) pr_err("smd_core_init: " "enable_irq_wake failed for INT_DSPS_A11_SMSM\n"); #endif SMD_INFO("smd_core_init() done\n"); return 0; } static int intr_init(struct interrupt_config_item *private_irq, struct smd_irq_config *platform_irq, struct platform_device *pdev ) { int irq_id; int ret; int ret_wake; private_irq->out_bit_pos = platform_irq->out_bit_pos; private_irq->out_offset = platform_irq->out_offset; private_irq->out_base = platform_irq->out_base; irq_id = platform_get_irq_byname( pdev, platform_irq->irq_name ); SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__, platform_irq->irq_name, irq_id); ret = request_irq(irq_id, private_irq->irq_handler, platform_irq->flags, platform_irq->device_name, (void *)platform_irq->dev_id ); if (ret < 0) { platform_irq->irq_id = ret; private_irq->irq_id = ret; } else { platform_irq->irq_id = irq_id; private_irq->irq_id = irq_id; ret_wake = enable_irq_wake(irq_id); if (ret_wake < 0) { pr_err("smd: enable_irq_wake failed on %s", platform_irq->irq_name); } } return ret; } int sort_cmp_func(const void *a, const void *b) { struct smem_area *left = (struct smem_area *)(a); struct smem_area *right = (struct smem_area *)(b); return left->phys_addr - right->phys_addr; } int smd_core_platform_init(struct platform_device *pdev) { int i; int ret; uint32_t num_ss; struct smd_platform 
*smd_platform_data; struct smd_subsystem_config *smd_ss_config_list; struct smd_subsystem_config *cfg; int err_ret = 0; struct smd_smem_regions *smd_smem_areas; int smem_idx = 0; smd_platform_data = pdev->dev.platform_data; num_ss = smd_platform_data->num_ss_configs; smd_ss_config_list = smd_platform_data->smd_ss_configs; if (smd_platform_data->smd_ssr_config) disable_smsm_reset_handshake = smd_platform_data-> smd_ssr_config->disable_smsm_reset_handshake; smd_smem_areas = smd_platform_data->smd_smem_areas; if (smd_smem_areas) { num_smem_areas = smd_platform_data->num_smem_areas; smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas, GFP_KERNEL); if (!smem_areas) { pr_err("%s: smem_areas kmalloc failed\n", __func__); err_ret = -ENOMEM; goto smem_areas_alloc_fail; } for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) { smem_areas[smem_idx].phys_addr = smd_smem_areas[smem_idx].phys_addr; smem_areas[smem_idx].size = smd_smem_areas[smem_idx].size; smem_areas[smem_idx].virt_addr = ioremap_nocache( (unsigned long)(smem_areas[smem_idx].phys_addr), smem_areas[smem_idx].size); if (!smem_areas[smem_idx].virt_addr) { pr_err("%s: ioremap_nocache() of addr:%p" " size: %x\n", __func__, smem_areas[smem_idx].phys_addr, smem_areas[smem_idx].size); err_ret = -ENOMEM; ++smem_idx; goto smem_failed; } } sort(smem_areas, num_smem_areas, sizeof(struct smem_area), sort_cmp_func, NULL); } for (i = 0; i < num_ss; i++) { cfg = &smd_ss_config_list[i]; ret = intr_init( &private_intr_config[cfg->irq_config_id].smd, &cfg->smd_int, pdev ); if (ret < 0) { err_ret = ret; pr_err("smd: register irq failed on %s\n", cfg->smd_int.irq_name); goto intr_failed; } interrupt_stats[cfg->irq_config_id].smd_interrupt_id = cfg->smd_int.irq_id; /* only init smsm structs if this edge supports smsm */ if (cfg->smsm_int.irq_id) ret = intr_init( &private_intr_config[cfg->irq_config_id].smsm, &cfg->smsm_int, pdev ); if (ret < 0) { err_ret = ret; pr_err("smd: register irq failed on %s\n", 
cfg->smsm_int.irq_name); goto intr_failed; } if (cfg->smsm_int.irq_id) interrupt_stats[cfg->irq_config_id].smsm_interrupt_id = cfg->smsm_int.irq_id; if (cfg->subsys_name) strlcpy(edge_to_pids[cfg->edge].subsys_name, cfg->subsys_name, SMD_MAX_CH_NAME_LEN); } SMD_INFO("smd_core_platform_init() done\n"); return 0; intr_failed: pr_err("smd: deregistering IRQs\n"); for (i = 0; i < num_ss; ++i) { cfg = &smd_ss_config_list[i]; if (cfg->smd_int.irq_id >= 0) free_irq(cfg->smd_int.irq_id, (void *)cfg->smd_int.dev_id ); if (cfg->smsm_int.irq_id >= 0) free_irq(cfg->smsm_int.irq_id, (void *)cfg->smsm_int.dev_id ); } smem_failed: for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx) iounmap(smem_areas[smem_idx].virt_addr); kfree(smem_areas); smem_areas_alloc_fail: return err_ret; } static int __devinit msm_smd_probe(struct platform_device *pdev) { int ret; SMD_INFO("smd probe\n"); INIT_WORK(&probe_work, smd_channel_probe_worker); channel_close_wq = create_singlethread_workqueue("smd_channel_close"); if (IS_ERR(channel_close_wq)) { pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__); return -ENOMEM; } if (smsm_init()) { pr_err("smsm_init() failed\n"); return -1; } if (pdev) { if (pdev->dev.of_node) { pr_err("SMD: Device tree not currently supported\n"); return -ENODEV; } else if (pdev->dev.platform_data) { ret = smd_core_platform_init(pdev); if (ret) { pr_err( "SMD: smd_core_platform_init() failed\n"); return -ENODEV; } } else { ret = smd_core_init(); if (ret) { pr_err("smd_core_init() failed\n"); return -ENODEV; } } } else { pr_err("SMD: PDEV not found\n"); return -ENODEV; } smd_initialized = 1; smd_alloc_loopback_channel(); smsm_irq_handler(0, 0); tasklet_schedule(&smd_fake_irq_tasklet); return 0; } static int restart_notifier_cb(struct notifier_block *this, unsigned long code, void *data); static struct restart_notifier_block restart_notifiers[] = { {SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb}, {SMD_Q6, "lpass", .nb.notifier_call = 
restart_notifier_cb}, {SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb}, {SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb}, {SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb}, }; static int restart_notifier_cb(struct notifier_block *this, unsigned long code, void *data) { /* * Some SMD or SMSM clients assume SMD/SMSM SSR handling will be * done in the AFTER_SHUTDOWN level. If this ever changes, extra * care should be taken to verify no clients are broken. */ if (code == SUBSYS_AFTER_SHUTDOWN) { struct restart_notifier_block *notifier; notifier = container_of(this, struct restart_notifier_block, nb); SMD_INFO("%s: ssrestart for processor %d ('%s')\n", __func__, notifier->processor, notifier->name); smd_channel_reset(notifier->processor); } return NOTIFY_DONE; } static __init int modem_restart_late_init(void) { int i; void *handle; struct restart_notifier_block *nb; for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) { nb = &restart_notifiers[i]; handle = subsys_notif_register_notifier(nb->name, &nb->nb); SMD_DBG("%s: registering notif for '%s', handle=%p\n", __func__, nb->name, handle); } return 0; } late_initcall(modem_restart_late_init); static struct platform_driver msm_smd_driver = { .probe = msm_smd_probe, .driver = { .name = MODULE_NAME, .owner = THIS_MODULE, }, }; int __init msm_smd_init(void) { static bool registered; int rc; if (registered) return 0; registered = true; rc = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC); if (rc) { pr_err("%s: remote spinlock init failed %d\n", __func__, rc); return rc; } spinlocks_initialized = 1; rc = platform_driver_register(&msm_smd_driver); if (rc) { pr_err("%s: msm_smd_driver register failed %d\n", __func__, rc); return rc; } smd_module_init_notify(0, NULL); return 0; } module_init(msm_smd_init); MODULE_DESCRIPTION("MSM Shared Memory Core"); MODULE_AUTHOR("Brian Swetland <swetland@google.com>"); MODULE_LICENSE("GPL");
gpl-2.0
cellphone/lge_p880_kernel_3.0
fs/hfs/btree.c
912
8736
/* * linux/fs/hfs/btree.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Handle opening/closing btree */ #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/log2.h> #include "btree.h" /* Get a reference to a B*Tree and do some initial checks */ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp) { struct hfs_btree *tree; struct hfs_btree_header_rec *head; struct address_space *mapping; struct page *page; unsigned int size; tree = kzalloc(sizeof(*tree), GFP_KERNEL); if (!tree) return NULL; mutex_init(&tree->tree_lock); spin_lock_init(&tree->hash_lock); /* Set the correct compare function */ tree->sb = sb; tree->cnid = id; tree->keycmp = keycmp; tree->inode = iget_locked(sb, id); if (!tree->inode) goto free_tree; BUG_ON(!(tree->inode->i_state & I_NEW)); { struct hfs_mdb *mdb = HFS_SB(sb)->mdb; HFS_I(tree->inode)->flags = 0; mutex_init(&HFS_I(tree->inode)->extents_lock); switch (id) { case HFS_EXT_CNID: hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize, mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz)); tree->inode->i_mapping->a_ops = &hfs_btree_aops; break; case HFS_CAT_CNID: hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize, mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz)); tree->inode->i_mapping->a_ops = &hfs_btree_aops; break; default: BUG(); } } unlock_new_inode(tree->inode); if (!HFS_I(tree->inode)->first_blocks) { printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n"); goto free_inode; } mapping = tree->inode->i_mapping; page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) goto free_inode; /* Load the header */ head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); tree->root = be32_to_cpu(head->root); tree->leaf_count = be32_to_cpu(head->leaf_count); tree->leaf_head = be32_to_cpu(head->leaf_head); tree->leaf_tail = be32_to_cpu(head->leaf_tail); tree->node_count = 
be32_to_cpu(head->node_count); tree->free_nodes = be32_to_cpu(head->free_nodes); tree->attributes = be32_to_cpu(head->attributes); tree->node_size = be16_to_cpu(head->node_size); tree->max_key_len = be16_to_cpu(head->max_key_len); tree->depth = be16_to_cpu(head->depth); size = tree->node_size; if (!is_power_of_2(size)) goto fail_page; if (!tree->node_count) goto fail_page; switch (id) { case HFS_EXT_CNID: if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) { printk(KERN_ERR "hfs: invalid extent max_key_len %d\n", tree->max_key_len); goto fail_page; } break; case HFS_CAT_CNID: if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) { printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n", tree->max_key_len); goto fail_page; } break; default: BUG(); } tree->node_size_shift = ffs(size) - 1; tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; kunmap(page); page_cache_release(page); return tree; fail_page: page_cache_release(page); free_inode: tree->inode->i_mapping->a_ops = &hfs_aops; iput(tree->inode); free_tree: kfree(tree); return NULL; } /* Release resources used by a btree */ void hfs_btree_close(struct hfs_btree *tree) { struct hfs_bnode *node; int i; if (!tree) return; for (i = 0; i < NODE_HASH_SIZE; i++) { while ((node = tree->node_hash[i])) { tree->node_hash[i] = node->next_hash; if (atomic_read(&node->refcnt)) printk(KERN_ERR "hfs: node %d:%d still has %d user(s)!\n", node->tree->cnid, node->this, atomic_read(&node->refcnt)); hfs_bnode_free(node); tree->node_hash_cnt--; } } iput(tree->inode); kfree(tree); } void hfs_btree_write(struct hfs_btree *tree) { struct hfs_btree_header_rec *head; struct hfs_bnode *node; struct page *page; node = hfs_bnode_find(tree, 0); if (IS_ERR(node)) /* panic? 
*/ return; /* Load the header */ page = node->page[0]; head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); head->root = cpu_to_be32(tree->root); head->leaf_count = cpu_to_be32(tree->leaf_count); head->leaf_head = cpu_to_be32(tree->leaf_head); head->leaf_tail = cpu_to_be32(tree->leaf_tail); head->node_count = cpu_to_be32(tree->node_count); head->free_nodes = cpu_to_be32(tree->free_nodes); head->attributes = cpu_to_be32(tree->attributes); head->depth = cpu_to_be16(tree->depth); kunmap(page); set_page_dirty(page); hfs_bnode_put(node); } static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) { struct hfs_btree *tree = prev->tree; struct hfs_bnode *node; struct hfs_bnode_desc desc; __be32 cnid; node = hfs_bnode_create(tree, idx); if (IS_ERR(node)) return node; if (!tree->free_nodes) panic("FIXME!!!"); tree->free_nodes--; prev->next = idx; cnid = cpu_to_be32(idx); hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4); node->type = HFS_NODE_MAP; node->num_recs = 1; hfs_bnode_clear(node, 0, tree->node_size); desc.next = 0; desc.prev = 0; desc.type = HFS_NODE_MAP; desc.height = 0; desc.num_recs = cpu_to_be16(1); desc.reserved = 0; hfs_bnode_write(node, &desc, 0, sizeof(desc)); hfs_bnode_write_u16(node, 14, 0x8000); hfs_bnode_write_u16(node, tree->node_size - 2, 14); hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6); return node; } struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) { struct hfs_bnode *node, *next_node; struct page **pagep; u32 nidx, idx; unsigned off; u16 off16; u16 len; u8 *data, byte, m; int i; while (!tree->free_nodes) { struct inode *inode = tree->inode; u32 count; int res; res = hfs_extend_file(inode); if (res) return ERR_PTR(res); HFS_I(inode)->phys_size = inode->i_size = (loff_t)HFS_I(inode)->alloc_blocks * HFS_SB(tree->sb)->alloc_blksz; HFS_I(inode)->fs_blocks = inode->i_size >> tree->sb->s_blocksize_bits; inode_set_bytes(inode, inode->i_size); count = 
inode->i_size >> tree->node_size_shift; tree->free_nodes = count - tree->node_count; tree->node_count = count; } nidx = 0; node = hfs_bnode_find(tree, nidx); if (IS_ERR(node)) return node; len = hfs_brec_lenoff(node, 2, &off16); off = off16; off += node->page_offset; pagep = node->page + (off >> PAGE_CACHE_SHIFT); data = kmap(*pagep); off &= ~PAGE_CACHE_MASK; idx = 0; for (;;) { while (len) { byte = data[off]; if (byte != 0xff) { for (m = 0x80, i = 0; i < 8; m >>= 1, i++) { if (!(byte & m)) { idx += i; data[off] |= m; set_page_dirty(*pagep); kunmap(*pagep); tree->free_nodes--; mark_inode_dirty(tree->inode); hfs_bnode_put(node); return hfs_bnode_create(tree, idx); } } } if (++off >= PAGE_CACHE_SIZE) { kunmap(*pagep); data = kmap(*++pagep); off = 0; } idx += 8; len--; } kunmap(*pagep); nidx = node->next; if (!nidx) { printk(KERN_DEBUG "hfs: create new bmap node...\n"); next_node = hfs_bmap_new_bmap(node, idx); } else next_node = hfs_bnode_find(tree, nidx); hfs_bnode_put(node); if (IS_ERR(next_node)) return next_node; node = next_node; len = hfs_brec_lenoff(node, 0, &off16); off = off16; off += node->page_offset; pagep = node->page + (off >> PAGE_CACHE_SHIFT); data = kmap(*pagep); off &= ~PAGE_CACHE_MASK; } } void hfs_bmap_free(struct hfs_bnode *node) { struct hfs_btree *tree; struct page *page; u16 off, len; u32 nidx; u8 *data, byte, m; dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this); tree = node->tree; nidx = node->this; node = hfs_bnode_find(tree, 0); if (IS_ERR(node)) return; len = hfs_brec_lenoff(node, 2, &off); while (nidx >= len * 8) { u32 i; nidx -= len * 8; i = node->next; hfs_bnode_put(node); if (!i) { /* panic */; printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this); return; } node = hfs_bnode_find(tree, i); if (IS_ERR(node)) return; if (node->type != HFS_NODE_MAP) { /* panic */; printk(KERN_CRIT "hfs: invalid bmap found! 
(%u,%d)\n", node->this, node->type); hfs_bnode_put(node); return; } len = hfs_brec_lenoff(node, 0, &off); } off += node->page_offset + nidx / 8; page = node->page[off >> PAGE_CACHE_SHIFT]; data = kmap(page); off &= ~PAGE_CACHE_MASK; m = 1 << (~nidx & 7); byte = data[off]; if (!(byte & m)) { printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type); kunmap(page); hfs_bnode_put(node); return; } data[off] = byte & ~m; set_page_dirty(page); kunmap(page); hfs_bnode_put(node); tree->free_nodes++; mark_inode_dirty(tree->inode); }
gpl-2.0
linux-wpan/linux-wpan
drivers/scsi/qla4xxx/ql4_83xx.c
912
44525
/* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ #include <linux/ratelimit.h> #include "ql4_def.h" #include "ql4_version.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr) { return readl((void __iomem *)(ha->nx_pcibase + addr)); } void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val) { writel(val, (void __iomem *)(ha->nx_pcibase + addr)); } static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr) { uint32_t val; int ret_val = QLA_SUCCESS; qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr); val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num)); if (val != addr) { ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n", __func__, addr, val); ret_val = QLA_ERROR; } return ret_val; } int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, uint32_t *data) { int ret_val; ret_val = qla4_83xx_set_win_base(ha, addr); if (ret_val == QLA_SUCCESS) *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD); else ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n", __func__, addr); return ret_val; } int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, uint32_t data) { int ret_val; ret_val = qla4_83xx_set_win_base(ha, addr); if (ret_val == QLA_SUCCESS) qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data); else ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n", __func__, addr, data); return ret_val; } static int qla4_83xx_flash_lock(struct scsi_qla_host *ha) { int lock_owner; int timeout = 0; uint32_t lock_status = 0; int ret_val = QLA_SUCCESS; while (lock_status == 0) { lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK); if (lock_status) break; if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) { lock_owner = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK_ID); ql4_printk(KERN_ERR, 
ha, "%s: flash lock by func %d failed, held by func %d\n", __func__, ha->func_num, lock_owner); ret_val = QLA_ERROR; break; } msleep(20); } qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num); return ret_val; } static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha) { /* Reading FLASH_UNLOCK register unlocks the Flash */ qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF); qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK); } int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr, uint8_t *p_data, int u32_word_count) { int i; uint32_t u32_word; uint32_t addr = flash_addr; int ret_val = QLA_SUCCESS; ret_val = qla4_83xx_flash_lock(ha); if (ret_val == QLA_ERROR) goto exit_lock_error; if (addr & 0x03) { ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n", __func__, addr); ret_val = QLA_ERROR; goto exit_flash_read; } for (i = 0; i < u32_word_count; i++) { ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW, (addr & 0xFFFF0000)); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n!", __func__, addr); goto exit_flash_read; } ret_val = qla4_83xx_rd_reg_indirect(ha, QLA83XX_FLASH_DIRECT_DATA(addr), &u32_word); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n", __func__, addr); goto exit_flash_read; } *(__le32 *)p_data = le32_to_cpu(u32_word); p_data = p_data + 4; addr = addr + 4; } exit_flash_read: qla4_83xx_flash_unlock(ha); exit_lock_error: return ret_val; } int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr, uint8_t *p_data, int u32_word_count) { uint32_t i; uint32_t u32_word; uint32_t flash_offset; uint32_t addr = flash_addr; int ret_val = QLA_SUCCESS; flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1); if (addr & 0x3) { ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n", __func__, addr); ret_val = QLA_ERROR; goto exit_lockless_read; } ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW, 
addr); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", __func__, addr); goto exit_lockless_read; } /* Check if data is spread across multiple sectors */ if ((flash_offset + (u32_word_count * sizeof(uint32_t))) > (QLA83XX_FLASH_SECTOR_SIZE - 1)) { /* Multi sector read */ for (i = 0; i < u32_word_count; i++) { ret_val = qla4_83xx_rd_reg_indirect(ha, QLA83XX_FLASH_DIRECT_DATA(addr), &u32_word); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n", __func__, addr); goto exit_lockless_read; } *(__le32 *)p_data = le32_to_cpu(u32_word); p_data = p_data + 4; addr = addr + 4; flash_offset = flash_offset + 4; if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) { /* This write is needed once for each sector */ ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW, addr); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", __func__, addr); goto exit_lockless_read; } flash_offset = 0; } } } else { /* Single sector read */ for (i = 0; i < u32_word_count; i++) { ret_val = qla4_83xx_rd_reg_indirect(ha, QLA83XX_FLASH_DIRECT_DATA(addr), &u32_word); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n", __func__, addr); goto exit_lockless_read; } *(__le32 *)p_data = le32_to_cpu(u32_word); p_data = p_data + 4; addr = addr + 4; } } exit_lockless_read: return ret_val; } void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha) { if (qla4_83xx_flash_lock(ha)) ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__); /* * We got the lock, or someone else is holding the lock * since we are restting, forcefully unlock */ qla4_83xx_flash_unlock(ha); } #define INTENT_TO_RECOVER 0x01 #define PROCEED_TO_RECOVER 0x02 static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha) { uint32_t lock = 0, lockid; int ret_val = QLA_ERROR; lockid = ha->isp_ops->rd_reg_direct(ha, 
QLA83XX_DRV_LOCKRECOVERY); /* Check for other Recovery in progress, go wait */ if ((lockid & 0x3) != 0) goto exit_lock_recovery; /* Intent to Recover */ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, (ha->func_num << 2) | INTENT_TO_RECOVER); msleep(200); /* Check Intent to Recover is advertised */ lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY); if ((lockid & 0x3C) != (ha->func_num << 2)) goto exit_lock_recovery; ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n", __func__, ha->func_num); /* Proceed to Recover */ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, (ha->func_num << 2) | PROCEED_TO_RECOVER); /* Force Unlock */ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF); ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK); /* Clear bits 0-5 in IDC_RECOVERY register*/ ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0); /* Get lock */ lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK); if (lock) { lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID); lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num; ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid); ret_val = QLA_SUCCESS; } exit_lock_recovery: return ret_val; } #define QLA83XX_DRV_LOCK_MSLEEP 200 int qla4_83xx_drv_lock(struct scsi_qla_host *ha) { int timeout = 0; uint32_t status = 0; int ret_val = QLA_SUCCESS; uint32_t first_owner = 0; uint32_t tmo_owner = 0; uint32_t lock_id; uint32_t func_num; uint32_t lock_cnt; while (status == 0) { status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK); if (status) { /* Increment Counter (8-31) and update func_num (0-7) on * getting a successful lock */ lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num; qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id); break; } if (timeout == 0) /* Save counter + ID of function holding the lock for * first failure */ first_owner = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID); if 
(++timeout >= (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) { tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); func_num = tmo_owner & 0xFF; lock_cnt = tmo_owner >> 8; ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n", __func__, ha->func_num, func_num, lock_cnt, (first_owner & 0xFF)); if (first_owner != tmo_owner) { /* Some other driver got lock, OR same driver * got lock again (counter value changed), when * we were waiting for lock. * Retry for another 2 sec */ ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n", __func__, ha->func_num); timeout = 0; } else { /* Same driver holding lock > 2sec. * Force Recovery */ ret_val = qla4_83xx_lock_recovery(ha); if (ret_val == QLA_SUCCESS) { /* Recovered and got lock */ ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n", __func__, ha->func_num); break; } /* Recovery Failed, some other function * has the lock, wait for 2secs and retry */ ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n", __func__, ha->func_num); timeout = 0; } } msleep(QLA83XX_DRV_LOCK_MSLEEP); } return ret_val; } void qla4_83xx_drv_unlock(struct scsi_qla_host *ha) { int id; id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); if ((id & 0xFF) != ha->func_num) { ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n", __func__, ha->func_num, (id & 0xFF)); return; } /* Keep lock counter value, update the ha->func_num to 0xFF */ qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF)); qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK); } void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha) { uint32_t idc_ctrl; idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); idc_ctrl |= DONTRESET_BIT0; qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__, idc_ctrl)); } void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha) { uint32_t idc_ctrl; 
idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); idc_ctrl &= ~DONTRESET_BIT0; qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__, idc_ctrl)); } int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha) { uint32_t idc_ctrl; idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); return idc_ctrl & DONTRESET_BIT0; } /*-------------------------IDC State Machine ---------------------*/ enum { UNKNOWN_CLASS = 0, NIC_CLASS, FCOE_CLASS, ISCSI_CLASS }; struct device_info { int func_num; int device_type; int port_num; }; int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha) { uint32_t drv_active; uint32_t dev_part, dev_part1, dev_part2; int i; struct device_info device_map[16]; int func_nibble; int nibble; int nic_present = 0; int iscsi_present = 0; int iscsi_func_low = 0; /* Use the dev_partition register to determine the PCI function number * and then check drv_active register to see which driver is loaded */ dev_part1 = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]); dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2); drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]); /* Each function has 4 bits in dev_partition Info register, * Lower 2 bits - device type, Upper 2 bits - physical port number */ dev_part = dev_part1; for (i = nibble = 0; i <= 15; i++, nibble++) { func_nibble = dev_part & (0xF << (nibble * 4)); func_nibble >>= (nibble * 4); device_map[i].func_num = i; device_map[i].device_type = func_nibble & 0x3; device_map[i].port_num = func_nibble & 0xC; if (device_map[i].device_type == NIC_CLASS) { if (drv_active & (1 << device_map[i].func_num)) { nic_present++; break; } } else if (device_map[i].device_type == ISCSI_CLASS) { if (drv_active & (1 << device_map[i].func_num)) { if (!iscsi_present || (iscsi_present && (iscsi_func_low > device_map[i].func_num))) iscsi_func_low = device_map[i].func_num; iscsi_present++; } } /* For function_num[8..15] get info 
from dev_part2 register */ if (nibble == 7) { nibble = 0; dev_part = dev_part2; } } /* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers * present. */ if (!nic_present && (ha->func_num == iscsi_func_low)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: can reset - NIC not present and lower iSCSI function is %d\n", __func__, ha->func_num)); return 1; } return 0; } /** * qla4_83xx_need_reset_handler - Code to start reset sequence * @ha: pointer to adapter structure * * Note: IDC lock must be held upon entry **/ void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha) { uint32_t dev_state, drv_state, drv_active; unsigned long reset_timeout, dev_init_timeout; ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n", __func__); if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n", __func__)); qla4_8xxx_set_rst_ready(ha); /* Non-reset owners ACK Reset and wait for device INIT state * as part of Reset Recovery by Reset Owner */ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); do { if (time_after_eq(jiffies, dev_init_timeout)) { ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n", __func__); break; } ha->isp_ops->idc_unlock(ha); msleep(1000); ha->isp_ops->idc_lock(ha); dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); } while (dev_state == QLA8XXX_DEV_NEED_RESET); } else { qla4_8xxx_set_rst_ready(ha); reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n", __func__, drv_state, drv_active); while (drv_state != drv_active) { if (time_after_eq(jiffies, reset_timeout)) { ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! 
drv_state: 0x%08x, drv_active: 0x%08x\n", __func__, DRIVER_NAME, drv_state, drv_active); break; } ha->isp_ops->idc_unlock(ha); msleep(1000); ha->isp_ops->idc_lock(ha); drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); } if (drv_state != drv_active) { ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n", __func__, (drv_active ^ drv_state)); drv_active = drv_active & drv_state; qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active); } clear_bit(AF_8XXX_RST_OWNER, &ha->flags); /* Start Reset Recovery */ qla4_8xxx_device_bootstrap(ha); } } void qla4_83xx_get_idc_param(struct scsi_qla_host *ha) { uint32_t idc_params, ret_val; ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR, (uint8_t *)&idc_params, 1); if (ret_val == QLA_SUCCESS) { ha->nx_dev_init_timeout = idc_params & 0xFFFF; ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF; } else { ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT; ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT; } DEBUG2(ql4_printk(KERN_DEBUG, ha, "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n", __func__, ha->nx_dev_init_timeout, ha->nx_reset_timeout)); } /*-------------------------Reset Sequence Functions-----------------------*/ static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha) { uint8_t *phdr; if (!ha->reset_tmplt.buff) { ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n", __func__); return; } phdr = ha->reset_tmplt.buff; DEBUG2(ql4_printk(KERN_INFO, ha, "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n", *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4), *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8), *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12), *(phdr+13), *(phdr+14), *(phdr+15))); } static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha) { uint8_t *p_cache; uint32_t src, count, size; uint64_t dest; int 
ret_val = QLA_SUCCESS; src = QLA83XX_BOOTLOADER_FLASH_ADDR; dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR); size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE); /* 128 bit alignment check */ if (size & 0xF) size = (size + 16) & ~0xF; /* 16 byte count */ count = size/16; p_cache = vmalloc(size); if (p_cache == NULL) { ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n", __func__); ret_val = QLA_ERROR; goto exit_copy_bootloader; } ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache, size / sizeof(uint32_t)); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n", __func__); goto exit_copy_error; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n", __func__)); /* 128 bit/16 byte write to MS memory */ ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache, count); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n", __func__); goto exit_copy_error; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n", __func__, size)); exit_copy_error: vfree(p_cache); exit_copy_bootloader: return ret_val; } static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha) { uint32_t val, ret_val = QLA_ERROR; int retries = CRB_CMDPEG_CHECK_RETRY_COUNT; do { val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE); if (val == PHAN_INITIALIZE_COMPLETE) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Command Peg initialization complete. State=0x%x\n", __func__, val)); ret_val = QLA_SUCCESS; break; } msleep(CRB_CMDPEG_CHECK_DELAY); } while (--retries); return ret_val; } /** * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till * value read ANDed with test_mask is equal to test_result. * * @ha : Pointer to adapter structure * @addr : CRB register address * @duration : Poll for total of "duration" msecs * @test_mask : Mask value read with "test_mask" * @test_result : Compare (value&test_mask) with test_result. 
**/ static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr, int duration, uint32_t test_mask, uint32_t test_result) { uint32_t value; uint8_t retries; int ret_val = QLA_SUCCESS; ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value); if (ret_val == QLA_ERROR) goto exit_poll_reg; retries = duration / 10; do { if ((value & test_mask) != test_result) { msleep(duration / 10); ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value); if (ret_val == QLA_ERROR) goto exit_poll_reg; ret_val = QLA_ERROR; } else { ret_val = QLA_SUCCESS; break; } } while (retries--); exit_poll_reg: if (ret_val == QLA_ERROR) { ha->reset_tmplt.seq_error++; ql4_printk(KERN_ERR, ha, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n", __func__, value, test_mask, test_result); } return ret_val; } static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha) { uint32_t sum = 0; uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff; int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t); int ret_val; while (u16_count-- > 0) sum += *buff++; while (sum >> 16) sum = (sum & 0xFFFF) + (sum >> 16); /* checksum of 0 indicates a valid template */ if (~sum) { ret_val = QLA_SUCCESS; } else { ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n", __func__); ret_val = QLA_ERROR; } return ret_val; } /** * qla4_83xx_read_reset_template - Read Reset Template from Flash * @ha: Pointer to adapter structure **/ void qla4_83xx_read_reset_template(struct scsi_qla_host *ha) { uint8_t *p_buff; uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size; uint32_t ret_val; ha->reset_tmplt.seq_error = 0; ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE); if (ha->reset_tmplt.buff == NULL) { ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n", __func__); goto exit_read_reset_template; } p_buff = ha->reset_tmplt.buff; addr = QLA83XX_RESET_TEMPLATE_ADDR; tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) / sizeof(uint32_t); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read 
template hdr size %d from Flash\n", __func__, tmplt_hdr_def_size)); /* Copy template header from flash */ ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff, tmplt_hdr_def_size); if (ret_val != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n", __func__); goto exit_read_template_error; } ha->reset_tmplt.hdr = (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff; /* Validate the template header size and signature */ tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t); if ((tmplt_hdr_size != tmplt_hdr_def_size) || (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) { ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n", __func__, tmplt_hdr_size, tmplt_hdr_def_size); goto exit_read_template_error; } addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size; p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size; tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size - ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read rest of the template size %d\n", __func__, ha->reset_tmplt.hdr->size)); /* Copy rest of the template */ ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff, tmplt_hdr_def_size); if (ret_val != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "%s: Failed to read reset tempelate\n", __func__); goto exit_read_template_error; } /* Integrity check */ if (qla4_83xx_reset_seq_checksum_test(ha)) { ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n", __func__); goto exit_read_template_error; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n", __func__)); /* Get STOP, START, INIT sequence offsets */ ha->reset_tmplt.init_offset = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->init_seq_offset; ha->reset_tmplt.start_offset = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->start_seq_offset; ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size; 
qla4_83xx_dump_reset_seq_hdr(ha); goto exit_read_reset_template; exit_read_template_error: vfree(ha->reset_tmplt.buff); exit_read_reset_template: return; } /** * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr. * * @ha : Pointer to adapter structure * @raddr : CRB address to read from * @waddr : CRB address to write to **/ static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha, uint32_t raddr, uint32_t waddr) { uint32_t value; qla4_83xx_rd_reg_indirect(ha, raddr, &value); qla4_83xx_wr_reg_indirect(ha, waddr, value); } /** * qla4_83xx_rmw_crb_reg - Read Modify Write crb register * * This function read value from raddr, AND with test_mask, * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. * * @ha : Pointer to adapter structure * @raddr : CRB address to read from * @waddr : CRB address to write to * @p_rmw_hdr : header with shift/or/xor values. **/ static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr, uint32_t waddr, struct qla4_83xx_rmw *p_rmw_hdr) { uint32_t value; if (p_rmw_hdr->index_a) value = ha->reset_tmplt.array[p_rmw_hdr->index_a]; else qla4_83xx_rd_reg_indirect(ha, raddr, &value); value &= p_rmw_hdr->test_mask; value <<= p_rmw_hdr->shl; value >>= p_rmw_hdr->shr; value |= p_rmw_hdr->or_value; value ^= p_rmw_hdr->xor_value; qla4_83xx_wr_reg_indirect(ha, waddr, value); return; } static void qla4_83xx_write_list(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { struct qla4_83xx_entry *p_entry; uint32_t i; p_entry = (struct qla4_83xx_entry *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2); if (p_hdr->delay) udelay((uint32_t)(p_hdr->delay)); } } static void qla4_83xx_read_write_list(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { struct qla4_83xx_entry *p_entry; uint32_t i; p_entry = (struct qla4_83xx_entry *) ((char *)p_hdr 
+ sizeof(struct qla4_83xx_reset_entry_hdr)); for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2); if (p_hdr->delay) udelay((uint32_t)(p_hdr->delay)); } } static void qla4_83xx_poll_list(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { long delay; struct qla4_83xx_entry *p_entry; struct qla4_83xx_poll *p_poll; uint32_t i; uint32_t value; p_poll = (struct qla4_83xx_poll *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); /* Entries start after 8 byte qla4_83xx_poll, poll header contains * the test_mask, test_value. */ p_entry = (struct qla4_83xx_entry *)((char *)p_poll + sizeof(struct qla4_83xx_poll)); delay = (long)p_hdr->delay; if (!delay) { for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_poll_reg(ha, p_entry->arg1, delay, p_poll->test_mask, p_poll->test_value); } } else { for (i = 0; i < p_hdr->count; i++, p_entry++) { if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay, p_poll->test_mask, p_poll->test_value)) { qla4_83xx_rd_reg_indirect(ha, p_entry->arg1, &value); qla4_83xx_rd_reg_indirect(ha, p_entry->arg2, &value); } } } } static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { long delay; struct qla4_83xx_quad_entry *p_entry; struct qla4_83xx_poll *p_poll; uint32_t i; p_poll = (struct qla4_83xx_poll *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); p_entry = (struct qla4_83xx_quad_entry *) ((char *)p_poll + sizeof(struct qla4_83xx_poll)); delay = (long)p_hdr->delay; for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr, p_entry->dr_value); qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr, p_entry->ar_value); if (delay) { if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay, p_poll->test_mask, p_poll->test_value)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Timeout Error: poll list, item_num %d, entry_num %d\n", __func__, i, ha->reset_tmplt.seq_index)); } } } } static 
void qla4_83xx_read_modify_write(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { struct qla4_83xx_entry *p_entry; struct qla4_83xx_rmw *p_rmw_hdr; uint32_t i; p_rmw_hdr = (struct qla4_83xx_rmw *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); p_entry = (struct qla4_83xx_entry *) ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw)); for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2, p_rmw_hdr); if (p_hdr->delay) udelay((uint32_t)(p_hdr->delay)); } } static void qla4_83xx_pause(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { if (p_hdr->delay) mdelay((uint32_t)((long)p_hdr->delay)); } static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { long delay; int index; struct qla4_83xx_quad_entry *p_entry; struct qla4_83xx_poll *p_poll; uint32_t i; uint32_t value; p_poll = (struct qla4_83xx_poll *) ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr)); p_entry = (struct qla4_83xx_quad_entry *) ((char *)p_poll + sizeof(struct qla4_83xx_poll)); delay = (long)p_hdr->delay; for (i = 0; i < p_hdr->count; i++, p_entry++) { qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr, p_entry->ar_value); if (delay) { if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay, p_poll->test_mask, p_poll->test_value)) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n", __func__, i, ha->reset_tmplt.seq_index)); } else { index = ha->reset_tmplt.array_index; qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr, &value); ha->reset_tmplt.array[index++] = value; if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES) ha->reset_tmplt.array_index = 1; } } } } static void qla4_83xx_seq_end(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { ha->reset_tmplt.seq_end = 1; } static void qla4_83xx_template_end(struct scsi_qla_host *ha, struct qla4_83xx_reset_entry_hdr *p_hdr) { ha->reset_tmplt.template_end = 1; if 
(ha->reset_tmplt.seq_error == 0) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Reset sequence completed SUCCESSFULLY.\n", __func__)); } else { ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n", __func__); } } /** * qla4_83xx_process_reset_template - Process reset template. * * Process all entries in reset template till entry with SEQ_END opcode, * which indicates end of the reset template processing. Each entry has a * Reset Entry header, entry opcode/command, with size of the entry, number * of entries in sub-sequence and delay in microsecs or timeout in millisecs. * * @ha : Pointer to adapter structure * @p_buff : Common reset entry header. **/ static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha, char *p_buff) { int index, entries; struct qla4_83xx_reset_entry_hdr *p_hdr; char *p_entry = p_buff; ha->reset_tmplt.seq_end = 0; ha->reset_tmplt.template_end = 0; entries = ha->reset_tmplt.hdr->entries; index = ha->reset_tmplt.seq_index; for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) { p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry; switch (p_hdr->cmd) { case OPCODE_NOP: break; case OPCODE_WRITE_LIST: qla4_83xx_write_list(ha, p_hdr); break; case OPCODE_READ_WRITE_LIST: qla4_83xx_read_write_list(ha, p_hdr); break; case OPCODE_POLL_LIST: qla4_83xx_poll_list(ha, p_hdr); break; case OPCODE_POLL_WRITE_LIST: qla4_83xx_poll_write_list(ha, p_hdr); break; case OPCODE_READ_MODIFY_WRITE: qla4_83xx_read_modify_write(ha, p_hdr); break; case OPCODE_SEQ_PAUSE: qla4_83xx_pause(ha, p_hdr); break; case OPCODE_SEQ_END: qla4_83xx_seq_end(ha, p_hdr); break; case OPCODE_TMPL_END: qla4_83xx_template_end(ha, p_hdr); break; case OPCODE_POLL_READ_LIST: qla4_83xx_poll_read_list(ha, p_hdr); break; default: ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n", __func__, p_hdr->cmd, index); break; } /* Set pointer to next entry in the sequence. 
*/ p_entry += p_hdr->size; } ha->reset_tmplt.seq_index = index; } static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha) { ha->reset_tmplt.seq_index = 0; qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset); if (ha->reset_tmplt.seq_end != 1) ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n", __func__); } static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha) { qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset); if (ha->reset_tmplt.template_end != 1) ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n", __func__); } static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha) { qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset); if (ha->reset_tmplt.seq_end != 1) ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n", __func__); } static int qla4_83xx_restart(struct scsi_qla_host *ha) { int ret_val = QLA_SUCCESS; uint32_t idc_ctrl; qla4_83xx_process_stop_seq(ha); /* * Collect minidump. * If IDC_CTRL BIT1 is set, clear it on going to INIT state and * don't collect minidump */ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); if (idc_ctrl & GRACEFUL_RESET_BIT1) { qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, (idc_ctrl & ~GRACEFUL_RESET_BIT1)); ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n", __func__); } else { qla4_8xxx_get_minidump(ha); } qla4_83xx_process_init_seq(ha); if (qla4_83xx_copy_bootloader(ha)) { ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n", __func__); ret_val = QLA_ERROR; goto exit_restart; } qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH); qla4_83xx_process_start_seq(ha); exit_restart: return ret_val; } int qla4_83xx_start_firmware(struct scsi_qla_host *ha) { int ret_val = QLA_SUCCESS; ret_val = qla4_83xx_restart(ha); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__); goto exit_start_fw; } else { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart 
done\n", __func__)); } ret_val = qla4_83xx_check_cmd_peg_status(ha); if (ret_val == QLA_ERROR) ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n", __func__); exit_start_fw: return ret_val; } /*----------------------Interrupt Related functions ---------------------*/ static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha) { if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) qla4_8xxx_intr_disable(ha); } static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha) { uint32_t mb_int, ret; if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) { ret = readl(&ha->qla4_83xx_reg->mbox_int); mb_int = ret & ~INT_ENABLE_FW_MB; writel(mb_int, &ha->qla4_83xx_reg->mbox_int); writel(1, &ha->qla4_83xx_reg->leg_int_mask); } } void qla4_83xx_disable_intrs(struct scsi_qla_host *ha) { qla4_83xx_disable_mbox_intrs(ha); qla4_83xx_disable_iocb_intrs(ha); } static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha) { if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) { qla4_8xxx_intr_enable(ha); set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags); } } void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha) { uint32_t mb_int; if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) { mb_int = INT_ENABLE_FW_MB; writel(mb_int, &ha->qla4_83xx_reg->mbox_int); writel(0, &ha->qla4_83xx_reg->leg_int_mask); set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags); } } void qla4_83xx_enable_intrs(struct scsi_qla_host *ha) { qla4_83xx_enable_mbox_intrs(ha); qla4_83xx_enable_iocb_intrs(ha); } void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, int incount) { int i; /* Load all mailbox registers, except mailbox 0. */ for (i = 1; i < incount; i++) writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]); writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]); /* Set Host Interrupt register to 1, to tell the firmware that * a mailbox command is pending. 
Firmware after reading the * mailbox command, clears the host interrupt register */ writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr); } void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount) { int intr_status; intr_status = readl(&ha->qla4_83xx_reg->risc_intr); if (intr_status) { ha->mbox_status_count = outcount; ha->isp_ops->interrupt_service_routine(ha, intr_status); } } /** * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands. * @ha: pointer to host adapter structure. **/ int qla4_83xx_isp_reset(struct scsi_qla_host *ha) { int rval; uint32_t dev_state; ha->isp_ops->idc_lock(ha); dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE); if (ql4xdontresethba) qla4_83xx_set_idc_dontreset(ha); if (dev_state == QLA8XXX_DEV_READY) { /* If IDC_CTRL DONTRESETHBA_BIT0 is set dont do reset * recovery */ if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) { ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n", __func__); rval = QLA_ERROR; goto exit_isp_reset; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n", __func__)); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_NEED_RESET); } else { /* If device_state is NEED_RESET, go ahead with * Reset,irrespective of ql4xdontresethba. This is to allow a * non-reset-owner to force a reset. Non-reset-owner sets * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset * and then forces a Reset by setting device_state to * NEED_RESET. */ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW state already set to NEED_RESET\n", __func__)); } /* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on * priority and which drivers are present. Unlike ISP8022, the function * setting NEED_RESET, may not be the Reset owner. 
*/ if (qla4_83xx_can_perform_reset(ha)) set_bit(AF_8XXX_RST_OWNER, &ha->flags); ha->isp_ops->idc_unlock(ha); rval = qla4_8xxx_device_state_handler(ha); ha->isp_ops->idc_lock(ha); qla4_8xxx_clear_rst_ready(ha); exit_isp_reset: ha->isp_ops->idc_unlock(ha); if (rval == QLA_SUCCESS) clear_bit(AF_FW_RECOVERY, &ha->flags); return rval; } static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha) { u32 val = 0, val1 = 0; int i, status = QLA_SUCCESS; status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val); DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val)); /* Port 0 Rx Buffer Pause Threshold Registers. */ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:")); for (i = 0; i < 8; i++) { status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); /* Port 1 Rx Buffer Pause Threshold Registers. */ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:")); for (i = 0; i < 8; i++) { status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); /* Port 0 RxB Traffic Class Max Cell Registers. */ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 0 RxB Traffic Class Max Cell Registers[3..0]:")); for (i = 0; i < 4; i++) { status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); /* Port 1 RxB Traffic Class Max Cell Registers. */ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 1 RxB Traffic Class Max Cell Registers[3..0]:")); for (i = 0; i < 4; i++) { status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); /* Port 0 RxB Rx Traffic Class Stats. 
*/ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]")); for (i = 7; i >= 0; i--) { status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val); val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, (val | (i << 29))); status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); /* Port 1 RxB Rx Traffic Class Stats. */ DEBUG2(ql4_printk(KERN_INFO, ha, "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]")); for (i = 7; i >= 0; i--) { status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val); val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, (val | (i << 29))); status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val); DEBUG2(pr_info("0x%x ", val)); } DEBUG2(pr_info("\n")); status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, &val); status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, &val1); DEBUG2(ql4_printk(KERN_INFO, ha, "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", val, val1)); } static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha) { int i; /* set SRE-Shim Control Register */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, QLA83XX_SET_PAUSE_VAL); for (i = 0; i < 8; i++) { /* Port 0 Rx Buffer Pause Threshold Registers. */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), QLA83XX_SET_PAUSE_VAL); /* Port 1 Rx Buffer Pause Threshold Registers. */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), QLA83XX_SET_PAUSE_VAL); } for (i = 0; i < 4; i++) { /* Port 0 RxB Traffic Class Max Cell Registers. */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), QLA83XX_SET_TC_MAX_CELL_VAL); /* Port 1 RxB Traffic Class Max Cell Registers. 
*/ qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), QLA83XX_SET_TC_MAX_CELL_VAL); } qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, QLA83XX_SET_PAUSE_VAL); qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, QLA83XX_SET_PAUSE_VAL); ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n"); } /** * qla4_83xx_eport_init - Initialize EPort. * @ha: Pointer to host adapter structure. * * If EPort hardware is in reset state before disabling pause, there would be * serious hardware wedging issues. To prevent this perform eport init everytime * before disabling pause frames. **/ static void qla4_83xx_eport_init(struct scsi_qla_host *ha) { /* Clear the 8 registers */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0); qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0); /* Write any value to Reset Control register */ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF); ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n"); } void qla4_83xx_disable_pause(struct scsi_qla_host *ha) { ha->isp_ops->idc_lock(ha); /* Before disabling pause frames, ensure that eport is not in reset */ qla4_83xx_eport_init(ha); qla4_83xx_dump_pause_control_regs(ha); __qla4_83xx_disable_pause(ha); ha->isp_ops->idc_unlock(ha); } /** * qla4_83xx_is_detached - Check if we are marked invisible. * @ha: Pointer to host adapter structure. 
**/
int qla4_83xx_is_detached(struct scsi_qla_host *ha)
{
	uint32_t drv_active;

	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

	/* Detached when init completed but our bit is clear in DRV_ACTIVE. */
	if (test_bit(AF_INIT_DONE, &ha->flags) &&
	    !(drv_active & (1 << ha->func_num))) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
				  __func__, drv_active));
		return QLA_SUCCESS;
	}

	return QLA_ERROR;
}
gpl-2.0
ChronoMonochrome/android_kernel_ste-3.4
drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c
2192
38038
/* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * * Copyright (C) 1999-2012, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: bcmsdh_sdmmc.c 321372 2012-03-15 01:10:32Z $ */ #include <typedefs.h> #include <bcmdevs.h> #include <bcmendian.h> #include <bcmutils.h> #include <osl.h> #include <sdio.h> /* SDIO Device and Protocol Specs */ #include <sdioh.h> /* Standard SDIO Host Controller Specification */ #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ #include <sdiovar.h> /* ioctl/iovars */ #include <linux/mmc/core.h> #include <linux/mmc/card.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <dngl_stats.h> #include <dhd.h> #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) #include <linux/suspend.h> extern volatile bool dhd_mmc_suspend; #endif #include "bcmsdh_sdmmc.h" #ifndef BCMSDH_MODULE extern int sdio_function_init(void); extern void sdio_function_cleanup(void); #endif /* BCMSDH_MODULE */ #if !defined(OOB_INTR_ONLY) static void IRQHandler(struct sdio_func *func); static void IRQHandlerF2(struct sdio_func *func); #endif /* !defined(OOB_INTR_ONLY) */ static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr); extern int sdio_reset_comm(struct mmc_card *card); extern PBCMSDH_SDMMC_INSTANCE gInstance; uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ uint sd_f2_blocksize = 512; /* Default blocksize */ uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */ uint sd_power = 1; /* Default to SD Slot powered ON */ uint sd_clock = 1; /* Default to SD Clock turned ON */ uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */ uint sd_msglevel = 0x01; uint sd_use_dma = TRUE; DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait); DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait); DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait); DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait); #define DMA_ALIGN_MASK 0x03 int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data); static int sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd) { int err_ret; uint32 fbraddr; 
uint8 func; sd_trace(("%s\n", __FUNCTION__)); /* Get the Card's common CIS address */ sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0); sd->func_cis_ptr[0] = sd->com_cis_ptr; sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); /* Get the Card's function CIS (for each function) */ for (fbraddr = SDIOD_FBR_STARTADDR, func = 1; func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr); sd_info(("%s: Function %d CIS Ptr = 0x%x\n", __FUNCTION__, func, sd->func_cis_ptr[func])); } sd->func_cis_ptr[0] = sd->com_cis_ptr; sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); /* Enable Function 1 */ sdio_claim_host(gInstance->func[1]); err_ret = sdio_enable_func(gInstance->func[1]); sdio_release_host(gInstance->func[1]); if (err_ret) { sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret)); } return FALSE; } /* * Public entry points & extern's */ extern sdioh_info_t * sdioh_attach(osl_t *osh, void *bar0, uint irq) { sdioh_info_t *sd; int err_ret; sd_trace(("%s\n", __FUNCTION__)); if (gInstance == NULL) { sd_err(("%s: SDIO Device not present\n", __FUNCTION__)); return NULL; } if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); return NULL; } bzero((char *)sd, sizeof(sdioh_info_t)); sd->osh = osh; if (sdioh_sdmmc_osinit(sd) != 0) { sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__)); MFREE(sd->osh, sd, sizeof(sdioh_info_t)); return NULL; } sd->num_funcs = 2; sd->sd_blockmode = TRUE; sd->use_client_ints = TRUE; sd->client_block_size[0] = 64; sd->use_rxchain = FALSE; gInstance->sd = sd; /* Claim host controller */ sdio_claim_host(gInstance->func[1]); sd->client_block_size[1] = 64; err_ret = sdio_set_block_size(gInstance->func[1], 64); if (err_ret) { sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n")); } /* 
Release host controller F1 */ sdio_release_host(gInstance->func[1]); if (gInstance->func[2]) { /* Claim host controller F2 */ sdio_claim_host(gInstance->func[2]); sd->client_block_size[2] = sd_f2_blocksize; err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize); if (err_ret) { sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n", sd_f2_blocksize)); } /* Release host controller F2 */ sdio_release_host(gInstance->func[2]); } sdioh_sdmmc_card_enablefuncs(sd); sd_trace(("%s: Done\n", __FUNCTION__)); return sd; } extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd) { sd_trace(("%s\n", __FUNCTION__)); if (sd) { /* Disable Function 2 */ sdio_claim_host(gInstance->func[2]); sdio_disable_func(gInstance->func[2]); sdio_release_host(gInstance->func[2]); /* Disable Function 1 */ if (gInstance->func[1]) { sdio_claim_host(gInstance->func[1]); sdio_disable_func(gInstance->func[1]); sdio_release_host(gInstance->func[1]); } gInstance->func[1] = NULL; gInstance->func[2] = NULL; /* deregister irq */ sdioh_sdmmc_osfree(sd); MFREE(sd->osh, sd, sizeof(sdioh_info_t)); } return SDIOH_API_RC_SUCCESS; } #if defined(OOB_INTR_ONLY) && defined(HW_OOB) extern SDIOH_API_RC sdioh_enable_func_intr(void) { uint8 reg; int err; if (gInstance->func[0]) { sdio_claim_host(gInstance->func[0]); reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err); if (err) { sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); sdio_release_host(gInstance->func[0]); return SDIOH_API_RC_FAIL; } /* Enable F1 and F2 interrupts, set master enable */ reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN); sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err); sdio_release_host(gInstance->func[0]); if (err) { sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); return SDIOH_API_RC_FAIL; } } return SDIOH_API_RC_SUCCESS; } extern SDIOH_API_RC sdioh_disable_func_intr(void) { uint8 reg; int err; if (gInstance->func[0]) { 
sdio_claim_host(gInstance->func[0]); reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err); if (err) { sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); sdio_release_host(gInstance->func[0]); return SDIOH_API_RC_FAIL; } reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN); /* Disable master interrupt with the last function interrupt */ if (!(reg & 0xFE)) reg = 0; sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err); sdio_release_host(gInstance->func[0]); if (err) { sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); return SDIOH_API_RC_FAIL; } } return SDIOH_API_RC_SUCCESS; } #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */ /* Configure callback to client when we recieve client interrupt */ extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) { sd_trace(("%s: Entering\n", __FUNCTION__)); if (fn == NULL) { sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__)); return SDIOH_API_RC_FAIL; } #if !defined(OOB_INTR_ONLY) sd->intr_handler = fn; sd->intr_handler_arg = argh; sd->intr_handler_valid = TRUE; /* register and unmask irq */ if (gInstance->func[2]) { sdio_claim_host(gInstance->func[2]); sdio_claim_irq(gInstance->func[2], IRQHandlerF2); sdio_release_host(gInstance->func[2]); } if (gInstance->func[1]) { sdio_claim_host(gInstance->func[1]); sdio_claim_irq(gInstance->func[1], IRQHandler); sdio_release_host(gInstance->func[1]); } #elif defined(HW_OOB) sdioh_enable_func_intr(); #endif /* !defined(OOB_INTR_ONLY) */ return SDIOH_API_RC_SUCCESS; } extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *sd) { sd_trace(("%s: Entering\n", __FUNCTION__)); #if !defined(OOB_INTR_ONLY) if (gInstance->func[1]) { /* register and unmask irq */ sdio_claim_host(gInstance->func[1]); sdio_release_irq(gInstance->func[1]); sdio_release_host(gInstance->func[1]); } if (gInstance->func[2]) { /* Claim host controller F2 */ sdio_claim_host(gInstance->func[2]); 
sdio_release_irq(gInstance->func[2]); /* Release host controller F2 */ sdio_release_host(gInstance->func[2]); } sd->intr_handler_valid = FALSE; sd->intr_handler = NULL; sd->intr_handler_arg = NULL; #elif defined(HW_OOB) sdioh_disable_func_intr(); #endif /* !defined(OOB_INTR_ONLY) */ return SDIOH_API_RC_SUCCESS; } extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) { sd_trace(("%s: Entering\n", __FUNCTION__)); *onoff = sd->client_intr_enabled; return SDIOH_API_RC_SUCCESS; } #if defined(DHD_DEBUG) extern bool sdioh_interrupt_pending(sdioh_info_t *sd) { return (0); } #endif uint sdioh_query_iofnum(sdioh_info_t *sd) { return sd->num_funcs; } /* IOVar table */ enum { IOV_MSGLEVEL = 1, IOV_BLOCKMODE, IOV_BLOCKSIZE, IOV_DMA, IOV_USEINTS, IOV_NUMINTS, IOV_NUMLOCALINTS, IOV_HOSTREG, IOV_DEVREG, IOV_DIVISOR, IOV_SDMODE, IOV_HISPEED, IOV_HCIREGS, IOV_POWER, IOV_CLOCK, IOV_RXCHAIN }; const bcm_iovar_t sdioh_iovars[] = { {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 }, {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 }, {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 }, {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 }, {NULL, 0, 0, 0, 0 } }; int sdioh_iovar_op(sdioh_info_t *si, const char *name, void *params, int plen, void *arg, int len, bool set) { const bcm_iovar_t *vi = NULL; int bcmerror = 0; int val_size; int32 int_val = 0; bool bool_val; uint32 actionid; ASSERT(name); ASSERT(len >= 
0); /* Get must have return space; Set does not take qualifiers */ ASSERT(set || (arg && len)); ASSERT(!set || (!params && !plen)); sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { bcmerror = BCME_UNSUPPORTED; goto exit; } if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) goto exit; /* Set up params so get and set can share the convenience variables */ if (params == NULL) { params = arg; plen = len; } if (vi->type == IOVT_VOID) val_size = 0; else if (vi->type == IOVT_BUFFER) val_size = len; else val_size = sizeof(int); if (plen >= (int)sizeof(int_val)) bcopy(params, &int_val, sizeof(int_val)); bool_val = (int_val != 0) ? TRUE : FALSE; BCM_REFERENCE(bool_val); actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); switch (actionid) { case IOV_GVAL(IOV_MSGLEVEL): int_val = (int32)sd_msglevel; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_MSGLEVEL): sd_msglevel = int_val; break; case IOV_GVAL(IOV_BLOCKMODE): int_val = (int32)si->sd_blockmode; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_BLOCKMODE): si->sd_blockmode = (bool)int_val; /* Haven't figured out how to make non-block mode with DMA */ break; case IOV_GVAL(IOV_BLOCKSIZE): if ((uint32)int_val > si->num_funcs) { bcmerror = BCME_BADARG; break; } int_val = (int32)si->client_block_size[int_val]; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_BLOCKSIZE): { uint func = ((uint32)int_val >> 16); uint blksize = (uint16)int_val; uint maxsize; if (func > si->num_funcs) { bcmerror = BCME_BADARG; break; } switch (func) { case 0: maxsize = 32; break; case 1: maxsize = BLOCK_SIZE_4318; break; case 2: maxsize = BLOCK_SIZE_4328; break; default: maxsize = 0; } if (blksize > maxsize) { bcmerror = BCME_BADARG; break; } if (!blksize) { blksize = maxsize; } /* Now set it */ si->client_block_size[func] = blksize; break; } case IOV_GVAL(IOV_RXCHAIN): int_val = (int32)si->use_rxchain; bcopy(&int_val, 
arg, val_size); break; case IOV_GVAL(IOV_DMA): int_val = (int32)si->sd_use_dma; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_DMA): si->sd_use_dma = (bool)int_val; break; case IOV_GVAL(IOV_USEINTS): int_val = (int32)si->use_client_ints; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_USEINTS): si->use_client_ints = (bool)int_val; if (si->use_client_ints) si->intmask |= CLIENT_INTR; else si->intmask &= ~CLIENT_INTR; break; case IOV_GVAL(IOV_DIVISOR): int_val = (uint32)sd_divisor; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_DIVISOR): sd_divisor = int_val; break; case IOV_GVAL(IOV_POWER): int_val = (uint32)sd_power; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_POWER): sd_power = int_val; break; case IOV_GVAL(IOV_CLOCK): int_val = (uint32)sd_clock; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_CLOCK): sd_clock = int_val; break; case IOV_GVAL(IOV_SDMODE): int_val = (uint32)sd_sdmode; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_SDMODE): sd_sdmode = int_val; break; case IOV_GVAL(IOV_HISPEED): int_val = (uint32)sd_hiok; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_HISPEED): sd_hiok = int_val; break; case IOV_GVAL(IOV_NUMINTS): int_val = (int32)si->intrcount; bcopy(&int_val, arg, val_size); break; case IOV_GVAL(IOV_NUMLOCALINTS): int_val = (int32)0; bcopy(&int_val, arg, val_size); break; case IOV_GVAL(IOV_HOSTREG): { sdreg_t *sd_ptr = (sdreg_t *)params; if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); bcmerror = BCME_BADARG; break; } sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__, (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 
16 : 32), sd_ptr->offset)); if (sd_ptr->offset & 1) int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */ else if (sd_ptr->offset & 2) int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */ else int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */ bcopy(&int_val, arg, sizeof(int_val)); break; } case IOV_SVAL(IOV_HOSTREG): { sdreg_t *sd_ptr = (sdreg_t *)params; if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); bcmerror = BCME_BADARG; break; } sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value, (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), sd_ptr->offset)); break; } case IOV_GVAL(IOV_DEVREG): { sdreg_t *sd_ptr = (sdreg_t *)params; uint8 data = 0; if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { bcmerror = BCME_SDIO_ERROR; break; } int_val = (int)data; bcopy(&int_val, arg, sizeof(int_val)); break; } case IOV_SVAL(IOV_DEVREG): { sdreg_t *sd_ptr = (sdreg_t *)params; uint8 data = (uint8)sd_ptr->value; if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { bcmerror = BCME_SDIO_ERROR; break; } break; } default: bcmerror = BCME_UNSUPPORTED; break; } exit: return bcmerror; } #if defined(OOB_INTR_ONLY) && defined(HW_OOB) SDIOH_API_RC sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable) { SDIOH_API_RC status; uint8 data; if (enable) data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI; else data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */ status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data); return status; } #endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */ extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) { SDIOH_API_RC status; /* No lock needed since sdioh_request_byte does locking */ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); return status; } extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *sd, 
uint fnc_num, uint32 addr, uint8 *data) { /* No lock needed since sdioh_request_byte does locking */ SDIOH_API_RC status; status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); return status; } static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr) { /* read 24 bits and return valid 17 bit addr */ int i; uint32 scratch, regdata; uint8 *ptr = (uint8 *)&scratch; for (i = 0; i < 3; i++) { if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS) sd_err(("%s: Can't read!\n", __FUNCTION__)); *ptr++ = (uint8) regdata; regaddr++; } /* Only the lower 17-bits are valid */ scratch = ltoh32(scratch); scratch &= 0x0001FFFF; return (scratch); } extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) { uint32 count; int offset; uint32 foo; uint8 *cis = cisd; sd_trace(("%s: Func = %d\n", __FUNCTION__, func)); if (!sd->func_cis_ptr[func]) { bzero(cis, length); sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func)); return SDIOH_API_RC_FAIL; } sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func])); for (count = 0; count < length; count++) { offset = sd->func_cis_ptr[func] + count; if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) { sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); return SDIOH_API_RC_FAIL; } *cis = (uint8)(foo & 0xff); cis++; } return SDIOH_API_RC_SUCCESS; } extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) { int err_ret; sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr)); DHD_PM_RESUME_WAIT(sdioh_request_byte_wait); DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); if(rw) { /* CMD52 Write */ if (func == 0) { /* Can only directly write to some F0 registers. Handle F2 enable * as a special case. 
*/ if (regaddr == SDIOD_CCCR_IOEN) { if (gInstance->func[2]) { sdio_claim_host(gInstance->func[2]); if (*byte & SDIO_FUNC_ENABLE_2) { /* Enable Function 2 */ err_ret = sdio_enable_func(gInstance->func[2]); if (err_ret) { sd_err(("bcmsdh_sdmmc: enable F2 failed:%d", err_ret)); } } else { /* Disable Function 2 */ err_ret = sdio_disable_func(gInstance->func[2]); if (err_ret) { sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d", err_ret)); } } sdio_release_host(gInstance->func[2]); } } #if defined(MMC_SDIO_ABORT) /* to allow abort command through F1 */ else if (regaddr == SDIOD_CCCR_IOABORT) { sdio_claim_host(gInstance->func[func]); /* * this sdio_f0_writeb() can be replaced with another api * depending upon MMC driver change. * As of this time, this is temporaray one */ sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret); sdio_release_host(gInstance->func[func]); } #endif /* MMC_SDIO_ABORT */ else if (regaddr < 0xF0) { sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr)); } else { /* Claim host controller, perform F0 write, and release */ sdio_claim_host(gInstance->func[func]); sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret); sdio_release_host(gInstance->func[func]); } } else { /* Claim host controller, perform Fn write, and release */ sdio_claim_host(gInstance->func[func]); sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret); sdio_release_host(gInstance->func[func]); } } else { /* CMD52 Read */ /* Claim host controller, perform Fn read, and release */ sdio_claim_host(gInstance->func[func]); if (func == 0) { *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret); } else { *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret); } sdio_release_host(gInstance->func[func]); } if (err_ret) { sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n", rw ? "Write" : "Read", func, regaddr, *byte, err_ret)); } return ((err_ret == 0) ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); } extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, uint32 *word, uint nbytes) { int err_ret = SDIOH_API_RC_FAIL; if (func == 0) { sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__)); return SDIOH_API_RC_FAIL; } sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n", __FUNCTION__, cmd_type, rw, func, addr, nbytes)); DHD_PM_RESUME_WAIT(sdioh_request_word_wait); DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); /* Claim host controller */ sdio_claim_host(gInstance->func[func]); if(rw) { /* CMD52 Write */ if (nbytes == 4) { sdio_writel(gInstance->func[func], *word, addr, &err_ret); } else if (nbytes == 2) { sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret); } else { sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes)); } } else { /* CMD52 Read */ if (nbytes == 4) { *word = sdio_readl(gInstance->func[func], addr, &err_ret); } else if (nbytes == 2) { *word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF; } else { sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes)); } } /* Release host controller */ sdio_release_host(gInstance->func[func]); if (err_ret) { sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x", rw ? "Write" : "Read", err_ret)); } return ((err_ret == 0) ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); } static SDIOH_API_RC sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func, uint addr, void *pkt) { bool fifo = (fix_inc == SDIOH_DATA_FIX); uint32 SGCount = 0; int err_ret = 0; void *pnext, *pprev; uint ttl_len, dma_len, lft_len, xfred_len, pkt_len; uint blk_num; struct mmc_request mmc_req; struct mmc_command mmc_cmd; struct mmc_data mmc_dat; sd_trace(("%s: Enter\n", __FUNCTION__)); ASSERT(pkt); DHD_PM_RESUME_WAIT(sdioh_request_packet_wait); DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); ttl_len = xfred_len = 0; /* at least 4 bytes alignment of skb buff is guaranteed */ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) ttl_len += PKTLEN(sd->osh, pnext); if (!sd->use_rxchain || ttl_len <= sd->client_block_size[func]) { blk_num = 0; dma_len = 0; } else { blk_num = ttl_len / sd->client_block_size[func]; dma_len = blk_num * sd->client_block_size[func]; } lft_len = ttl_len - dma_len; sd_trace(("%s: %s %dB to func%d:%08x, %d blks with DMA, %dB leftover\n", __FUNCTION__, write ? "W" : "R", ttl_len, func, addr, blk_num, lft_len)); if (0 != dma_len) { memset(&mmc_req, 0, sizeof(struct mmc_request)); memset(&mmc_cmd, 0, sizeof(struct mmc_command)); memset(&mmc_dat, 0, sizeof(struct mmc_data)); /* Set up DMA descriptors */ pprev = pkt; for (pnext = pkt; pnext && dma_len; pnext = PKTNEXT(sd->osh, pnext)) { pkt_len = PKTLEN(sd->osh, pnext); if (dma_len > pkt_len) dma_len -= pkt_len; else { pkt_len = xfred_len = dma_len; dma_len = 0; pkt = pnext; } sg_set_buf(&sd->sg_list[SGCount++], (uint8*)PKTDATA(sd->osh, pnext), pkt_len); if (SGCount >= SDIOH_SDMMC_MAX_SG_ENTRIES) { sd_err(("%s: sg list entries exceed limit\n", __FUNCTION__)); return (SDIOH_API_RC_FAIL); } } mmc_dat.sg = sd->sg_list; mmc_dat.sg_len = SGCount; mmc_dat.blksz = sd->client_block_size[func]; mmc_dat.blocks = blk_num; mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */ mmc_cmd.arg = write ? 
1<<31 : 0; mmc_cmd.arg |= (func & 0x7) << 28; mmc_cmd.arg |= 1<<27; mmc_cmd.arg |= fifo ? 0 : 1<<26; mmc_cmd.arg |= (addr & 0x1FFFF) << 9; mmc_cmd.arg |= blk_num & 0x1FF; mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; mmc_req.cmd = &mmc_cmd; mmc_req.data = &mmc_dat; sdio_claim_host(gInstance->func[func]); mmc_set_data_timeout(&mmc_dat, gInstance->func[func]->card); mmc_wait_for_req(gInstance->func[func]->card->host, &mmc_req); sdio_release_host(gInstance->func[func]); err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error; if (0 != err_ret) { sd_err(("%s:CMD53 %s failed with code %d\n", __FUNCTION__, write ? "write" : "read", err_ret)); sd_err(("%s:Disabling rxchain and fire it with PIO\n", __FUNCTION__)); sd->use_rxchain = FALSE; pkt = pprev; lft_len = ttl_len; } else if (!fifo) { addr = addr + ttl_len - lft_len - dma_len; } } /* PIO mode */ if (0 != lft_len) { /* Claim host controller */ sdio_claim_host(gInstance->func[func]); for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) { uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext) + xfred_len; pkt_len = PKTLEN(sd->osh, pnext); if (0 != xfred_len) { pkt_len -= xfred_len; xfred_len = 0; } pkt_len = (pkt_len + 3) & 0xFFFFFFFC; #ifdef CONFIG_MMC_MSM7X00A if ((pkt_len % 64) == 32) { sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__)); pkt_len += 32; } #endif /* CONFIG_MMC_MSM7X00A */ if ((write) && (!fifo)) err_ret = sdio_memcpy_toio( gInstance->func[func], addr, buf, pkt_len); else if (write) err_ret = sdio_memcpy_toio( gInstance->func[func], addr, buf, pkt_len); else if (fifo) err_ret = sdio_readsb( gInstance->func[func], buf, addr, pkt_len); else err_ret = sdio_memcpy_fromio( gInstance->func[func], buf, addr, pkt_len); if (err_ret) sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__, (write) ? "TX" : "RX", pnext, SGCount, addr, pkt_len, err_ret)); else sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n", __FUNCTION__, (write) ? 
"TX" : "RX", pnext, SGCount, addr, pkt_len)); if (!fifo) addr += pkt_len; SGCount ++; } sdio_release_host(gInstance->func[func]); } sd_trace(("%s: Exit\n", __FUNCTION__)); return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); } /* * This function takes a buffer or packet, and fixes everything up so that in the * end, a DMA-able packet is created. * * A buffer does not have an associated packet pointer, and may or may not be aligned. * A packet may consist of a single packet, or a packet chain. If it is a packet chain, * then all the packets in the chain must be properly aligned. If the packet data is not * aligned, then there may only be one packet, and in this case, it is copied to a new * aligned packet. * */ extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func, uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) { SDIOH_API_RC Status; void *mypkt = NULL; sd_trace(("%s: Enter\n", __FUNCTION__)); DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait); DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); /* Case 1: we don't have a packet. */ if (pkt == NULL) { sd_data(("%s: Creating new %s Packet, len=%d\n", __FUNCTION__, write ? "TX" : "RX", buflen_u)); #ifdef CONFIG_DHD_USE_STATIC_BUF if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) { #else if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) { #endif /* CONFIG_DHD_USE_STATIC_BUF */ sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buflen_u)); return SDIOH_API_RC_FAIL; } /* For a write, copy the buffer data into the packet. */ if (write) { bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u); } Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt); /* For a read, copy the packet data back to the buffer. */ if (!write) { bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u); } #ifdef CONFIG_DHD_USE_STATIC_BUF PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE); #else PKTFREE(sd->osh, mypkt, write ? 
TRUE : FALSE); #endif /* CONFIG_DHD_USE_STATIC_BUF */ } else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) { /* Case 2: We have a packet, but it is unaligned. */ /* In this case, we cannot have a chain. */ ASSERT(PKTNEXT(sd->osh, pkt) == NULL); sd_data(("%s: Creating aligned %s Packet, len=%d\n", __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt))); #ifdef CONFIG_DHD_USE_STATIC_BUF if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) { #else if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) { #endif /* CONFIG_DHD_USE_STATIC_BUF */ sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, PKTLEN(sd->osh, pkt))); return SDIOH_API_RC_FAIL; } /* For a write, copy the buffer data into the packet. */ if (write) { bcopy(PKTDATA(sd->osh, pkt), PKTDATA(sd->osh, mypkt), PKTLEN(sd->osh, pkt)); } Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt); /* For a read, copy the packet data back to the buffer. */ if (!write) { bcopy(PKTDATA(sd->osh, mypkt), PKTDATA(sd->osh, pkt), PKTLEN(sd->osh, mypkt)); } #ifdef CONFIG_DHD_USE_STATIC_BUF PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE); #else PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE); #endif /* CONFIG_DHD_USE_STATIC_BUF */ } else { /* case 3: We have a packet and it is aligned. */ sd_data(("%s: Aligned %s Packet, direct DMA\n", __FUNCTION__, write ? 
"Tx" : "Rx")); Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt); } return (Status); } /* this function performs "abort" for both of host & device */ extern int sdioh_abort(sdioh_info_t *sd, uint func) { #if defined(MMC_SDIO_ABORT) char t_func = (char) func; #endif /* defined(MMC_SDIO_ABORT) */ sd_trace(("%s: Enter\n", __FUNCTION__)); #if defined(MMC_SDIO_ABORT) /* issue abort cmd52 command through F1 */ sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func); #endif /* defined(MMC_SDIO_ABORT) */ sd_trace(("%s: Exit\n", __FUNCTION__)); return SDIOH_API_RC_SUCCESS; } /* Reset and re-initialize the device */ int sdioh_sdio_reset(sdioh_info_t *si) { sd_trace(("%s: Enter\n", __FUNCTION__)); sd_trace(("%s: Exit\n", __FUNCTION__)); return SDIOH_API_RC_SUCCESS; } /* Disable device interrupt */ void sdioh_sdmmc_devintr_off(sdioh_info_t *sd) { sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); sd->intmask &= ~CLIENT_INTR; } /* Enable device interrupt */ void sdioh_sdmmc_devintr_on(sdioh_info_t *sd) { sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); sd->intmask |= CLIENT_INTR; } /* Read client card reg */ int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) { if ((func == 0) || (regsize == 1)) { uint8 temp = 0; sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp); *data = temp; *data &= 0xff; sd_data(("%s: byte read data=0x%02x\n", __FUNCTION__, *data)); } else { sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize); if (regsize == 2) *data &= 0xffff; sd_data(("%s: word read data=0x%08x\n", __FUNCTION__, *data)); } return SUCCESS; } #if !defined(OOB_INTR_ONLY) /* bcmsdh_sdmmc interrupt handler */ static void IRQHandler(struct sdio_func *func) { sdioh_info_t *sd; sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n")); sd = gInstance->sd; ASSERT(sd != NULL); sdio_release_host(gInstance->func[0]); if (sd->use_client_ints) { sd->intrcount++; ASSERT(sd->intr_handler); 
ASSERT(sd->intr_handler_arg); (sd->intr_handler)(sd->intr_handler_arg); } else { sd_err(("bcmsdh_sdmmc: ***IRQHandler\n")); sd_err(("%s: Not ready for intr: enabled %d, handler %p\n", __FUNCTION__, sd->client_intr_enabled, sd->intr_handler)); } sdio_claim_host(gInstance->func[0]); } /* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */ static void IRQHandlerF2(struct sdio_func *func) { sdioh_info_t *sd; sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n")); sd = gInstance->sd; ASSERT(sd != NULL); BCM_REFERENCE(sd); } #endif /* !defined(OOB_INTR_ONLY) */ #ifdef NOTUSED /* Write client card reg */ static int sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) { if ((func == 0) || (regsize == 1)) { uint8 temp; temp = data & 0xff; sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp); sd_data(("%s: byte write data=0x%02x\n", __FUNCTION__, data)); } else { if (regsize == 2) data &= 0xffff; sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize); sd_data(("%s: word write data=0x%08x\n", __FUNCTION__, data)); } return SUCCESS; } #endif /* NOTUSED */ int sdioh_start(sdioh_info_t *si, int stage) { int ret; sdioh_info_t *sd = gInstance->sd; /* Need to do this stages as we can't enable the interrupt till downloading of the firmware is complete, other wise polling sdio access will come in way */ if (gInstance->func[0]) { if (stage == 0) { /* Since the power to the chip is killed, we will have re enumerate the device again. Set the block size and enable the fucntion 1 for in preparation for downloading the code */ /* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux 2.6.27. 
The implementation prior to that is buggy, and needs broadcom's patch for it */ if ((ret = sdio_reset_comm(gInstance->func[0]->card))) { sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret)); return ret; } else { sd->num_funcs = 2; sd->sd_blockmode = TRUE; sd->use_client_ints = TRUE; sd->client_block_size[0] = 64; /* Claim host controller */ sdio_claim_host(gInstance->func[1]); sd->client_block_size[1] = 64; if (sdio_set_block_size(gInstance->func[1], 64)) { sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n")); } /* Release host controller F1 */ sdio_release_host(gInstance->func[1]); if (gInstance->func[2]) { /* Claim host controller F2 */ sdio_claim_host(gInstance->func[2]); sd->client_block_size[2] = sd_f2_blocksize; if (sdio_set_block_size(gInstance->func[2], sd_f2_blocksize)) { sd_err(("bcmsdh_sdmmc: Failed to set F2 " "blocksize to %d\n", sd_f2_blocksize)); } /* Release host controller F2 */ sdio_release_host(gInstance->func[2]); } sdioh_sdmmc_card_enablefuncs(sd); } } else { #if !defined(OOB_INTR_ONLY) sdio_claim_host(gInstance->func[0]); sdio_claim_irq(gInstance->func[2], IRQHandlerF2); sdio_claim_irq(gInstance->func[1], IRQHandler); sdio_release_host(gInstance->func[0]); #else /* defined(OOB_INTR_ONLY) */ #if defined(HW_OOB) sdioh_enable_func_intr(); #endif bcmsdh_oob_intr_set(TRUE); #endif /* !defined(OOB_INTR_ONLY) */ } } else sd_err(("%s Failed\n", __FUNCTION__)); return (0); } int sdioh_stop(sdioh_info_t *si) { /* MSM7201A Android sdio stack has bug with interrupt So internaly within SDIO stack they are polling which cause issue when device is turned off. 
	   So unregister interrupt with SDIO stack to stop the
	   polling */
	if (gInstance->func[0]) {
#if !defined(OOB_INTR_ONLY)
		/* Tear down the in-band F1/F2 interrupt handlers registered
		 * in sdioh_start(); host must be claimed around release. */
		sdio_claim_host(gInstance->func[0]);
		sdio_release_irq(gInstance->func[1]);
		sdio_release_irq(gInstance->func[2]);
		sdio_release_host(gInstance->func[0]);
#else /* defined(OOB_INTR_ONLY) */
#if defined(HW_OOB)
		sdioh_disable_func_intr();
#endif
		/* Out-of-band GPIO interrupt path: just mask it */
		bcmsdh_oob_intr_set(FALSE);
#endif /* !defined(OOB_INTR_ONLY) */
	} else
		sd_err(("%s Failed\n", __FUNCTION__));
	return (0);
}

/* No host-side locking to wait for in this backend; always reports free. */
int sdioh_waitlockfree(sdioh_info_t *sd)
{
	return (1);
}

/*
 * GPIO control entry points: not supported by the sdmmc backend.
 * All of them report failure (or FALSE for the input read).
 */
SDIOH_API_RC
sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
{
	return SDIOH_API_RC_FAIL;
}

SDIOH_API_RC
sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
{
	return SDIOH_API_RC_FAIL;
}

bool
sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
{
	return FALSE;
}

SDIOH_API_RC
sdioh_gpio_init(sdioh_info_t *sd)
{
	return SDIOH_API_RC_FAIL;
}
gpl-2.0
rohanpurohit/android_kernel_sony_msm8930
fs/nfsd/nfs4state.c
2704
129358
/* * Copyright (c) 2001 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * Andy Adamson <kandros@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <linux/file.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/sunrpc/svcauth_gss.h> #include <linux/sunrpc/clnt.h> #include "xdr4.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC /* Globals */ time_t nfsd4_lease = 90; /* default lease time */ time_t nfsd4_grace = 90; static time_t boot_time; #define all_ones {{~0,~0},~0} static const stateid_t one_stateid = { .si_generation = ~0, .si_opaque = all_ones, }; static const stateid_t zero_stateid = { /* all fields zero */ }; static const stateid_t currentstateid = { .si_generation = 1, }; static u64 current_sessionid = 1; #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t))) /* forward declarations */ static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner); /* Locking: */ /* Currently used for almost all code touching nfsv4 state: */ static DEFINE_MUTEX(client_mutex); /* * Currently used for the del_recall_lru and file hash table. 
   In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(recall_lock);

/* Slab caches for the nfsv4 state objects; created at module init time. */
static struct kmem_cache *openowner_slab = NULL;
static struct kmem_cache *lockowner_slab = NULL;
static struct kmem_cache *file_slab = NULL;
static struct kmem_cache *stateid_slab = NULL;
static struct kmem_cache *deleg_slab = NULL;

/* Take the single big nfsv4 state lock (client_mutex). */
void
nfs4_lock_state(void)
{
	mutex_lock(&client_mutex);
}

static void free_session(struct kref *);

/* Must be called under the client_lock */
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	/* Drop a session reference; free_session runs on the last put. */
	kref_put(&ses->se_ref, free_session);
}

static void nfsd4_get_session(struct nfsd4_session *ses)
{
	kref_get(&ses->se_ref);
}

/* Release the big nfsv4 state lock. */
void
nfs4_unlock_state(void)
{
	mutex_unlock(&client_mutex);
}

/* Simple multiplicative (x*37 + byte) hash over an opaque byte string. */
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;
	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static struct list_head del_recall_lru;

static void nfsd4_free_file(struct nfs4_file *f)
{
	kmem_cache_free(file_slab, f);
}

/*
 * Drop a reference on an nfs4_file; on the final put, unhash it (under
 * recall_lock), release its inode and free the structure.
 */
static inline void
put_nfs4_file(struct nfs4_file *fi)
{
	if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
		list_del(&fi->fi_hash);
		spin_unlock(&recall_lock);
		iput(fi->fi_inode);
		nfsd4_free_file(fi);
	}
}

static inline void
get_nfs4_file(struct nfs4_file *fi)
{
	atomic_inc(&fi->fi_ref);
}

static int num_delegations;
unsigned int max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS 8
#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)

/* Bucket index: hash of the opaque owner name, offset by the client id. */
static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	ret += clientid;
	return ret & OWNER_HASH_MASK;
}

static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE];

/* hash table for nfs4_file */
#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)

static
unsigned int file_hashval(struct inode *ino) { /* XXX: why are we hashing on inode pointer, anyway? */ return hash_ptr(ino, FILE_HASH_BITS); } static struct list_head file_hashtbl[FILE_HASH_SIZE]; static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag) { BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR])); atomic_inc(&fp->fi_access[oflag]); } static void nfs4_file_get_access(struct nfs4_file *fp, int oflag) { if (oflag == O_RDWR) { __nfs4_file_get_access(fp, O_RDONLY); __nfs4_file_get_access(fp, O_WRONLY); } else __nfs4_file_get_access(fp, oflag); } static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag) { if (fp->fi_fds[oflag]) { fput(fp->fi_fds[oflag]); fp->fi_fds[oflag] = NULL; } } static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) { if (atomic_dec_and_test(&fp->fi_access[oflag])) { nfs4_file_put_fd(fp, oflag); /* * It's also safe to get rid of the RDWR open *if* * we no longer have need of the other kind of access * or if we already have the other kind of open: */ if (fp->fi_fds[1-oflag] || atomic_read(&fp->fi_access[1 - oflag]) == 0) nfs4_file_put_fd(fp, O_RDWR); } } static void nfs4_file_put_access(struct nfs4_file *fp, int oflag) { if (oflag == O_RDWR) { __nfs4_file_put_access(fp, O_RDONLY); __nfs4_file_put_access(fp, O_WRONLY); } else __nfs4_file_put_access(fp, oflag); } static inline int get_new_stid(struct nfs4_stid *stid) { static int min_stateid = 0; struct idr *stateids = &stid->sc_client->cl_stateids; int new_stid; int error; error = idr_get_new_above(stateids, stid, min_stateid, &new_stid); /* * Note: the necessary preallocation was done in * nfs4_alloc_stateid(). The idr code caps the number of * preallocations that can exist at a time, but the state lock * prevents anyone from using ours before we get here: */ BUG_ON(error); /* * It shouldn't be a problem to reuse an opaque stateid value. * I don't think it is for 4.1. 
But with 4.0 I worry that, for * example, a stray write retransmission could be accepted by * the server when it should have been rejected. Therefore, * adopt a trick from the sctp code to attempt to maximize the * amount of time until an id is reused, by ensuring they always * "increase" (mod INT_MAX): */ min_stateid = new_stid+1; if (min_stateid == INT_MAX) min_stateid = 0; return new_stid; } static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type) { stateid_t *s = &stid->sc_stateid; int new_id; stid->sc_type = type; stid->sc_client = cl; s->si_opaque.so_clid = cl->cl_clientid; new_id = get_new_stid(stid); s->si_opaque.so_id = (u32)new_id; /* Will be incremented before return to client: */ s->si_generation = 0; } static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab) { struct idr *stateids = &cl->cl_stateids; if (!idr_pre_get(stateids, GFP_KERNEL)) return NULL; /* * Note: if we fail here (or any time between now and the time * we actually get the new idr), we won't need to undo the idr * preallocation, since the idr code caps the number of * preallocated entries. */ return kmem_cache_alloc(slab, GFP_KERNEL); } static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp) { return openlockstateid(nfs4_alloc_stid(clp, stateid_slab)); } static struct nfs4_delegation * alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type) { struct nfs4_delegation *dp; struct nfs4_file *fp = stp->st_file; dprintk("NFSD alloc_init_deleg\n"); /* * Major work on the lease subsystem (for example, to support * calbacks on stat) will be required before we can support * write delegations properly. 
*/ if (type != NFS4_OPEN_DELEGATE_READ) return NULL; if (fp->fi_had_conflict) return NULL; if (num_delegations > max_delegations) return NULL; dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); if (dp == NULL) return dp; init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID); /* * delegation seqid's are never incremented. The 4.1 special * meaning of seqid 0 isn't meaningful, really, but let's avoid * 0 anyway just for consistency and use 1: */ dp->dl_stid.sc_stateid.si_generation = 1; num_delegations++; INIT_LIST_HEAD(&dp->dl_perfile); INIT_LIST_HEAD(&dp->dl_perclnt); INIT_LIST_HEAD(&dp->dl_recall_lru); get_nfs4_file(fp); dp->dl_file = fp; dp->dl_type = type; fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle); dp->dl_time = 0; atomic_set(&dp->dl_count, 1); INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); return dp; } void nfs4_put_delegation(struct nfs4_delegation *dp) { if (atomic_dec_and_test(&dp->dl_count)) { dprintk("NFSD: freeing dp %p\n",dp); put_nfs4_file(dp->dl_file); kmem_cache_free(deleg_slab, dp); num_delegations--; } } static void nfs4_put_deleg_lease(struct nfs4_file *fp) { if (atomic_dec_and_test(&fp->fi_delegees)) { vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); fp->fi_lease = NULL; fput(fp->fi_deleg_file); fp->fi_deleg_file = NULL; } } static void unhash_stid(struct nfs4_stid *s) { struct idr *stateids = &s->sc_client->cl_stateids; idr_remove(stateids, s->sc_stateid.si_opaque.so_id); } /* Called under the state lock. 
 */
/*
 * Tear down a delegation: drop it from the idr, unlink it from the
 * per-client and per-file lists (the latter two under recall_lock,
 * which guards del_recall_lru and the file hash), release the lease
 * reference on the file, and drop the caller's delegation reference.
 */
static void unhash_delegation(struct nfs4_delegation *dp)
{
	unhash_stid(&dp->dl_stid);
	list_del_init(&dp->dl_perclnt);
	spin_lock(&recall_lock);
	list_del_init(&dp->dl_perfile);
	list_del_init(&dp->dl_recall_lru);
	spin_unlock(&recall_lock);
	nfs4_put_deleg_lease(dp->dl_file);
	nfs4_put_delegation(dp);
}

/*
 * SETCLIENTID state
 */

/* client_lock protects the client lru list and session hash table */
static DEFINE_SPINLOCK(client_lock);

/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS	4
#define CLIENT_HASH_SIZE	(1 << CLIENT_HASH_BITS)
#define CLIENT_HASH_MASK	(CLIENT_HASH_SIZE - 1)

/* Hash a clientid's 32-bit id into a conf/unconf id-table bucket. */
static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

/*
 * Hash a client's recovery-directory name into a conf/unconf
 * string-table bucket.  Only the first 8 bytes are hashed.
 */
static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
 * used in reboot/reset lease grace period processing
 *
 * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
 * setclientid_confirmed info.
 *
 * unconf_str_hashtbl[] and unconf_id_hashtbl[] hold unconfirmed
 * setclientid info.
 *
 * client_lru holds client queue ordered by nfs4_client.cl_time
 * for lease renewal.
 *
 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
 * for last close replay.
 */
static struct list_head reclaim_str_hashtbl[CLIENT_HASH_SIZE];
static int reclaim_str_hashtbl_size = 0;
static struct list_head conf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head conf_str_hashtbl[CLIENT_HASH_SIZE];
static struct list_head unconf_str_hashtbl[CLIENT_HASH_SIZE];
static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head client_lru;
static struct list_head close_lru;

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.
 * This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempt to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */

/*
 * Collapse the per-open access bmap into a single access mask in
 * *access.  Bit i of the bmap corresponds to share-access value i;
 * bit 0 ("none") is skipped since it contributes nothing.
 */
static void set_access(unsigned int *access, unsigned long bmap)
{
	int i;

	*access = 0;
	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			*access |= i;
	}
}

/*
 * Same as set_access() but for the deny bmap.  The loop starts at 0
 * here; that is harmless, since ORing in 0 is a no-op.
 */
static void set_deny(unsigned int *deny, unsigned long bmap)
{
	int i;

	*deny = 0;
	for (i = 0; i < 4; i++) {
		if (test_bit(i, &bmap))
			*deny |= i;
	}
}

/*
 * Return 1 if the requested open's share/deny bits do not conflict
 * with the access/deny already granted on @stp, 0 on conflict.
 */
static int test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	unsigned int access, deny;

	set_access(&access, stp->st_access_bmap);
	set_deny(&deny, stp->st_deny_bmap);
	if ((access & open->op_share_deny) || (deny & open->op_share_access))
		return 0;
	return 1;
}

/*
 * Map an NFS4_SHARE_ACCESS_* value to the corresponding open(2)
 * flag (O_RDONLY/O_WRONLY/O_RDWR).  Any other value is a caller bug.
 */
static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	BUG();
}

/* Unlink a stateid from its file's and its owner's lists. */
static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
	list_del(&stp->st_perfile);
	list_del(&stp->st_perstateowner);
}

/*
 * Drop every file access reference recorded in the stateid's access
 * bmap, then drop the stateid's reference on its nfs4_file.
 */
static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
	int i;

	if (stp->st_access_bmap) {
		for (i = 1; i < 4; i++) {
			if (test_bit(i, &stp->st_access_bmap))
				nfs4_file_put_access(stp->st_file,
						     nfs4_access_to_omode(i));
			__clear_bit(i, &stp->st_access_bmap);
		}
	}
	put_nfs4_file(stp->st_file);
	stp->st_file = NULL;
}

/* Return the stateid's memory to the slab cache. */
static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
	kmem_cache_free(stateid_slab, stp);
}

/*
 * Fully release a lock stateid: unhash it, remove any posix locks
 * held on the backing file by its lockowner, and free it.
 */
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct file *file;

	unhash_generic_stateid(stp);
	unhash_stid(&stp->st_stid);
	file =
find_any_file(stp->st_file); if (file) locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner)); close_generic_stateid(stp); free_generic_stateid(stp); } static void unhash_lockowner(struct nfs4_lockowner *lo) { struct nfs4_ol_stateid *stp; list_del(&lo->lo_owner.so_strhash); list_del(&lo->lo_perstateid); list_del(&lo->lo_owner_ino_hash); while (!list_empty(&lo->lo_owner.so_stateids)) { stp = list_first_entry(&lo->lo_owner.so_stateids, struct nfs4_ol_stateid, st_perstateowner); release_lock_stateid(stp); } } static void release_lockowner(struct nfs4_lockowner *lo) { unhash_lockowner(lo); nfs4_free_lockowner(lo); } static void release_stateid_lockowners(struct nfs4_ol_stateid *open_stp) { struct nfs4_lockowner *lo; while (!list_empty(&open_stp->st_lockowners)) { lo = list_entry(open_stp->st_lockowners.next, struct nfs4_lockowner, lo_perstateid); release_lockowner(lo); } } static void unhash_open_stateid(struct nfs4_ol_stateid *stp) { unhash_generic_stateid(stp); release_stateid_lockowners(stp); close_generic_stateid(stp); } static void release_open_stateid(struct nfs4_ol_stateid *stp) { unhash_open_stateid(stp); unhash_stid(&stp->st_stid); free_generic_stateid(stp); } static void unhash_openowner(struct nfs4_openowner *oo) { struct nfs4_ol_stateid *stp; list_del(&oo->oo_owner.so_strhash); list_del(&oo->oo_perclient); while (!list_empty(&oo->oo_owner.so_stateids)) { stp = list_first_entry(&oo->oo_owner.so_stateids, struct nfs4_ol_stateid, st_perstateowner); release_open_stateid(stp); } } static void release_last_closed_stateid(struct nfs4_openowner *oo) { struct nfs4_ol_stateid *s = oo->oo_last_closed_stid; if (s) { unhash_stid(&s->st_stid); free_generic_stateid(s); oo->oo_last_closed_stid = NULL; } } static void release_openowner(struct nfs4_openowner *oo) { unhash_openowner(oo); list_del(&oo->oo_close_lru); release_last_closed_stateid(oo); nfs4_free_openowner(oo); } #define SESSION_HASH_SIZE 512 static struct list_head 
sessionid_hashtbl[SESSION_HASH_SIZE]; static inline int hash_sessionid(struct nfs4_sessionid *sessionid) { struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid; return sid->sequence % SESSION_HASH_SIZE; } #ifdef NFSD_DEBUG static inline void dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) { u32 *ptr = (u32 *)(&sessionid->data[0]); dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]); } #else static inline void dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) { } #endif static void gen_sessionid(struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd4_sessionid *sid; sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; sid->clientid = clp->cl_clientid; sid->sequence = current_sessionid++; sid->reserved = 0; } /* * The protocol defines ca_maxresponssize_cached to include the size of * the rpc header, but all we need to cache is the data starting after * the end of the initial SEQUENCE operation--the rest we regenerate * each time. Therefore we can advertise a ca_maxresponssize_cached * value that is the number of bytes in our cache plus a few additional * bytes. 
 * In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

/* Free every per-slot reply cache buffer of a session. */
static void free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
{
	return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
}

/*
 * Convert the client's requested maxresp_cached into the number of
 * bytes we will actually cache per slot, capped at
 * NFSD_SLOT_CACHE_SIZE.
 */
static int nfsd4_sanitize_slot_size(u32 size)
{
	size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
	size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);

	return size;
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
/*
 * Reserve DRC (reply cache) memory for up to @num slots of @slotsize
 * bytes each.  Returns the number of slots actually granted, which
 * may be less than requested (possibly 0) if the global DRC budget
 * (nfsd_drc_max_mem) or the per-session cap would be exceeded.
 * nfsd_drc_lock protects the global accounting.
 */
static int nfsd4_get_drc_mem(int slotsize, u32 num)
{
	int avail;

	num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);
	spin_lock(&nfsd_drc_lock);
	avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
			nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

/* Return previously reserved DRC memory to the global pool. */
static void nfsd4_put_drc_mem(int slotsize, int num)
{
	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * num;
	spin_unlock(&nfsd_drc_lock);
}

/*
 * Allocate a session together with @numslots slot pointers (in one
 * allocation) and one slot + reply cache buffer per slot.  Returns
 * NULL on allocation failure, after freeing any partial work.
 */
static struct nfsd4_session *alloc_session(int slotsize, int numslots)
{
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		mem = sizeof(struct nfsd4_slot) + slotsize;
		new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}
	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

/*
 * Fill in the session's forechannel attributes from the client's
 * request, clamped to what the server can actually support (slot
 * count/size already negotiated, RPC message size, ops per compound).
 */
static void init_forechannel_attrs(struct nfsd4_channel_attrs *new,
				   struct nfsd4_channel_attrs *req,
				   int numslots, int slotsize)
{
	u32 maxrpc = nfsd_serv->sv_max_mesg;

	new->maxreqs = numslots;
	new->maxresp_cached = min_t(u32, req->maxresp_cached,
					slotsize + NFSD_MIN_HDR_SEQ_SZ);
	new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
	new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
	new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
}

/* Drop the connection's transport reference and free it. */
static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

/*
 * Transport-down callback: detach the connection from its session
 * (under the client's cl_lock) and free it if it was still linked.
 */
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	spin_unlock(&clp->cl_lock);
	/* re-probe the callback channel now that a connection is gone */
	nfsd4_probe_callback(clp);
}

/*
 * Allocate a connection record for this request's transport, taking
 * a reference on the transport.  @flags are the NFS4_CDFC4_* bits.
 * Returns NULL on allocation failure.
 */
static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

/* Link a connection to a session; caller holds the client's cl_lock. */
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

/* Locked wrapper around __nfsd4_hash_conn(). */
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

/*
 * Arrange for nfsd4_conn_lost() to run when the transport goes down.
 * Returns non-zero if the transport is already being torn down.
 */
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

/*
 * Bind the request's connection to @ses in direction(s) @dir.
 * Returns nfserr_jukebox on allocation failure, otherwise nfs_ok;
 * a racing transport shutdown is handled by running the lost
 * callback ourselves.
 */
static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
{
	struct nfsd4_conn *conn;
	int ret;

	conn = alloc_conn(rqstp, dir);
	if (!conn)
		return nfserr_jukebox;
	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	return nfs_ok;
}

/*
 * CREATE_SESSION helper: bind the connection as fore channel, and as
 * back channel too if the session asked for one.
 */
static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (ses->se_flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return nfsd4_new_conn(rqstp, ses, dir);
}

/* must be called under client_lock */
/*
 * Drop all of a session's connections.  cl_lock is released around
 * unregister_xpt_user()/free_conn() for each entry, so the list is
 * re-read from the head on every iteration.
 */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void free_session(struct kref
*kref) { struct nfsd4_session *ses; int mem; BUG_ON(!spin_is_locked(&client_lock)); ses = container_of(kref, struct nfsd4_session, se_ref); nfsd4_del_conns(ses); spin_lock(&nfsd_drc_lock); mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel); nfsd_drc_mem_used -= mem; spin_unlock(&nfsd_drc_lock); free_session_slots(ses); kfree(ses); } void nfsd4_put_session(struct nfsd4_session *ses) { spin_lock(&client_lock); nfsd4_put_session_locked(ses); spin_unlock(&client_lock); } static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses) { struct nfsd4_session *new; struct nfsd4_channel_attrs *fchan = &cses->fore_channel; int numslots, slotsize; int status; int idx; /* * Note decreasing slot size below client's request may * make it difficult for client to function correctly, whereas * decreasing the number of slots will (just?) affect * performance. When short on memory we therefore prefer to * decrease number of slots instead of their size. */ slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached); numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs); if (numslots < 1) return NULL; new = alloc_session(slotsize, numslots); if (!new) { nfsd4_put_drc_mem(slotsize, fchan->maxreqs); return NULL; } init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize); new->se_client = clp; gen_sessionid(new); INIT_LIST_HEAD(&new->se_conns); new->se_cb_seq_nr = 1; new->se_flags = cses->flags; new->se_cb_prog = cses->callback_prog; kref_init(&new->se_ref); idx = hash_sessionid(&new->se_sessionid); spin_lock(&client_lock); list_add(&new->se_hash, &sessionid_hashtbl[idx]); spin_lock(&clp->cl_lock); list_add(&new->se_perclnt, &clp->cl_sessions); spin_unlock(&clp->cl_lock); spin_unlock(&client_lock); status = nfsd4_new_conn_from_crses(rqstp, new); /* whoops: benny points out, status is ignored! 
(err, or bogus) */ if (status) { spin_lock(&client_lock); free_session(&new->se_ref); spin_unlock(&client_lock); return NULL; } if (cses->flags & SESSION4_BACK_CHAN) { struct sockaddr *sa = svc_addr(rqstp); /* * This is a little silly; with sessions there's no real * use for the callback address. Use the peer address * as a reasonable default for now, but consider fixing * the rpc client not to require an address in the * future: */ rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); } nfsd4_probe_callback(clp); return new; } /* caller must hold client_lock */ static struct nfsd4_session * find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid) { struct nfsd4_session *elem; int idx; dump_sessionid(__func__, sessionid); idx = hash_sessionid(sessionid); /* Search in the appropriate list */ list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) { if (!memcmp(elem->se_sessionid.data, sessionid->data, NFS4_MAX_SESSIONID_LEN)) { return elem; } } dprintk("%s: session not found\n", __func__); return NULL; } /* caller must hold client_lock */ static void unhash_session(struct nfsd4_session *ses) { list_del(&ses->se_hash); spin_lock(&ses->se_client->cl_lock); list_del(&ses->se_perclnt); spin_unlock(&ses->se_client->cl_lock); } /* must be called under the client_lock */ static inline void renew_client_locked(struct nfs4_client *clp) { if (is_client_expired(clp)) { dprintk("%s: client (clientid %08x/%08x) already expired\n", __func__, clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); return; } dprintk("renewing client (clientid %08x/%08x)\n", clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); list_move_tail(&clp->cl_lru, &client_lru); clp->cl_time = get_seconds(); } static inline void renew_client(struct nfs4_client *clp) { spin_lock(&client_lock); renew_client_locked(clp); spin_unlock(&client_lock); } /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ static int STALE_CLIENTID(clientid_t *clid) { if 
(clid->cl_boot == boot_time) return 0; dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n", clid->cl_boot, clid->cl_id, boot_time); return 1; } /* * XXX Should we use a slab cache ? * This type of memory management is somewhat inefficient, but we use it * anyway since SETCLIENTID is not a common operation. */ static struct nfs4_client *alloc_client(struct xdr_netobj name) { struct nfs4_client *clp; clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL); if (clp == NULL) return NULL; clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL); if (clp->cl_name.data == NULL) { kfree(clp); return NULL; } clp->cl_name.len = name.len; return clp; } static inline void free_client(struct nfs4_client *clp) { BUG_ON(!spin_is_locked(&client_lock)); while (!list_empty(&clp->cl_sessions)) { struct nfsd4_session *ses; ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, se_perclnt); list_del(&ses->se_perclnt); nfsd4_put_session_locked(ses); } if (clp->cl_cred.cr_group_info) put_group_info(clp->cl_cred.cr_group_info); kfree(clp->cl_principal); kfree(clp->cl_name.data); kfree(clp); } void release_session_client(struct nfsd4_session *session) { struct nfs4_client *clp = session->se_client; if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock)) return; if (is_client_expired(clp)) { free_client(clp); session->se_client = NULL; } else renew_client_locked(clp); spin_unlock(&client_lock); } /* must be called under the client_lock */ static inline void unhash_client_locked(struct nfs4_client *clp) { struct nfsd4_session *ses; mark_client_expired(clp); list_del(&clp->cl_lru); spin_lock(&clp->cl_lock); list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) list_del_init(&ses->se_hash); spin_unlock(&clp->cl_lock); } static void expire_client(struct nfs4_client *clp) { struct nfs4_openowner *oo; struct nfs4_delegation *dp; struct list_head reaplist; INIT_LIST_HEAD(&reaplist); spin_lock(&recall_lock); while (!list_empty(&clp->cl_delegations)) { dp = 
list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); list_del_init(&dp->dl_perclnt); list_move(&dp->dl_recall_lru, &reaplist); } spin_unlock(&recall_lock); while (!list_empty(&reaplist)) { dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); unhash_delegation(dp); } while (!list_empty(&clp->cl_openowners)) { oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); release_openowner(oo); } nfsd4_shutdown_callback(clp); if (clp->cl_cb_conn.cb_xprt) svc_xprt_put(clp->cl_cb_conn.cb_xprt); list_del(&clp->cl_idhash); list_del(&clp->cl_strhash); spin_lock(&client_lock); unhash_client_locked(clp); if (atomic_read(&clp->cl_refcount) == 0) free_client(clp); spin_unlock(&client_lock); } static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) { memcpy(target->cl_verifier.data, source->data, sizeof(target->cl_verifier.data)); } static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) { target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; target->cl_clientid.cl_id = source->cl_clientid.cl_id; } static void copy_cred(struct svc_cred *target, struct svc_cred *source) { target->cr_uid = source->cr_uid; target->cr_gid = source->cr_gid; target->cr_group_info = source->cr_group_info; get_group_info(target->cr_group_info); } static int same_name(const char *n1, const char *n2) { return 0 == memcmp(n1, n2, HEXDIR_LEN); } static int same_verf(nfs4_verifier *v1, nfs4_verifier *v2) { return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); } static int same_clid(clientid_t *cl1, clientid_t *cl2) { return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); } /* XXX what about NGROUP */ static int same_creds(struct svc_cred *cr1, struct svc_cred *cr2) { return cr1->cr_uid == cr2->cr_uid; } static void gen_clid(struct nfs4_client *clp) { static u32 current_clientid = 1; clp->cl_clientid.cl_boot = boot_time; clp->cl_clientid.cl_id = current_clientid++; } static void 
gen_confirm(struct nfs4_client *clp) { __be32 verf[2]; static u32 i; verf[0] = (__be32)get_seconds(); verf[1] = (__be32)i++; memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); } static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t) { return idr_find(&cl->cl_stateids, t->si_opaque.so_id); } static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) { struct nfs4_stid *s; s = find_stateid(cl, t); if (!s) return NULL; if (typemask & s->sc_type) return s; return NULL; } static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir, struct svc_rqst *rqstp, nfs4_verifier *verf) { struct nfs4_client *clp; struct sockaddr *sa = svc_addr(rqstp); char *princ; clp = alloc_client(name); if (clp == NULL) return NULL; INIT_LIST_HEAD(&clp->cl_sessions); princ = svc_gss_principal(rqstp); if (princ) { clp->cl_principal = kstrdup(princ, GFP_KERNEL); if (clp->cl_principal == NULL) { spin_lock(&client_lock); free_client(clp); spin_unlock(&client_lock); return NULL; } } idr_init(&clp->cl_stateids); memcpy(clp->cl_recdir, recdir, HEXDIR_LEN); atomic_set(&clp->cl_refcount, 0); clp->cl_cb_state = NFSD4_CB_UNKNOWN; INIT_LIST_HEAD(&clp->cl_idhash); INIT_LIST_HEAD(&clp->cl_strhash); INIT_LIST_HEAD(&clp->cl_openowners); INIT_LIST_HEAD(&clp->cl_delegations); INIT_LIST_HEAD(&clp->cl_lru); INIT_LIST_HEAD(&clp->cl_callbacks); spin_lock_init(&clp->cl_lock); INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc); clp->cl_time = get_seconds(); clear_bit(0, &clp->cl_cb_slot_busy); rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); copy_verf(clp, verf); rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); clp->cl_flavor = rqstp->rq_flavor; copy_cred(&clp->cl_cred, &rqstp->rq_cred); gen_confirm(clp); clp->cl_cb_session = NULL; return clp; } static void add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval) { unsigned int idhashval; list_add(&clp->cl_strhash, 
&unconf_str_hashtbl[strhashval]); idhashval = clientid_hashval(clp->cl_clientid.cl_id); list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]); renew_client(clp); } static void move_to_confirmed(struct nfs4_client *clp) { unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); unsigned int strhashval; dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]); strhashval = clientstr_hashval(clp->cl_recdir); list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]); renew_client(clp); } static struct nfs4_client * find_confirmed_client(clientid_t *clid) { struct nfs4_client *clp; unsigned int idhashval = clientid_hashval(clid->cl_id); list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) { if (same_clid(&clp->cl_clientid, clid)) { renew_client(clp); return clp; } } return NULL; } static struct nfs4_client * find_unconfirmed_client(clientid_t *clid) { struct nfs4_client *clp; unsigned int idhashval = clientid_hashval(clid->cl_id); list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) { if (same_clid(&clp->cl_clientid, clid)) return clp; } return NULL; } static bool clp_used_exchangeid(struct nfs4_client *clp) { return clp->cl_exchange_flags != 0; } static struct nfs4_client * find_confirmed_client_by_str(const char *dname, unsigned int hashval) { struct nfs4_client *clp; list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) { if (same_name(clp->cl_recdir, dname)) return clp; } return NULL; } static struct nfs4_client * find_unconfirmed_client_by_str(const char *dname, unsigned int hashval) { struct nfs4_client *clp; list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) { if (same_name(clp->cl_recdir, dname)) return clp; } return NULL; } static void gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) { struct nfs4_cb_conn *conn = &clp->cl_cb_conn; struct sockaddr *sa = svc_addr(rqstp); u32 scopeid = 
rpc_get_scope_id(sa); unsigned short expected_family; /* Currently, we only support tcp and tcp6 for the callback channel */ if (se->se_callback_netid_len == 3 && !memcmp(se->se_callback_netid_val, "tcp", 3)) expected_family = AF_INET; else if (se->se_callback_netid_len == 4 && !memcmp(se->se_callback_netid_val, "tcp6", 4)) expected_family = AF_INET6; else goto out_err; conn->cb_addrlen = rpc_uaddr2sockaddr(&init_net, se->se_callback_addr_val, se->se_callback_addr_len, (struct sockaddr *)&conn->cb_addr, sizeof(conn->cb_addr)); if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) goto out_err; if (conn->cb_addr.ss_family == AF_INET6) ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; conn->cb_prog = se->se_callback_prog; conn->cb_ident = se->se_callback_ident; memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); return; out_err: conn->cb_addr.ss_family = AF_UNSPEC; conn->cb_addrlen = 0; dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " "will not receive delegations\n", clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); return; } /* * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size. */ void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) { struct nfsd4_slot *slot = resp->cstate.slot; unsigned int base; dprintk("--> %s slot %p\n", __func__, slot); slot->sl_opcnt = resp->opcnt; slot->sl_status = resp->cstate.status; slot->sl_flags |= NFSD4_SLOT_INITIALIZED; if (nfsd4_not_cached(resp)) { slot->sl_datalen = 0; return; } slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap; base = (char *)resp->cstate.datap - (char *)resp->xbuf->head[0].iov_base; if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data, slot->sl_datalen)) WARN("%s: sessions DRC could not cache compound\n", __func__); return; } /* * Encode the replay sequence operation from the slot values. 
* If cachethis is FALSE encode the uncached rep error on the next * operation which sets resp->p and increments resp->opcnt for * nfs4svc_encode_compoundres. * */ static __be32 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, struct nfsd4_compoundres *resp) { struct nfsd4_op *op; struct nfsd4_slot *slot = resp->cstate.slot; /* Encode the replayed sequence operation */ op = &args->ops[resp->opcnt - 1]; nfsd4_encode_operation(resp, op); /* Return nfserr_retry_uncached_rep in next operation. */ if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) { op = &args->ops[resp->opcnt++]; op->status = nfserr_retry_uncached_rep; nfsd4_encode_operation(resp, op); } return op->status; } /* * The sequence operation is not cached because we can use the slot and * session values. */ __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, struct nfsd4_sequence *seq) { struct nfsd4_slot *slot = resp->cstate.slot; __be32 status; dprintk("--> %s slot %p\n", __func__, slot); /* Either returns 0 or nfserr_retry_uncached */ status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); if (status == nfserr_retry_uncached_rep) return status; /* The sequence operation has been encoded, cstate->datap set. */ memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen); resp->opcnt = slot->sl_opcnt; resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen); status = slot->sl_status; return status; } /* * Set the exchange_id flags returned by the server. */ static void nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) { /* pNFS is not supported */ new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; /* Referrals are supported, Migration is not. */ new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; /* set the wire flags to return to client. 
 */
	clid->flags = new->cl_exchange_flags;
}

/*
 * NFSv4.1 EXCHANGE_ID (RFC 5661 section 18.35).  Establishes (or
 * re-establishes) a client record keyed by the client-supplied owner
 * string; the case numbers in the comments below refer to the decision
 * table in RFC 5661 section 18.35.4.
 */
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	int status;
	unsigned int strhashval;
	char dname[HEXDIR_LEN];
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	/* Currently only support SP4_NONE */
	switch (exid->spa_how) {
	case SP4_NONE:
		break;
	case SP4_SSV:
		return nfserr_serverfault;
	default:
		BUG();				/* checked by xdr code */
	case SP4_MACH_CRED:
		return nfserr_serverfault;	/* no excuse :-/ */
	}

	status = nfs4_make_rec_clidname(dname, &exid->clname);

	if (status)
		goto error;

	strhashval = clientstr_hashval(dname);

	nfs4_lock_state();
	status = nfs_ok;

	conf = find_confirmed_client_by_str(dname, strhashval);
	if (conf) {
		if (!clp_used_exchangeid(conf)) {
			status = nfserr_clid_inuse; /* XXX: ? */
			goto out;
		}
		if (!same_verf(&verf, &conf->cl_verifier)) {
			/* 18.35.4 case 8 */
			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
				status = nfserr_not_same;
				goto out;
			}
			/* Client reboot: destroy old state */
			expire_client(conf);
			goto out_new;
		}
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			/* 18.35.4 case 9 */
			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
				status = nfserr_perm;
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
		/*
		 * Set bit when the owner id and verifier map to an already
		 * confirmed client id (18.35.3).
		 */
		exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;

		/*
		 * Falling into 18.35.4 case 2, possible router replay.
		 * Leave confirmed record intact and return same result.
		 */
		copy_verf(conf, &verf);
		new = conf;
		goto out_copy;
	}

	/* 18.35.4 case 7 */
	if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_str(dname, strhashval);
	if (unconf) {
		/*
		 * Possible retry or client restart.  Per 18.35.4 case 4,
		 * a new unconfirmed record should be generated regardless
		 * of whether any properties have changed.
		 */
		expire_client(unconf);
	}

out_new:
	/* Normal case */
	new = create_client(exid->clname, dname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	gen_clid(new);
	add_to_unconfirmed(new, strhashval);
out_copy:
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
error:
	dprintk("nfsd4_exchange_id returns %d\n", ntohl(status));
	return status;
}

/*
 * Validate a slot seqid against the last seqid seen on that slot.
 * Returns nfs_ok for the expected next request, nfserr_replay_cache for
 * an exact retransmission, and a misordered/jukebox error otherwise.
 */
static int
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}

/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, int nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

/* Replay a cached CREATE_SESSION reply out of the client's single slot. */
static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

/* Minimum XDR size (bytes) of a request holding just a SEQUENCE op. */
#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

/* Minimum XDR size (bytes) of a reply holding just a SEQUENCE op. */
#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))

/* Nonzero if the requested channel sizes are too small to be usable. */
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
{
	return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
		|| fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
}

/*
 * NFSv4.1 CREATE_SESSION (RFC 5661 section 18.36).  Replays are detected
 * via the per-client create_session slot (cl_cs_slot); a session created
 * against an unconfirmed client also confirms that client.
 */
__be32 nfsd4_create_session(struct svc_rqst *rqstp,
			    struct nfsd4_compound_state *cstate,
			    struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_clid_slot *cs_slot = NULL;
	bool confirm_me = false;
	int status = 0;

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid);
	conf = find_confirmed_client(&cr_ses->clientid);

	if (conf) {
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			dprintk("Got a create_session replay! seqid= %d\n",
				cs_slot->sl_seqid);
			/* Return the cached reply status */
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			dprintk("Sequence misordered!\n");
			dprintk("Expected seqid= %d but got seqid= %d\n",
				cs_slot->sl_seqid, cr_ses->seqid);
			goto out;
		}
	} else if (unconf) {
		/* Creds and address must match the original EXCHANGE_ID. */
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out;
		}
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out;
		}
		confirm_me = true;
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out;
	}

	/*
	 * XXX: we should probably set this at creation time, and check
	 * for consistent minorversion use throughout:
	 */
	conf->cl_minorversion = 1;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	status = nfserr_toosmall;
	if (check_forechannel_attrs(cr_ses->fore_channel))
		goto out;

	status = nfserr_jukebox;
	new = alloc_init_session(rqstp, conf, cr_ses);
	if (!new)
		goto out;
	status = nfs_ok;
	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
		sizeof(struct nfsd4_channel_attrs));
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	if (confirm_me)
		move_to_confirmed(conf);
out:
	nfs4_unlock_state();
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
}

/* True iff the op being processed is the last op in its compound. */
static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	return argp->opcnt == resp->opcnt;
}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
switch (*dir) { case NFS4_CDFC4_FORE: case NFS4_CDFC4_BACK: return nfs_ok; case NFS4_CDFC4_FORE_OR_BOTH: case NFS4_CDFC4_BACK_OR_BOTH: *dir = NFS4_CDFC4_BOTH; return nfs_ok; }; return nfserr_inval; } __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_bind_conn_to_session *bcts) { __be32 status; if (!nfsd4_last_compound_op(rqstp)) return nfserr_not_only_op; spin_lock(&client_lock); cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid); /* Sorta weird: we only need the refcnt'ing because new_conn acquires * client_lock iself: */ if (cstate->session) { nfsd4_get_session(cstate->session); atomic_inc(&cstate->session->se_client->cl_refcount); } spin_unlock(&client_lock); if (!cstate->session) return nfserr_badsession; status = nfsd4_map_bcts_dir(&bcts->dir); if (!status) nfsd4_new_conn(rqstp, cstate->session, bcts->dir); return status; } static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) { if (!session) return 0; return !memcmp(sid, &session->se_sessionid, sizeof(*sid)); } __be32 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_session *sessionid) { struct nfsd4_session *ses; u32 status = nfserr_badsession; /* Notes: * - The confirmed nfs4_client->cl_sessionid holds destroyed sessinid * - Should we return nfserr_back_chan_busy if waiting for * callbacks on to-be-destroyed session? * - Do we need to clear any callback info from previous session? 
*/ if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { if (!nfsd4_last_compound_op(r)) return nfserr_not_only_op; } dump_sessionid(__func__, &sessionid->sessionid); spin_lock(&client_lock); ses = find_in_sessionid_hashtbl(&sessionid->sessionid); if (!ses) { spin_unlock(&client_lock); goto out; } unhash_session(ses); spin_unlock(&client_lock); nfs4_lock_state(); nfsd4_probe_callback_sync(ses->se_client); nfs4_unlock_state(); spin_lock(&client_lock); nfsd4_del_conns(ses); nfsd4_put_session_locked(ses); spin_unlock(&client_lock); status = nfs_ok; out: dprintk("%s returns %d\n", __func__, ntohl(status)); return status; } static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) { struct nfsd4_conn *c; list_for_each_entry(c, &s->se_conns, cn_persession) { if (c->cn_xprt == xpt) { return c; } } return NULL; } static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd4_conn *c; int ret; spin_lock(&clp->cl_lock); c = __nfsd4_find_conn(new->cn_xprt, ses); if (c) { spin_unlock(&clp->cl_lock); free_conn(new); return; } __nfsd4_hash_conn(new, ses); spin_unlock(&clp->cl_lock); ret = nfsd4_register_conn(new); if (ret) /* oops; xprt is already down: */ nfsd4_conn_lost(&new->cn_xpt_user); return; } static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) { struct nfsd4_compoundargs *args = rqstp->rq_argp; return args->opcnt > session->se_fchannel.maxops; } static bool nfsd4_request_too_big(struct svc_rqst *rqstp, struct nfsd4_session *session) { struct xdr_buf *xb = &rqstp->rq_arg; return xb->len > session->se_fchannel.maxreq_sz; } __be32 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_sequence *seq) { struct nfsd4_compoundres *resp = rqstp->rq_resp; struct nfsd4_session *session; struct nfsd4_slot *slot; struct nfsd4_conn *conn; int status; if (resp->opcnt != 1) 
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&client_lock);
	status = nfserr_badsession;
	session = find_in_sessionid_hashtbl(&seq->sessionid);
	if (!session)
		goto out;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		/* Retransmission: answer from the slot's DRC cache. */
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out;
		cstate->slot = slot;
		cstate->session = session;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out;

	/* conn is consumed (hashed or freed) here: */
	nfsd4_sequence_check_conn(conn, session);
	conn = NULL;

	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;

out:
	/* Hold a session reference until done processing the compound. */
	if (cstate->session) {
		struct nfs4_client *clp = session->se_client;

		nfsd4_get_session(cstate->session);
		atomic_inc(&clp->cl_refcount);
		/* Report the state of the backchannel to the client: */
		switch (clp->cl_cb_state) {
		case NFSD4_CB_DOWN:
			seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
			break;
		case NFSD4_CB_FAULT:
			seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
			break;
		default:
			seq->status_flags = 0;
		}
	}
	kfree(conn);
	spin_unlock(&client_lock);
	dprintk("%s: return %d\n", __func__, ntohl(status));
	return status;
}

/* True iff the client still holds opens, delegations, or sessions. */
static inline bool has_resources(struct nfs4_client *clp)
{
	return !list_empty(&clp->cl_openowners)
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}

__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf, *clp;
	int status = 0;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&dc->clientid);
	conf = find_confirmed_client(&dc->clientid);

	if (conf) {
		clp = conf;

		/* A live confirmed client with state cannot be destroyed. */
		if (!is_client_expired(conf) && has_resources(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}

		/* rfc5661 18.50.3 */
		if (cstate->session && conf == cstate->session->se_client) {
			status = nfserr_clientid_busy;
			goto out;
		}
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}

	expire_client(clp);
out:
	nfs4_unlock_state();
	dprintk("%s return %d\n", __func__, ntohl(status));
	return status;
}

__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	int status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	nfs4_lock_state();
	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	/* Record on stable storage that this client completed reclaim. */
	nfsd4_client_record_create(cstate->session->se_client);
out:
	nfs4_unlock_state();
	return status;
}

/*
 * NFSv4.0 SETCLIENTID (RFC 3530 section 14.2.33).  Creates a new
 * unconfirmed client record; the CASE comments below follow the five
 * bullet points of the RFC's IMPLEMENTATION section.
 */
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj 	clname = setclid->se_name;
	nfs4_verifier		clverifier = setclid->se_verf;
	unsigned int 		strhashval;
	struct nfs4_client	*conf, *unconf, *new;
	__be32 			status;
	char			dname[HEXDIR_LEN];

	status = nfs4_make_rec_clidname(dname, &clname);
	if (status)
		return status;

	/*
	 * XXX The Duplicate Request Cache (DRC) has been checked (??)
	 * We get here on a DRC miss.
	 */

	strhashval = clientstr_hashval(dname);

	nfs4_lock_state();
	conf = find_confirmed_client_by_str(dname, strhashval);
	if (conf) {
		/* RFC 3530 14.2.33 CASE 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	/*
	 * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
	 * has a description of SETCLIENTID request processing consisting
	 * of 5 bullet points, labeled as CASE0 - CASE4 below.
	 */
	unconf = find_unconfirmed_client_by_str(dname, strhashval);
	status = nfserr_jukebox;
	if (!conf) {
		/*
		 * RFC 3530 14.2.33 CASE 4:
		 * placed first, because it is the normal case
		 */
		if (unconf)
			expire_client(unconf);
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	} else if (same_verf(&conf->cl_verifier, &clverifier)) {
		/*
		 * RFC 3530 14.2.33 CASE 1:
		 * probable callback update
		 */
		if (unconf) {
			/* Note this is removing unconfirmed {*x***},
			 * which is stronger than RFC recommended {vxc**}.
			 * This has the advantage that there is at most
			 * one {*x***} in either list at any time.
			 */
			expire_client(unconf);
		}
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		copy_clid(new, conf);
	} else if (!unconf) {
		/*
		 * RFC 3530 14.2.33 CASE 2:
		 * probable client reboot; state will be removed if
		 * confirmed.
		 */
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	} else {
		/*
		 * RFC 3530 14.2.33 CASE 3:
		 * probable client reboot; state will be removed if
		 * confirmed.
		 */
		expire_client(unconf);
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	}
	/*
	 * XXX: we should probably set this at creation time, and check
	 * for consistent minorversion use throughout:
	 */
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new, strhashval);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data,
	       sizeof(setclid->se_confirm.data));
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}

/*
 * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has
 * a description of SETCLIENTID_CONFIRM request processing consisting of 4
 * bullets, labeled as CASE1 - CASE4 below.
 */
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			 struct nfsd4_compound_state *cstate,
			 struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;

	if (STALE_CLIENTID(clid))
		return nfserr_stale_clientid;
	/*
	 * XXX The Duplicate Request Cache (DRC) has been checked (??)
	 * We get here on a DRC miss.
	 */

	nfs4_lock_state();

	conf = find_confirmed_client(clid);
	unconf = find_unconfirmed_client(clid);

	status = nfserr_clid_inuse;
	/* Requests from a different address than the original are rejected. */
	if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa))
		goto out;
	if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa))
		goto out;

	/*
	 * section 14.2.34 of RFC 3530 has a description of
	 * SETCLIENTID_CONFIRM request processing consisting
	 * of 4 bullet points, labeled as CASE1 - CASE4 below.
	 */
	if (conf && unconf && same_verf(&confirm, &unconf->cl_confirm)) {
		/*
		 * RFC 3530 14.2.34 CASE 1:
		 * callback update
		 */
		if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
			status = nfserr_clid_inuse;
		else {
			nfsd4_change_callback(conf, &unconf->cl_cb_conn);
			nfsd4_probe_callback(conf);
			expire_client(unconf);
			status = nfs_ok;

		}
	} else if (conf && !unconf) {
		/*
		 * RFC 3530 14.2.34 CASE 2:
		 * probable retransmitted request; play it safe and
		 * do nothing.
		 */
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred))
			status = nfserr_clid_inuse;
		else
			status = nfs_ok;
	} else if (!conf && unconf
			&& same_verf(&unconf->cl_confirm, &confirm)) {
		/*
		 * RFC 3530 14.2.34 CASE 3:
		 * Normal case; new or rebooted client:
		 */
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
			status = nfserr_clid_inuse;
		} else {
			unsigned int hash = clientstr_hashval(unconf->cl_recdir);
			conf = find_confirmed_client_by_str(unconf->cl_recdir, hash);
			if (conf) {
				/* Replacing an old confirmed record. */
				nfsd4_client_record_remove(conf);
				expire_client(conf);
			}
			move_to_confirmed(unconf);
			conf = unconf;
			nfsd4_probe_callback(conf);
			status = nfs_ok;
		}
	} else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
	    && (!unconf || (unconf && !same_verf(&unconf->cl_confirm,
				    				&confirm)))) {
		/*
		 * RFC 3530 14.2.34 CASE 4:
		 * Client probably hasn't noticed that we rebooted yet.
		 */
		status = nfserr_stale_clientid;
	} else {
		/* check that we have hit one of the cases...*/
		status = nfserr_clid_inuse;
	}
out:
	nfs4_unlock_state();
	return status;
}

static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */

/* Initialize fp for inode ino and hash it; takes a reference on ino. */
static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
{
	unsigned int hashval = file_hashval(ino);

	atomic_set(&fp->fi_ref, 1);
	INIT_LIST_HEAD(&fp->fi_hash);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	fp->fi_inode = igrab(ino);
	fp->fi_had_conflict = false;
	fp->fi_lease = NULL;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	spin_lock(&recall_lock);
	list_add(&fp->fi_hash, &file_hashtbl[hashval]);
	spin_unlock(&recall_lock);
}

/* Destroy *slab if it was created, and reset the pointer. */
static void
nfsd4_free_slab(struct kmem_cache **slab)
{
	if (*slab == NULL)
		return;
	kmem_cache_destroy(*slab);
	*slab = NULL;
}

void
nfsd4_free_slabs(void)
{
	nfsd4_free_slab(&openowner_slab);
	nfsd4_free_slab(&lockowner_slab);
	nfsd4_free_slab(&file_slab);
	nfsd4_free_slab(&stateid_slab);
	nfsd4_free_slab(&deleg_slab);
}

int
nfsd4_init_slabs(void) { openowner_slab = kmem_cache_create("nfsd4_openowners", sizeof(struct nfs4_openowner), 0, 0, NULL); if (openowner_slab == NULL) goto out_nomem; lockowner_slab = kmem_cache_create("nfsd4_lockowners", sizeof(struct nfs4_openowner), 0, 0, NULL); if (lockowner_slab == NULL) goto out_nomem; file_slab = kmem_cache_create("nfsd4_files", sizeof(struct nfs4_file), 0, 0, NULL); if (file_slab == NULL) goto out_nomem; stateid_slab = kmem_cache_create("nfsd4_stateids", sizeof(struct nfs4_ol_stateid), 0, 0, NULL); if (stateid_slab == NULL) goto out_nomem; deleg_slab = kmem_cache_create("nfsd4_delegations", sizeof(struct nfs4_delegation), 0, 0, NULL); if (deleg_slab == NULL) goto out_nomem; return 0; out_nomem: nfsd4_free_slabs(); dprintk("nfsd4: out of memory while initializing nfsv4\n"); return -ENOMEM; } void nfs4_free_openowner(struct nfs4_openowner *oo) { kfree(oo->oo_owner.so_owner.data); kmem_cache_free(openowner_slab, oo); } void nfs4_free_lockowner(struct nfs4_lockowner *lo) { kfree(lo->lo_owner.so_owner.data); kmem_cache_free(lockowner_slab, lo); } static void init_nfs4_replay(struct nfs4_replay *rp) { rp->rp_status = nfserr_serverfault; rp->rp_buflen = 0; rp->rp_buf = rp->rp_ibuf; } static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) { struct nfs4_stateowner *sop; sop = kmem_cache_alloc(slab, GFP_KERNEL); if (!sop) return NULL; sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL); if (!sop->so_owner.data) { kmem_cache_free(slab, sop); return NULL; } sop->so_owner.len = owner->len; INIT_LIST_HEAD(&sop->so_stateids); sop->so_client = clp; init_nfs4_replay(&sop->so_replay); return sop; } static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) { list_add(&oo->oo_owner.so_strhash, &ownerstr_hashtbl[strhashval]); list_add(&oo->oo_perclient, &clp->cl_openowners); } static struct nfs4_openowner * alloc_init_open_stateowner(unsigned 
int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) { struct nfs4_openowner *oo; oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); if (!oo) return NULL; oo->oo_owner.so_is_open_owner = 1; oo->oo_owner.so_seqid = open->op_seqid; oo->oo_flags = NFS4_OO_NEW; oo->oo_time = 0; oo->oo_last_closed_stid = NULL; INIT_LIST_HEAD(&oo->oo_close_lru); hash_openowner(oo, clp, strhashval); return oo; } static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { struct nfs4_openowner *oo = open->op_openowner; struct nfs4_client *clp = oo->oo_owner.so_client; init_stid(&stp->st_stid, clp, NFS4_OPEN_STID); INIT_LIST_HEAD(&stp->st_lockowners); list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); list_add(&stp->st_perfile, &fp->fi_stateids); stp->st_stateowner = &oo->oo_owner; get_nfs4_file(fp); stp->st_file = fp; stp->st_access_bmap = 0; stp->st_deny_bmap = 0; __set_bit(open->op_share_access, &stp->st_access_bmap); __set_bit(open->op_share_deny, &stp->st_deny_bmap); stp->st_openstp = NULL; } static void move_to_close_lru(struct nfs4_openowner *oo) { dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); list_move_tail(&oo->oo_close_lru, &close_lru); oo->oo_time = get_seconds(); } static int same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, clientid_t *clid) { return (sop->so_owner.len == owner->len) && 0 == memcmp(sop->so_owner.data, owner->data, owner->len) && (sop->so_client->cl_clientid.cl_id == clid->cl_id); } static struct nfs4_openowner * find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open) { struct nfs4_stateowner *so; struct nfs4_openowner *oo; list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) { if (!so->so_is_open_owner) continue; if (same_owner_str(so, &open->op_owner, &open->op_clientid)) { oo = openowner(so); renew_client(oo->oo_owner.so_client); return oo; } } return NULL; } /* search file_hashtbl[] for file */ static struct nfs4_file * 
/*
 * Look up the nfs4_file tracking @ino in file_hashtbl, taking a
 * reference on it.  Returns NULL if no open state exists for the inode.
 * The caller must balance with put_nfs4_file().
 */
find_file(struct inode *ino)
{
	unsigned int hashval = file_hashval(ino);
	struct nfs4_file *fp;

	/* recall_lock guards file_hashtbl; take the ref before dropping it */
	spin_lock(&recall_lock);
	list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
		if (fp->fi_inode == ino) {
			get_nfs4_file(fp);
			spin_unlock(&recall_lock);
			return fp;
		}
	}
	spin_unlock(&recall_lock);
	return NULL;
}

/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid.
 * Returns nfserr_locked if any existing open stateid denies @deny_type
 * (or denies both), nfs_ok otherwise.
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfs4_file *fp;
	struct nfs4_ol_stateid *stp;
	__be32 ret;

	dprintk("NFSD: nfs4_share_conflict\n");

	fp = find_file(ino);
	if (!fp)
		return nfs_ok;	/* no open state on this file => no conflict */
	ret = nfserr_locked;
	/* Search for conflicting share reservations */
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
		if (test_bit(deny_type, &stp->st_deny_bmap) ||
		    test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap))
			goto out;
	}
	ret = nfs_ok;
out:
	put_nfs4_file(fp);	/* drop the reference taken by find_file() */
	return ret;
}

/* Queue one delegation for recall: ref it, put it on the recall LRU,
 * timestamp it, and kick off the CB_RECALL callback. */
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/* We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference: */
	atomic_inc(&dp->dl_count);

	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);

	/* only place dl_time is set. protected by lock_flocks*/
	dp->dl_time = get_seconds();

	nfsd4_cb_recall(dp);
}

/* Called from break_lease() with lock_flocks() held.
*/ static void nfsd_break_deleg_cb(struct file_lock *fl) { struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; struct nfs4_delegation *dp; BUG_ON(!fp); /* We assume break_lease is only called once per lease: */ BUG_ON(fp->fi_had_conflict); /* * We don't want the locks code to timeout the lease for us; * we'll remove it ourself if a delegation isn't returned * in time: */ fl->fl_break_time = 0; spin_lock(&recall_lock); fp->fi_had_conflict = true; list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) nfsd_break_one_deleg(dp); spin_unlock(&recall_lock); } static int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) { if (arg & F_UNLCK) return lease_modify(onlist, arg); else return -EAGAIN; } static const struct lock_manager_operations nfsd_lease_mng_ops = { .lm_break = nfsd_break_deleg_cb, .lm_change = nfsd_change_deleg_cb, }; static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) { if (nfsd4_has_session(cstate)) return nfs_ok; if (seqid == so->so_seqid - 1) return nfserr_replay_me; if (seqid == so->so_seqid) return nfs_ok; return nfserr_bad_seqid; } __be32 nfsd4_process_open1(struct nfsd4_compound_state *cstate, struct nfsd4_open *open) { clientid_t *clientid = &open->op_clientid; struct nfs4_client *clp = NULL; unsigned int strhashval; struct nfs4_openowner *oo = NULL; __be32 status; if (STALE_CLIENTID(&open->op_clientid)) return nfserr_stale_clientid; /* * In case we need it later, after we've already created the * file and don't want to risk a further failure: */ open->op_file = nfsd4_alloc_file(); if (open->op_file == NULL) return nfserr_jukebox; strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner); oo = find_openstateowner_str(strhashval, open); open->op_openowner = oo; if (!oo) { clp = find_confirmed_client(clientid); if (clp == NULL) return nfserr_expired; goto new_owner; } if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { /* Replace unconfirmed owners without checking for replay. 
*/ clp = oo->oo_owner.so_client; release_openowner(oo); open->op_openowner = NULL; goto new_owner; } status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); if (status) return status; clp = oo->oo_owner.so_client; goto alloc_stateid; new_owner: oo = alloc_init_open_stateowner(strhashval, clp, open); if (oo == NULL) return nfserr_jukebox; open->op_openowner = oo; alloc_stateid: open->op_stp = nfs4_alloc_stateid(clp); if (!open->op_stp) return nfserr_jukebox; return nfs_ok; } static inline __be32 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) { if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) return nfserr_openmode; else return nfs_ok; } static int share_access_to_flags(u32 share_access) { return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; } static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) { struct nfs4_stid *ret; ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); if (!ret) return NULL; return delegstateid(ret); } static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) { return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; } static __be32 nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_delegation **dp) { int flags; __be32 status = nfserr_bad_stateid; *dp = find_deleg_stateid(cl, &open->op_delegate_stateid); if (*dp == NULL) goto out; flags = share_access_to_flags(open->op_share_access); status = nfs4_check_delegmode(*dp, flags); if (status) *dp = NULL; out: if (!nfsd4_is_deleg_cur(open)) return nfs_ok; if (status) return status; open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; return nfs_ok; } static __be32 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp) { struct nfs4_ol_stateid *local; struct nfs4_openowner *oo = open->op_openowner; list_for_each_entry(local, &fp->fi_stateids, st_perfile) { /* ignore lock 
owners */ if (local->st_stateowner->so_is_open_owner == 0) continue; /* remember if we have seen this open owner */ if (local->st_stateowner == &oo->oo_owner) *stpp = local; /* check for conflicting share reservations */ if (!test_share(local, open)) return nfserr_share_denied; } return nfs_ok; } static void nfs4_free_stateid(struct nfs4_ol_stateid *s) { kmem_cache_free(stateid_slab, s); } static inline int nfs4_access_to_access(u32 nfs4_access) { int flags = 0; if (nfs4_access & NFS4_SHARE_ACCESS_READ) flags |= NFSD_MAY_READ; if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) flags |= NFSD_MAY_WRITE; return flags; } static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfsd4_open *open) { __be32 status; int oflag = nfs4_access_to_omode(open->op_share_access); int access = nfs4_access_to_access(open->op_share_access); if (!fp->fi_fds[oflag]) { status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &fp->fi_fds[oflag]); if (status) return status; } nfs4_file_get_access(fp, oflag); return nfs_ok; } static inline __be32 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, struct nfsd4_open *open) { struct iattr iattr = { .ia_valid = ATTR_SIZE, .ia_size = 0, }; if (!open->op_truncate) return 0; if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) return nfserr_inval; return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); } static __be32 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) { u32 op_share_access = open->op_share_access; bool new_access; __be32 status; new_access = !test_bit(op_share_access, &stp->st_access_bmap); if (new_access) { status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open); if (status) return status; } status = nfsd4_truncate(rqstp, cur_fh, open); if (status) { if (new_access) { int oflag = nfs4_access_to_omode(op_share_access); nfs4_file_put_access(fp, oflag); } return status; } /* remember the open */ 
__set_bit(op_share_access, &stp->st_access_bmap); __set_bit(open->op_share_deny, &stp->st_deny_bmap); return nfs_ok; } static void nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session) { open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; } /* Should we give out recallable state?: */ static bool nfsd4_cb_channel_good(struct nfs4_client *clp) { if (clp->cl_cb_state == NFSD4_CB_UP) return true; /* * In the sessions case, since we don't have to establish a * separate connection for callbacks, we assume it's OK * until we hear otherwise: */ return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; } static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) { struct file_lock *fl; fl = locks_alloc_lock(); if (!fl) return NULL; locks_init_lock(fl); fl->fl_lmops = &nfsd_lease_mng_ops; fl->fl_flags = FL_LEASE; fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; fl->fl_end = OFFSET_MAX; fl->fl_owner = (fl_owner_t)(dp->dl_file); fl->fl_pid = current->tgid; return fl; } static int nfs4_setlease(struct nfs4_delegation *dp, int flag) { struct nfs4_file *fp = dp->dl_file; struct file_lock *fl; int status; fl = nfs4_alloc_init_lease(dp, flag); if (!fl) return -ENOMEM; fl->fl_file = find_readable_file(fp); list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); if (status) { list_del_init(&dp->dl_perclnt); locks_free_lock(fl); return -ENOMEM; } fp->fi_lease = fl; fp->fi_deleg_file = fl->fl_file; get_file(fp->fi_deleg_file); atomic_set(&fp->fi_delegees, 1); list_add(&dp->dl_perfile, &fp->fi_delegations); return 0; } static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag) { struct nfs4_file *fp = dp->dl_file; if (!fp->fi_lease) return nfs4_setlease(dp, flag); spin_lock(&recall_lock); if (fp->fi_had_conflict) { spin_unlock(&recall_lock); return -EAGAIN; } atomic_inc(&fp->fi_delegees); list_add(&dp->dl_perfile, &fp->fi_delegations); 
spin_unlock(&recall_lock); list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); return 0; } static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; if (status == -EAGAIN) open->op_why_no_deleg = WND4_CONTENTION; else { open->op_why_no_deleg = WND4_RESOURCE; switch (open->op_deleg_want) { case NFS4_SHARE_WANT_READ_DELEG: case NFS4_SHARE_WANT_WRITE_DELEG: case NFS4_SHARE_WANT_ANY_DELEG: break; case NFS4_SHARE_WANT_CANCEL: open->op_why_no_deleg = WND4_CANCELLED; break; case NFS4_SHARE_WANT_NO_DELEG: BUG(); /* not supposed to get here */ } } } /* * Attempt to hand out a delegation. */ static void nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp) { struct nfs4_delegation *dp; struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner); int cb_up; int status = 0, flag = 0; cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); flag = NFS4_OPEN_DELEGATE_NONE; open->op_recall = 0; switch (open->op_claim_type) { case NFS4_OPEN_CLAIM_PREVIOUS: if (!cb_up) open->op_recall = 1; flag = open->op_delegate_type; if (flag == NFS4_OPEN_DELEGATE_NONE) goto out; break; case NFS4_OPEN_CLAIM_NULL: /* Let's not give out any delegations till everyone's * had the chance to reclaim theirs.... 
*/ if (locks_in_grace()) goto out; if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) goto out; if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) flag = NFS4_OPEN_DELEGATE_WRITE; else flag = NFS4_OPEN_DELEGATE_READ; break; default: goto out; } dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag); if (dp == NULL) goto out_no_deleg; status = nfs4_set_delegation(dp, flag); if (status) goto out_free; memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", STATEID_VAL(&dp->dl_stid.sc_stateid)); out: open->op_delegate_type = flag; if (flag == NFS4_OPEN_DELEGATE_NONE) { if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) dprintk("NFSD: WARNING: refusing delegation reclaim\n"); /* 4.1 client asking for a delegation? */ if (open->op_deleg_want) nfsd4_open_deleg_none_ext(open, status); } return; out_free: nfs4_put_delegation(dp); out_no_deleg: flag = NFS4_OPEN_DELEGATE_NONE; goto out; } static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, struct nfs4_delegation *dp) { if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; } /* Otherwise the client must be confused wanting a delegation * it already has, therefore we don't return * NFS4_OPEN_DELEGATE_NONE_EXT and reason. */ } /* * called with nfs4_lock_state() held. 
*/ __be32 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) { struct nfsd4_compoundres *resp = rqstp->rq_resp; struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; struct nfs4_file *fp = NULL; struct inode *ino = current_fh->fh_dentry->d_inode; struct nfs4_ol_stateid *stp = NULL; struct nfs4_delegation *dp = NULL; __be32 status; /* * Lookup file; if found, lookup stateid and check open request, * and check for delegations in the process of being recalled. * If not found, create the nfs4_file struct */ fp = find_file(ino); if (fp) { if ((status = nfs4_check_open(fp, open, &stp))) goto out; status = nfs4_check_deleg(cl, fp, open, &dp); if (status) goto out; } else { status = nfserr_bad_stateid; if (nfsd4_is_deleg_cur(open)) goto out; status = nfserr_jukebox; fp = open->op_file; open->op_file = NULL; nfsd4_init_file(fp, ino); } /* * OPEN the file, or upgrade an existing OPEN. * If truncate fails, the OPEN fails. */ if (stp) { /* Stateid was found, this is an OPEN upgrade */ status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); if (status) goto out; } else { status = nfs4_get_vfs_file(rqstp, fp, current_fh, open); if (status) goto out; stp = open->op_stp; open->op_stp = NULL; init_open_stateid(stp, fp, open); status = nfsd4_truncate(rqstp, current_fh, open); if (status) { release_open_stateid(stp); goto out; } } update_stateid(&stp->st_stid.sc_stateid); memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); if (nfsd4_has_session(&resp->cstate)) { open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; open->op_why_no_deleg = WND4_NOT_WANTED; goto nodeleg; } } /* * Attempt to hand out a delegation. No error return, because the * OPEN succeeds even if we fail. 
*/ nfs4_open_delegation(current_fh, open, stp); nodeleg: status = nfs_ok; dprintk("%s: stateid=" STATEID_FMT "\n", __func__, STATEID_VAL(&stp->st_stid.sc_stateid)); out: /* 4.1 client trying to upgrade/downgrade delegation? */ if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && open->op_deleg_want) nfsd4_deleg_xgrade_none_ext(open, dp); if (fp) put_nfs4_file(fp); if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate)); /* * To finish the open response, we just need to set the rflags. */ open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) && !nfsd4_has_session(&resp->cstate)) open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; return status; } void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status) { if (open->op_openowner) { struct nfs4_openowner *oo = open->op_openowner; if (!list_empty(&oo->oo_owner.so_stateids)) list_del_init(&oo->oo_close_lru); if (oo->oo_flags & NFS4_OO_NEW) { if (status) { release_openowner(oo); open->op_openowner = NULL; } else oo->oo_flags &= ~NFS4_OO_NEW; } } if (open->op_file) nfsd4_free_file(open->op_file); if (open->op_stp) nfs4_free_stateid(open->op_stp); } __be32 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, clientid_t *clid) { struct nfs4_client *clp; __be32 status; nfs4_lock_state(); dprintk("process_renew(%08x/%08x): starting\n", clid->cl_boot, clid->cl_id); status = nfserr_stale_clientid; if (STALE_CLIENTID(clid)) goto out; clp = find_confirmed_client(clid); status = nfserr_expired; if (clp == NULL) { /* We assume the client took too long to RENEW. 
*/ dprintk("nfsd4_renew: clientid not found!\n"); goto out; } status = nfserr_cb_path_down; if (!list_empty(&clp->cl_delegations) && clp->cl_cb_state != NFSD4_CB_UP) goto out; status = nfs_ok; out: nfs4_unlock_state(); return status; } static struct lock_manager nfsd4_manager = { }; static void nfsd4_end_grace(void) { dprintk("NFSD: end of grace period\n"); nfsd4_record_grace_done(&init_net, boot_time); locks_end_grace(&nfsd4_manager); /* * Now that every NFSv4 client has had the chance to recover and * to see the (possibly new, possibly shorter) lease time, we * can safely set the next grace time to the current lease time: */ nfsd4_grace = nfsd4_lease; } static time_t nfs4_laundromat(void) { struct nfs4_client *clp; struct nfs4_openowner *oo; struct nfs4_delegation *dp; struct list_head *pos, *next, reaplist; time_t cutoff = get_seconds() - nfsd4_lease; time_t t, clientid_val = nfsd4_lease; time_t u, test_val = nfsd4_lease; nfs4_lock_state(); dprintk("NFSD: laundromat service - starting\n"); if (locks_in_grace()) nfsd4_end_grace(); INIT_LIST_HEAD(&reaplist); spin_lock(&client_lock); list_for_each_safe(pos, next, &client_lru) { clp = list_entry(pos, struct nfs4_client, cl_lru); if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { t = clp->cl_time - cutoff; if (clientid_val > t) clientid_val = t; break; } if (atomic_read(&clp->cl_refcount)) { dprintk("NFSD: client in use (clientid %08x)\n", clp->cl_clientid.cl_id); continue; } unhash_client_locked(clp); list_add(&clp->cl_lru, &reaplist); } spin_unlock(&client_lock); list_for_each_safe(pos, next, &reaplist) { clp = list_entry(pos, struct nfs4_client, cl_lru); dprintk("NFSD: purging unused client (clientid %08x)\n", clp->cl_clientid.cl_id); nfsd4_client_record_remove(clp); expire_client(clp); } spin_lock(&recall_lock); list_for_each_safe(pos, next, &del_recall_lru) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) { u = 
dp->dl_time - cutoff; if (test_val > u) test_val = u; break; } list_move(&dp->dl_recall_lru, &reaplist); } spin_unlock(&recall_lock); list_for_each_safe(pos, next, &reaplist) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); unhash_delegation(dp); } test_val = nfsd4_lease; list_for_each_safe(pos, next, &close_lru) { oo = container_of(pos, struct nfs4_openowner, oo_close_lru); if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) { u = oo->oo_time - cutoff; if (test_val > u) test_val = u; break; } release_openowner(oo); } if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT) clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT; nfs4_unlock_state(); return clientid_val; } static struct workqueue_struct *laundry_wq; static void laundromat_main(struct work_struct *); static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); static void laundromat_main(struct work_struct *not_used) { time_t t; t = nfs4_laundromat(); dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t); queue_delayed_work(laundry_wq, &laundromat_work, t*HZ); } static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) { if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode) return nfserr_bad_stateid; return nfs_ok; } static int STALE_STATEID(stateid_t *stateid) { if (stateid->si_opaque.so_clid.cl_boot == boot_time) return 0; dprintk("NFSD: stale stateid " STATEID_FMT "!\n", STATEID_VAL(stateid)); return 1; } static inline int access_permit_read(unsigned long access_bmap) { return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) || test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) || test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap); } static inline int access_permit_write(unsigned long access_bmap) { return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) || test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap); } static __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) { __be32 status = nfserr_openmode; /* For lock stateid's, we test the parent open, 
not the lock: */ if (stp->st_openstp) stp = stp->st_openstp; if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap))) goto out; if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap))) goto out; status = nfs_ok; out: return status; } static inline __be32 check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags) { if (ONE_STATEID(stateid) && (flags & RD_STATE)) return nfs_ok; else if (locks_in_grace()) { /* Answer in remaining cases depends on existence of * conflicting state; so we must wait out the grace period. */ return nfserr_grace; } else if (flags & WR_STATE) return nfs4_share_conflict(current_fh, NFS4_SHARE_DENY_WRITE); else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ return nfs4_share_conflict(current_fh, NFS4_SHARE_DENY_READ); } /* * Allow READ/WRITE during grace period on recovered state only for files * that are not able to provide mandatory locking. */ static inline int grace_disallows_io(struct inode *inode) { return locks_in_grace() && mandatory_lock(inode); } /* Returns true iff a is later than b: */ static bool stateid_generation_after(stateid_t *a, stateid_t *b) { return (s32)a->si_generation - (s32)b->si_generation > 0; } static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) { /* * When sessions are used the stateid generation number is ignored * when it is zero. */ if (has_session && in->si_generation == 0) return nfs_ok; if (in->si_generation == ref->si_generation) return nfs_ok; /* If the client sends us a stateid from the future, it's buggy: */ if (stateid_generation_after(in, ref)) return nfserr_bad_stateid; /* * However, we could see a stateid from the past, even from a * non-buggy client. For example, if the client sends a lock * while some IO is outstanding, the lock may bump si_generation * while the IO is still in flight. 
The client could avoid that * situation by waiting for responses on all the IO requests, * but better performance may result in retrying IO that * receives an old_stateid error if requests are rarely * reordered in flight: */ return nfserr_old_stateid; } __be32 nfs4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) { struct nfs4_stid *s; struct nfs4_ol_stateid *ols; __be32 status; if (STALE_STATEID(stateid)) return nfserr_stale_stateid; s = find_stateid(cl, stateid); if (!s) return nfserr_stale_stateid; status = check_stateid_generation(stateid, &s->sc_stateid, 1); if (status) return status; if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID))) return nfs_ok; ols = openlockstateid(s); if (ols->st_stateowner->so_is_open_owner && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) return nfserr_bad_stateid; return nfs_ok; } static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s) { struct nfs4_client *cl; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) return nfserr_bad_stateid; if (STALE_STATEID(stateid)) return nfserr_stale_stateid; cl = find_confirmed_client(&stateid->si_opaque.so_clid); if (!cl) return nfserr_expired; *s = find_stateid_by_type(cl, stateid, typemask); if (!*s) return nfserr_bad_stateid; return nfs_ok; } /* * Checks for stateid operations */ __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, stateid_t *stateid, int flags, struct file **filpp) { struct nfs4_stid *s; struct nfs4_ol_stateid *stp = NULL; struct nfs4_delegation *dp = NULL; struct svc_fh *current_fh = &cstate->current_fh; struct inode *ino = current_fh->fh_dentry->d_inode; __be32 status; if (filpp) *filpp = NULL; if (grace_disallows_io(ino)) return nfserr_grace; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) return check_special_stateids(current_fh, stateid, flags); status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s); if (status) return status; status = 
check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate)); if (status) goto out; switch (s->sc_type) { case NFS4_DELEG_STID: dp = delegstateid(s); status = nfs4_check_delegmode(dp, flags); if (status) goto out; if (filpp) { *filpp = dp->dl_file->fi_deleg_file; BUG_ON(!*filpp); } break; case NFS4_OPEN_STID: case NFS4_LOCK_STID: stp = openlockstateid(s); status = nfs4_check_fh(current_fh, stp); if (status) goto out; if (stp->st_stateowner->so_is_open_owner && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) goto out; status = nfs4_check_openmode(stp, flags); if (status) goto out; if (filpp) { if (flags & RD_STATE) *filpp = find_readable_file(stp->st_file); else *filpp = find_writeable_file(stp->st_file); } break; default: return nfserr_bad_stateid; } status = nfs_ok; out: return status; } static __be32 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp) { if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner))) return nfserr_locks_held; release_lock_stateid(stp); return nfs_ok; } /* * Test if the stateid is valid */ __be32 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_test_stateid *test_stateid) { struct nfsd4_test_stateid_id *stateid; struct nfs4_client *cl = cstate->session->se_client; nfs4_lock_state(); list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) stateid->ts_id_status = nfs4_validate_stateid(cl, &stateid->ts_id_stateid); nfs4_unlock_state(); return nfs_ok; } __be32 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *free_stateid) { stateid_t *stateid = &free_stateid->fr_stateid; struct nfs4_stid *s; struct nfs4_client *cl = cstate->session->se_client; __be32 ret = nfserr_bad_stateid; nfs4_lock_state(); s = find_stateid(cl, stateid); if (!s) goto out; switch (s->sc_type) { case NFS4_DELEG_STID: ret = nfserr_locks_held; goto out; case NFS4_OPEN_STID: case NFS4_LOCK_STID: ret = 
check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			goto out;
		/* open stateids can never be freed this way */
		if (s->sc_type == NFS4_LOCK_STID)
			ret = nfsd4_free_lock_stateid(openlockstateid(s));
		else
			ret = nfserr_locks_held;
		break;
	default:
		ret = nfserr_bad_stateid;
	}
out:
	nfs4_unlock_state();
	return ret;
}

/* Map a LOCK request type onto the open access mode needed to grant it. */
static inline int
setlkflg (int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}

/*
 * Common validation for seqid-mutating operations on @stp: verify the
 * owner's seqid, reject closed stateids, check the stateid generation,
 * and confirm the stateid belongs to the current filehandle.
 * Returns nfs_ok, nfserr_replay_me, or an nfserr_* rejection.
 */
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	if (stp->st_stid.sc_type == NFS4_CLOSED_STID)
		/*
		 * "Closed" stateid's exist *only* to return
		 * nfserr_replay_me from the previous step.
		 */
		return nfserr_bad_stateid;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid,
					  nfsd4_has_session(cstate));
	if (status)
		return status;
	return nfs4_check_fh(current_fh, stp);
}

/*
 * Checks for sequence id mutating operations.
*/ static __be32 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, char typemask, struct nfs4_ol_stateid **stpp) { __be32 status; struct nfs4_stid *s; dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, seqid, STATEID_VAL(stateid)); *stpp = NULL; status = nfsd4_lookup_stateid(stateid, typemask, &s); if (status) return status; *stpp = openlockstateid(s); cstate->replay_owner = (*stpp)->st_stateowner; return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp); } static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp) { __be32 status; struct nfs4_openowner *oo; status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, NFS4_OPEN_STID, stpp); if (status) return status; oo = openowner((*stpp)->st_stateowner); if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) return nfserr_bad_stateid; return nfs_ok; } __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open_confirm *oc) { __be32 status; struct nfs4_openowner *oo; struct nfs4_ol_stateid *stp; dprintk("NFSD: nfsd4_open_confirm on file %.*s\n", (int)cstate->current_fh.fh_dentry->d_name.len, cstate->current_fh.fh_dentry->d_name.name); status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); if (status) return status; nfs4_lock_state(); status = nfs4_preprocess_seqid_op(cstate, oc->oc_seqid, &oc->oc_req_stateid, NFS4_OPEN_STID, &stp); if (status) goto out; oo = openowner(stp->st_stateowner); status = nfserr_bad_stateid; if (oo->oo_flags & NFS4_OO_CONFIRMED) goto out; oo->oo_flags |= NFS4_OO_CONFIRMED; update_stateid(&stp->st_stid.sc_stateid); memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); nfsd4_client_record_create(oo->oo_owner.so_client); status = nfs_ok; out: if 
(!cstate->replay_owner) nfs4_unlock_state(); return status; } static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) { if (!test_bit(access, &stp->st_access_bmap)) return; nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access)); __clear_bit(access, &stp->st_access_bmap); } static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) { switch (to_access) { case NFS4_SHARE_ACCESS_READ: nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); break; case NFS4_SHARE_ACCESS_WRITE: nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); break; case NFS4_SHARE_ACCESS_BOTH: break; default: BUG(); } } static void reset_union_bmap_deny(unsigned long deny, unsigned long *bmap) { int i; for (i = 0; i < 4; i++) { if ((i & deny) != i) __clear_bit(i, bmap); } } __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *od) { __be32 status; struct nfs4_ol_stateid *stp; dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", (int)cstate->current_fh.fh_dentry->d_name.len, cstate->current_fh.fh_dentry->d_name.name); /* We don't yet support WANT bits: */ if (od->od_deleg_want) dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, od->od_deleg_want); nfs4_lock_state(); status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, &od->od_stateid, &stp); if (status) goto out; status = nfserr_inval; if (!test_bit(od->od_share_access, &stp->st_access_bmap)) { dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n", stp->st_access_bmap, od->od_share_access); goto out; } if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) { dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n", stp->st_deny_bmap, od->od_share_deny); goto out; } nfs4_stateid_downgrade(stp, od->od_share_access); 
reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap); update_stateid(&stp->st_stid.sc_stateid); memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); status = nfs_ok; out: if (!cstate->replay_owner) nfs4_unlock_state(); return status; } void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so) { struct nfs4_openowner *oo; struct nfs4_ol_stateid *s; if (!so->so_is_open_owner) return; oo = openowner(so); s = oo->oo_last_closed_stid; if (!s) return; if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) { /* Release the last_closed_stid on the next seqid bump: */ oo->oo_flags |= NFS4_OO_PURGE_CLOSE; return; } oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE; release_last_closed_stateid(oo); } static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) { unhash_open_stateid(s); s->st_stid.sc_type = NFS4_CLOSED_STID; } /* * nfs4_unlock_state() called after encode */ __be32 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_close *close) { __be32 status; struct nfs4_openowner *oo; struct nfs4_ol_stateid *stp; dprintk("NFSD: nfsd4_close on file %.*s\n", (int)cstate->current_fh.fh_dentry->d_name.len, cstate->current_fh.fh_dentry->d_name.name); nfs4_lock_state(); status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, &close->cl_stateid, NFS4_OPEN_STID|NFS4_CLOSED_STID, &stp); if (status) goto out; oo = openowner(stp->st_stateowner); status = nfs_ok; update_stateid(&stp->st_stid.sc_stateid); memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); nfsd4_close_open_stateid(stp); oo->oo_last_closed_stid = stp; /* place unused nfs4_stateowners on so_close_lru list to be * released by the laundromat service after the lease period * to enable us to handle CLOSE replay */ if (list_empty(&oo->oo_owner.so_stateids)) move_to_close_lru(oo); out: if (!cstate->replay_owner) nfs4_unlock_state(); return status; } __be32 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *dr) { 
/*
 * Body of nfsd4_delegreturn() (the function header is at the end of the
 * previous line): the client is voluntarily returning a delegation.
 */
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	struct inode *inode;
	__be32 status;

	/* Delegations are only ever granted on regular files. */
	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;
	inode = cstate->current_fh.fh_dentry->d_inode;

	nfs4_lock_state();
	/* Find the delegation stateid named by the client, if any. */
	status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s);
	if (status)
		goto out;
	dp = delegstateid(s);
	/* Reject stale/future generations (checking is lax under sessions). */
	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto out;

	/* Drop the delegation; the lease and stateid are torn down here. */
	unhash_delegation(dp);
out:
	nfs4_unlock_state();
	return status;
}

/* True iff start + len would wrap past the end of the u64 offset space. */
#define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))

#define LOCKOWNER_INO_HASH_BITS 8
#define LOCKOWNER_INO_HASH_SIZE (1 << LOCKOWNER_INO_HASH_BITS)
#define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1)

/*
 * End (exclusive) of a lock range; saturates to NFS4_MAX_UINT64 on u64
 * overflow, so "lock to end of file" requests stay representable.
 */
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end: NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	/* zero-length ranges must have been rejected by check_lock_length() */
	BUG_ON(!len);
	end = start + len;
	return end > start ? end - 1: NFS4_MAX_UINT64;
}

/* Bucket index for the per-(inode, client id, owner name) lockowner table. */
static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername)
{
	return (file_hashval(inode) + cl_id
			+ opaque_hashval(ownername->data, ownername->len))
		& LOCKOWNER_INO_HASH_MASK;
}

static struct list_head lockowner_ino_hashtbl[LOCKOWNER_INO_HASH_SIZE];

/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	/* Clamp sign-overflowed (negative) offsets to the VFS maximum. */
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}

/* Hack!: For now, we're defining this just so we can use a pointer to it
 * as a unique cookie to identify our (NFSv4's) posix locks. */
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
};

/*
 * Fill a LOCK/LOCKT "denied" reply from the conflicting file_lock.  Owner
 * details are only reported when the conflicting lock is one of ours
 * (identified by the nfsd_posix_mng_ops cookie above).
 */
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
		/* non-nfsd lock (or kmemdup failure): report an anonymous owner */
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	deny->ld_length = NFS4_MAX_UINT64;
	/* fl_end == NFS4_MAX_UINT64 encodes "to end of file" */
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}

/*
 * A lockowner matches if the owner string and clientid match AND its (single)
 * stateid refers to the same inode.  NOTE(review): this assumes a lockowner
 * always has exactly one stateid on so_stateids (see the XXX in
 * lookup_or_create_lock_state).
 */
static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
{
	struct nfs4_ol_stateid *lst;

	if (!same_owner_str(&lo->lo_owner, owner, clid))
		return false;
	lst = list_first_entry(&lo->lo_owner.so_stateids,
			       struct nfs4_ol_stateid, st_perstateowner);
	return lst->st_file->fi_inode == inode;
}

/* Look up a lockowner by (inode, clientid, owner name); NULL if absent. */
static struct nfs4_lockowner *
find_lockowner_str(struct inode *inode, clientid_t *clid,
		struct xdr_netobj *owner)
{
	unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner);
	struct nfs4_lockowner *lo;

	list_for_each_entry(lo, &lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) {
		if (same_lockowner_ino(lo, inode, clid, owner))
			return lo;
	}
	return NULL;
}

static void
hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
{
	/* Link a freshly allocated lockowner into all three lookup lists. */
	struct inode *inode = open_stp->st_file->fi_inode;
	unsigned int inohash = lockowner_ino_hashval(inode,
			clp->cl_clientid.cl_id, &lo->lo_owner.so_owner);

	list_add(&lo->lo_owner.so_strhash, &ownerstr_hashtbl[strhashval]);
	list_add(&lo->lo_owner_ino_hash, &lockowner_ino_hashtbl[inohash]);
	list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
}

/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
 * occurred.
 *
 * strhashval = ownerstr_hashval
 */

static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock)
{
	struct nfs4_lockowner *lo;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	/* It is the openowner seqid that will be incremented in encode in the
	 * case of new lockowners; so increment the lock seqid manually: */
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
	hash_lockowner(lo, strhashval, clp, open_stp);
	return lo;
}

/*
 * Allocate a lock stateid derived from an open stateid and hash it onto the
 * file and the lockowner.  The deny bitmap is inherited from the open; the
 * access bitmap starts empty and is filled in by get_lock_access().
 */
static struct nfs4_ol_stateid *
alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = lo->lo_owner.so_client;

	stp = nfs4_alloc_stateid(clp);
	if (stp == NULL)
		return NULL;
	init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	stp->st_stateowner = &lo->lo_owner;
	get_nfs4_file(fp);	/* reference held by stp->st_file */
	stp->st_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	return stp;
}

/*
 * Reject zero-length ranges, and non-"to EOF" ranges whose start + length
 * would wrap past the u64 offset space (length == NFS4_MAX_UINT64 means
 * "lock to end of file" and is always allowed).
 */
static int
check_lock_length(u64 offset, u64 length)
{
	return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
	     LOFF_OVERFLOW(offset,
length))); } static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) { struct nfs4_file *fp = lock_stp->st_file; int oflag = nfs4_access_to_omode(access); if (test_bit(access, &lock_stp->st_access_bmap)) return; nfs4_file_get_access(fp, oflag); __set_bit(access, &lock_stp->st_access_bmap); } __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new) { struct nfs4_file *fi = ost->st_file; struct nfs4_openowner *oo = openowner(ost->st_stateowner); struct nfs4_client *cl = oo->oo_owner.so_client; struct nfs4_lockowner *lo; unsigned int strhashval; lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid, &lock->v.new.owner); if (lo) { if (!cstate->minorversion) return nfserr_bad_seqid; /* XXX: a lockowner always has exactly one stateid: */ *lst = list_first_entry(&lo->lo_owner.so_stateids, struct nfs4_ol_stateid, st_perstateowner); return nfs_ok; } strhashval = ownerstr_hashval(cl->cl_clientid.cl_id, &lock->v.new.owner); lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); if (lo == NULL) return nfserr_jukebox; *lst = alloc_init_lock_stateid(lo, fi, ost); if (*lst == NULL) { release_lockowner(lo); return nfserr_jukebox; } *new = true; return nfs_ok; } /* * LOCK operation */ __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock) { struct nfs4_openowner *open_sop = NULL; struct nfs4_lockowner *lock_sop = NULL; struct nfs4_ol_stateid *lock_stp; struct nfs4_file *fp; struct file *filp = NULL; struct file_lock file_lock; struct file_lock conflock; __be32 status = 0; bool new_state = false; int lkflg; int err; dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", (long long) lock->lk_offset, (long long) lock->lk_length); if (check_lock_length(lock->lk_offset, lock->lk_length)) return nfserr_inval; if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, NFSD_MAY_LOCK))) { dprintk("NFSD: 
nfsd4_lock: permission denied!\n"); return status; } nfs4_lock_state(); if (lock->lk_is_new) { /* * Client indicates that this is a new lockowner. * Use open owner and open stateid to create lock owner and * lock stateid. */ struct nfs4_ol_stateid *open_stp = NULL; if (nfsd4_has_session(cstate)) /* See rfc 5661 18.10.3: given clientid is ignored: */ memcpy(&lock->v.new.clientid, &cstate->session->se_client->cl_clientid, sizeof(clientid_t)); status = nfserr_stale_clientid; if (STALE_CLIENTID(&lock->lk_new_clientid)) goto out; /* validate and update open stateid and open seqid */ status = nfs4_preprocess_confirmed_seqid_op(cstate, lock->lk_new_open_seqid, &lock->lk_new_open_stateid, &open_stp); if (status) goto out; open_sop = openowner(open_stp->st_stateowner); status = nfserr_bad_stateid; if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, &lock->v.new.clientid)) goto out; status = lookup_or_create_lock_state(cstate, open_stp, lock, &lock_stp, &new_state); if (status) goto out; } else { /* lock (lock owner + lock stateid) already exists */ status = nfs4_preprocess_seqid_op(cstate, lock->lk_old_lock_seqid, &lock->lk_old_lock_stateid, NFS4_LOCK_STID, &lock_stp); if (status) goto out; } lock_sop = lockowner(lock_stp->st_stateowner); fp = lock_stp->st_file; lkflg = setlkflg(lock->lk_type); status = nfs4_check_openmode(lock_stp, lkflg); if (status) goto out; status = nfserr_grace; if (locks_in_grace() && !lock->lk_reclaim) goto out; status = nfserr_no_grace; if (!locks_in_grace() && lock->lk_reclaim) goto out; locks_init_lock(&file_lock); switch (lock->lk_type) { case NFS4_READ_LT: case NFS4_READW_LT: filp = find_readable_file(lock_stp->st_file); if (filp) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); file_lock.fl_type = F_RDLCK; break; case NFS4_WRITE_LT: case NFS4_WRITEW_LT: filp = find_writeable_file(lock_stp->st_file); if (filp) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); file_lock.fl_type = F_WRLCK; break; default: status = nfserr_inval; goto 
out; } if (!filp) { status = nfserr_openmode; goto out; } file_lock.fl_owner = (fl_owner_t)lock_sop; file_lock.fl_pid = current->tgid; file_lock.fl_file = filp; file_lock.fl_flags = FL_POSIX; file_lock.fl_lmops = &nfsd_posix_mng_ops; file_lock.fl_start = lock->lk_offset; file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); nfs4_transform_lock_offset(&file_lock); /* * Try to lock the file in the VFS. * Note: locks.c uses the BKL to protect the inode's lock list. */ err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock); switch (-err) { case 0: /* success! */ update_stateid(&lock_stp->st_stid.sc_stateid); memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid, sizeof(stateid_t)); status = 0; break; case (EAGAIN): /* conflock holds conflicting lock */ status = nfserr_denied; dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); nfs4_set_lock_denied(&conflock, &lock->lk_denied); break; case (EDEADLK): status = nfserr_deadlock; break; default: dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); status = nfserrno(err); break; } out: if (status && new_state) release_lockowner(lock_sop); if (!cstate->replay_owner) nfs4_unlock_state(); return status; } /* * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, * so we do a temporary open here just to get an open file to pass to * vfs_test_lock. (Arguably perhaps test_lock should be done with an * inode operation.) 
 */
/*
 * Open the file read-only just long enough to run vfs_test_lock() on it;
 * the result (F_UNLCK or a conflicting lock description) is left in *lock.
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct file *file;
	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (!err) {
		err = nfserrno(vfs_test_lock(file, lock));
		nfsd_close(file);
	}
	return err;
}

/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_lockt *lockt)
{
	struct inode *inode;
	struct file_lock file_lock;	/* query template; no lock is taken */
	struct nfs4_lockowner *lo;
	__be32 status;

	/* No lock testing while servicing reclaims after a reboot. */
	if (locks_in_grace())
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		 return nfserr_inval;

	nfs4_lock_state();

	status = nfserr_stale_clientid;
	/* Under sessions (4.1+) the clientid in the args is ignored. */
	if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
		goto out;

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	inode = cstate->current_fh.fh_dentry->d_inode;
	locks_init_lock(&file_lock);
	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			file_lock.fl_type = F_RDLCK;
		break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			file_lock.fl_type = F_WRLCK;
		break;
		default:
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
			status = nfserr_inval;
		goto out;
	}

	/* If this owner already exists, test as that owner so its own locks
	 * do not conflict; otherwise fl_owner stays NULL from init above. */
	lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
	if (lo)
		file_lock.fl_owner = (fl_owner_t)lo;
	file_lock.fl_pid = current->tgid;
	file_lock.fl_flags = FL_POSIX;

	file_lock.fl_start = lockt->lt_offset;
	file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(&file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
	if (status)
		goto out;

	/* A conflicting lock was found: report it in the denied reply. */
	if (file_lock.fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
	}
out:
	nfs4_unlock_state();
	return status;
}

__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	struct nfs4_ol_stateid *stp;
	struct file *filp = NULL;
	struct file_lock file_lock;
	__be32 status;
	int err;

	dprintk("NFSD: nfsd4_locku: 
start=%Ld length=%Ld\n", (long long) locku->lu_offset, (long long) locku->lu_length); if (check_lock_length(locku->lu_offset, locku->lu_length)) return nfserr_inval; nfs4_lock_state(); status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, &locku->lu_stateid, NFS4_LOCK_STID, &stp); if (status) goto out; filp = find_any_file(stp->st_file); if (!filp) { status = nfserr_lock_range; goto out; } BUG_ON(!filp); locks_init_lock(&file_lock); file_lock.fl_type = F_UNLCK; file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner); file_lock.fl_pid = current->tgid; file_lock.fl_file = filp; file_lock.fl_flags = FL_POSIX; file_lock.fl_lmops = &nfsd_posix_mng_ops; file_lock.fl_start = locku->lu_offset; file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length); nfs4_transform_lock_offset(&file_lock); /* * Try to unlock the file in the VFS. */ err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL); if (err) { dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); goto out_nfserr; } /* * OK, unlock succeeded; the only thing left to do is update the stateid. 
*/ update_stateid(&stp->st_stid.sc_stateid); memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); out: if (!cstate->replay_owner) nfs4_unlock_state(); return status; out_nfserr: status = nfserrno(err); goto out; } /* * returns * 1: locks held by lockowner * 0: no locks held by lockowner */ static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner) { struct file_lock **flpp; struct inode *inode = filp->fi_inode; int status = 0; lock_flocks(); for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { if ((*flpp)->fl_owner == (fl_owner_t)lowner) { status = 1; goto out; } } out: unlock_flocks(); return status; } __be32 nfsd4_release_lockowner(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_release_lockowner *rlockowner) { clientid_t *clid = &rlockowner->rl_clientid; struct nfs4_stateowner *sop; struct nfs4_lockowner *lo; struct nfs4_ol_stateid *stp; struct xdr_netobj *owner = &rlockowner->rl_owner; struct list_head matches; unsigned int hashval = ownerstr_hashval(clid->cl_id, owner); __be32 status; dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", clid->cl_boot, clid->cl_id); /* XXX check for lease expiration */ status = nfserr_stale_clientid; if (STALE_CLIENTID(clid)) return status; nfs4_lock_state(); status = nfserr_locks_held; INIT_LIST_HEAD(&matches); list_for_each_entry(sop, &ownerstr_hashtbl[hashval], so_strhash) { if (sop->so_is_open_owner) continue; if (!same_owner_str(sop, owner, clid)) continue; list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) { lo = lockowner(sop); if (check_for_locks(stp->st_file, lo)) goto out; list_add(&lo->lo_list, &matches); } } /* Clients probably won't expect us to return with some (but not all) * of the lockowner state released; so don't release any until all * have been checked. 
*/ status = nfs_ok; while (!list_empty(&matches)) { lo = list_entry(matches.next, struct nfs4_lockowner, lo_list); /* unhash_stateowner deletes so_perclient only * for openowners. */ list_del(&lo->lo_list); release_lockowner(lo); } out: nfs4_unlock_state(); return status; } static inline struct nfs4_client_reclaim * alloc_reclaim(void) { return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); } int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id) { unsigned int strhashval = clientstr_hashval(name); struct nfs4_client *clp; clp = find_confirmed_client_by_str(name, strhashval); if (!clp) return 0; return test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags); } /* * failure => all reset bets are off, nfserr_no_grace... */ int nfs4_client_to_reclaim(const char *name) { unsigned int strhashval; struct nfs4_client_reclaim *crp = NULL; dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name); crp = alloc_reclaim(); if (!crp) return 0; strhashval = clientstr_hashval(name); INIT_LIST_HEAD(&crp->cr_strhash); list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]); memcpy(crp->cr_recdir, name, HEXDIR_LEN); reclaim_str_hashtbl_size++; return 1; } void nfs4_release_reclaim(void) { struct nfs4_client_reclaim *crp = NULL; int i; for (i = 0; i < CLIENT_HASH_SIZE; i++) { while (!list_empty(&reclaim_str_hashtbl[i])) { crp = list_entry(reclaim_str_hashtbl[i].next, struct nfs4_client_reclaim, cr_strhash); list_del(&crp->cr_strhash); kfree(crp); reclaim_str_hashtbl_size--; } } BUG_ON(reclaim_str_hashtbl_size); } /* * called from OPEN, CLAIM_PREVIOUS with a new clientid. 
*/ struct nfs4_client_reclaim * nfsd4_find_reclaim_client(struct nfs4_client *clp) { unsigned int strhashval; struct nfs4_client_reclaim *crp = NULL; dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n", clp->cl_name.len, clp->cl_name.data, clp->cl_recdir); /* find clp->cl_name in reclaim_str_hashtbl */ strhashval = clientstr_hashval(clp->cl_recdir); list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) { if (same_name(crp->cr_recdir, clp->cl_recdir)) { return crp; } } return NULL; } /* * Called from OPEN. Look for clientid in reclaim list. */ __be32 nfs4_check_open_reclaim(clientid_t *clid) { struct nfs4_client *clp; /* find clientid in conf_id_hashtbl */ clp = find_confirmed_client(clid); if (clp == NULL) return nfserr_reclaim_bad; return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok; } #ifdef CONFIG_NFSD_FAULT_INJECTION void nfsd_forget_clients(u64 num) { struct nfs4_client *clp, *next; int count = 0; nfs4_lock_state(); list_for_each_entry_safe(clp, next, &client_lru, cl_lru) { nfsd4_client_record_remove(clp); expire_client(clp); if (++count == num) break; } nfs4_unlock_state(); printk(KERN_INFO "NFSD: Forgot %d clients", count); } static void release_lockowner_sop(struct nfs4_stateowner *sop) { release_lockowner(lockowner(sop)); } static void release_openowner_sop(struct nfs4_stateowner *sop) { release_openowner(openowner(sop)); } static int nfsd_release_n_owners(u64 num, bool is_open_owner, void (*release_sop)(struct nfs4_stateowner *)) { int i, count = 0; struct nfs4_stateowner *sop, *next; for (i = 0; i < OWNER_HASH_SIZE; i++) { list_for_each_entry_safe(sop, next, &ownerstr_hashtbl[i], so_strhash) { if (sop->so_is_open_owner != is_open_owner) continue; release_sop(sop); if (++count == num) return count; } } return count; } void nfsd_forget_locks(u64 num) { int count; nfs4_lock_state(); count = nfsd_release_n_owners(num, false, release_lockowner_sop); nfs4_unlock_state(); printk(KERN_INFO "NFSD: Forgot %d locks", 
count); } void nfsd_forget_openowners(u64 num) { int count; nfs4_lock_state(); count = nfsd_release_n_owners(num, true, release_openowner_sop); nfs4_unlock_state(); printk(KERN_INFO "NFSD: Forgot %d open owners", count); } int nfsd_process_n_delegations(u64 num, void (*deleg_func)(struct nfs4_delegation *)) { int i, count = 0; struct nfs4_file *fp, *fnext; struct nfs4_delegation *dp, *dnext; for (i = 0; i < FILE_HASH_SIZE; i++) { list_for_each_entry_safe(fp, fnext, &file_hashtbl[i], fi_hash) { list_for_each_entry_safe(dp, dnext, &fp->fi_delegations, dl_perfile) { deleg_func(dp); if (++count == num) return count; } } } return count; } void nfsd_forget_delegations(u64 num) { unsigned int count; nfs4_lock_state(); count = nfsd_process_n_delegations(num, unhash_delegation); nfs4_unlock_state(); printk(KERN_INFO "NFSD: Forgot %d delegations", count); } void nfsd_recall_delegations(u64 num) { unsigned int count; nfs4_lock_state(); spin_lock(&recall_lock); count = nfsd_process_n_delegations(num, nfsd_break_one_deleg); spin_unlock(&recall_lock); nfs4_unlock_state(); printk(KERN_INFO "NFSD: Recalled %d delegations", count); } #endif /* CONFIG_NFSD_FAULT_INJECTION */ /* initialization to perform at module load time: */ void nfs4_state_init(void) { int i; for (i = 0; i < CLIENT_HASH_SIZE; i++) { INIT_LIST_HEAD(&conf_id_hashtbl[i]); INIT_LIST_HEAD(&conf_str_hashtbl[i]); INIT_LIST_HEAD(&unconf_str_hashtbl[i]); INIT_LIST_HEAD(&unconf_id_hashtbl[i]); INIT_LIST_HEAD(&reclaim_str_hashtbl[i]); } for (i = 0; i < SESSION_HASH_SIZE; i++) INIT_LIST_HEAD(&sessionid_hashtbl[i]); for (i = 0; i < FILE_HASH_SIZE; i++) { INIT_LIST_HEAD(&file_hashtbl[i]); } for (i = 0; i < OWNER_HASH_SIZE; i++) { INIT_LIST_HEAD(&ownerstr_hashtbl[i]); } for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++) INIT_LIST_HEAD(&lockowner_ino_hashtbl[i]); INIT_LIST_HEAD(&close_lru); INIT_LIST_HEAD(&client_lru); INIT_LIST_HEAD(&del_recall_lru); reclaim_str_hashtbl_size = 0; } /* * Since the lifetime of a delegation isn't 
limited to that of an open, a * client may quite reasonably hang on to a delegation as long as it has * the inode cached. This becomes an obvious problem the first time a * client's inode cache approaches the size of the server's total memory. * * For now we avoid this problem by imposing a hard limit on the number * of delegations, which varies according to the server's memory size. */ static void set_max_delegations(void) { /* * Allow at most 4 delegations per megabyte of RAM. Quick * estimates suggest that in the worst case (where every delegation * is for a different inode), a delegation could take about 1.5K, * giving a worst case usage of about 6% of memory. */ max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); } /* initialization to perform when the nfsd service is started: */ int nfs4_state_start(void) { int ret; /* * FIXME: For now, we hang most of the pernet global stuff off of * init_net until nfsd is fully containerized. Eventually, we'll * need to pass a net pointer into this function, take a reference * to that instead and then do most of the rest of this on a per-net * basis. 
*/ get_net(&init_net); nfsd4_client_tracking_init(&init_net); boot_time = get_seconds(); locks_start_grace(&nfsd4_manager); printk(KERN_INFO "NFSD: starting %ld-second grace period\n", nfsd4_grace); ret = set_callback_cred(); if (ret) { ret = -ENOMEM; goto out_recovery; } laundry_wq = create_singlethread_workqueue("nfsd4"); if (laundry_wq == NULL) { ret = -ENOMEM; goto out_recovery; } ret = nfsd4_create_callback_queue(); if (ret) goto out_free_laundry; queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ); set_max_delegations(); return 0; out_free_laundry: destroy_workqueue(laundry_wq); out_recovery: nfsd4_client_tracking_exit(&init_net); put_net(&init_net); return ret; } static void __nfs4_state_shutdown(void) { int i; struct nfs4_client *clp = NULL; struct nfs4_delegation *dp = NULL; struct list_head *pos, *next, reaplist; for (i = 0; i < CLIENT_HASH_SIZE; i++) { while (!list_empty(&conf_id_hashtbl[i])) { clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); expire_client(clp); } while (!list_empty(&unconf_str_hashtbl[i])) { clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash); expire_client(clp); } } INIT_LIST_HEAD(&reaplist); spin_lock(&recall_lock); list_for_each_safe(pos, next, &del_recall_lru) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); list_move(&dp->dl_recall_lru, &reaplist); } spin_unlock(&recall_lock); list_for_each_safe(pos, next, &reaplist) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); unhash_delegation(dp); } nfsd4_client_tracking_exit(&init_net); put_net(&init_net); } void nfs4_state_shutdown(void) { cancel_delayed_work_sync(&laundromat_work); destroy_workqueue(laundry_wq); locks_end_grace(&nfsd4_manager); nfs4_lock_state(); __nfs4_state_shutdown(); nfs4_unlock_state(); nfsd4_destroy_callback_queue(); } static void get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) { if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && 
CURRENT_STATEID(stateid))
		/* Substitute the saved current stateid for the special one. */
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

/* Record a reply stateid as the compound's current stateid (4.1 only). */
static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	put_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	put_stateid(cstate, &open->op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	put_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	put_stateid(cstate, &lock->lk_resp_stateid);
}

/*
 * functions to consume current state id
 */
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	get_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	get_stateid(cstate, &drp->dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	get_stateid(cstate, &fsp->fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	get_stateid(cstate, &setattr->sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	get_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	get_stateid(cstate, &locku->lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct
nfsd4_read *read) { get_stateid(cstate, &read->rd_stateid); } void nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write) { get_stateid(cstate, &write->wr_stateid); }
gpl-2.0
SlimRoms/kernel_samsung_crespo
drivers/mmc/host/at91_mci.c
2704
31224
/* * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver * * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved * * Copyright (C) 2006 Malcolm Noyes * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* This is the AT91 MCI driver that has been tested with both MMC cards and SD-cards. Boards that support write protect are now supported. The CCAT91SBC001 board does not support SD cards. The three entry points are at91_mci_request, at91_mci_set_ios and at91_mci_get_ro. SET IOS This configures the device to put it into the correct mode and clock speed required. MCI REQUEST MCI request processes the commands sent in the mmc_request structure. This can consist of a processing command and a stop command in the case of multiple block transfers. There are three main types of request, commands, reads and writes. Commands are straight forward. The command is submitted to the controller and the request function returns. When the controller generates an interrupt to indicate the command is finished, the response to the command are read and the mmc_request_done function called to end the request. Reads and writes work in a similar manner to normal commands but involve the PDC (DMA) controller to manage the transfers. A read is done from the controller directly to the scatterlist passed in from the request. Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug. The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY A write is slightly different in that the bytes to write are read from the scatterlist into a dma memory buffer (this is in case the source buffer should be read only). The entire write buffer is then done from this single dma memory buffer. 
The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY GET RO Gets the status of the write protect pin, if available. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/atmel_pdc.h> #include <linux/gfp.h> #include <linux/highmem.h> #include <linux/mmc/host.h> #include <linux/mmc/sdio.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/gpio.h> #include <mach/board.h> #include <mach/cpu.h> #include <mach/at91_mci.h> #define DRIVER_NAME "at91_mci" static inline int at91mci_is_mci1rev2xx(void) { return ( cpu_is_at91sam9260() || cpu_is_at91sam9263() || cpu_is_at91cap9() || cpu_is_at91sam9rl() || cpu_is_at91sam9g10() || cpu_is_at91sam9g20() ); } #define FL_SENT_COMMAND (1 << 0) #define FL_SENT_STOP (1 << 1) #define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \ | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \ | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE) #define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg)) #define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg)) #define MCI_BLKSIZE 512 #define MCI_MAXBLKSIZE 4095 #define MCI_BLKATONCE 256 #define MCI_BUFSIZE (MCI_BLKSIZE * MCI_BLKATONCE) /* * Low level type for this driver */ struct at91mci_host { struct mmc_host *mmc; struct mmc_command *cmd; struct mmc_request *request; void __iomem *baseaddr; int irq; struct at91_mmc_data *board; int present; struct clk *mci_clk; /* * Flag indicating when the command has been sent. 
This is used to * work out whether or not to send the stop */ unsigned int flags; /* flag for current bus settings */ u32 bus_mode; /* DMA buffer used for transmitting */ unsigned int* buffer; dma_addr_t physical_address; unsigned int total_length; /* Latest in the scatterlist that has been enabled for transfer, but not freed */ int in_use_index; /* Latest in the scatterlist that has been enabled for transfer */ int transfer_index; /* Timer for timeouts */ struct timer_list timer; }; /* * Reset the controller and restore most of the state */ static void at91_reset_host(struct at91mci_host *host) { unsigned long flags; u32 mr; u32 sdcr; u32 dtor; u32 imr; local_irq_save(flags); imr = at91_mci_read(host, AT91_MCI_IMR); at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); /* save current state */ mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; sdcr = at91_mci_read(host, AT91_MCI_SDCR); dtor = at91_mci_read(host, AT91_MCI_DTOR); /* reset the controller */ at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); /* restore state */ at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); at91_mci_write(host, AT91_MCI_MR, mr); at91_mci_write(host, AT91_MCI_SDCR, sdcr); at91_mci_write(host, AT91_MCI_DTOR, dtor); at91_mci_write(host, AT91_MCI_IER, imr); /* make sure sdio interrupts will fire */ at91_mci_read(host, AT91_MCI_SR); local_irq_restore(flags); } static void at91_timeout_timer(unsigned long data) { struct at91mci_host *host; host = (struct at91mci_host *)data; if (host->request) { dev_err(host->mmc->parent, "Timeout waiting end of packet\n"); if (host->cmd && host->cmd->data) { host->cmd->data->error = -ETIMEDOUT; } else { if (host->cmd) host->cmd->error = -ETIMEDOUT; else host->request->cmd->error = -ETIMEDOUT; } at91_reset_host(host); mmc_request_done(host->mmc, host->request); } } /* * Copy from sg to a dma block - used for transfers */ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data) { unsigned int len, i, size; unsigned 
*dmabuf = host->buffer; size = data->blksz * data->blocks; len = data->sg_len; /* MCI1 rev2xx Data Write Operation and number of bytes erratum */ if (at91mci_is_mci1rev2xx()) if (host->total_length == 12) memset(dmabuf, 0, 12); /* * Just loop through all entries. Size might not * be the entire list though so make sure that * we do not transfer too much. */ for (i = 0; i < len; i++) { struct scatterlist *sg; int amount; unsigned int *sgbuffer; sg = &data->sg[i]; sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; amount = min(size, sg->length); size -= amount; if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ int index; for (index = 0; index < (amount / 4); index++) *dmabuf++ = swab32(sgbuffer[index]); } else { char *tmpv = (char *)dmabuf; memcpy(tmpv, sgbuffer, amount); tmpv += amount; dmabuf = (unsigned *)tmpv; } kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); if (size == 0) break; } /* * Check that we didn't get a request to transfer * more data than can fit into the SG list. */ BUG_ON(size != 0); } /* * Handle after a dma read */ static void at91_mci_post_dma_read(struct at91mci_host *host) { struct mmc_command *cmd; struct mmc_data *data; unsigned int len, i, size; unsigned *dmabuf = host->buffer; pr_debug("post dma read\n"); cmd = host->cmd; if (!cmd) { pr_debug("no command\n"); return; } data = cmd->data; if (!data) { pr_debug("no data\n"); return; } size = data->blksz * data->blocks; len = data->sg_len; at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX); at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF); for (i = 0; i < len; i++) { struct scatterlist *sg; int amount; unsigned int *sgbuffer; sg = &data->sg[i]; sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; amount = min(size, sg->length); size -= amount; if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ int index; for (index = 0; index < (amount / 4); index++) sgbuffer[index] = swab32(*dmabuf++); } else { char *tmpv = (char *)dmabuf; memcpy(sgbuffer, tmpv, amount); tmpv += amount; 
dmabuf = (unsigned *)tmpv; } flush_kernel_dcache_page(sg_page(sg)); kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); data->bytes_xfered += amount; if (size == 0) break; } pr_debug("post dma read done\n"); } /* * Handle transmitted data */ static void at91_mci_handle_transmitted(struct at91mci_host *host) { struct mmc_command *cmd; struct mmc_data *data; pr_debug("Handling the transmit\n"); /* Disable the transfer */ at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); /* Now wait for cmd ready */ at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE); cmd = host->cmd; if (!cmd) return; data = cmd->data; if (!data) return; if (cmd->data->blocks > 1) { pr_debug("multiple write : wait for BLKE...\n"); at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); } else at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); } /* * Update bytes tranfered count during a write operation */ static void at91_mci_update_bytes_xfered(struct at91mci_host *host) { struct mmc_data *data; /* always deal with the effective request (and not the current cmd) */ if (host->request->cmd && host->request->cmd->error != 0) return; if (host->request->data) { data = host->request->data; if (data->flags & MMC_DATA_WRITE) { /* card is in IDLE mode now */ pr_debug("-> bytes_xfered %d, total_length = %d\n", data->bytes_xfered, host->total_length); data->bytes_xfered = data->blksz * data->blocks; } } } /*Handle after command sent ready*/ static int at91_mci_handle_cmdrdy(struct at91mci_host *host) { if (!host->cmd) return 1; else if (!host->cmd->data) { if (host->flags & FL_SENT_STOP) { /*After multi block write, we must wait for NOTBUSY*/ at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); } else return 1; } else if (host->cmd->data->flags & MMC_DATA_WRITE) { /*After sendding multi-block-write command, start DMA transfer*/ at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE); at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); } /* command not completed, have to wait */ 
return 0; } /* * Enable the controller */ static void at91_mci_enable(struct at91mci_host *host) { unsigned int mr; at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC); mr = AT91_MCI_PDCMODE | 0x34a; if (at91mci_is_mci1rev2xx()) mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF; at91_mci_write(host, AT91_MCI_MR, mr); /* use Slot A or B (only one at same time) */ at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b); } /* * Disable the controller */ static void at91_mci_disable(struct at91mci_host *host) { at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); } /* * Send a command */ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd) { unsigned int cmdr, mr; unsigned int block_length; struct mmc_data *data = cmd->data; unsigned int blocks; unsigned int ier = 0; host->cmd = cmd; /* Needed for leaving busy state before CMD1 */ if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) { pr_debug("Clearing timeout\n"); at91_mci_write(host, AT91_MCI_ARGR, 0); at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD); while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) { /* spin */ pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR)); } } cmdr = cmd->opcode; if (mmc_resp_type(cmd) == MMC_RSP_NONE) cmdr |= AT91_MCI_RSPTYP_NONE; else { /* if a response is expected then allow maximum response latancy */ cmdr |= AT91_MCI_MAXLAT; /* set 136 bit response for R2, 48 bit response otherwise */ if (mmc_resp_type(cmd) == MMC_RSP_R2) cmdr |= AT91_MCI_RSPTYP_136; else cmdr |= AT91_MCI_RSPTYP_48; } if (data) { if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) { if (data->blksz & 0x3) { pr_debug("Unsupported block size\n"); cmd->error = -EINVAL; mmc_request_done(host->mmc, host->request); return; } if (data->flags & MMC_DATA_STREAM) { pr_debug("Stream commands not supported\n"); 
cmd->error = -EINVAL; mmc_request_done(host->mmc, host->request); return; } } block_length = data->blksz; blocks = data->blocks; /* always set data start - also set direction flag for read */ if (data->flags & MMC_DATA_READ) cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START); else if (data->flags & MMC_DATA_WRITE) cmdr |= AT91_MCI_TRCMD_START; if (cmd->opcode == SD_IO_RW_EXTENDED) { cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK; } else { if (data->flags & MMC_DATA_STREAM) cmdr |= AT91_MCI_TRTYP_STREAM; if (data->blocks > 1) cmdr |= AT91_MCI_TRTYP_MULTIPLE; } } else { block_length = 0; blocks = 0; } if (host->flags & FL_SENT_STOP) cmdr |= AT91_MCI_TRCMD_STOP; if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) cmdr |= AT91_MCI_OPDCMD; /* * Set the arguments and send the command */ pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n", cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR)); if (!data) { at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS); at91_mci_write(host, ATMEL_PDC_RPR, 0); at91_mci_write(host, ATMEL_PDC_RCR, 0); at91_mci_write(host, ATMEL_PDC_RNPR, 0); at91_mci_write(host, ATMEL_PDC_RNCR, 0); at91_mci_write(host, ATMEL_PDC_TPR, 0); at91_mci_write(host, ATMEL_PDC_TCR, 0); at91_mci_write(host, ATMEL_PDC_TNPR, 0); at91_mci_write(host, ATMEL_PDC_TNCR, 0); ier = AT91_MCI_CMDRDY; } else { /* zero block length and PDC mode */ mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff; mr |= (data->blksz & 0x3) ? 
AT91_MCI_PDCFBYTE : 0; mr |= (block_length << 16); mr |= AT91_MCI_PDCMODE; at91_mci_write(host, AT91_MCI_MR, mr); if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261())) at91_mci_write(host, AT91_MCI_BLKR, AT91_MCI_BLKR_BCNT(blocks) | AT91_MCI_BLKR_BLKLEN(block_length)); /* * Disable the PDC controller */ at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); if (cmdr & AT91_MCI_TRCMD_START) { data->bytes_xfered = 0; host->transfer_index = 0; host->in_use_index = 0; if (cmdr & AT91_MCI_TRDIR) { /* * Handle a read */ host->total_length = 0; at91_mci_write(host, ATMEL_PDC_RPR, host->physical_address); at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? (blocks * block_length) : (blocks * block_length) / 4); at91_mci_write(host, ATMEL_PDC_RNPR, 0); at91_mci_write(host, ATMEL_PDC_RNCR, 0); ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */; } else { /* * Handle a write */ host->total_length = block_length * blocks; /* * MCI1 rev2xx Data Write Operation and * number of bytes erratum */ if (at91mci_is_mci1rev2xx()) if (host->total_length < 12) host->total_length = 12; at91_mci_sg_to_dma(host, data); pr_debug("Transmitting %d bytes\n", host->total_length); at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ? 
host->total_length : host->total_length / 4); ier = AT91_MCI_CMDRDY; } } } /* * Send the command and then enable the PDC - not the other way round as * the data sheet says */ at91_mci_write(host, AT91_MCI_ARGR, cmd->arg); at91_mci_write(host, AT91_MCI_CMDR, cmdr); if (cmdr & AT91_MCI_TRCMD_START) { if (cmdr & AT91_MCI_TRDIR) at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); } /* Enable selected interrupts */ at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier); } /* * Process the next step in the request */ static void at91_mci_process_next(struct at91mci_host *host) { if (!(host->flags & FL_SENT_COMMAND)) { host->flags |= FL_SENT_COMMAND; at91_mci_send_command(host, host->request->cmd); } else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) { host->flags |= FL_SENT_STOP; at91_mci_send_command(host, host->request->stop); } else { del_timer(&host->timer); /* the at91rm9200 mci controller hangs after some transfers, * and the workaround is to reset it after each transfer. 
*/ if (cpu_is_at91rm9200()) at91_reset_host(host); mmc_request_done(host->mmc, host->request); } } /* * Handle a command that has been completed */ static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status) { struct mmc_command *cmd = host->cmd; struct mmc_data *data = cmd->data; at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0)); cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1)); cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2)); cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3)); pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n", status, at91_mci_read(host, AT91_MCI_SR), cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); if (status & AT91_MCI_ERRORS) { if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) { cmd->error = 0; } else { if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) { if (data) { if (status & AT91_MCI_DTOE) data->error = -ETIMEDOUT; else if (status & AT91_MCI_DCRCE) data->error = -EILSEQ; } } else { if (status & AT91_MCI_RTOE) cmd->error = -ETIMEDOUT; else if (status & AT91_MCI_RCRCE) cmd->error = -EILSEQ; else cmd->error = -EIO; } pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n", cmd->error, data ? 
data->error : 0, cmd->opcode, cmd->retries); } } else cmd->error = 0; at91_mci_process_next(host); } /* * Handle an MMC request */ static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct at91mci_host *host = mmc_priv(mmc); host->request = mrq; host->flags = 0; /* more than 1s timeout needed with slow SD cards */ mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000)); at91_mci_process_next(host); } /* * Set the IOS */ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { int clkdiv; struct at91mci_host *host = mmc_priv(mmc); unsigned long at91_master_clock = clk_get_rate(host->mci_clk); host->bus_mode = ios->bus_mode; if (ios->clock == 0) { /* Disable the MCI controller */ at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS); clkdiv = 0; } else { /* Enable the MCI controller */ at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); if ((at91_master_clock % (ios->clock * 2)) == 0) clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; else clkdiv = (at91_master_clock / ios->clock) / 2; pr_debug("clkdiv = %d. 
mcck = %ld\n", clkdiv, at91_master_clock / (2 * (clkdiv + 1))); } if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) { pr_debug("MMC: Setting controller bus width to 4\n"); at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS); } else { pr_debug("MMC: Setting controller bus width to 1\n"); at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); } /* Set the clock divider */ at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv); /* maybe switch power to the card */ if (host->board->vcc_pin) { switch (ios->power_mode) { case MMC_POWER_OFF: gpio_set_value(host->board->vcc_pin, 0); break; case MMC_POWER_UP: gpio_set_value(host->board->vcc_pin, 1); break; case MMC_POWER_ON: break; default: WARN_ON(1); } } } /* * Handle an interrupt */ static irqreturn_t at91_mci_irq(int irq, void *devid) { struct at91mci_host *host = devid; int completed = 0; unsigned int int_status, int_mask; int_status = at91_mci_read(host, AT91_MCI_SR); int_mask = at91_mci_read(host, AT91_MCI_IMR); pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask, int_status & int_mask); int_status = int_status & int_mask; if (int_status & AT91_MCI_ERRORS) { completed = 1; if (int_status & AT91_MCI_UNRE) pr_debug("MMC: Underrun error\n"); if (int_status & AT91_MCI_OVRE) pr_debug("MMC: Overrun error\n"); if (int_status & AT91_MCI_DTOE) pr_debug("MMC: Data timeout\n"); if (int_status & AT91_MCI_DCRCE) pr_debug("MMC: CRC error in data\n"); if (int_status & AT91_MCI_RTOE) pr_debug("MMC: Response timeout\n"); if (int_status & AT91_MCI_RENDE) pr_debug("MMC: Response end bit error\n"); if (int_status & AT91_MCI_RCRCE) pr_debug("MMC: Response CRC error\n"); if (int_status & AT91_MCI_RDIRE) pr_debug("MMC: Response direction error\n"); if (int_status & AT91_MCI_RINDE) pr_debug("MMC: Response index error\n"); } else { /* Only continue processing if no errors */ if (int_status & 
AT91_MCI_TXBUFE) { pr_debug("TX buffer empty\n"); at91_mci_handle_transmitted(host); } if (int_status & AT91_MCI_ENDRX) { pr_debug("ENDRX\n"); at91_mci_post_dma_read(host); } if (int_status & AT91_MCI_RXBUFF) { pr_debug("RX buffer full\n"); at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX); completed = 1; } if (int_status & AT91_MCI_ENDTX) pr_debug("Transmit has ended\n"); if (int_status & AT91_MCI_NOTBUSY) { pr_debug("Card is ready\n"); at91_mci_update_bytes_xfered(host); completed = 1; } if (int_status & AT91_MCI_DTIP) pr_debug("Data transfer in progress\n"); if (int_status & AT91_MCI_BLKE) { pr_debug("Block transfer has ended\n"); if (host->request->data && host->request->data->blocks > 1) { /* multi block write : complete multi write * command and send stop */ completed = 1; } else { at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); } } if (int_status & AT91_MCI_SDIOIRQA) mmc_signal_sdio_irq(host->mmc); if (int_status & AT91_MCI_SDIOIRQB) mmc_signal_sdio_irq(host->mmc); if (int_status & AT91_MCI_TXRDY) pr_debug("Ready to transmit\n"); if (int_status & AT91_MCI_RXRDY) pr_debug("Ready to receive\n"); if (int_status & AT91_MCI_CMDRDY) { pr_debug("Command ready\n"); completed = at91_mci_handle_cmdrdy(host); } } if (completed) { pr_debug("Completed command\n"); at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); at91_mci_completed_command(host, int_status); } else at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); return IRQ_HANDLED; } static irqreturn_t at91_mmc_det_irq(int irq, void *_host) { struct at91mci_host *host = _host; int present = !gpio_get_value(irq_to_gpio(irq)); /* * we expect this irq on both insert and remove, * and use a short delay to debounce. */ if (present != host->present) { host->present = present; pr_debug("%s: card %s\n", mmc_hostname(host->mmc), present ? 
"insert" : "remove"); if (!present) { pr_debug("****** Resetting SD-card bus width ******\n"); at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); } /* 0.5s needed because of early card detect switch firing */ mmc_detect_change(host->mmc, msecs_to_jiffies(500)); } return IRQ_HANDLED; } static int at91_mci_get_ro(struct mmc_host *mmc) { struct at91mci_host *host = mmc_priv(mmc); if (host->board->wp_pin) return !!gpio_get_value(host->board->wp_pin); /* * Board doesn't support read only detection; let the mmc core * decide what to do. */ return -ENOSYS; } static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct at91mci_host *host = mmc_priv(mmc); pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc), host->board->slot_b ? 'B':'A', enable ? "enable" : "disable"); at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR, host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA); } static const struct mmc_host_ops at91_mci_ops = { .request = at91_mci_request, .set_ios = at91_mci_set_ios, .get_ro = at91_mci_get_ro, .enable_sdio_irq = at91_mci_enable_sdio_irq, }; /* * Probe for the device */ static int __init at91_mci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct at91mci_host *host; struct resource *res; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; dev_dbg(&pdev->dev, "couldn't allocate mmc host\n"); goto fail6; } mmc->ops = &at91_mci_ops; mmc->f_min = 375000; mmc->f_max = 25000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = 0; mmc->max_blk_size = MCI_MAXBLKSIZE; mmc->max_blk_count = MCI_BLKATONCE; mmc->max_req_size = MCI_BUFSIZE; mmc->max_segs = MCI_BLKATONCE; mmc->max_seg_size = MCI_BUFSIZE; host = mmc_priv(mmc); host->mmc = mmc; host->bus_mode = 0; 
host->board = pdev->dev.platform_data; if (host->board->wire4) { if (at91mci_is_mci1rev2xx()) mmc->caps |= MMC_CAP_4_BIT_DATA; else dev_warn(&pdev->dev, "4 wire bus mode not supported" " - using 1 wire\n"); } host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE, &host->physical_address, GFP_KERNEL); if (!host->buffer) { ret = -ENOMEM; dev_err(&pdev->dev, "Can't allocate transmit buffer\n"); goto fail5; } /* Add SDIO capability when available */ if (at91mci_is_mci1rev2xx()) { /* at91mci MCI1 rev2xx sdio interrupt erratum */ if (host->board->wire4 || !host->board->slot_b) mmc->caps |= MMC_CAP_SDIO_IRQ; } /* * Reserve GPIOs ... board init code makes sure these pins are set * up as GPIOs with the right direction (input, except for vcc) */ if (host->board->det_pin) { ret = gpio_request(host->board->det_pin, "mmc_detect"); if (ret < 0) { dev_dbg(&pdev->dev, "couldn't claim card detect pin\n"); goto fail4b; } } if (host->board->wp_pin) { ret = gpio_request(host->board->wp_pin, "mmc_wp"); if (ret < 0) { dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n"); goto fail4; } } if (host->board->vcc_pin) { ret = gpio_request(host->board->vcc_pin, "mmc_vcc"); if (ret < 0) { dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n"); goto fail3; } } /* * Get Clock */ host->mci_clk = clk_get(&pdev->dev, "mci_clk"); if (IS_ERR(host->mci_clk)) { ret = -ENODEV; dev_dbg(&pdev->dev, "no mci_clk?\n"); goto fail2; } /* * Map I/O region */ host->baseaddr = ioremap(res->start, resource_size(res)); if (!host->baseaddr) { ret = -ENOMEM; goto fail1; } /* * Reset hardware */ clk_enable(host->mci_clk); /* Enable the peripheral clock */ at91_mci_disable(host); at91_mci_enable(host); /* * Allocate the MCI interrupt */ host->irq = platform_get_irq(pdev, 0); ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, mmc_hostname(mmc), host); if (ret) { dev_dbg(&pdev->dev, "request MCI interrupt failed\n"); goto fail0; } setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host); 
platform_set_drvdata(pdev, mmc); /* * Add host to MMC layer */ if (host->board->det_pin) { host->present = !gpio_get_value(host->board->det_pin); } else host->present = -1; mmc_add_host(mmc); /* * monitor card insertion/removal if we can */ if (host->board->det_pin) { ret = request_irq(gpio_to_irq(host->board->det_pin), at91_mmc_det_irq, 0, mmc_hostname(mmc), host); if (ret) dev_warn(&pdev->dev, "request MMC detect irq failed\n"); else device_init_wakeup(&pdev->dev, 1); } pr_debug("Added MCI driver\n"); return 0; fail0: clk_disable(host->mci_clk); iounmap(host->baseaddr); fail1: clk_put(host->mci_clk); fail2: if (host->board->vcc_pin) gpio_free(host->board->vcc_pin); fail3: if (host->board->wp_pin) gpio_free(host->board->wp_pin); fail4: if (host->board->det_pin) gpio_free(host->board->det_pin); fail4b: if (host->buffer) dma_free_coherent(&pdev->dev, MCI_BUFSIZE, host->buffer, host->physical_address); fail5: mmc_free_host(mmc); fail6: release_mem_region(res->start, resource_size(res)); dev_err(&pdev->dev, "probe failed, err %d\n", ret); return ret; } /* * Remove a device */ static int __exit at91_mci_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct at91mci_host *host; struct resource *res; if (!mmc) return -1; host = mmc_priv(mmc); if (host->buffer) dma_free_coherent(&pdev->dev, MCI_BUFSIZE, host->buffer, host->physical_address); if (host->board->det_pin) { if (device_can_wakeup(&pdev->dev)) free_irq(gpio_to_irq(host->board->det_pin), host); device_init_wakeup(&pdev->dev, 0); gpio_free(host->board->det_pin); } at91_mci_disable(host); del_timer_sync(&host->timer); mmc_remove_host(mmc); free_irq(host->irq, host); clk_disable(host->mci_clk); /* Disable the peripheral clock */ clk_put(host->mci_clk); if (host->board->vcc_pin) gpio_free(host->board->vcc_pin); if (host->board->wp_pin) gpio_free(host->board->wp_pin); iounmap(host->baseaddr); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, 
resource_size(res)); mmc_free_host(mmc); platform_set_drvdata(pdev, NULL); pr_debug("MCI Removed\n"); return 0; } #ifdef CONFIG_PM static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct at91mci_host *host = mmc_priv(mmc); int ret = 0; if (host->board->det_pin && device_may_wakeup(&pdev->dev)) enable_irq_wake(host->board->det_pin); if (mmc) ret = mmc_suspend_host(mmc); return ret; } static int at91_mci_resume(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct at91mci_host *host = mmc_priv(mmc); int ret = 0; if (host->board->det_pin && device_may_wakeup(&pdev->dev)) disable_irq_wake(host->board->det_pin); if (mmc) ret = mmc_resume_host(mmc); return ret; } #else #define at91_mci_suspend NULL #define at91_mci_resume NULL #endif static struct platform_driver at91_mci_driver = { .remove = __exit_p(at91_mci_remove), .suspend = at91_mci_suspend, .resume = at91_mci_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; static int __init at91_mci_init(void) { return platform_driver_probe(&at91_mci_driver, at91_mci_probe); } static void __exit at91_mci_exit(void) { platform_driver_unregister(&at91_mci_driver); } module_init(at91_mci_init); module_exit(at91_mci_exit); MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver"); MODULE_AUTHOR("Nick Randell"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:at91_mci");
gpl-2.0
shminer/LG-F460-Kernel
drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c
2960
4130
/******************************************************************************
 *
 * Copyright(c) 2009-2012 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
 * Hsinchu 300, Taiwan.
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/

#include "pwrseq.h"

/* Description:
 *	This routine deals with the Power Configuration CMD
 *	parsing for RTL8723/RTL8188E Series IC.
 *
 *	Walks the @pwrcfgcmd table entry by entry, applying each command
 *	whose fab/cut/interface masks match this chip, until a PWR_CMD_END
 *	entry is reached (success) or a PWR_CMD_POLLING entry times out
 *	(failure, returns false).
 *
 * Assumption:
 *	We should follow specific format that was released from HW SD,
 *	i.e. the table is well-formed and terminated by PWR_CMD_END.
 */
bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
			      u8 faversion, u8 interface_type,
			      struct wlan_pwr_cfg pwrcfgcmd[])
{
	struct wlan_pwr_cfg cfg_cmd = {0};
	bool polling_bit = false;
	u32 ary_idx = 0;
	u8 value = 0;
	u32 offset = 0;
	u32 polling_count = 0;
	u32 max_polling_cnt = 5000;

	do {
		cfg_cmd = pwrcfgcmd[ary_idx];
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x),"
			 "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
			 GET_PWR_CFG_OFFSET(cfg_cmd),
			 GET_PWR_CFG_CUT_MASK(cfg_cmd),
			 GET_PWR_CFG_FAB_MASK(cfg_cmd),
			 GET_PWR_CFG_INTF_MASK(cfg_cmd),
			 GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
			 GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));

		/* Only apply entries targeted at this fab/cut/interface. */
		if ((GET_PWR_CFG_FAB_MASK(cfg_cmd) & faversion) &&
		    (GET_PWR_CFG_CUT_MASK(cfg_cmd) & cut_version) &&
		    (GET_PWR_CFG_INTF_MASK(cfg_cmd) & interface_type)) {
			switch (GET_PWR_CFG_CMD(cfg_cmd)) {
			case PWR_CMD_READ:
				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
					 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
				break;
			case PWR_CMD_WRITE:
				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
					 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
				offset = GET_PWR_CFG_OFFSET(cfg_cmd);

				/* Read-modify-write only the masked bits of
				 * the system register.
				 */
				value = rtl_read_byte(rtlpriv, offset);
				value &= (~(GET_PWR_CFG_MASK(cfg_cmd)));
				value |= (GET_PWR_CFG_VALUE(cfg_cmd) &
					  GET_PWR_CFG_MASK(cfg_cmd));

				/* Write the value back to system register */
				rtl_write_byte(rtlpriv, offset, value);
				break;
			case PWR_CMD_POLLING:
				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
					 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
				polling_bit = false;
				/* Restart the polling budget for every
				 * POLLING entry. Without this reset, a count
				 * accumulated by an earlier POLLING command in
				 * the same table shortens - or instantly
				 * fails - all subsequent polls.
				 */
				polling_count = 0;
				offset = GET_PWR_CFG_OFFSET(cfg_cmd);

				do {
					value = rtl_read_byte(rtlpriv, offset);
					value &= GET_PWR_CFG_MASK(cfg_cmd);

					if (value ==
					    (GET_PWR_CFG_VALUE(cfg_cmd) &
					     GET_PWR_CFG_MASK(cfg_cmd)))
						polling_bit = true;
					else
						udelay(10);

					if (polling_count++ > max_polling_cnt)
						return false;
				} while (!polling_bit);
				break;
			case PWR_CMD_DELAY:
				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
					 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
				/* OFFSET field carries the delay duration;
				 * VALUE selects the unit (us vs ms).
				 */
				if (GET_PWR_CFG_VALUE(cfg_cmd) ==
				    PWRSEQ_DELAY_US)
					udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
				else
					mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
				break;
			case PWR_CMD_END:
				RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
					 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
				return true;
			default:
				RT_ASSERT(false,
					  "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
				break;
			}
		}
		ary_idx++;
	} while (1);

	return true;
}
gpl-2.0
boa19861105/B2_UHL
arch/hexagon/kernel/vm_events.c
3216
3077
/* * Mostly IRQ support for Hexagon * * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/kernel.h> #include <asm/registers.h> #include <linux/irq.h> #include <linux/hardirq.h> /* * show_regs - print pt_regs structure * @regs: pointer to pt_regs * * To-do: add all the accessor definitions to registers.h * * Will make this routine a lot easier to write. 
*/ void show_regs(struct pt_regs *regs) { printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n", regs->restart_r0, regs->syscall_nr); printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds); printk(KERN_EMERG "lc0: \t0x%08lx sa0: 0x%08lx m0: 0x%08lx\n", regs->lc0, regs->sa0, regs->m0); printk(KERN_EMERG "lc1: \t0x%08lx sa1: 0x%08lx m1: 0x%08lx\n", regs->lc1, regs->sa1, regs->m1); printk(KERN_EMERG "gp: \t0x%08lx ugp: 0x%08lx usr: 0x%08lx\n", regs->gp, regs->ugp, regs->usr); printk(KERN_EMERG "r0: \t0x%08lx %08lx %08lx %08lx\n", regs->r00, regs->r01, regs->r02, regs->r03); printk(KERN_EMERG "r4: \t0x%08lx %08lx %08lx %08lx\n", regs->r04, regs->r05, regs->r06, regs->r07); printk(KERN_EMERG "r8: \t0x%08lx %08lx %08lx %08lx\n", regs->r08, regs->r09, regs->r10, regs->r11); printk(KERN_EMERG "r12: \t0x%08lx %08lx %08lx %08lx\n", regs->r12, regs->r13, regs->r14, regs->r15); printk(KERN_EMERG "r16: \t0x%08lx %08lx %08lx %08lx\n", regs->r16, regs->r17, regs->r18, regs->r19); printk(KERN_EMERG "r20: \t0x%08lx %08lx %08lx %08lx\n", regs->r20, regs->r21, regs->r22, regs->r23); printk(KERN_EMERG "r24: \t0x%08lx %08lx %08lx %08lx\n", regs->r24, regs->r25, regs->r26, regs->r27); printk(KERN_EMERG "r28: \t0x%08lx %08lx %08lx %08lx\n", regs->r28, regs->r29, regs->r30, regs->r31); printk(KERN_EMERG "elr: \t0x%08lx cause: 0x%08lx user_mode: %d\n", pt_elr(regs), pt_cause(regs), user_mode(regs)); printk(KERN_EMERG "psp: \t0x%08lx badva: 0x%08lx int_enabled: %d\n", pt_psp(regs), pt_badva(regs), ints_enabled(regs)); } void dummy_handler(struct pt_regs *regs) { unsigned int elr = pt_elr(regs); printk(KERN_ERR "Unimplemented handler; ELR=0x%08x\n", elr); } void arch_do_IRQ(struct pt_regs *regs) { int irq = pt_cause(regs); struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); generic_handle_irq(irq); irq_exit(); set_irq_regs(old_regs); }
gpl-2.0
kozmikkick/KozmiKKernel-HTC-One
drivers/dma/mv_xor.c
4496
35674
/* * offload engine driver for the Marvell XOR engine * Copyright (C) 2007, 2008, Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/memory.h> #include <plat/mv_xor.h> #include "dmaengine.h" #include "mv_xor.h" static void mv_xor_issue_pending(struct dma_chan *chan); #define to_mv_xor_chan(chan) \ container_of(chan, struct mv_xor_chan, common) #define to_mv_xor_device(dev) \ container_of(dev, struct mv_xor_device, common) #define to_mv_xor_slot(tx) \ container_of(tx, struct mv_xor_desc_slot, async_tx) static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->status = (1 << 31); hw_desc->phy_next_desc = 0; hw_desc->desc_command = (1 << 31); } static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) { struct mv_xor_desc *hw_desc = desc->hw_desc; return hw_desc->phy_dest_addr; } static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, int src_idx) { struct mv_xor_desc *hw_desc = desc->hw_desc; return hw_desc->phy_src_addr[src_idx]; } static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { struct 
mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->byte_count = byte_count; } static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, u32 next_desc_addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; BUG_ON(hw_desc->phy_next_desc); hw_desc->phy_next_desc = next_desc_addr; } static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_next_desc = 0; } static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val) { desc->value = val; } static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, dma_addr_t addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_dest_addr = addr; } static int mv_chan_memset_slot_count(size_t len) { return 1; } #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, int index, dma_addr_t addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; hw_desc->phy_src_addr[index] = addr; if (desc->type == DMA_XOR) hw_desc->desc_command |= (1 << index); } static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) { return __raw_readl(XOR_CURR_DESC(chan)); } static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, u32 next_desc_addr) { __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); } static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr) { __raw_writel(desc_addr, XOR_DEST_POINTER(chan)); } static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size) { __raw_writel(block_size, XOR_BLOCK_SIZE(chan)); } static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value) { __raw_writel(value, XOR_INIT_VALUE_LOW(chan)); __raw_writel(value, XOR_INIT_VALUE_HIGH(chan)); } static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) { u32 val = __raw_readl(XOR_INTR_MASK(chan)); val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); __raw_writel(val, XOR_INTR_MASK(chan)); } static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) { 
u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan)); intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; return intr_cause; } static int mv_is_err_intr(u32 intr_cause) { if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) return 1; return 0; } static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) { u32 val = ~(1 << (chan->idx * 16)); dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); __raw_writel(val, XOR_INTR_CAUSE(chan)); } static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) { u32 val = 0xFFFF0000 >> (chan->idx * 16); __raw_writel(val, XOR_INTR_CAUSE(chan)); } static int mv_can_chain(struct mv_xor_desc_slot *desc) { struct mv_xor_desc_slot *chain_old_tail = list_entry( desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); if (chain_old_tail->type != desc->type) return 0; if (desc->type == DMA_MEMSET) return 0; return 1; } static void mv_set_mode(struct mv_xor_chan *chan, enum dma_transaction_type type) { u32 op_mode; u32 config = __raw_readl(XOR_CONFIG(chan)); switch (type) { case DMA_XOR: op_mode = XOR_OPERATION_MODE_XOR; break; case DMA_MEMCPY: op_mode = XOR_OPERATION_MODE_MEMCPY; break; case DMA_MEMSET: op_mode = XOR_OPERATION_MODE_MEMSET; break; default: dev_printk(KERN_ERR, chan->device->common.dev, "error: unsupported operation %d.\n", type); BUG(); return; } config &= ~0x7; config |= op_mode; __raw_writel(config, XOR_CONFIG(chan)); chan->current_type = type; } static void mv_chan_activate(struct mv_xor_chan *chan) { u32 activation; dev_dbg(chan->device->common.dev, " activate chan.\n"); activation = __raw_readl(XOR_ACTIVATION(chan)); activation |= 0x1; __raw_writel(activation, XOR_ACTIVATION(chan)); } static char mv_chan_is_busy(struct mv_xor_chan *chan) { u32 state = __raw_readl(XOR_ACTIVATION(chan)); state = (state >> 4) & 0x3; return (state == 1) ? 
1 : 0; } static int mv_chan_xor_slot_count(size_t len, int src_cnt) { return 1; } /** * mv_xor_free_slots - flags descriptor slots for reuse * @slot: Slot to free * Caller must hold &mv_chan->lock while calling this function */ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, struct mv_xor_desc_slot *slot) { dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n", __func__, __LINE__, slot); slot->slots_per_op = 0; } /* * mv_xor_start_new_chain - program the engine to operate on new chain headed by * sw_desc * Caller must hold &mv_chan->lock while calling this function */ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, struct mv_xor_desc_slot *sw_desc) { dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n", __func__, __LINE__, sw_desc); if (sw_desc->type != mv_chan->current_type) mv_set_mode(mv_chan, sw_desc->type); if (sw_desc->type == DMA_MEMSET) { /* for memset requests we need to program the engine, no * descriptors used. */ struct mv_xor_desc *hw_desc = sw_desc->hw_desc; mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); mv_chan_set_block_size(mv_chan, sw_desc->unmap_len); mv_chan_set_value(mv_chan, sw_desc->value); } else { /* set the hardware chain */ mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); } mv_chan->pending += sw_desc->slot_cnt; mv_xor_issue_pending(&mv_chan->common); } static dma_cookie_t mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, struct mv_xor_chan *mv_chan, dma_cookie_t cookie) { BUG_ON(desc->async_tx.cookie < 0); if (desc->async_tx.cookie > 0) { cookie = desc->async_tx.cookie; /* call the callback (must not sleep or submit new * operations to this channel) */ if (desc->async_tx.callback) desc->async_tx.callback( desc->async_tx.callback_param); /* unmap dma addresses * (unmap_single vs unmap_page?) 
*/ if (desc->group_head && desc->unmap_len) { struct mv_xor_desc_slot *unmap = desc->group_head; struct device *dev = &mv_chan->device->pdev->dev; u32 len = unmap->unmap_len; enum dma_ctrl_flags flags = desc->async_tx.flags; u32 src_cnt; dma_addr_t addr; dma_addr_t dest; src_cnt = unmap->unmap_src_cnt; dest = mv_desc_get_dest_addr(unmap); if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { enum dma_data_direction dir; if (src_cnt > 1) /* is xor ? */ dir = DMA_BIDIRECTIONAL; else dir = DMA_FROM_DEVICE; dma_unmap_page(dev, dest, len, dir); } if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { while (src_cnt--) { addr = mv_desc_get_src_addr(unmap, src_cnt); if (addr == dest) continue; dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); } } desc->group_head = NULL; } } /* run dependent operations */ dma_run_dependencies(&desc->async_tx); return cookie; } static int mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, completed_node) { if (async_tx_test_ack(&iter->async_tx)) { list_del(&iter->completed_node); mv_xor_free_slots(mv_chan, iter); } } return 0; } static int mv_xor_clean_slot(struct mv_xor_desc_slot *desc, struct mv_xor_chan *mv_chan) { dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n", __func__, __LINE__, desc, desc->async_tx.flags); list_del(&desc->chain_node); /* the client is allowed to attach dependent operations * until 'ack' is set */ if (!async_tx_test_ack(&desc->async_tx)) { /* move this slot to the completed_slots */ list_add_tail(&desc->completed_node, &mv_chan->completed_slots); return 0; } mv_xor_free_slots(mv_chan, desc); return 0; } static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dma_cookie_t cookie = 0; int busy = mv_chan_is_busy(mv_chan); u32 current_desc = mv_chan_get_current_desc(mv_chan); int seen_current = 0; 
dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc); mv_xor_clean_completed_slots(mv_chan); /* free completed slots from the chain starting with * the oldest descriptor */ list_for_each_entry_safe(iter, _iter, &mv_chan->chain, chain_node) { prefetch(_iter); prefetch(&_iter->async_tx); /* do not advance past the current descriptor loaded into the * hardware channel, subsequent descriptors are either in * process or have not been submitted */ if (seen_current) break; /* stop the search if we reach the current descriptor and the * channel is busy */ if (iter->async_tx.phys == current_desc) { seen_current = 1; if (busy) break; } cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); if (mv_xor_clean_slot(iter, mv_chan)) break; } if ((busy == 0) && !list_empty(&mv_chan->chain)) { struct mv_xor_desc_slot *chain_head; chain_head = list_entry(mv_chan->chain.next, struct mv_xor_desc_slot, chain_node); mv_xor_start_new_chain(mv_chan, chain_head); } if (cookie > 0) mv_chan->common.completed_cookie = cookie; } static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) { spin_lock_bh(&mv_chan->lock); __mv_xor_slot_cleanup(mv_chan); spin_unlock_bh(&mv_chan->lock); } static void mv_xor_tasklet(unsigned long data) { struct mv_xor_chan *chan = (struct mv_xor_chan *) data; mv_xor_slot_cleanup(chan); } static struct mv_xor_desc_slot * mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, int slots_per_op) { struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; LIST_HEAD(chain); int slots_found, retry = 0; /* start search from the last allocated descrtiptor * if a contiguous allocation can not be found start searching * from the beginning of the list */ retry: slots_found = 0; if (retry == 0) iter = mv_chan->last_used; else iter = list_entry(&mv_chan->all_slots, struct mv_xor_desc_slot, slot_node); list_for_each_entry_safe_continue( iter, _iter, &mv_chan->all_slots, 
slot_node) { prefetch(_iter); prefetch(&_iter->async_tx); if (iter->slots_per_op) { /* give up after finding the first busy slot * on the second pass through the list */ if (retry) break; slots_found = 0; continue; } /* start the allocation if the slot is correctly aligned */ if (!slots_found++) alloc_start = iter; if (slots_found == num_slots) { struct mv_xor_desc_slot *alloc_tail = NULL; struct mv_xor_desc_slot *last_used = NULL; iter = alloc_start; while (num_slots) { int i; /* pre-ack all but the last descriptor */ async_tx_ack(&iter->async_tx); list_add_tail(&iter->chain_node, &chain); alloc_tail = iter; iter->async_tx.cookie = 0; iter->slot_cnt = num_slots; iter->xor_check_result = NULL; for (i = 0; i < slots_per_op; i++) { iter->slots_per_op = slots_per_op - i; last_used = iter; iter = list_entry(iter->slot_node.next, struct mv_xor_desc_slot, slot_node); } num_slots -= slots_per_op; } alloc_tail->group_head = alloc_start; alloc_tail->async_tx.cookie = -EBUSY; list_splice(&chain, &alloc_tail->tx_list); mv_chan->last_used = last_used; mv_desc_clear_next_desc(alloc_start); mv_desc_clear_next_desc(alloc_tail); return alloc_tail; } } if (!retry++) goto retry; /* try to free some slots if the allocation fails */ tasklet_schedule(&mv_chan->irq_tasklet); return NULL; } /************************ DMA engine API functions ****************************/ static dma_cookie_t mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) { struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); struct mv_xor_desc_slot *grp_start, *old_chain_tail; dma_cookie_t cookie; int new_hw_chain = 1; dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p: async_tx %p\n", __func__, sw_desc, &sw_desc->async_tx); grp_start = sw_desc->group_head; spin_lock_bh(&mv_chan->lock); cookie = dma_cookie_assign(tx); if (list_empty(&mv_chan->chain)) list_splice_init(&sw_desc->tx_list, &mv_chan->chain); else { new_hw_chain = 0; old_chain_tail = 
list_entry(mv_chan->chain.prev, struct mv_xor_desc_slot, chain_node); list_splice_init(&grp_start->tx_list, &old_chain_tail->chain_node); if (!mv_can_chain(grp_start)) goto submit_done; dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n", old_chain_tail->async_tx.phys); /* fix up the hardware chain */ mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); /* if the channel is not busy */ if (!mv_chan_is_busy(mv_chan)) { u32 current_desc = mv_chan_get_current_desc(mv_chan); /* * and the curren desc is the end of the chain before * the append, then we need to start the channel */ if (current_desc == old_chain_tail->async_tx.phys) new_hw_chain = 1; } } if (new_hw_chain) mv_xor_start_new_chain(mv_chan, grp_start); submit_done: spin_unlock_bh(&mv_chan->lock); return cookie; } /* returns the number of allocated descriptors */ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) { char *hw_desc; int idx; struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *slot = NULL; struct mv_xor_platform_data *plat_data = mv_chan->device->pdev->dev.platform_data; int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE; /* Allocate descriptor slots */ idx = mv_chan->slots_allocated; while (idx < num_descs_in_pool) { slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { printk(KERN_INFO "MV XOR Channel only initialized" " %d descriptor slots", idx); break; } hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; dma_async_tx_descriptor_init(&slot->async_tx, chan); slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); INIT_LIST_HEAD(&slot->tx_list); hw_desc = (char *) mv_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; slot->idx = idx++; spin_lock_bh(&mv_chan->lock); mv_chan->slots_allocated = idx; list_add_tail(&slot->slot_node, &mv_chan->all_slots); 
spin_unlock_bh(&mv_chan->lock); } if (mv_chan->slots_allocated && !mv_chan->last_used) mv_chan->last_used = list_entry(mv_chan->all_slots.next, struct mv_xor_desc_slot, slot_node); dev_dbg(mv_chan->device->common.dev, "allocated %d descriptor slots last_used: %p\n", mv_chan->slots_allocated, mv_chan->last_used); return mv_chan->slots_allocated ? : -ENOMEM; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; dev_dbg(mv_chan->device->common.dev, "%s dest: %x src %x len: %u flags: %ld\n", __func__, dest, src, len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_memcpy_slot_count(len); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_MEMCPY; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); mv_desc_set_src_addr(grp_start, 0, src); sw_desc->unmap_src_cnt = 1; sw_desc->unmap_len = len; } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p\n", __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); return sw_desc ? 
&sw_desc->async_tx : NULL; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; dev_dbg(mv_chan->device->common.dev, "%s dest: %x len: %u flags: %ld\n", __func__, dest, len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_memset_slot_count(len); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_MEMSET; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); mv_desc_set_block_fill_val(grp_start, value); sw_desc->unmap_src_cnt = 1; sw_desc->unmap_len = len; } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p \n", __func__, sw_desc, &sw_desc->async_tx); return sw_desc ? 
&sw_desc->async_tx : NULL; } static struct dma_async_tx_descriptor * mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc, *grp_start; int slot_cnt; if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); dev_dbg(mv_chan->device->common.dev, "%s src_cnt: %d len: dest %x %u flags: %ld\n", __func__, src_cnt, len, dest, flags); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_xor_slot_count(len, src_cnt); sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); if (sw_desc) { sw_desc->type = DMA_XOR; sw_desc->async_tx.flags = flags; grp_start = sw_desc->group_head; mv_desc_init(grp_start, flags); /* the byte count field is the same as in memcpy desc*/ mv_desc_set_byte_count(grp_start, len); mv_desc_set_dest_addr(sw_desc->group_head, dest); sw_desc->unmap_src_cnt = src_cnt; sw_desc->unmap_len = len; while (src_cnt--) mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); } spin_unlock_bh(&mv_chan->lock); dev_dbg(mv_chan->device->common.dev, "%s sw_desc %p async_tx %p \n", __func__, sw_desc, &sw_desc->async_tx); return sw_desc ? 
&sw_desc->async_tx : NULL; } static void mv_xor_free_chan_resources(struct dma_chan *chan) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *iter, *_iter; int in_use_descs = 0; mv_xor_slot_cleanup(mv_chan); spin_lock_bh(&mv_chan->lock); list_for_each_entry_safe(iter, _iter, &mv_chan->chain, chain_node) { in_use_descs++; list_del(&iter->chain_node); } list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, completed_node) { in_use_descs++; list_del(&iter->completed_node); } list_for_each_entry_safe_reverse( iter, _iter, &mv_chan->all_slots, slot_node) { list_del(&iter->slot_node); kfree(iter); mv_chan->slots_allocated--; } mv_chan->last_used = NULL; dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n", __func__, mv_chan->slots_allocated); spin_unlock_bh(&mv_chan->lock); if (in_use_descs) dev_err(mv_chan->device->common.dev, "freeing %d in use descriptors!\n", in_use_descs); } /** * mv_xor_status - poll the status of an XOR transaction * @chan: XOR channel handle * @cookie: XOR transaction identifier * @txstate: XOR transactions state holder (or NULL) */ static enum dma_status mv_xor_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_SUCCESS) { mv_xor_clean_completed_slots(mv_chan); return ret; } mv_xor_slot_cleanup(mv_chan); return dma_cookie_status(chan, cookie, txstate); } static void mv_dump_xor_regs(struct mv_xor_chan *chan) { u32 val; val = __raw_readl(XOR_CONFIG(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "config 0x%08x.\n", val); val = __raw_readl(XOR_ACTIVATION(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "activation 0x%08x.\n", val); val = __raw_readl(XOR_INTR_CAUSE(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "intr cause 0x%08x.\n", val); val = __raw_readl(XOR_INTR_MASK(chan)); dev_printk(KERN_ERR, 
chan->device->common.dev, "intr mask 0x%08x.\n", val); val = __raw_readl(XOR_ERROR_CAUSE(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "error cause 0x%08x.\n", val); val = __raw_readl(XOR_ERROR_ADDR(chan)); dev_printk(KERN_ERR, chan->device->common.dev, "error addr 0x%08x.\n", val); } static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, u32 intr_cause) { if (intr_cause & (1 << 4)) { dev_dbg(chan->device->common.dev, "ignore this error\n"); return; } dev_printk(KERN_ERR, chan->device->common.dev, "error on chan %d. intr cause 0x%08x.\n", chan->idx, intr_cause); mv_dump_xor_regs(chan); BUG(); } static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) { struct mv_xor_chan *chan = data; u32 intr_cause = mv_chan_get_intr_cause(chan); dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause); if (mv_is_err_intr(intr_cause)) mv_xor_err_interrupt_handler(chan, intr_cause); tasklet_schedule(&chan->irq_tasklet); mv_xor_device_clear_eoc_cause(chan); return IRQ_HANDLED; } static void mv_xor_issue_pending(struct dma_chan *chan) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); if (mv_chan->pending >= MV_XOR_THRESHOLD) { mv_chan->pending = 0; mv_chan_activate(mv_chan); } } /* * Perform a transaction to verify the HW works. 
*/ #define MV_XOR_TEST_SIZE 2000 static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) { int i; void *src, *dest; dma_addr_t src_dma, dest_dma; struct dma_chan *dma_chan; dma_cookie_t cookie; struct dma_async_tx_descriptor *tx; int err = 0; struct mv_xor_chan *mv_chan; src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); if (!src) return -ENOMEM; dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); if (!dest) { kfree(src); return -ENOMEM; } /* Fill in src buffer */ for (i = 0; i < MV_XOR_TEST_SIZE; i++) ((u8 *) src)[i] = (u8)i; /* Start copy, using first DMA channel */ dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); if (mv_xor_alloc_chan_resources(dma_chan) < 1) { err = -ENODEV; goto out; } dest_dma = dma_map_single(dma_chan->device->dev, dest, MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); src_dma = dma_map_single(dma_chan->device->dev, src, MV_XOR_TEST_SIZE, DMA_TO_DEVICE); tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, MV_XOR_TEST_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); msleep(1); if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto free_resources; } mv_chan = to_mv_xor_chan(dma_chan); dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; goto free_resources; } free_resources: mv_xor_free_chan_resources(dma_chan); out: kfree(src); kfree(dest); return err; } #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ static int __devinit mv_xor_xor_self_test(struct mv_xor_device *device) { int i, src_idx; struct page *dest; struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dest_dma; struct 
dma_async_tx_descriptor *tx; struct dma_chan *dma_chan; dma_cookie_t cookie; u8 cmp_byte = 0; u32 cmp_word; int err = 0; struct mv_xor_chan *mv_chan; for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { xor_srcs[src_idx] = alloc_page(GFP_KERNEL); if (!xor_srcs[src_idx]) { while (src_idx--) __free_page(xor_srcs[src_idx]); return -ENOMEM; } } dest = alloc_page(GFP_KERNEL); if (!dest) { while (src_idx--) __free_page(xor_srcs[src_idx]); return -ENOMEM; } /* Fill in src buffers */ for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { u8 *ptr = page_address(xor_srcs[src_idx]); for (i = 0; i < PAGE_SIZE; i++) ptr[i] = (1 << src_idx); } for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) cmp_byte ^= (u8) (1 << src_idx); cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | (cmp_byte << 8) | cmp_byte; memset(page_address(dest), 0, PAGE_SIZE); dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); if (mv_xor_alloc_chan_resources(dma_chan) < 1) { err = -ENODEV; goto out; } /* test xor */ dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); msleep(8); if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; goto free_resources; } mv_chan = to_mv_xor_chan(dma_chan); dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { u32 *ptr = page_address(dest); if (ptr[i] != cmp_word) { dev_printk(KERN_ERR, dma_chan->device->dev, "Self-test xor failed compare, disabling." 
" index %d, data %x, expected %x\n", i, ptr[i], cmp_word); err = -ENODEV; goto free_resources; } } free_resources: mv_xor_free_chan_resources(dma_chan); out: src_idx = MV_XOR_NUM_SRC_TEST; while (src_idx--) __free_page(xor_srcs[src_idx]); __free_page(dest); return err; } static int __devexit mv_xor_remove(struct platform_device *dev) { struct mv_xor_device *device = platform_get_drvdata(dev); struct dma_chan *chan, *_chan; struct mv_xor_chan *mv_chan; struct mv_xor_platform_data *plat_data = dev->dev.platform_data; dma_async_device_unregister(&device->common); dma_free_coherent(&dev->dev, plat_data->pool_size, device->dma_desc_pool_virt, device->dma_desc_pool); list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) { mv_chan = to_mv_xor_chan(chan); list_del(&chan->device_node); } return 0; } static int __devinit mv_xor_probe(struct platform_device *pdev) { int ret = 0; int irq; struct mv_xor_device *adev; struct mv_xor_chan *mv_chan; struct dma_device *dma_dev; struct mv_xor_platform_data *plat_data = pdev->dev.platform_data; adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); if (!adev) return -ENOMEM; dma_dev = &adev->common; /* allocate coherent memory for hardware descriptors * note: writecombine gives slightly better performance, but * requires that we explicitly flush the writes */ adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, plat_data->pool_size, &adev->dma_desc_pool, GFP_KERNEL); if (!adev->dma_desc_pool_virt) return -ENOMEM; adev->id = plat_data->hw_id; /* discover transaction capabilites from the platform data */ dma_dev->cap_mask = plat_data->cap_mask; adev->pdev = pdev; platform_set_drvdata(pdev, adev); adev->shared = platform_get_drvdata(plat_data->shared); INIT_LIST_HEAD(&dma_dev->channels); /* set base routines */ dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; dma_dev->device_tx_status = mv_xor_status; 
dma_dev->device_issue_pending = mv_xor_issue_pending; dma_dev->dev = &pdev->dev; /* set prep routines based on capability */ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { dma_dev->max_xor = 8; dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; } mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); if (!mv_chan) { ret = -ENOMEM; goto err_free_dma; } mv_chan->device = adev; mv_chan->idx = plat_data->hw_id; mv_chan->mmr_base = adev->shared->xor_base; if (!mv_chan->mmr_base) { ret = -ENOMEM; goto err_free_dma; } tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) mv_chan); /* clear errors before enabling interrupts */ mv_xor_device_clear_err_status(mv_chan); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto err_free_dma; } ret = devm_request_irq(&pdev->dev, irq, mv_xor_interrupt_handler, 0, dev_name(&pdev->dev), mv_chan); if (ret) goto err_free_dma; mv_chan_unmask_interrupts(mv_chan); mv_set_mode(mv_chan, DMA_MEMCPY); spin_lock_init(&mv_chan->lock); INIT_LIST_HEAD(&mv_chan->chain); INIT_LIST_HEAD(&mv_chan->completed_slots); INIT_LIST_HEAD(&mv_chan->all_slots); mv_chan->common.device = dma_dev; dma_cookie_init(&mv_chan->common); list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { ret = mv_xor_memcpy_self_test(adev); dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); if (ret) goto err_free_dma; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { ret = mv_xor_xor_self_test(adev); dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); if (ret) goto err_free_dma; } dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: " "( %s%s%s%s)\n", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? 
"fill " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); dma_async_device_register(dma_dev); goto out; err_free_dma: dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, adev->dma_desc_pool_virt, adev->dma_desc_pool); out: return ret; } static void mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp, const struct mbus_dram_target_info *dram) { void __iomem *base = msp->xor_base; u32 win_enable = 0; int i; for (i = 0; i < 8; i++) { writel(0, base + WINDOW_BASE(i)); writel(0, base + WINDOW_SIZE(i)); if (i < 4) writel(0, base + WINDOW_REMAP_HIGH(i)); } for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; writel((cs->base & 0xffff0000) | (cs->mbus_attr << 8) | dram->mbus_dram_target_id, base + WINDOW_BASE(i)); writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); win_enable |= (1 << i); win_enable |= 3 << (16 + (2 * i)); } writel(win_enable, base + WINDOW_BAR_ENABLE(0)); writel(win_enable, base + WINDOW_BAR_ENABLE(1)); } static struct platform_driver mv_xor_driver = { .probe = mv_xor_probe, .remove = __devexit_p(mv_xor_remove), .driver = { .owner = THIS_MODULE, .name = MV_XOR_NAME, }, }; static int mv_xor_shared_probe(struct platform_device *pdev) { const struct mbus_dram_target_info *dram; struct mv_xor_shared_private *msp; struct resource *res; dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n"); msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); if (!msp) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; msp->xor_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!msp->xor_base) return -EBUSY; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) return -ENODEV; msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!msp->xor_high_base) return -EBUSY; platform_set_drvdata(pdev, msp); /* * (Re-)program MBUS remapping 
windows if we are asked to. */ dram = mv_mbus_dram_info(); if (dram) mv_xor_conf_mbus_windows(msp, dram); return 0; } static int mv_xor_shared_remove(struct platform_device *pdev) { return 0; } static struct platform_driver mv_xor_shared_driver = { .probe = mv_xor_shared_probe, .remove = mv_xor_shared_remove, .driver = { .owner = THIS_MODULE, .name = MV_XOR_SHARED_NAME, }, }; static int __init mv_xor_init(void) { int rc; rc = platform_driver_register(&mv_xor_shared_driver); if (!rc) { rc = platform_driver_register(&mv_xor_driver); if (rc) platform_driver_unregister(&mv_xor_shared_driver); } return rc; } module_init(mv_xor_init); /* it's currently unsafe to unload this module */ #if 0 static void __exit mv_xor_exit(void) { platform_driver_unregister(&mv_xor_driver); platform_driver_unregister(&mv_xor_shared_driver); return; } module_exit(mv_xor_exit); #endif MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); MODULE_LICENSE("GPL");
gpl-2.0
yeewang/linux-sunxi
drivers/staging/tidspbridge/pmgr/io.c
7824
2376
/* * io.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * IO manager interface: Manages IO between CHNL and msg_ctrl. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> /* ----------------------------------- Host OS */ #include <dspbridge/host_os.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- Platform Manager */ #include <dspbridge/dev.h> /* ----------------------------------- This */ #include <ioobj.h> #include <dspbridge/io.h> /* * ======== io_create ======== * Purpose: * Create an IO manager object, responsible for managing IO between * CHNL and msg_ctrl */ int io_create(struct io_mgr **io_man, struct dev_object *hdev_obj, const struct io_attrs *mgr_attrts) { struct bridge_drv_interface *intf_fxns; struct io_mgr *hio_mgr = NULL; struct io_mgr_ *pio_mgr = NULL; int status = 0; *io_man = NULL; /* A memory base of 0 implies no memory base: */ if ((mgr_attrts->shm_base != 0) && (mgr_attrts->sm_length == 0)) status = -EINVAL; if (mgr_attrts->word_size == 0) status = -EINVAL; if (!status) { dev_get_intf_fxns(hdev_obj, &intf_fxns); /* Let Bridge channel module finish the create: */ status = (*intf_fxns->io_create) (&hio_mgr, hdev_obj, mgr_attrts); if (!status) { pio_mgr = (struct io_mgr_ *)hio_mgr; pio_mgr->intf_fxns = intf_fxns; pio_mgr->dev_obj = hdev_obj; /* Return the new channel manager handle: */ *io_man = hio_mgr; } } return status; } /* * ======== io_destroy ======== * Purpose: * Delete IO manager. 
*/ int io_destroy(struct io_mgr *hio_mgr) { struct bridge_drv_interface *intf_fxns; struct io_mgr_ *pio_mgr = (struct io_mgr_ *)hio_mgr; int status; intf_fxns = pio_mgr->intf_fxns; /* Let Bridge channel module destroy the io_mgr: */ status = (*intf_fxns->io_destroy) (hio_mgr); return status; }
gpl-2.0
redmi/android_kernel_HM2014811
sound/pci/echoaudio/mia.c
8080
3487
/* * ALSA driver for Echoaudio soundcards. * Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define ECHO24_FAMILY #define ECHOCARD_MIA #define ECHOCARD_NAME "Mia" #define ECHOCARD_HAS_MONITOR #define ECHOCARD_HAS_INPUT_NOMINAL_LEVEL #define ECHOCARD_HAS_OUTPUT_NOMINAL_LEVEL #define ECHOCARD_HAS_SUPER_INTERLEAVE #define ECHOCARD_HAS_VMIXER #define ECHOCARD_HAS_DIGITAL_IO #define ECHOCARD_HAS_EXTERNAL_CLOCK #define ECHOCARD_HAS_ADAT FALSE #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 #define ECHOCARD_HAS_MIDI #define ECHOCARD_HAS_LINE_OUT_GAIN /* Pipe indexes */ #define PX_ANALOG_OUT 0 /* 8 */ #define PX_DIGITAL_OUT 8 /* 0 */ #define PX_ANALOG_IN 8 /* 2 */ #define PX_DIGITAL_IN 10 /* 2 */ #define PX_NUM 12 /* Bus indexes */ #define BX_ANALOG_OUT 0 /* 2 */ #define BX_DIGITAL_OUT 2 /* 2 */ #define BX_ANALOG_IN 4 /* 2 */ #define BX_DIGITAL_IN 6 /* 2 */ #define BX_NUM 8 #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include <asm/io.h> #include <linux/atomic.h> 
#include "echoaudio.h" MODULE_FIRMWARE("ea/loader_dsp.fw"); MODULE_FIRMWARE("ea/mia_dsp.fw"); #define FW_361_LOADER 0 #define FW_MIA_DSP 1 static const struct firmware card_fw[] = { {0, "loader_dsp.fw"}, {0, "mia_dsp.fw"} }; static DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = { {0x1057, 0x3410, 0xECC0, 0x0080, 0, 0, 0}, /* DSP 56361 Mia rev.0 */ {0x1057, 0x3410, 0xECC0, 0x0081, 0, 0, 0}, /* DSP 56361 Mia rev.1 */ {0,} }; static struct snd_pcm_hardware pcm_hardware_skel = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START, .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000, .rate_min = 8000, .rate_max = 96000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = 262144, .period_bytes_min = 32, .period_bytes_max = 131072, .periods_min = 2, .periods_max = 220, /* One page (4k) contains 512 instructions. I don't know if the hw supports lists longer than this. In this case periods_max=220 is a safe limit to make sure the list never exceeds 512 instructions. */ }; #include "mia_dsp.c" #include "echoaudio_dsp.c" #include "echoaudio.c" #include "midi.c"
gpl-2.0
emxys1/imx6rex-bombardier-base-linux-3.10.17
arch/m68k/amiga/amiints.c
8592
4218
/* * Amiga Linux interrupt handling code * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/irq.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/amigahw.h> #include <asm/amigaints.h> #include <asm/amipcmcia.h> /* * Enable/disable a particular machine specific interrupt source. * Note that this may affect other interrupts in case of a shared interrupt. * This function should only be called for a _very_ short time to change some * internal data, that may not be changed by the interrupt at the same time. */ static void amiga_irq_enable(struct irq_data *data) { amiga_custom.intena = IF_SETCLR | (1 << (data->irq - IRQ_USER)); } static void amiga_irq_disable(struct irq_data *data) { amiga_custom.intena = 1 << (data->irq - IRQ_USER); } static struct irq_chip amiga_irq_chip = { .name = "amiga", .irq_enable = amiga_irq_enable, .irq_disable = amiga_irq_disable, }; /* * The builtin Amiga hardware interrupt handlers. 
*/ static void ami_int1(unsigned int irq, struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if serial transmit buffer empty, interrupt */ if (ints & IF_TBE) { amiga_custom.intreq = IF_TBE; generic_handle_irq(IRQ_AMIGA_TBE); } /* if floppy disk transfer complete, interrupt */ if (ints & IF_DSKBLK) { amiga_custom.intreq = IF_DSKBLK; generic_handle_irq(IRQ_AMIGA_DSKBLK); } /* if software interrupt set, interrupt */ if (ints & IF_SOFT) { amiga_custom.intreq = IF_SOFT; generic_handle_irq(IRQ_AMIGA_SOFT); } } static void ami_int3(unsigned int irq, struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if a blitter interrupt */ if (ints & IF_BLIT) { amiga_custom.intreq = IF_BLIT; generic_handle_irq(IRQ_AMIGA_BLIT); } /* if a copper interrupt */ if (ints & IF_COPER) { amiga_custom.intreq = IF_COPER; generic_handle_irq(IRQ_AMIGA_COPPER); } /* if a vertical blank interrupt */ if (ints & IF_VERTB) { amiga_custom.intreq = IF_VERTB; generic_handle_irq(IRQ_AMIGA_VERTB); } } static void ami_int4(unsigned int irq, struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if audio 0 interrupt */ if (ints & IF_AUD0) { amiga_custom.intreq = IF_AUD0; generic_handle_irq(IRQ_AMIGA_AUD0); } /* if audio 1 interrupt */ if (ints & IF_AUD1) { amiga_custom.intreq = IF_AUD1; generic_handle_irq(IRQ_AMIGA_AUD1); } /* if audio 2 interrupt */ if (ints & IF_AUD2) { amiga_custom.intreq = IF_AUD2; generic_handle_irq(IRQ_AMIGA_AUD2); } /* if audio 3 interrupt */ if (ints & IF_AUD3) { amiga_custom.intreq = IF_AUD3; generic_handle_irq(IRQ_AMIGA_AUD3); } } static void ami_int5(unsigned int irq, struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if serial receive buffer full interrupt */ if (ints & IF_RBF) { /* acknowledge of IF_RBF must be done by the serial interrupt */ generic_handle_irq(IRQ_AMIGA_RBF); } /* if a disk sync interrupt */ if 
(ints & IF_DSKSYN) { amiga_custom.intreq = IF_DSKSYN; generic_handle_irq(IRQ_AMIGA_DSKSYN); } } /* * void amiga_init_IRQ(void) * * Parameters: None * * Returns: Nothing * * This function should be called during kernel startup to initialize * the amiga IRQ handling routines. */ void __init amiga_init_IRQ(void) { m68k_setup_irq_controller(&amiga_irq_chip, handle_simple_irq, IRQ_USER, AMI_STD_IRQS); irq_set_chained_handler(IRQ_AUTO_1, ami_int1); irq_set_chained_handler(IRQ_AUTO_3, ami_int3); irq_set_chained_handler(IRQ_AUTO_4, ami_int4); irq_set_chained_handler(IRQ_AUTO_5, ami_int5); /* turn off PCMCIA interrupts */ if (AMIGAHW_PRESENT(PCMCIA)) gayle.inten = GAYLE_IRQ_IDE; /* turn off all interrupts and enable the master interrupt bit */ amiga_custom.intena = 0x7fff; amiga_custom.intreq = 0x7fff; amiga_custom.intena = IF_SETCLR | IF_INTEN; cia_init_IRQ(&ciaa_base); cia_init_IRQ(&ciab_base); }
gpl-2.0
ruslan250283/alcatel_5042
arch/m68k/amiga/amiints.c
8592
4218
/* * Amiga Linux interrupt handling code * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/irq.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/amigahw.h> #include <asm/amigaints.h> #include <asm/amipcmcia.h> /* * Enable/disable a particular machine specific interrupt source. * Note that this may affect other interrupts in case of a shared interrupt. * This function should only be called for a _very_ short time to change some * internal data, that may not be changed by the interrupt at the same time. */ static void amiga_irq_enable(struct irq_data *data) { amiga_custom.intena = IF_SETCLR | (1 << (data->irq - IRQ_USER)); } static void amiga_irq_disable(struct irq_data *data) { amiga_custom.intena = 1 << (data->irq - IRQ_USER); } static struct irq_chip amiga_irq_chip = { .name = "amiga", .irq_enable = amiga_irq_enable, .irq_disable = amiga_irq_disable, }; /* * The builtin Amiga hardware interrupt handlers. 
*/ static void ami_int1(unsigned int irq, struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if serial transmit buffer empty, interrupt */ if (ints & IF_TBE) { amiga_custom.intreq = IF_TBE; generic_handle_irq(IRQ_AMIGA_TBE); } /* if floppy disk transfer complete, interrupt */ if (ints & IF_DSKBLK) { amiga_custom.intreq = IF_DSKBLK; generic_handle_irq(IRQ_AMIGA_DSKBLK); } /* if software interrupt set, interrupt */ if (ints & IF_SOFT) { amiga_custom.intreq = IF_SOFT; generic_handle_irq(IRQ_AMIGA_SOFT); } } static void ami_int3(unsigned int irq, struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if a blitter interrupt */ if (ints & IF_BLIT) { amiga_custom.intreq = IF_BLIT; generic_handle_irq(IRQ_AMIGA_BLIT); } /* if a copper interrupt */ if (ints & IF_COPER) { amiga_custom.intreq = IF_COPER; generic_handle_irq(IRQ_AMIGA_COPPER); } /* if a vertical blank interrupt */ if (ints & IF_VERTB) { amiga_custom.intreq = IF_VERTB; generic_handle_irq(IRQ_AMIGA_VERTB); } } static void ami_int4(unsigned int irq, struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if audio 0 interrupt */ if (ints & IF_AUD0) { amiga_custom.intreq = IF_AUD0; generic_handle_irq(IRQ_AMIGA_AUD0); } /* if audio 1 interrupt */ if (ints & IF_AUD1) { amiga_custom.intreq = IF_AUD1; generic_handle_irq(IRQ_AMIGA_AUD1); } /* if audio 2 interrupt */ if (ints & IF_AUD2) { amiga_custom.intreq = IF_AUD2; generic_handle_irq(IRQ_AMIGA_AUD2); } /* if audio 3 interrupt */ if (ints & IF_AUD3) { amiga_custom.intreq = IF_AUD3; generic_handle_irq(IRQ_AMIGA_AUD3); } } static void ami_int5(unsigned int irq, struct irq_desc *desc) { unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar; /* if serial receive buffer full interrupt */ if (ints & IF_RBF) { /* acknowledge of IF_RBF must be done by the serial interrupt */ generic_handle_irq(IRQ_AMIGA_RBF); } /* if a disk sync interrupt */ if 
(ints & IF_DSKSYN) { amiga_custom.intreq = IF_DSKSYN; generic_handle_irq(IRQ_AMIGA_DSKSYN); } } /* * void amiga_init_IRQ(void) * * Parameters: None * * Returns: Nothing * * This function should be called during kernel startup to initialize * the amiga IRQ handling routines. */ void __init amiga_init_IRQ(void) { m68k_setup_irq_controller(&amiga_irq_chip, handle_simple_irq, IRQ_USER, AMI_STD_IRQS); irq_set_chained_handler(IRQ_AUTO_1, ami_int1); irq_set_chained_handler(IRQ_AUTO_3, ami_int3); irq_set_chained_handler(IRQ_AUTO_4, ami_int4); irq_set_chained_handler(IRQ_AUTO_5, ami_int5); /* turn off PCMCIA interrupts */ if (AMIGAHW_PRESENT(PCMCIA)) gayle.inten = GAYLE_IRQ_IDE; /* turn off all interrupts and enable the master interrupt bit */ amiga_custom.intena = 0x7fff; amiga_custom.intreq = 0x7fff; amiga_custom.intena = IF_SETCLR | IF_INTEN; cia_init_IRQ(&ciaa_base); cia_init_IRQ(&ciab_base); }
gpl-2.0
hoonir/iamroot_hypstudy_5th
arch/ia64/sn/kernel/msi_sn.c
10128
5864
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2006 Silicon Graphics, Inc. All Rights Reserved. */ #include <linux/types.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/cpumask.h> #include <linux/msi.h> #include <linux/slab.h> #include <asm/sn/addrs.h> #include <asm/sn/intr.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> #include <asm/sn/nodepda.h> struct sn_msi_info { u64 pci_addr; struct sn_irq_info *sn_irq_info; }; static struct sn_msi_info sn_msi_info[NR_IRQS]; static struct irq_chip sn_msi_chip; void sn_teardown_msi_irq(unsigned int irq) { nasid_t nasid; int widget; struct pci_dev *pdev; struct pcidev_info *sn_pdev; struct sn_irq_info *sn_irq_info; struct pcibus_bussoft *bussoft; struct sn_pcibus_provider *provider; sn_irq_info = sn_msi_info[irq].sn_irq_info; if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) return; sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; pdev = sn_pdev->pdi_linux_pcidev; provider = SN_PCIDEV_BUSPROVIDER(pdev); (*provider->dma_unmap)(pdev, sn_msi_info[irq].pci_addr, PCI_DMA_FROMDEVICE); sn_msi_info[irq].pci_addr = 0; bussoft = SN_PCIDEV_BUSSOFT(pdev); nasid = NASID_GET(bussoft->bs_base); widget = (nasid & 1) ? 
TIO_SWIN_WIDGETNUM(bussoft->bs_base) : SWIN_WIDGETNUM(bussoft->bs_base); sn_intr_free(nasid, widget, sn_irq_info); sn_msi_info[irq].sn_irq_info = NULL; destroy_irq(irq); } int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) { struct msi_msg msg; int widget; int status; nasid_t nasid; u64 bus_addr; struct sn_irq_info *sn_irq_info; struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); int irq; if (!entry->msi_attrib.is_64) return -EINVAL; if (bussoft == NULL) return -EINVAL; if (provider == NULL || provider->dma_map_consistent == NULL) return -EINVAL; irq = create_irq(); if (irq < 0) return irq; /* * Set up the vector plumbing. Let the prom (via sn_intr_alloc) * decide which cpu to direct this msi at by default. */ nasid = NASID_GET(bussoft->bs_base); widget = (nasid & 1) ? TIO_SWIN_WIDGETNUM(bussoft->bs_base) : SWIN_WIDGETNUM(bussoft->bs_base); sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); if (! sn_irq_info) { destroy_irq(irq); return -ENOMEM; } status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1); if (status) { kfree(sn_irq_info); destroy_irq(irq); return -ENOMEM; } sn_irq_info->irq_int_bit = -1; /* mark this as an MSI irq */ sn_irq_fixup(pdev, sn_irq_info); /* Prom probably should fill these in, but doesn't ... */ sn_irq_info->irq_bridge_type = bussoft->bs_asic_type; sn_irq_info->irq_bridge = (void *)bussoft->bs_base; /* * Map the xio address into bus space */ bus_addr = (*provider->dma_map_consistent)(pdev, sn_irq_info->irq_xtalkaddr, sizeof(sn_irq_info->irq_xtalkaddr), SN_DMA_MSI|SN_DMA_ADDR_XIO); if (! 
bus_addr) { sn_intr_free(nasid, widget, sn_irq_info); kfree(sn_irq_info); destroy_irq(irq); return -ENOMEM; } sn_msi_info[irq].sn_irq_info = sn_irq_info; sn_msi_info[irq].pci_addr = bus_addr; msg.address_hi = (u32)(bus_addr >> 32); msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); /* * In the SN platform, bit 16 is a "send vector" bit which * must be present in order to move the vector through the system. */ msg.data = 0x100 + irq; irq_set_msi_desc(irq, entry); write_msi_msg(irq, &msg); irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); return 0; } #ifdef CONFIG_SMP static int sn_set_msi_irq_affinity(struct irq_data *data, const struct cpumask *cpu_mask, bool force) { struct msi_msg msg; int slice; nasid_t nasid; u64 bus_addr; struct pci_dev *pdev; struct pcidev_info *sn_pdev; struct sn_irq_info *sn_irq_info; struct sn_irq_info *new_irq_info; struct sn_pcibus_provider *provider; unsigned int cpu, irq = data->irq; cpu = cpumask_first(cpu_mask); sn_irq_info = sn_msi_info[irq].sn_irq_info; if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) return -1; /* * Release XIO resources for the old MSI PCI address */ get_cached_msi_msg(irq, &msg); sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; pdev = sn_pdev->pdi_linux_pcidev; provider = SN_PCIDEV_BUSPROVIDER(pdev); bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo); (*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE); sn_msi_info[irq].pci_addr = 0; nasid = cpuid_to_nasid(cpu); slice = cpuid_to_slice(cpu); new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice); sn_msi_info[irq].sn_irq_info = new_irq_info; if (new_irq_info == NULL) return -1; /* * Map the xio address into bus space */ bus_addr = (*provider->dma_map_consistent)(pdev, new_irq_info->irq_xtalkaddr, sizeof(new_irq_info->irq_xtalkaddr), SN_DMA_MSI|SN_DMA_ADDR_XIO); sn_msi_info[irq].pci_addr = bus_addr; msg.address_hi = (u32)(bus_addr >> 32); msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); 
write_msi_msg(irq, &msg); cpumask_copy(data->affinity, cpu_mask); return 0; } #endif /* CONFIG_SMP */ static void sn_ack_msi_irq(struct irq_data *data) { irq_move_irq(data); ia64_eoi(); } static int sn_msi_retrigger_irq(struct irq_data *data) { unsigned int vector = data->irq; ia64_resend_irq(vector); return 1; } static struct irq_chip sn_msi_chip = { .name = "PCI-MSI", .irq_mask = mask_msi_irq, .irq_unmask = unmask_msi_irq, .irq_ack = sn_ack_msi_irq, #ifdef CONFIG_SMP .irq_set_affinity = sn_set_msi_irq_affinity, #endif .irq_retrigger = sn_msi_retrigger_irq, };
gpl-2.0
skinner12/SkeRneL
drivers/media/dvb/ngene/ngene-i2c.c
12944
4470
/* * ngene-i2c.c: nGene PCIe bridge driver i2c functions * * Copyright (C) 2005-2007 Micronas * * Copyright (C) 2008-2009 Ralph Metzler <rjkm@metzlerbros.de> * Modifications for new nGene firmware, * support for EEPROM-copying, * support for new dual DVB-S2 card prototype * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 only, as published by the Free Software Foundation. * * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA * Or, point your browser to http://www.gnu.org/copyleft/gpl.html */ /* FIXME - some of these can probably be removed */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/io.h> #include <asm/div64.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/timer.h> #include <linux/byteorder/generic.h> #include <linux/firmware.h> #include <linux/vmalloc.h> #include "ngene.h" /* Firmware command for i2c operations */ static int ngene_command_i2c_read(struct ngene *dev, u8 adr, u8 *out, u8 outlen, u8 *in, u8 inlen, int flag) { struct ngene_command com; com.cmd.hdr.Opcode = CMD_I2C_READ; com.cmd.hdr.Length = outlen + 3; com.cmd.I2CRead.Device = adr << 1; memcpy(com.cmd.I2CRead.Data, out, outlen); com.cmd.I2CRead.Data[outlen] = inlen; com.cmd.I2CRead.Data[outlen + 1] = 0; com.in_len = outlen + 3; com.out_len = inlen + 1; if (ngene_command(dev, &com) < 0) return -EIO; if ((com.cmd.raw8[0] >> 1) != adr) return -EIO; if (flag) memcpy(in, com.cmd.raw8, inlen + 1); else 
memcpy(in, com.cmd.raw8 + 1, inlen); return 0; } static int ngene_command_i2c_write(struct ngene *dev, u8 adr, u8 *out, u8 outlen) { struct ngene_command com; com.cmd.hdr.Opcode = CMD_I2C_WRITE; com.cmd.hdr.Length = outlen + 1; com.cmd.I2CRead.Device = adr << 1; memcpy(com.cmd.I2CRead.Data, out, outlen); com.in_len = outlen + 1; com.out_len = 1; if (ngene_command(dev, &com) < 0) return -EIO; if (com.cmd.raw8[0] == 1) return -EIO; return 0; } static void ngene_i2c_set_bus(struct ngene *dev, int bus) { if (!(dev->card_info->i2c_access & 2)) return; if (dev->i2c_current_bus == bus) return; switch (bus) { case 0: ngene_command_gpio_set(dev, 3, 0); ngene_command_gpio_set(dev, 2, 1); break; case 1: ngene_command_gpio_set(dev, 2, 0); ngene_command_gpio_set(dev, 3, 1); break; } dev->i2c_current_bus = bus; } static int ngene_i2c_master_xfer(struct i2c_adapter *adapter, struct i2c_msg msg[], int num) { struct ngene_channel *chan = (struct ngene_channel *)i2c_get_adapdata(adapter); struct ngene *dev = chan->dev; down(&dev->i2c_switch_mutex); ngene_i2c_set_bus(dev, chan->number); if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD)) if (!ngene_command_i2c_read(dev, msg[0].addr, msg[0].buf, msg[0].len, msg[1].buf, msg[1].len, 0)) goto done; if (num == 1 && !(msg[0].flags & I2C_M_RD)) if (!ngene_command_i2c_write(dev, msg[0].addr, msg[0].buf, msg[0].len)) goto done; if (num == 1 && (msg[0].flags & I2C_M_RD)) if (!ngene_command_i2c_read(dev, msg[0].addr, NULL, 0, msg[0].buf, msg[0].len, 0)) goto done; up(&dev->i2c_switch_mutex); return -EIO; done: up(&dev->i2c_switch_mutex); return num; } static u32 ngene_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm ngene_i2c_algo = { .master_xfer = ngene_i2c_master_xfer, .functionality = ngene_i2c_functionality, }; int ngene_i2c_init(struct ngene *dev, int dev_nr) { struct i2c_adapter *adap = &(dev->channel[dev_nr].i2c_adapter); i2c_set_adapdata(adap, 
&(dev->channel[dev_nr])); strcpy(adap->name, "nGene"); adap->algo = &ngene_i2c_algo; adap->algo_data = (void *)&(dev->channel[dev_nr]); adap->dev.parent = &dev->pci_dev->dev; return i2c_add_adapter(adap); }
gpl-2.0
primiano/edison-kernel
arch/mn10300/unit-asb2303/leds.c
13712
1471
/* ASB2303 peripheral 7-segment LEDs x1 support * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/param.h> #include <linux/init.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/intctl-regs.h> #include <asm/rtc-regs.h> #include <unit/leds.h> #if 0 static const u8 asb2303_led_hex_tbl[16] = { 0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0, 0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c }; #endif static const u8 asb2303_led_chase_tbl[6] = { ~0x02, /* top - segA */ ~0x04, /* right top - segB */ ~0x08, /* right bottom - segC */ ~0x10, /* bottom - segD */ ~0x20, /* left bottom - segE */ ~0x40, /* left top - segF */ }; static unsigned asb2303_led_chase; void peripheral_leds_display_exception(enum exception_code code) { ASB2303_GPIO0DEF = 0x5555; /* configure as an output port */ ASB2303_7SEGLEDS = 0x6d; /* triple horizontal bar */ } void peripheral_leds_led_chase(void) { ASB2303_GPIO0DEF = 0x5555; /* configure as an output port */ ASB2303_7SEGLEDS = asb2303_led_chase_tbl[asb2303_led_chase]; asb2303_led_chase++; if (asb2303_led_chase >= 6) asb2303_led_chase = 0; }
gpl-2.0
neobuddy89/android_kernel_cyanogen_msm8916
sound/soc/msm/msm-audio-pinctrl.c
145
9214
/* Copyright (c) 2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> #include "msm-audio-pinctrl.h" /* * pinctrl -- handle to query pinctrl apis * cdc lines -- stores pinctrl handles for pinctrl states * active_set -- maintain the overall pinctrl state */ struct cdc_pinctrl_info { struct pinctrl *pinctrl; struct pinctrl_state **cdc_lines; int active_set; }; /* * gpiosets -- stores all gpiosets mentioned in dtsi file * gpiosets_comb_names -- stores all possible gpioset combinations * gpioset_state -- maintains counter for each gpioset * gpiosets_max -- maintain the total supported gpiosets * gpiosets_comb_max -- maintain the total gpiosets combinations */ struct cdc_gpioset_info { char **gpiosets; char **gpiosets_comb_names; uint8_t *gpioset_state; int gpiosets_max; int gpiosets_comb_max; }; static struct cdc_pinctrl_info pinctrl_info[MAX_PINCTRL_CLIENT]; static struct cdc_gpioset_info gpioset_info[MAX_PINCTRL_CLIENT]; /* Finds the index for the gpio set in the dtsi file */ int msm_get_gpioset_index(enum pinctrl_client client, char *keyword) { int i; for (i = 0; i < gpioset_info[client].gpiosets_max; i++) { if (!(strcmp(gpioset_info[client].gpiosets[i], keyword))) break; } /* Checking if the keyword is present in dtsi or not */ if (i != gpioset_info[client].gpiosets_max) return i; else return -EINVAL; } /* * This function reads the following from dtsi file * 1. All gpio sets * 2. All combinations of gpio sets * 3. 
Pinctrl handles to gpio sets * * Returns error if there is * 1. Problem reading from dtsi file * 2. Memory allocation failure */ int msm_gpioset_initialize(enum pinctrl_client client, struct device *dev) { struct pinctrl *pinctrl; const char *gpioset_names = "qcom,msm-gpios"; const char *gpioset_combinations = "qcom,pinctrl-names"; const char *gpioset_names_str = NULL; const char *gpioset_comb_str = NULL; int num_strings = 0; int ret = 0; int i = 0; pr_debug("%s\n", __func__); pinctrl = devm_pinctrl_get(dev); if (IS_ERR(pinctrl)) { pr_err("%s: Unable to get pinctrl handle\n", __func__); return -EINVAL; } pinctrl_info[client].pinctrl = pinctrl; /* Reading of gpio sets */ num_strings = of_property_count_strings(dev->of_node, gpioset_names); if (num_strings < 0) { dev_err(dev, "%s: missing %s in dt node or length is incorrect\n", __func__, gpioset_names); goto err; } gpioset_info[client].gpiosets_max = num_strings; gpioset_info[client].gpiosets = devm_kzalloc(dev, gpioset_info[client].gpiosets_max * sizeof(char *), GFP_KERNEL); if (!gpioset_info[client].gpiosets) { dev_err(dev, "Can't allocate memory for gpio set names\n"); ret = -ENOMEM; goto err; } for (i = 0; i < num_strings; i++) { ret = of_property_read_string_index(dev->of_node, gpioset_names, i, &gpioset_names_str); gpioset_info[client].gpiosets[i] = devm_kzalloc(dev, (strlen(gpioset_names_str) + 1), GFP_KERNEL); if (!gpioset_info[client].gpiosets[i]) { dev_err(dev, "%s: Can't allocate gpiosets[%d] data\n", __func__, i); ret = -ENOMEM; goto err; } strlcpy(gpioset_info[client].gpiosets[i], gpioset_names_str, strlen(gpioset_names_str)+1); gpioset_names_str = NULL; } num_strings = 0; /* Allocating memory for gpio set counter */ gpioset_info[client].gpioset_state = devm_kzalloc(dev, gpioset_info[client].gpiosets_max * sizeof(uint8_t), GFP_KERNEL); if (!gpioset_info[client].gpioset_state) { dev_err(dev, "Can't allocate memory for gpio set counter\n"); ret = -ENOMEM; goto err; } /* Reading of all combinations of gpio 
sets */ num_strings = of_property_count_strings(dev->of_node, gpioset_combinations); if (num_strings < 0) { dev_err(dev, "%s: missing %s in dt node or length is incorrect\n", __func__, gpioset_combinations); goto err; } gpioset_info[client].gpiosets_comb_max = num_strings; gpioset_info[client].gpiosets_comb_names = devm_kzalloc(dev, num_strings * sizeof(char *), GFP_KERNEL); if (!gpioset_info[client].gpiosets_comb_names) { dev_err(dev, "Can't allocate gpio set combination names data\n"); ret = -ENOMEM; goto err; } for (i = 0; i < gpioset_info[client].gpiosets_comb_max; i++) { ret = of_property_read_string_index(dev->of_node, gpioset_combinations, i, &gpioset_comb_str); gpioset_info[client].gpiosets_comb_names[i] = devm_kzalloc(dev, (strlen(gpioset_comb_str) + 1), GFP_KERNEL); if (!gpioset_info[client].gpiosets_comb_names[i]) { dev_err(dev, "%s: Can't allocate combinations[%d] data\n", __func__, i); ret = -ENOMEM; goto err; } strlcpy(gpioset_info[client].gpiosets_comb_names[i], gpioset_comb_str, strlen(gpioset_comb_str)+1); pr_debug("%s: GPIO configuration %s\n", __func__, gpioset_info[client].gpiosets_comb_names[i]); gpioset_comb_str = NULL; } /* Allocating memory for handles to pinctrl states */ pinctrl_info[client].cdc_lines = devm_kzalloc(dev, num_strings * sizeof(char *), GFP_KERNEL); if (!pinctrl_info[client].cdc_lines) { dev_err(dev, "Can't allocate pinctrl_info.cdc_lines data\n"); ret = -ENOMEM; goto err; } /* Get pinctrl handles for gpio sets in dtsi file */ for (i = 0; i < num_strings; i++) { pinctrl_info[client].cdc_lines[i] = pinctrl_lookup_state( pinctrl, (const char *)gpioset_info[client]. gpiosets_comb_names[i]); if (IS_ERR(pinctrl_info[client].cdc_lines[i])) pr_err("%s: Unable to get pinctrl handle for %s\n", __func__, gpioset_info[client]. 
gpiosets_comb_names[i]); } goto success; err: /* Free up memory allocated for gpio set combinations */ for (i = 0; i < gpioset_info[client].gpiosets_max; i++) { if (NULL != gpioset_info[client].gpiosets[i]) devm_kfree(dev, gpioset_info[client].gpiosets[i]); } if (NULL != gpioset_info[client].gpiosets) devm_kfree(dev, gpioset_info[client].gpiosets); /* Free up memory allocated for gpio set combinations */ for (i = 0; i < gpioset_info[client].gpiosets_comb_max; i++) { if (NULL != gpioset_info[client].gpiosets_comb_names[i]) devm_kfree(dev, gpioset_info[client].gpiosets_comb_names[i]); } if (NULL != gpioset_info[client].gpiosets_comb_names) devm_kfree(dev, gpioset_info[client].gpiosets_comb_names); /* Free up memory allocated for handles to pinctrl states */ if (NULL != pinctrl_info[client].cdc_lines) devm_kfree(dev, pinctrl_info[client].cdc_lines); /* Free up memory allocated for counter of gpio sets */ if (NULL != gpioset_info[client].gpioset_state) devm_kfree(dev, gpioset_info[client].gpioset_state); success: return ret; } int msm_gpioset_activate(enum pinctrl_client client, char *keyword) { int ret = 0; int gp_set = 0; int active_set = 0; gp_set = msm_get_gpioset_index(client, keyword); if (gp_set < 0) { pr_err("%s: gpio set name does not exist\n", __func__); return gp_set; } if (!gpioset_info[client].gpioset_state[gp_set]) { /* * If pinctrl pointer is not valid, * no need to proceed further */ active_set = pinctrl_info[client].active_set; if (IS_ERR(pinctrl_info[client].cdc_lines[active_set])) return 0; pinctrl_info[client].active_set |= (1 << gp_set); active_set = pinctrl_info[client].active_set; pr_debug("%s: pinctrl.active_set: %d\n", __func__, active_set); /* Select the appropriate pinctrl state */ ret = pinctrl_select_state(pinctrl_info[client].pinctrl, pinctrl_info[client].cdc_lines[active_set]); } gpioset_info[client].gpioset_state[gp_set]++; return ret; } int msm_gpioset_suspend(enum pinctrl_client client, char *keyword) { int ret = 0; int gp_set = 0; int 
active_set = 0; gp_set = msm_get_gpioset_index(client, keyword); if (gp_set < 0) { pr_err("%s: gpio set name does not exist\n", __func__); return gp_set; } if (1 == gpioset_info[client].gpioset_state[gp_set]) { pinctrl_info[client].active_set &= ~(1 << gp_set); /* * If pinctrl pointer is not valid, * no need to proceed further */ active_set = pinctrl_info[client].active_set; if (IS_ERR(pinctrl_info[client].cdc_lines[active_set])) return -EINVAL; pr_debug("%s: pinctrl.active_set: %d\n", __func__, pinctrl_info[client].active_set); /* Select the appropriate pinctrl state */ ret = pinctrl_select_state(pinctrl_info[client].pinctrl, pinctrl_info[client].cdc_lines[pinctrl_info[client]. active_set]); } if (!(gpioset_info[client].gpioset_state[gp_set])) { pr_err("%s: Invalid call to de activate gpios: %d\n", __func__, gpioset_info[client].gpioset_state[gp_set]); return -EINVAL; } gpioset_info[client].gpioset_state[gp_set]--; return ret; }
gpl-2.0
hacklu/linux-2.6.39.1
drivers/isdn/i4l/isdn_common.c
401
59669
/* $Id: isdn_common.c,v 1.1.2.3 2004/02/10 01:07:13 keil Exp $ * * Linux ISDN subsystem, common used functions (linklevel). * * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de) * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/isdn.h> #include <linux/mutex.h> #include "isdn_common.h" #include "isdn_tty.h" #include "isdn_net.h" #include "isdn_ppp.h" #ifdef CONFIG_ISDN_AUDIO #include "isdn_audio.h" #endif #ifdef CONFIG_ISDN_DIVERSION_MODULE #define CONFIG_ISDN_DIVERSION #endif #ifdef CONFIG_ISDN_DIVERSION #include <linux/isdn_divertif.h> #endif /* CONFIG_ISDN_DIVERSION */ #include "isdn_v110.h" /* Debugflags */ #undef ISDN_DEBUG_STATCALLB MODULE_DESCRIPTION("ISDN4Linux: link layer"); MODULE_AUTHOR("Fritz Elfert"); MODULE_LICENSE("GPL"); isdn_dev *dev; static DEFINE_MUTEX(isdn_mutex); static char *isdn_revision = "$Revision: 1.1.2.3 $"; extern char *isdn_net_revision; extern char *isdn_tty_revision; #ifdef CONFIG_ISDN_PPP extern char *isdn_ppp_revision; #else static char *isdn_ppp_revision = ": none $"; #endif #ifdef CONFIG_ISDN_AUDIO extern char *isdn_audio_revision; #else static char *isdn_audio_revision = ": none $"; #endif extern char *isdn_v110_revision; #ifdef CONFIG_ISDN_DIVERSION static isdn_divert_if *divert_if; /* = NULL */ #endif /* CONFIG_ISDN_DIVERSION */ static int isdn_writebuf_stub(int, int, const u_char __user *, int); static void set_global_features(void); static int isdn_wildmat(char *s, char *p); static int isdn_add_channels(isdn_driver_t *d, int drvidx, int n, int adding); static inline void isdn_lock_driver(isdn_driver_t *drv) { try_module_get(drv->interface->owner); 
drv->locks++; } void isdn_lock_drivers(void) { int i; for (i = 0; i < ISDN_MAX_DRIVERS; i++) { if (!dev->drv[i]) continue; isdn_lock_driver(dev->drv[i]); } } static inline void isdn_unlock_driver(isdn_driver_t *drv) { if (drv->locks > 0) { drv->locks--; module_put(drv->interface->owner); } } void isdn_unlock_drivers(void) { int i; for (i = 0; i < ISDN_MAX_DRIVERS; i++) { if (!dev->drv[i]) continue; isdn_unlock_driver(dev->drv[i]); } } #if defined(ISDN_DEBUG_NET_DUMP) || defined(ISDN_DEBUG_MODEM_DUMP) void isdn_dumppkt(char *s, u_char * p, int len, int dumplen) { int dumpc; printk(KERN_DEBUG "%s(%d) ", s, len); for (dumpc = 0; (dumpc < dumplen) && (len); len--, dumpc++) printk(" %02x", *p++); printk("\n"); } #endif /* * I picked the pattern-matching-functions from an old GNU-tar version (1.10) * It was originally written and put to PD by rs@mirror.TMC.COM (Rich Salz) */ static int isdn_star(char *s, char *p) { while (isdn_wildmat(s, p)) { if (*++s == '\0') return (2); } return (0); } /* * Shell-type Pattern-matching for incoming caller-Ids * This function gets a string in s and checks, if it matches the pattern * given in p. * * Return: * 0 = match. * 1 = no match. * 2 = no match. Would eventually match, if s would be longer. * * Possible Patterns: * * '?' matches one character * '*' matches zero or more characters * [xyz] matches the set of characters in brackets. * [^xyz] matches any single character not in the set of characters */ static int isdn_wildmat(char *s, char *p) { register int last; register int matched; register int reverse; register int nostar = 1; if (!(*s) && !(*p)) return(1); for (; *p; s++, p++) switch (*p) { case '\\': /* * Literal match with following character, * fall through. */ p++; default: if (*s != *p) return (*s == '\0')?2:1; continue; case '?': /* Match anything. */ if (*s == '\0') return (2); continue; case '*': nostar = 0; /* Trailing star matches everything. */ return (*++p ? isdn_star(s, p) : 0); case '[': /* [^....] 
means inverse character class. */ if ((reverse = (p[1] == '^'))) p++; for (last = 0, matched = 0; *++p && (*p != ']'); last = *p) /* This next line requires a good C compiler. */ if (*p == '-' ? *s <= *++p && *s >= last : *s == *p) matched = 1; if (matched == reverse) return (1); continue; } return (*s == '\0')?0:nostar; } int isdn_msncmp( const char * msn1, const char * msn2 ) { char TmpMsn1[ ISDN_MSNLEN ]; char TmpMsn2[ ISDN_MSNLEN ]; char *p; for ( p = TmpMsn1; *msn1 && *msn1 != ':'; ) // Strip off a SPID *p++ = *msn1++; *p = '\0'; for ( p = TmpMsn2; *msn2 && *msn2 != ':'; ) // Strip off a SPID *p++ = *msn2++; *p = '\0'; return isdn_wildmat( TmpMsn1, TmpMsn2 ); } int isdn_dc2minor(int di, int ch) { int i; for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (dev->chanmap[i] == ch && dev->drvmap[i] == di) return i; return -1; } static int isdn_timer_cnt1 = 0; static int isdn_timer_cnt2 = 0; static int isdn_timer_cnt3 = 0; static void isdn_timer_funct(ulong dummy) { int tf = dev->tflags; if (tf & ISDN_TIMER_FAST) { if (tf & ISDN_TIMER_MODEMREAD) isdn_tty_readmodem(); if (tf & ISDN_TIMER_MODEMPLUS) isdn_tty_modem_escape(); if (tf & ISDN_TIMER_MODEMXMIT) isdn_tty_modem_xmit(); } if (tf & ISDN_TIMER_SLOW) { if (++isdn_timer_cnt1 >= ISDN_TIMER_02SEC) { isdn_timer_cnt1 = 0; if (tf & ISDN_TIMER_NETDIAL) isdn_net_dial(); } if (++isdn_timer_cnt2 >= ISDN_TIMER_1SEC) { isdn_timer_cnt2 = 0; if (tf & ISDN_TIMER_NETHANGUP) isdn_net_autohup(); if (++isdn_timer_cnt3 >= ISDN_TIMER_RINGING) { isdn_timer_cnt3 = 0; if (tf & ISDN_TIMER_MODEMRING) isdn_tty_modem_ring(); } if (tf & ISDN_TIMER_CARRIER) isdn_tty_carrier_timeout(); } } if (tf) mod_timer(&dev->timer, jiffies+ISDN_TIMER_RES); } void isdn_timer_ctrl(int tf, int onoff) { unsigned long flags; int old_tflags; spin_lock_irqsave(&dev->timerlock, flags); if ((tf & ISDN_TIMER_SLOW) && (!(dev->tflags & ISDN_TIMER_SLOW))) { /* If the slow-timer wasn't activated until now */ isdn_timer_cnt1 = 0; isdn_timer_cnt2 = 0; } old_tflags = dev->tflags; 
if (onoff) dev->tflags |= tf; else dev->tflags &= ~tf; if (dev->tflags && !old_tflags) mod_timer(&dev->timer, jiffies+ISDN_TIMER_RES); spin_unlock_irqrestore(&dev->timerlock, flags); } /* * Receive a packet from B-Channel. (Called from low-level-module) */ static void isdn_receive_skb_callback(int di, int channel, struct sk_buff *skb) { int i; if ((i = isdn_dc2minor(di, channel)) == -1) { dev_kfree_skb(skb); return; } /* Update statistics */ dev->ibytes[i] += skb->len; /* First, try to deliver data to network-device */ if (isdn_net_rcv_skb(i, skb)) return; /* V.110 handling * makes sense for async streams only, so it is * called after possible net-device delivery. */ if (dev->v110[i]) { atomic_inc(&dev->v110use[i]); skb = isdn_v110_decode(dev->v110[i], skb); atomic_dec(&dev->v110use[i]); if (!skb) return; } /* No network-device found, deliver to tty or raw-channel */ if (skb->len) { if (isdn_tty_rcv_skb(i, di, channel, skb)) return; wake_up_interruptible(&dev->drv[di]->rcv_waitq[channel]); } else dev_kfree_skb(skb); } /* * Intercept command from Linklevel to Lowlevel. * If layer 2 protocol is V.110 and this is not supported by current * lowlevel-driver, use driver's transparent mode and handle V.110 in * linklevel instead. 
*/ int isdn_command(isdn_ctrl *cmd) { if (cmd->driver == -1) { printk(KERN_WARNING "isdn_command command(%x) driver -1\n", cmd->command); return(1); } if (!dev->drv[cmd->driver]) { printk(KERN_WARNING "isdn_command command(%x) dev->drv[%d] NULL\n", cmd->command, cmd->driver); return(1); } if (!dev->drv[cmd->driver]->interface) { printk(KERN_WARNING "isdn_command command(%x) dev->drv[%d]->interface NULL\n", cmd->command, cmd->driver); return(1); } if (cmd->command == ISDN_CMD_SETL2) { int idx = isdn_dc2minor(cmd->driver, cmd->arg & 255); unsigned long l2prot = (cmd->arg >> 8) & 255; unsigned long features = (dev->drv[cmd->driver]->interface->features >> ISDN_FEATURE_L2_SHIFT) & ISDN_FEATURE_L2_MASK; unsigned long l2_feature = (1 << l2prot); switch (l2prot) { case ISDN_PROTO_L2_V11096: case ISDN_PROTO_L2_V11019: case ISDN_PROTO_L2_V11038: /* If V.110 requested, but not supported by * HL-driver, set emulator-flag and change * Layer-2 to transparent */ if (!(features & l2_feature)) { dev->v110emu[idx] = l2prot; cmd->arg = (cmd->arg & 255) | (ISDN_PROTO_L2_TRANS << 8); } else dev->v110emu[idx] = 0; } } return dev->drv[cmd->driver]->interface->command(cmd); } void isdn_all_eaz(int di, int ch) { isdn_ctrl cmd; if (di < 0) return; cmd.driver = di; cmd.arg = ch; cmd.command = ISDN_CMD_SETEAZ; cmd.parm.num[0] = '\0'; isdn_command(&cmd); } /* * Begin of a CAPI like LL<->HL interface, currently used only for * supplementary service (CAPI 2.0 part III) */ #include <linux/isdn/capicmd.h> static int isdn_capi_rec_hl_msg(capi_msg *cm) { int di; int ch; di = (cm->adr.Controller & 0x7f) -1; ch = isdn_dc2minor(di, (cm->adr.Controller>>8)& 0x7f); switch(cm->Command) { case CAPI_FACILITY: /* in the moment only handled in tty */ return(isdn_tty_capi_facility(cm)); default: return(-1); } } static int isdn_status_callback(isdn_ctrl * c) { int di; u_long flags; int i; int r; int retval = 0; isdn_ctrl cmd; isdn_net_dev *p; di = c->driver; i = isdn_dc2minor(di, c->arg); switch (c->command) { 
case ISDN_STAT_BSENT: if (i < 0) return -1; if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; if (isdn_net_stat_callback(i, c)) return 0; if (isdn_v110_stat_callback(i, c)) return 0; if (isdn_tty_stat_callback(i, c)) return 0; wake_up_interruptible(&dev->drv[di]->snd_waitq[c->arg]); break; case ISDN_STAT_STAVAIL: dev->drv[di]->stavail += c->arg; wake_up_interruptible(&dev->drv[di]->st_waitq); break; case ISDN_STAT_RUN: dev->drv[di]->flags |= DRV_FLAG_RUNNING; for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (dev->drvmap[i] == di) isdn_all_eaz(di, dev->chanmap[i]); set_global_features(); break; case ISDN_STAT_STOP: dev->drv[di]->flags &= ~DRV_FLAG_RUNNING; break; case ISDN_STAT_ICALL: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "ICALL (net): %d %ld %s\n", di, c->arg, c->parm.num); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) { cmd.driver = di; cmd.arg = c->arg; cmd.command = ISDN_CMD_HANGUP; isdn_command(&cmd); return 0; } /* Try to find a network-interface which will accept incoming call */ r = ((c->command == ISDN_STAT_ICALLW) ? 0 : isdn_net_find_icall(di, c->arg, i, &c->parm.setup)); switch (r) { case 0: /* No network-device replies. * Try ttyI's. * These return 0 on no match, 1 on match and * 3 on eventually match, if CID is longer. 
*/ if (c->command == ISDN_STAT_ICALL) if ((retval = isdn_tty_find_icall(di, c->arg, &c->parm.setup))) return(retval); #ifdef CONFIG_ISDN_DIVERSION if (divert_if) if ((retval = divert_if->stat_callback(c))) return(retval); /* processed */ #endif /* CONFIG_ISDN_DIVERSION */ if ((!retval) && (dev->drv[di]->flags & DRV_FLAG_REJBUS)) { /* No tty responding */ cmd.driver = di; cmd.arg = c->arg; cmd.command = ISDN_CMD_HANGUP; isdn_command(&cmd); retval = 2; } break; case 1: /* Schedule connection-setup */ isdn_net_dial(); cmd.driver = di; cmd.arg = c->arg; cmd.command = ISDN_CMD_ACCEPTD; for ( p = dev->netdev; p; p = p->next ) if ( p->local->isdn_channel == cmd.arg ) { strcpy( cmd.parm.setup.eazmsn, p->local->msn ); isdn_command(&cmd); retval = 1; break; } break; case 2: /* For calling back, first reject incoming call ... */ case 3: /* Interface found, but down, reject call actively */ retval = 2; printk(KERN_INFO "isdn: Rejecting Call\n"); cmd.driver = di; cmd.arg = c->arg; cmd.command = ISDN_CMD_HANGUP; isdn_command(&cmd); if (r == 3) break; /* Fall through */ case 4: /* ... then start callback. 
*/ isdn_net_dial(); break; case 5: /* Number would eventually match, if longer */ retval = 3; break; } #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "ICALL: ret=%d\n", retval); #endif return retval; break; case ISDN_STAT_CINF: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "CINF: %ld %s\n", c->arg, c->parm.num); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; if (strcmp(c->parm.num, "0")) isdn_net_stat_callback(i, c); isdn_tty_stat_callback(i, c); break; case ISDN_STAT_CAUSE: #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "CAUSE: %ld %s\n", c->arg, c->parm.num); #endif printk(KERN_INFO "isdn: %s,ch%ld cause: %s\n", dev->drvid[di], c->arg, c->parm.num); isdn_tty_stat_callback(i, c); #ifdef CONFIG_ISDN_DIVERSION if (divert_if) divert_if->stat_callback(c); #endif /* CONFIG_ISDN_DIVERSION */ break; case ISDN_STAT_DISPLAY: #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "DISPLAY: %ld %s\n", c->arg, c->parm.display); #endif isdn_tty_stat_callback(i, c); #ifdef CONFIG_ISDN_DIVERSION if (divert_if) divert_if->stat_callback(c); #endif /* CONFIG_ISDN_DIVERSION */ break; case ISDN_STAT_DCONN: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "DCONN: %ld\n", c->arg); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; /* Find any net-device, waiting for D-channel setup */ if (isdn_net_stat_callback(i, c)) break; isdn_v110_stat_callback(i, c); /* Find any ttyI, waiting for D-channel setup */ if (isdn_tty_stat_callback(i, c)) { cmd.driver = di; cmd.arg = c->arg; cmd.command = ISDN_CMD_ACCEPTB; isdn_command(&cmd); break; } break; case ISDN_STAT_DHUP: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "DHUP: %ld\n", c->arg); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; dev->drv[di]->online &= ~(1 << (c->arg)); isdn_info_update(); /* Signal hangup to network-devices */ if (isdn_net_stat_callback(i, c)) break; isdn_v110_stat_callback(i, c); if (isdn_tty_stat_callback(i, c)) break; #ifdef 
CONFIG_ISDN_DIVERSION if (divert_if) divert_if->stat_callback(c); #endif /* CONFIG_ISDN_DIVERSION */ break; break; case ISDN_STAT_BCONN: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "BCONN: %ld\n", c->arg); #endif /* Signal B-channel-connect to network-devices */ if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; dev->drv[di]->online |= (1 << (c->arg)); isdn_info_update(); if (isdn_net_stat_callback(i, c)) break; isdn_v110_stat_callback(i, c); if (isdn_tty_stat_callback(i, c)) break; break; case ISDN_STAT_BHUP: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "BHUP: %ld\n", c->arg); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; dev->drv[di]->online &= ~(1 << (c->arg)); isdn_info_update(); #ifdef CONFIG_ISDN_X25 /* Signal hangup to network-devices */ if (isdn_net_stat_callback(i, c)) break; #endif isdn_v110_stat_callback(i, c); if (isdn_tty_stat_callback(i, c)) break; break; case ISDN_STAT_NODCH: if (i < 0) return -1; #ifdef ISDN_DEBUG_STATCALLB printk(KERN_DEBUG "NODCH: %ld\n", c->arg); #endif if (dev->global_flags & ISDN_GLOBAL_STOPPED) return 0; if (isdn_net_stat_callback(i, c)) break; if (isdn_tty_stat_callback(i, c)) break; break; case ISDN_STAT_ADDCH: spin_lock_irqsave(&dev->lock, flags); if (isdn_add_channels(dev->drv[di], di, c->arg, 1)) { spin_unlock_irqrestore(&dev->lock, flags); return -1; } spin_unlock_irqrestore(&dev->lock, flags); isdn_info_update(); break; case ISDN_STAT_DISCH: spin_lock_irqsave(&dev->lock, flags); for (i = 0; i < ISDN_MAX_CHANNELS; i++) if ((dev->drvmap[i] == di) && (dev->chanmap[i] == c->arg)) { if (c->parm.num[0]) dev->usage[i] &= ~ISDN_USAGE_DISABLED; else if (USG_NONE(dev->usage[i])) { dev->usage[i] |= ISDN_USAGE_DISABLED; } else retval = -1; break; } spin_unlock_irqrestore(&dev->lock, flags); isdn_info_update(); break; case ISDN_STAT_UNLOAD: while (dev->drv[di]->locks > 0) { isdn_unlock_driver(dev->drv[di]); } spin_lock_irqsave(&dev->lock, flags); 
isdn_tty_stat_callback(i, c); for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (dev->drvmap[i] == di) { dev->drvmap[i] = -1; dev->chanmap[i] = -1; dev->usage[i] &= ~ISDN_USAGE_DISABLED; } dev->drivers--; dev->channels -= dev->drv[di]->channels; kfree(dev->drv[di]->rcverr); kfree(dev->drv[di]->rcvcount); for (i = 0; i < dev->drv[di]->channels; i++) skb_queue_purge(&dev->drv[di]->rpqueue[i]); kfree(dev->drv[di]->rpqueue); kfree(dev->drv[di]->rcv_waitq); kfree(dev->drv[di]); dev->drv[di] = NULL; dev->drvid[di][0] = '\0'; isdn_info_update(); set_global_features(); spin_unlock_irqrestore(&dev->lock, flags); return 0; case ISDN_STAT_L1ERR: break; case CAPI_PUT_MESSAGE: return(isdn_capi_rec_hl_msg(&c->parm.cmsg)); #ifdef CONFIG_ISDN_TTY_FAX case ISDN_STAT_FAXIND: isdn_tty_stat_callback(i, c); break; #endif #ifdef CONFIG_ISDN_AUDIO case ISDN_STAT_AUDIO: isdn_tty_stat_callback(i, c); break; #endif #ifdef CONFIG_ISDN_DIVERSION case ISDN_STAT_PROT: case ISDN_STAT_REDIR: if (divert_if) return(divert_if->stat_callback(c)); #endif /* CONFIG_ISDN_DIVERSION */ default: return -1; } return 0; } /* * Get integer from char-pointer, set pointer to end of number */ int isdn_getnum(char **p) { int v = -1; while (*p[0] >= '0' && *p[0] <= '9') v = ((v < 0) ? 0 : (v * 10)) + (int) ((*p[0]++) - '0'); return v; } #define DLE 0x10 /* * isdn_readbchan() tries to get data from the read-queue. * It MUST be called with interrupts off. * * Be aware that this is not an atomic operation when sleep != 0, even though * interrupts are turned off! Well, like that we are currently only called * on behalf of a read system call on raw device files (which are documented * to be dangerous and for debugging purpose only). The inode semaphore * takes care that this is not called for the same minor device number while * we are sleeping, but access is not serialized against simultaneous read() * from the corresponding ttyI device. 
Can other ugly events, like changes * of the mapping (di,ch)<->minor, happen during the sleep? --he */ int isdn_readbchan(int di, int channel, u_char * buf, u_char * fp, int len, wait_queue_head_t *sleep) { int count; int count_pull; int count_put; int dflag; struct sk_buff *skb; u_char *cp; if (!dev->drv[di]) return 0; if (skb_queue_empty(&dev->drv[di]->rpqueue[channel])) { if (sleep) interruptible_sleep_on(sleep); else return 0; } if (len > dev->drv[di]->rcvcount[channel]) len = dev->drv[di]->rcvcount[channel]; cp = buf; count = 0; while (len) { if (!(skb = skb_peek(&dev->drv[di]->rpqueue[channel]))) break; #ifdef CONFIG_ISDN_AUDIO if (ISDN_AUDIO_SKB_LOCK(skb)) break; ISDN_AUDIO_SKB_LOCK(skb) = 1; if ((ISDN_AUDIO_SKB_DLECOUNT(skb)) || (dev->drv[di]->DLEflag & (1 << channel))) { char *p = skb->data; unsigned long DLEmask = (1 << channel); dflag = 0; count_pull = count_put = 0; while ((count_pull < skb->len) && (len > 0)) { len--; if (dev->drv[di]->DLEflag & DLEmask) { *cp++ = DLE; dev->drv[di]->DLEflag &= ~DLEmask; } else { *cp++ = *p; if (*p == DLE) { dev->drv[di]->DLEflag |= DLEmask; (ISDN_AUDIO_SKB_DLECOUNT(skb))--; } p++; count_pull++; } count_put++; } if (count_pull >= skb->len) dflag = 1; } else { #endif /* No DLE's in buff, so simply copy it */ dflag = 1; if ((count_pull = skb->len) > len) { count_pull = len; dflag = 0; } count_put = count_pull; skb_copy_from_linear_data(skb, cp, count_put); cp += count_put; len -= count_put; #ifdef CONFIG_ISDN_AUDIO } #endif count += count_put; if (fp) { memset(fp, 0, count_put); fp += count_put; } if (dflag) { /* We got all the data in this buff. * Now we can dequeue it. */ if (fp) *(fp - 1) = 0xff; #ifdef CONFIG_ISDN_AUDIO ISDN_AUDIO_SKB_LOCK(skb) = 0; #endif skb = skb_dequeue(&dev->drv[di]->rpqueue[channel]); dev_kfree_skb(skb); } else { /* Not yet emptied this buff, so it * must stay in the queue, for further calls * but we pull off the data we got until now. 
*/ skb_pull(skb, count_pull); #ifdef CONFIG_ISDN_AUDIO ISDN_AUDIO_SKB_LOCK(skb) = 0; #endif } dev->drv[di]->rcvcount[channel] -= count_put; } return count; } /* * isdn_readbchan_tty() tries to get data from the read-queue. * It MUST be called with interrupts off. * * Be aware that this is not an atomic operation when sleep != 0, even though * interrupts are turned off! Well, like that we are currently only called * on behalf of a read system call on raw device files (which are documented * to be dangerous and for debugging purpose only). The inode semaphore * takes care that this is not called for the same minor device number while * we are sleeping, but access is not serialized against simultaneous read() * from the corresponding ttyI device. Can other ugly events, like changes * of the mapping (di,ch)<->minor, happen during the sleep? --he */ int isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack) { int count; int count_pull; int count_put; int dflag; struct sk_buff *skb; char last = 0; int len; if (!dev->drv[di]) return 0; if (skb_queue_empty(&dev->drv[di]->rpqueue[channel])) return 0; len = tty_buffer_request_room(tty, dev->drv[di]->rcvcount[channel]); if(len == 0) return len; count = 0; while (len) { if (!(skb = skb_peek(&dev->drv[di]->rpqueue[channel]))) break; #ifdef CONFIG_ISDN_AUDIO if (ISDN_AUDIO_SKB_LOCK(skb)) break; ISDN_AUDIO_SKB_LOCK(skb) = 1; if ((ISDN_AUDIO_SKB_DLECOUNT(skb)) || (dev->drv[di]->DLEflag & (1 << channel))) { char *p = skb->data; unsigned long DLEmask = (1 << channel); dflag = 0; count_pull = count_put = 0; while ((count_pull < skb->len) && (len > 0)) { /* push every character but the last to the tty buffer directly */ if ( count_put ) tty_insert_flip_char(tty, last, TTY_NORMAL); len--; if (dev->drv[di]->DLEflag & DLEmask) { last = DLE; dev->drv[di]->DLEflag &= ~DLEmask; } else { last = *p; if (last == DLE) { dev->drv[di]->DLEflag |= DLEmask; (ISDN_AUDIO_SKB_DLECOUNT(skb))--; } p++; count_pull++; } 
count_put++; } if (count_pull >= skb->len) dflag = 1; } else { #endif /* No DLE's in buff, so simply copy it */ dflag = 1; if ((count_pull = skb->len) > len) { count_pull = len; dflag = 0; } count_put = count_pull; if(count_put > 1) tty_insert_flip_string(tty, skb->data, count_put - 1); last = skb->data[count_put - 1]; len -= count_put; #ifdef CONFIG_ISDN_AUDIO } #endif count += count_put; if (dflag) { /* We got all the data in this buff. * Now we can dequeue it. */ if(cisco_hack) tty_insert_flip_char(tty, last, 0xFF); else tty_insert_flip_char(tty, last, TTY_NORMAL); #ifdef CONFIG_ISDN_AUDIO ISDN_AUDIO_SKB_LOCK(skb) = 0; #endif skb = skb_dequeue(&dev->drv[di]->rpqueue[channel]); dev_kfree_skb(skb); } else { tty_insert_flip_char(tty, last, TTY_NORMAL); /* Not yet emptied this buff, so it * must stay in the queue, for further calls * but we pull off the data we got until now. */ skb_pull(skb, count_pull); #ifdef CONFIG_ISDN_AUDIO ISDN_AUDIO_SKB_LOCK(skb) = 0; #endif } dev->drv[di]->rcvcount[channel] -= count_put; } return count; } static inline int isdn_minor2drv(int minor) { return (dev->drvmap[minor]); } static inline int isdn_minor2chan(int minor) { return (dev->chanmap[minor]); } static char * isdn_statstr(void) { static char istatbuf[2048]; char *p; int i; sprintf(istatbuf, "idmap:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { sprintf(p, "%s ", (dev->drvmap[i] < 0) ? 
"-" : dev->drvid[dev->drvmap[i]]); p = istatbuf + strlen(istatbuf); } sprintf(p, "\nchmap:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { sprintf(p, "%d ", dev->chanmap[i]); p = istatbuf + strlen(istatbuf); } sprintf(p, "\ndrmap:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { sprintf(p, "%d ", dev->drvmap[i]); p = istatbuf + strlen(istatbuf); } sprintf(p, "\nusage:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { sprintf(p, "%d ", dev->usage[i]); p = istatbuf + strlen(istatbuf); } sprintf(p, "\nflags:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_DRIVERS; i++) { if (dev->drv[i]) { sprintf(p, "%ld ", dev->drv[i]->online); p = istatbuf + strlen(istatbuf); } else { sprintf(p, "? "); p = istatbuf + strlen(istatbuf); } } sprintf(p, "\nphone:\t"); p = istatbuf + strlen(istatbuf); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { sprintf(p, "%s ", dev->num[i]); p = istatbuf + strlen(istatbuf); } sprintf(p, "\n"); return istatbuf; } /* Module interface-code */ void isdn_info_update(void) { infostruct *p = dev->infochain; while (p) { *(p->private) = 1; p = (infostruct *) p->next; } wake_up_interruptible(&(dev->info_waitq)); } static ssize_t isdn_read(struct file *file, char __user *buf, size_t count, loff_t * off) { uint minor = iminor(file->f_path.dentry->d_inode); int len = 0; int drvidx; int chidx; int retval; char *p; mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { if (!file->private_data) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto out; } interruptible_sleep_on(&(dev->info_waitq)); } p = isdn_statstr(); file->private_data = NULL; if ((len = strlen(p)) <= count) { if (copy_to_user(buf, p, len)) { retval = -EFAULT; goto out; } *off += len; retval = len; goto out; } retval = 0; goto out; } if (!dev->drivers) { retval = -ENODEV; goto out; } if (minor <= ISDN_MINOR_BMAX) { printk(KERN_WARNING "isdn_read minor %d obsolete!\n", minor); drvidx = 
isdn_minor2drv(minor); if (drvidx < 0) { retval = -ENODEV; goto out; } if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) { retval = -ENODEV; goto out; } chidx = isdn_minor2chan(minor); if (!(p = kmalloc(count, GFP_KERNEL))) { retval = -ENOMEM; goto out; } len = isdn_readbchan(drvidx, chidx, p, NULL, count, &dev->drv[drvidx]->rcv_waitq[chidx]); *off += len; if (copy_to_user(buf,p,len)) len = -EFAULT; kfree(p); retval = len; goto out; } if (minor <= ISDN_MINOR_CTRLMAX) { drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL); if (drvidx < 0) { retval = -ENODEV; goto out; } if (!dev->drv[drvidx]->stavail) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto out; } interruptible_sleep_on(&(dev->drv[drvidx]->st_waitq)); } if (dev->drv[drvidx]->interface->readstat) { if (count > dev->drv[drvidx]->stavail) count = dev->drv[drvidx]->stavail; len = dev->drv[drvidx]->interface->readstat(buf, count, drvidx, isdn_minor2chan(minor - ISDN_MINOR_CTRL)); if (len < 0) { retval = len; goto out; } } else { len = 0; } if (len) dev->drv[drvidx]->stavail -= len; else dev->drv[drvidx]->stavail = 0; *off += len; retval = len; goto out; } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) { retval = isdn_ppp_read(minor - ISDN_MINOR_PPP, file, buf, count); goto out; } #endif retval = -ENODEV; out: mutex_unlock(&isdn_mutex); return retval; } static ssize_t isdn_write(struct file *file, const char __user *buf, size_t count, loff_t * off) { uint minor = iminor(file->f_path.dentry->d_inode); int drvidx; int chidx; int retval; if (minor == ISDN_MINOR_STATUS) return -EPERM; if (!dev->drivers) return -ENODEV; mutex_lock(&isdn_mutex); if (minor <= ISDN_MINOR_BMAX) { printk(KERN_WARNING "isdn_write minor %d obsolete!\n", minor); drvidx = isdn_minor2drv(minor); if (drvidx < 0) { retval = -ENODEV; goto out; } if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) { retval = -ENODEV; goto out; } chidx = isdn_minor2chan(minor); while ((retval = isdn_writebuf_stub(drvidx, chidx, buf, count)) == 0) 
interruptible_sleep_on(&dev->drv[drvidx]->snd_waitq[chidx]); goto out; } if (minor <= ISDN_MINOR_CTRLMAX) { drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL); if (drvidx < 0) { retval = -ENODEV; goto out; } /* * We want to use the isdnctrl device to load the firmware * if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) return -ENODEV; */ if (dev->drv[drvidx]->interface->writecmd) retval = dev->drv[drvidx]->interface-> writecmd(buf, count, drvidx, isdn_minor2chan(minor - ISDN_MINOR_CTRL)); else retval = count; goto out; } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) { retval = isdn_ppp_write(minor - ISDN_MINOR_PPP, file, buf, count); goto out; } #endif retval = -ENODEV; out: mutex_unlock(&isdn_mutex); return retval; } static unsigned int isdn_poll(struct file *file, poll_table * wait) { unsigned int mask = 0; unsigned int minor = iminor(file->f_path.dentry->d_inode); int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL); mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { poll_wait(file, &(dev->info_waitq), wait); /* mask = POLLOUT | POLLWRNORM; */ if (file->private_data) { mask |= POLLIN | POLLRDNORM; } goto out; } if (minor >= ISDN_MINOR_CTRL && minor <= ISDN_MINOR_CTRLMAX) { if (drvidx < 0) { /* driver deregistered while file open */ mask = POLLHUP; goto out; } poll_wait(file, &(dev->drv[drvidx]->st_waitq), wait); mask = POLLOUT | POLLWRNORM; if (dev->drv[drvidx]->stavail) { mask |= POLLIN | POLLRDNORM; } goto out; } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) { mask = isdn_ppp_poll(file, wait); goto out; } #endif mask = POLLERR; out: mutex_unlock(&isdn_mutex); return mask; } static int isdn_ioctl(struct file *file, uint cmd, ulong arg) { uint minor = iminor(file->f_path.dentry->d_inode); isdn_ctrl c; int drvidx; int chidx; int ret; int i; char __user *p; char *s; union iocpar { char name[10]; char bname[22]; isdn_ioctl_struct iocts; isdn_net_ioctl_phone phone; isdn_net_ioctl_cfg cfg; } iocpar; void __user *argp = (void __user *)arg; 
#define name iocpar.name #define bname iocpar.bname #define iocts iocpar.iocts #define phone iocpar.phone #define cfg iocpar.cfg if (minor == ISDN_MINOR_STATUS) { switch (cmd) { case IIOCGETDVR: return (TTY_DV + (NET_DV << 8) + (INF_DV << 16)); case IIOCGETCPS: if (arg) { ulong __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, p, sizeof(ulong) * ISDN_MAX_CHANNELS * 2)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { put_user(dev->ibytes[i], p++); put_user(dev->obytes[i], p++); } return 0; } else return -EINVAL; break; #ifdef CONFIG_NETDEVICES case IIOCNETGPN: /* Get peer phone number of a connected * isdn network interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; return isdn_net_getpeer(&phone, argp); } else return -EINVAL; #endif default: return -EINVAL; } } if (!dev->drivers) return -ENODEV; if (minor <= ISDN_MINOR_BMAX) { drvidx = isdn_minor2drv(minor); if (drvidx < 0) return -ENODEV; chidx = isdn_minor2chan(minor); if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) return -ENODEV; return 0; } if (minor <= ISDN_MINOR_CTRLMAX) { /* * isdn net devices manage lots of configuration variables as linked lists. * Those lists must only be manipulated from user space. Some of the ioctl's * service routines access user space and are not atomic. Therefore, ioctl's * manipulating the lists and ioctl's sleeping while accessing the lists * are serialized by means of a semaphore. 
*/ switch (cmd) { case IIOCNETDWRSET: printk(KERN_INFO "INFO: ISDN_DW_ABC_EXTENSION not enabled\n"); return(-EINVAL); case IIOCNETLCR: printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n"); return -ENODEV; #ifdef CONFIG_NETDEVICES case IIOCNETAIF: /* Add a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; s = name; } else { s = NULL; } ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; if ((s = isdn_net_new(s, NULL))) { if (copy_to_user(argp, s, strlen(s) + 1)){ ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; mutex_unlock(&dev->mtx); return ret; case IIOCNETASL: /* Add a slave to a network-interface */ if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; } else return -EINVAL; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; if ((s = isdn_net_newslave(bname))) { if (copy_to_user(argp, s, strlen(s) + 1)){ ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; mutex_unlock(&dev->mtx); return ret; case IIOCNETDIF: /* Delete a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_rm(name); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETSCF: /* Set configurable parameters of a network-interface */ if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; return isdn_net_setcfg(&cfg); } else return -EINVAL; case IIOCNETGCF: /* Get configurable parameters of a network-interface */ if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; if (!(ret = isdn_net_getcfg(&cfg))) { if (copy_to_user(argp, &cfg, sizeof(cfg))) return -EFAULT; } return ret; } else return -EINVAL; case IIOCNETANM: /* Add a phone-number to a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = 
isdn_net_addphone(&phone); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETGNM: /* Get list of phone-numbers of a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_getphones(&phone, argp); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETDNM: /* Delete a phone-number of a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_delphone(&phone); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETDIL: /* Force dialing of a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_dial(name); } else return -EINVAL; #ifdef CONFIG_ISDN_PPP case IIOCNETALN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_dial_slave(name); case IIOCNETDLN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_hangup_slave(name); #endif case IIOCNETHUP: /* Force hangup of a network-interface */ if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_hangup(name); break; #endif /* CONFIG_NETDEVICES */ case IIOCSETVER: dev->net_verbose = arg; printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose); return 0; case IIOCSETGST: if (arg) dev->global_flags |= ISDN_GLOBAL_STOPPED; else dev->global_flags &= ~ISDN_GLOBAL_STOPPED; printk(KERN_INFO "isdn: Global Mode %s\n", (dev->global_flags & ISDN_GLOBAL_STOPPED) ? 
"stopped" : "running"); return 0; case IIOCSETBRJ: drvidx = -1; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } } if (drvidx == -1) return -ENODEV; if (iocts.arg) dev->drv[drvidx]->flags |= DRV_FLAG_REJBUS; else dev->drv[drvidx]->flags &= ~DRV_FLAG_REJBUS; return 0; case IIOCSIGPRF: dev->profd = current; return 0; break; case IIOCGETPRF: /* Get all Modem-Profiles */ if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_to_user(p, dev->mdm.info[i].emu.profile, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_to_user(p, dev->mdm.info[i].emu.pmsn, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; if (copy_to_user(p, dev->mdm.info[i].emu.plmsn, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; } return (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS; } else return -EINVAL; break; case IIOCSETPRF: /* Set all Modem-Profiles */ if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_READ, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_from_user(dev->mdm.info[i].emu.profile, p, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_from_user(dev->mdm.info[i].emu.plmsn, p, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; if (copy_from_user(dev->mdm.info[i].emu.pmsn, p, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; } return 0; } else return -EINVAL; break; case IIOCSETMAP: case IIOCGETMAP: /* Set/Get MSN->EAZ-Mapping for a driver */ if (arg) { if (copy_from_user(&iocts, argp, 
sizeof(isdn_ioctl_struct))) return -EFAULT; iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (cmd == IIOCSETMAP) { int loop = 1; p = (char __user *) iocts.arg; i = 0; while (loop) { int j = 0; while (1) { if (!access_ok(VERIFY_READ, p, 1)) return -EFAULT; get_user(bname[j], p++); switch (bname[j]) { case '\0': loop = 0; /* Fall through */ case ',': bname[j] = '\0'; strcpy(dev->drv[drvidx]->msn2eaz[i], bname); j = ISDN_MSNLEN; break; default: j++; } if (j >= ISDN_MSNLEN) break; } if (++i > 9) break; } } else { p = (char __user *) iocts.arg; for (i = 0; i < 10; i++) { snprintf(bname, sizeof(bname), "%s%s", strlen(dev->drv[drvidx]->msn2eaz[i]) ? dev->drv[drvidx]->msn2eaz[i] : "_", (i < 9) ? "," : "\0"); if (copy_to_user(p, bname, strlen(bname) + 1)) return -EFAULT; p += strlen(bname); } } return 0; } else return -EINVAL; case IIOCDBGVAR: if (arg) { if (copy_to_user(argp, &dev, sizeof(ulong))) return -EFAULT; return 0; } else return -EINVAL; break; default: if ((cmd & IIOCDRVCTL) == IIOCDRVCTL) cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK; else return -EINVAL; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; iocts.drvid[sizeof(iocts.drvid)-1] = 0; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (!access_ok(VERIFY_WRITE, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; c.driver = drvidx; c.command = ISDN_CMD_IOCTL; c.arg = cmd; memcpy(c.parm.num, &iocts.arg, sizeof(ulong)); ret = isdn_command(&c); memcpy(&iocts.arg, c.parm.num, sizeof(ulong)); if (copy_to_user(argp, &iocts, sizeof(isdn_ioctl_struct))) return 
-EFAULT; return ret; } else return -EINVAL; } } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) return (isdn_ppp_ioctl(minor - ISDN_MINOR_PPP, file, cmd, arg)); #endif return -ENODEV; #undef name #undef bname #undef iocts #undef phone #undef cfg } static long isdn_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&isdn_mutex); ret = isdn_ioctl(file, cmd, arg); mutex_unlock(&isdn_mutex); return ret; } /* * Open the device code. */ static int isdn_open(struct inode *ino, struct file *filep) { uint minor = iminor(ino); int drvidx; int chidx; int retval = -ENODEV; mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { infostruct *p; if ((p = kmalloc(sizeof(infostruct), GFP_KERNEL))) { p->next = (char *) dev->infochain; p->private = (char *) &(filep->private_data); dev->infochain = p; /* At opening we allow a single update */ filep->private_data = (char *) 1; retval = 0; goto out; } else { retval = -ENOMEM; goto out; } } if (!dev->channels) goto out; if (minor <= ISDN_MINOR_BMAX) { printk(KERN_WARNING "isdn_open minor %d obsolete!\n", minor); drvidx = isdn_minor2drv(minor); if (drvidx < 0) goto out; chidx = isdn_minor2chan(minor); if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) goto out; if (!(dev->drv[drvidx]->online & (1 << chidx))) goto out; isdn_lock_drivers(); retval = 0; goto out; } if (minor <= ISDN_MINOR_CTRLMAX) { drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL); if (drvidx < 0) goto out; isdn_lock_drivers(); retval = 0; goto out; } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) { retval = isdn_ppp_open(minor - ISDN_MINOR_PPP, filep); if (retval == 0) isdn_lock_drivers(); goto out; } #endif out: nonseekable_open(ino, filep); mutex_unlock(&isdn_mutex); return retval; } static int isdn_close(struct inode *ino, struct file *filep) { uint minor = iminor(ino); mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { infostruct *p = dev->infochain; infostruct *q = NULL; while (p) { if (p->private == 
(char *) &(filep->private_data)) { if (q) q->next = p->next; else dev->infochain = (infostruct *) (p->next); kfree(p); goto out; } q = p; p = (infostruct *) (p->next); } printk(KERN_WARNING "isdn: No private data while closing isdnctrl\n"); goto out; } isdn_unlock_drivers(); if (minor <= ISDN_MINOR_BMAX) goto out; if (minor <= ISDN_MINOR_CTRLMAX) { if (dev->profd == current) dev->profd = NULL; goto out; } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) isdn_ppp_release(minor - ISDN_MINOR_PPP, filep); #endif out: mutex_unlock(&isdn_mutex); return 0; } static const struct file_operations isdn_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = isdn_read, .write = isdn_write, .poll = isdn_poll, .unlocked_ioctl = isdn_unlocked_ioctl, .open = isdn_open, .release = isdn_close, }; char * isdn_map_eaz2msn(char *msn, int di) { isdn_driver_t *this = dev->drv[di]; int i; if (strlen(msn) == 1) { i = msn[0] - '0'; if ((i >= 0) && (i <= 9)) if (strlen(this->msn2eaz[i])) return (this->msn2eaz[i]); } return (msn); } /* * Find an unused ISDN-channel, whose feature-flags match the * given L2- and L3-protocols. */ #define L2V (~(ISDN_FEATURE_L2_V11096|ISDN_FEATURE_L2_V11019|ISDN_FEATURE_L2_V11038)) /* * This function must be called with holding the dev->lock. */ int isdn_get_free_channel(int usage, int l2_proto, int l3_proto, int pre_dev ,int pre_chan, char *msn) { int i; ulong features; ulong vfeatures; features = ((1 << l2_proto) | (0x10000 << l3_proto)); vfeatures = (((1 << l2_proto) | (0x10000 << l3_proto)) & ~(ISDN_FEATURE_L2_V11096|ISDN_FEATURE_L2_V11019|ISDN_FEATURE_L2_V11038)); /* If Layer-2 protocol is V.110, accept drivers with * transparent feature even if these don't support V.110 * because we can emulate this in linklevel. 
*/ for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (USG_NONE(dev->usage[i]) && (dev->drvmap[i] != -1)) { int d = dev->drvmap[i]; if ((dev->usage[i] & ISDN_USAGE_EXCLUSIVE) && ((pre_dev != d) || (pre_chan != dev->chanmap[i]))) continue; if (!strcmp(isdn_map_eaz2msn(msn, d), "-")) continue; if (dev->usage[i] & ISDN_USAGE_DISABLED) continue; /* usage not allowed */ if (dev->drv[d]->flags & DRV_FLAG_RUNNING) { if (((dev->drv[d]->interface->features & features) == features) || (((dev->drv[d]->interface->features & vfeatures) == vfeatures) && (dev->drv[d]->interface->features & ISDN_FEATURE_L2_TRANS))) { if ((pre_dev < 0) || (pre_chan < 0)) { dev->usage[i] &= ISDN_USAGE_EXCLUSIVE; dev->usage[i] |= usage; isdn_info_update(); return i; } else { if ((pre_dev == d) && (pre_chan == dev->chanmap[i])) { dev->usage[i] &= ISDN_USAGE_EXCLUSIVE; dev->usage[i] |= usage; isdn_info_update(); return i; } } } } } return -1; } /* * Set state of ISDN-channel to 'unused' */ void isdn_free_channel(int di, int ch, int usage) { int i; if ((di < 0) || (ch < 0)) { printk(KERN_WARNING "%s: called with invalid drv(%d) or channel(%d)\n", __func__, di, ch); return; } for (i = 0; i < ISDN_MAX_CHANNELS; i++) if (((!usage) || ((dev->usage[i] & ISDN_USAGE_MASK) == usage)) && (dev->drvmap[i] == di) && (dev->chanmap[i] == ch)) { dev->usage[i] &= (ISDN_USAGE_NONE | ISDN_USAGE_EXCLUSIVE); strcpy(dev->num[i], "???"); dev->ibytes[i] = 0; dev->obytes[i] = 0; // 20.10.99 JIM, try to reinitialize v110 ! dev->v110emu[i] = 0; atomic_set(&(dev->v110use[i]), 0); isdn_v110_close(dev->v110[i]); dev->v110[i] = NULL; // 20.10.99 JIM, try to reinitialize v110 ! 
isdn_info_update(); if (dev->drv[di]) skb_queue_purge(&dev->drv[di]->rpqueue[ch]); } } /* * Cancel Exclusive-Flag for ISDN-channel */ void isdn_unexclusive_channel(int di, int ch) { int i; for (i = 0; i < ISDN_MAX_CHANNELS; i++) if ((dev->drvmap[i] == di) && (dev->chanmap[i] == ch)) { dev->usage[i] &= ~ISDN_USAGE_EXCLUSIVE; isdn_info_update(); return; } } /* * writebuf replacement for SKB_ABLE drivers */ static int isdn_writebuf_stub(int drvidx, int chan, const u_char __user * buf, int len) { int ret; int hl = dev->drv[drvidx]->interface->hl_hdrlen; struct sk_buff *skb = alloc_skb(hl + len, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_reserve(skb, hl); if (copy_from_user(skb_put(skb, len), buf, len)) { dev_kfree_skb(skb); return -EFAULT; } ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, 1, skb); if (ret <= 0) dev_kfree_skb(skb); if (ret > 0) dev->obytes[isdn_dc2minor(drvidx, chan)] += ret; return ret; } /* * Return: length of data on success, -ERRcode on failure. */ int isdn_writebuf_skb_stub(int drvidx, int chan, int ack, struct sk_buff *skb) { int ret; struct sk_buff *nskb = NULL; int v110_ret = skb->len; int idx = isdn_dc2minor(drvidx, chan); if (dev->v110[idx]) { atomic_inc(&dev->v110use[idx]); nskb = isdn_v110_encode(dev->v110[idx], skb); atomic_dec(&dev->v110use[idx]); if (!nskb) return 0; v110_ret = *((int *)nskb->data); skb_pull(nskb, sizeof(int)); if (!nskb->len) { dev_kfree_skb(nskb); return v110_ret; } /* V.110 must always be acknowledged */ ack = 1; ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, ack, nskb); } else { int hl = dev->drv[drvidx]->interface->hl_hdrlen; if( skb_headroom(skb) < hl ){ /* * This should only occur when new HL driver with * increased hl_hdrlen was loaded after netdevice * was created and connected to the new driver. 
* * The V.110 branch (re-allocates on its own) does * not need this */ struct sk_buff * skb_tmp; skb_tmp = skb_realloc_headroom(skb, hl); printk(KERN_DEBUG "isdn_writebuf_skb_stub: reallocating headroom%s\n", skb_tmp ? "" : " failed"); if (!skb_tmp) return -ENOMEM; /* 0 better? */ ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, ack, skb_tmp); if( ret > 0 ){ dev_kfree_skb(skb); } else { dev_kfree_skb(skb_tmp); } } else { ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, ack, skb); } } if (ret > 0) { dev->obytes[idx] += ret; if (dev->v110[idx]) { atomic_inc(&dev->v110use[idx]); dev->v110[idx]->skbuser++; atomic_dec(&dev->v110use[idx]); /* For V.110 return unencoded data length */ ret = v110_ret; /* if the complete frame was send we free the skb; if not upper function will requeue the skb */ if (ret == skb->len) dev_kfree_skb(skb); } } else if (dev->v110[idx]) dev_kfree_skb(nskb); return ret; } static int isdn_add_channels(isdn_driver_t *d, int drvidx, int n, int adding) { int j, k, m; init_waitqueue_head(&d->st_waitq); if (d->flags & DRV_FLAG_RUNNING) return -1; if (n < 1) return 0; m = (adding) ? d->channels + n : n; if (dev->channels + n > ISDN_MAX_CHANNELS) { printk(KERN_WARNING "register_isdn: Max. 
%d channels supported\n", ISDN_MAX_CHANNELS); return -1; } if ((adding) && (d->rcverr)) kfree(d->rcverr); if (!(d->rcverr = kzalloc(sizeof(int) * m, GFP_ATOMIC))) { printk(KERN_WARNING "register_isdn: Could not alloc rcverr\n"); return -1; } if ((adding) && (d->rcvcount)) kfree(d->rcvcount); if (!(d->rcvcount = kzalloc(sizeof(int) * m, GFP_ATOMIC))) { printk(KERN_WARNING "register_isdn: Could not alloc rcvcount\n"); if (!adding) kfree(d->rcverr); return -1; } if ((adding) && (d->rpqueue)) { for (j = 0; j < d->channels; j++) skb_queue_purge(&d->rpqueue[j]); kfree(d->rpqueue); } if (!(d->rpqueue = kmalloc(sizeof(struct sk_buff_head) * m, GFP_ATOMIC))) { printk(KERN_WARNING "register_isdn: Could not alloc rpqueue\n"); if (!adding) { kfree(d->rcvcount); kfree(d->rcverr); } return -1; } for (j = 0; j < m; j++) { skb_queue_head_init(&d->rpqueue[j]); } if ((adding) && (d->rcv_waitq)) kfree(d->rcv_waitq); d->rcv_waitq = kmalloc(sizeof(wait_queue_head_t) * 2 * m, GFP_ATOMIC); if (!d->rcv_waitq) { printk(KERN_WARNING "register_isdn: Could not alloc rcv_waitq\n"); if (!adding) { kfree(d->rpqueue); kfree(d->rcvcount); kfree(d->rcverr); } return -1; } d->snd_waitq = d->rcv_waitq + m; for (j = 0; j < m; j++) { init_waitqueue_head(&d->rcv_waitq[j]); init_waitqueue_head(&d->snd_waitq[j]); } dev->channels += n; for (j = d->channels; j < m; j++) for (k = 0; k < ISDN_MAX_CHANNELS; k++) if (dev->chanmap[k] < 0) { dev->chanmap[k] = j; dev->drvmap[k] = drvidx; break; } d->channels = m; return 0; } /* * Low-level-driver registration */ static void set_global_features(void) { int drvidx; dev->global_features = 0; for (drvidx = 0; drvidx < ISDN_MAX_DRIVERS; drvidx++) { if (!dev->drv[drvidx]) continue; if (dev->drv[drvidx]->interface) dev->global_features |= dev->drv[drvidx]->interface->features; } } #ifdef CONFIG_ISDN_DIVERSION static char *map_drvname(int di) { if ((di < 0) || (di >= ISDN_MAX_DRIVERS)) return(NULL); return(dev->drvid[di]); /* driver name */ } /* map_drvname */ static int 
map_namedrv(char *id) { int i; for (i = 0; i < ISDN_MAX_DRIVERS; i++) { if (!strcmp(dev->drvid[i],id)) return(i); } return(-1); } /* map_namedrv */ int DIVERT_REG_NAME(isdn_divert_if *i_div) { if (i_div->if_magic != DIVERT_IF_MAGIC) return(DIVERT_VER_ERR); switch (i_div->cmd) { case DIVERT_CMD_REL: if (divert_if != i_div) return(DIVERT_REL_ERR); divert_if = NULL; /* free interface */ return(DIVERT_NO_ERR); case DIVERT_CMD_REG: if (divert_if) return(DIVERT_REG_ERR); i_div->ll_cmd = isdn_command; /* set command function */ i_div->drv_to_name = map_drvname; i_div->name_to_drv = map_namedrv; divert_if = i_div; /* remember interface */ return(DIVERT_NO_ERR); default: return(DIVERT_CMD_ERR); } } /* DIVERT_REG_NAME */ EXPORT_SYMBOL(DIVERT_REG_NAME); #endif /* CONFIG_ISDN_DIVERSION */ EXPORT_SYMBOL(register_isdn); #ifdef CONFIG_ISDN_PPP EXPORT_SYMBOL(isdn_ppp_register_compressor); EXPORT_SYMBOL(isdn_ppp_unregister_compressor); #endif int register_isdn(isdn_if * i) { isdn_driver_t *d; int j; ulong flags; int drvidx; if (dev->drivers >= ISDN_MAX_DRIVERS) { printk(KERN_WARNING "register_isdn: Max. 
%d drivers supported\n", ISDN_MAX_DRIVERS); return 0; } if (!i->writebuf_skb) { printk(KERN_WARNING "register_isdn: No write routine given.\n"); return 0; } if (!(d = kzalloc(sizeof(isdn_driver_t), GFP_KERNEL))) { printk(KERN_WARNING "register_isdn: Could not alloc driver-struct\n"); return 0; } d->maxbufsize = i->maxbufsize; d->pktcount = 0; d->stavail = 0; d->flags = DRV_FLAG_LOADED; d->online = 0; d->interface = i; d->channels = 0; spin_lock_irqsave(&dev->lock, flags); for (drvidx = 0; drvidx < ISDN_MAX_DRIVERS; drvidx++) if (!dev->drv[drvidx]) break; if (isdn_add_channels(d, drvidx, i->channels, 0)) { spin_unlock_irqrestore(&dev->lock, flags); kfree(d); return 0; } i->channels = drvidx; i->rcvcallb_skb = isdn_receive_skb_callback; i->statcallb = isdn_status_callback; if (!strlen(i->id)) sprintf(i->id, "line%d", drvidx); for (j = 0; j < drvidx; j++) if (!strcmp(i->id, dev->drvid[j])) sprintf(i->id, "line%d", drvidx); dev->drv[drvidx] = d; strcpy(dev->drvid[drvidx], i->id); isdn_info_update(); dev->drivers++; set_global_features(); spin_unlock_irqrestore(&dev->lock, flags); return 1; } /* ***************************************************************************** * And now the modules code. 
***************************************************************************** */ static char * isdn_getrev(const char *revision) { char *rev; char *p; if ((p = strchr(revision, ':'))) { rev = p + 2; p = strchr(rev, '$'); *--p = 0; } else rev = "???"; return rev; } /* * Allocate and initialize all data, register modem-devices */ static int __init isdn_init(void) { int i; char tmprev[50]; if (!(dev = vmalloc(sizeof(isdn_dev)))) { printk(KERN_WARNING "isdn: Could not allocate device-struct.\n"); return -EIO; } memset((char *) dev, 0, sizeof(isdn_dev)); init_timer(&dev->timer); dev->timer.function = isdn_timer_funct; spin_lock_init(&dev->lock); spin_lock_init(&dev->timerlock); #ifdef MODULE dev->owner = THIS_MODULE; #endif mutex_init(&dev->mtx); init_waitqueue_head(&dev->info_waitq); for (i = 0; i < ISDN_MAX_CHANNELS; i++) { dev->drvmap[i] = -1; dev->chanmap[i] = -1; dev->m_idx[i] = -1; strcpy(dev->num[i], "???"); init_waitqueue_head(&dev->mdm.info[i].open_wait); init_waitqueue_head(&dev->mdm.info[i].close_wait); } if (register_chrdev(ISDN_MAJOR, "isdn", &isdn_fops)) { printk(KERN_WARNING "isdn: Could not register control devices\n"); vfree(dev); return -EIO; } if ((isdn_tty_modem_init()) < 0) { printk(KERN_WARNING "isdn: Could not register tty devices\n"); vfree(dev); unregister_chrdev(ISDN_MAJOR, "isdn"); return -EIO; } #ifdef CONFIG_ISDN_PPP if (isdn_ppp_init() < 0) { printk(KERN_WARNING "isdn: Could not create PPP-device-structs\n"); isdn_tty_exit(); unregister_chrdev(ISDN_MAJOR, "isdn"); vfree(dev); return -EIO; } #endif /* CONFIG_ISDN_PPP */ strcpy(tmprev, isdn_revision); printk(KERN_NOTICE "ISDN subsystem Rev: %s/", isdn_getrev(tmprev)); strcpy(tmprev, isdn_tty_revision); printk("%s/", isdn_getrev(tmprev)); strcpy(tmprev, isdn_net_revision); printk("%s/", isdn_getrev(tmprev)); strcpy(tmprev, isdn_ppp_revision); printk("%s/", isdn_getrev(tmprev)); strcpy(tmprev, isdn_audio_revision); printk("%s/", isdn_getrev(tmprev)); strcpy(tmprev, isdn_v110_revision); 
printk("%s", isdn_getrev(tmprev)); #ifdef MODULE printk(" loaded\n"); #else printk("\n"); #endif isdn_info_update(); return 0; } /* * Unload module */ static void __exit isdn_exit(void) { #ifdef CONFIG_ISDN_PPP isdn_ppp_cleanup(); #endif if (isdn_net_rmall() < 0) { printk(KERN_WARNING "isdn: net-device busy, remove cancelled\n"); return; } isdn_tty_exit(); unregister_chrdev(ISDN_MAJOR, "isdn"); del_timer(&dev->timer); /* call vfree with interrupts enabled, else it will hang */ vfree(dev); printk(KERN_NOTICE "ISDN-subsystem unloaded\n"); } module_init(isdn_init); module_exit(isdn_exit);
gpl-2.0
shubhangi-shrivastava/drm-intel-nightly
drivers/net/wireless/libertas/mesh.c
1681
30727
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/hardirq.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <linux/kthread.h> #include <linux/kfifo.h> #include <net/cfg80211.h> #include "mesh.h" #include "decl.h" #include "cmd.h" static int lbs_add_mesh(struct lbs_private *priv); /*************************************************************************** * Mesh command handling */ static int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, struct cmd_ds_mesh_access *cmd) { int ret; lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); cmd->hdr.command = cpu_to_le16(CMD_MESH_ACCESS); cmd->hdr.size = cpu_to_le16(sizeof(*cmd)); cmd->hdr.result = 0; cmd->action = cpu_to_le16(cmd_action); ret = lbs_cmd_with_response(priv, CMD_MESH_ACCESS, cmd); lbs_deb_leave(LBS_DEB_CMD); return ret; } static int __lbs_mesh_config_send(struct lbs_private *priv, struct cmd_ds_mesh_config *cmd, uint16_t action, uint16_t type) { int ret; u16 command = CMD_MESH_CONFIG_OLD; lbs_deb_enter(LBS_DEB_CMD); /* * Command id is 0xac for v10 FW along with mesh interface * id in bits 14-13-12. */ if (priv->mesh_tlv == TLV_TYPE_MESH_ID) command = CMD_MESH_CONFIG | (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET); cmd->hdr.command = cpu_to_le16(command); cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_mesh_config)); cmd->hdr.result = 0; cmd->type = cpu_to_le16(type); cmd->action = cpu_to_le16(action); ret = lbs_cmd_with_response(priv, command, cmd); lbs_deb_leave(LBS_DEB_CMD); return ret; } static int lbs_mesh_config_send(struct lbs_private *priv, struct cmd_ds_mesh_config *cmd, uint16_t action, uint16_t type) { int ret; if (!(priv->fwcapinfo & FW_CAPINFO_PERSISTENT_CONFIG)) return -EOPNOTSUPP; ret = __lbs_mesh_config_send(priv, cmd, action, type); return ret; } /* This function is the CMD_MESH_CONFIG legacy function. It only handles the * START and STOP actions. 
The extended actions supported by CMD_MESH_CONFIG * are all handled by preparing a struct cmd_ds_mesh_config and passing it to * lbs_mesh_config_send. */ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan) { struct cmd_ds_mesh_config cmd; struct mrvl_meshie *ie; memset(&cmd, 0, sizeof(cmd)); cmd.channel = cpu_to_le16(chan); ie = (struct mrvl_meshie *)cmd.data; switch (action) { case CMD_ACT_MESH_CONFIG_START: ie->id = WLAN_EID_VENDOR_SPECIFIC; ie->val.oui[0] = 0x00; ie->val.oui[1] = 0x50; ie->val.oui[2] = 0x43; ie->val.type = MARVELL_MESH_IE_TYPE; ie->val.subtype = MARVELL_MESH_IE_SUBTYPE; ie->val.version = MARVELL_MESH_IE_VERSION; ie->val.active_protocol_id = MARVELL_MESH_PROTO_ID_HWMP; ie->val.active_metric_id = MARVELL_MESH_METRIC_ID; ie->val.mesh_capability = MARVELL_MESH_CAPABILITY; ie->val.mesh_id_len = priv->mesh_ssid_len; memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len); ie->len = sizeof(struct mrvl_meshie_val) - IEEE80211_MAX_SSID_LEN + priv->mesh_ssid_len; cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val)); break; case CMD_ACT_MESH_CONFIG_STOP: break; default: return -1; } lbs_deb_cmd("mesh config action %d type %x channel %d SSID %*pE\n", action, priv->mesh_tlv, chan, priv->mesh_ssid_len, priv->mesh_ssid); return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv); } int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel) { priv->mesh_channel = channel; return lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, channel); } static uint16_t lbs_mesh_get_channel(struct lbs_private *priv) { return priv->mesh_channel ?: 1; } /*************************************************************************** * Mesh sysfs support */ /* * Attributes exported through sysfs */ /** * lbs_anycast_get - Get function for sysfs attribute anycast_mask * @dev: the &struct device * @attr: device attributes * @buf: buffer where data will be returned */ static ssize_t lbs_anycast_get(struct device *dev, struct 
device_attribute *attr, char * buf) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; struct cmd_ds_mesh_access mesh_access; int ret; memset(&mesh_access, 0, sizeof(mesh_access)); ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_ANYCAST, &mesh_access); if (ret) return ret; return snprintf(buf, 12, "0x%X\n", le32_to_cpu(mesh_access.data[0])); } /** * lbs_anycast_set - Set function for sysfs attribute anycast_mask * @dev: the &struct device * @attr: device attributes * @buf: buffer that contains new attribute value * @count: size of buffer */ static ssize_t lbs_anycast_set(struct device *dev, struct device_attribute *attr, const char * buf, size_t count) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; struct cmd_ds_mesh_access mesh_access; uint32_t datum; int ret; memset(&mesh_access, 0, sizeof(mesh_access)); sscanf(buf, "%x", &datum); mesh_access.data[0] = cpu_to_le32(datum); ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_ANYCAST, &mesh_access); if (ret) return ret; return strlen(buf); } /** * lbs_prb_rsp_limit_get - Get function for sysfs attribute prb_rsp_limit * @dev: the &struct device * @attr: device attributes * @buf: buffer where data will be returned */ static ssize_t lbs_prb_rsp_limit_get(struct device *dev, struct device_attribute *attr, char *buf) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; struct cmd_ds_mesh_access mesh_access; int ret; u32 retry_limit; memset(&mesh_access, 0, sizeof(mesh_access)); mesh_access.data[0] = cpu_to_le32(CMD_ACT_GET); ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT, &mesh_access); if (ret) return ret; retry_limit = le32_to_cpu(mesh_access.data[1]); return snprintf(buf, 10, "%d\n", retry_limit); } /** * lbs_prb_rsp_limit_set - Set function for sysfs attribute prb_rsp_limit * @dev: the &struct device * @attr: device attributes * @buf: buffer that contains new attribute value * @count: size of buffer */ static ssize_t lbs_prb_rsp_limit_set(struct device *dev, struct device_attribute *attr, const 
char *buf, size_t count) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; struct cmd_ds_mesh_access mesh_access; int ret; unsigned long retry_limit; memset(&mesh_access, 0, sizeof(mesh_access)); mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET); if (!kstrtoul(buf, 10, &retry_limit)) return -ENOTSUPP; if (retry_limit > 15) return -ENOTSUPP; mesh_access.data[1] = cpu_to_le32(retry_limit); ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT, &mesh_access); if (ret) return ret; return strlen(buf); } /** * lbs_mesh_get - Get function for sysfs attribute mesh * @dev: the &struct device * @attr: device attributes * @buf: buffer where data will be returned */ static ssize_t lbs_mesh_get(struct device *dev, struct device_attribute *attr, char * buf) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev); } /** * lbs_mesh_set - Set function for sysfs attribute mesh * @dev: the &struct device * @attr: device attributes * @buf: buffer that contains new attribute value * @count: size of buffer */ static ssize_t lbs_mesh_set(struct device *dev, struct device_attribute *attr, const char * buf, size_t count) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; int enable; sscanf(buf, "%x", &enable); enable = !!enable; if (enable == !!priv->mesh_dev) return count; if (enable) lbs_add_mesh(priv); else lbs_remove_mesh(priv); return count; } /* * lbs_mesh attribute to be exported per ethX interface * through sysfs (/sys/class/net/ethX/lbs_mesh) */ static DEVICE_ATTR(lbs_mesh, 0644, lbs_mesh_get, lbs_mesh_set); /* * anycast_mask attribute to be exported per mshX interface * through sysfs (/sys/class/net/mshX/anycast_mask) */ static DEVICE_ATTR(anycast_mask, 0644, lbs_anycast_get, lbs_anycast_set); /* * prb_rsp_limit attribute to be exported per mshX interface * through sysfs (/sys/class/net/mshX/prb_rsp_limit) */ static DEVICE_ATTR(prb_rsp_limit, 0644, lbs_prb_rsp_limit_get, lbs_prb_rsp_limit_set); static struct 
attribute *lbs_mesh_sysfs_entries[] = { &dev_attr_anycast_mask.attr, &dev_attr_prb_rsp_limit.attr, NULL, }; static const struct attribute_group lbs_mesh_attr_group = { .attrs = lbs_mesh_sysfs_entries, }; /*************************************************************************** * Persistent configuration support */ static int mesh_get_default_parameters(struct device *dev, struct mrvl_mesh_defaults *defs) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; struct cmd_ds_mesh_config cmd; int ret; memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config)); ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_GET, CMD_TYPE_MESH_GET_DEFAULTS); if (ret) return -EOPNOTSUPP; memcpy(defs, &cmd.data[0], sizeof(struct mrvl_mesh_defaults)); return 0; } /** * bootflag_get - Get function for sysfs attribute bootflag * @dev: the &struct device * @attr: device attributes * @buf: buffer where data will be returned */ static ssize_t bootflag_get(struct device *dev, struct device_attribute *attr, char *buf) { struct mrvl_mesh_defaults defs; int ret; ret = mesh_get_default_parameters(dev, &defs); if (ret) return ret; return snprintf(buf, 12, "%d\n", le32_to_cpu(defs.bootflag)); } /** * bootflag_set - Set function for sysfs attribute bootflag * @dev: the &struct device * @attr: device attributes * @buf: buffer that contains new attribute value * @count: size of buffer */ static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; struct cmd_ds_mesh_config cmd; uint32_t datum; int ret; memset(&cmd, 0, sizeof(cmd)); ret = sscanf(buf, "%d", &datum); if ((ret != 1) || (datum > 1)) return -EINVAL; *((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum); cmd.length = cpu_to_le16(sizeof(uint32_t)); ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, CMD_TYPE_MESH_SET_BOOTFLAG); if (ret) return ret; return strlen(buf); } /** * boottime_get - Get function for sysfs attribute 
boottime * @dev: the &struct device * @attr: device attributes * @buf: buffer where data will be returned */ static ssize_t boottime_get(struct device *dev, struct device_attribute *attr, char *buf) { struct mrvl_mesh_defaults defs; int ret; ret = mesh_get_default_parameters(dev, &defs); if (ret) return ret; return snprintf(buf, 12, "%d\n", defs.boottime); } /** * boottime_set - Set function for sysfs attribute boottime * @dev: the &struct device * @attr: device attributes * @buf: buffer that contains new attribute value * @count: size of buffer */ static ssize_t boottime_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; struct cmd_ds_mesh_config cmd; uint32_t datum; int ret; memset(&cmd, 0, sizeof(cmd)); ret = sscanf(buf, "%d", &datum); if ((ret != 1) || (datum > 255)) return -EINVAL; /* A too small boot time will result in the device booting into * standalone (no-host) mode before the host can take control of it, * so the change will be hard to revert. This may be a desired * feature (e.g to configure a very fast boot time for devices that * will not be attached to a host), but dangerous. So I'm enforcing a * lower limit of 20 seconds: remove and recompile the driver if this * does not work for you. */ datum = (datum < 20) ? 
20 : datum; cmd.data[0] = datum; cmd.length = cpu_to_le16(sizeof(uint8_t)); ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, CMD_TYPE_MESH_SET_BOOTTIME); if (ret) return ret; return strlen(buf); } /** * channel_get - Get function for sysfs attribute channel * @dev: the &struct device * @attr: device attributes * @buf: buffer where data will be returned */ static ssize_t channel_get(struct device *dev, struct device_attribute *attr, char *buf) { struct mrvl_mesh_defaults defs; int ret; ret = mesh_get_default_parameters(dev, &defs); if (ret) return ret; return snprintf(buf, 12, "%d\n", le16_to_cpu(defs.channel)); } /** * channel_set - Set function for sysfs attribute channel * @dev: the &struct device * @attr: device attributes * @buf: buffer that contains new attribute value * @count: size of buffer */ static ssize_t channel_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct lbs_private *priv = to_net_dev(dev)->ml_priv; struct cmd_ds_mesh_config cmd; uint32_t datum; int ret; memset(&cmd, 0, sizeof(cmd)); ret = sscanf(buf, "%d", &datum); if (ret != 1 || datum < 1 || datum > 11) return -EINVAL; *((__le16 *)&cmd.data[0]) = cpu_to_le16(datum); cmd.length = cpu_to_le16(sizeof(uint16_t)); ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, CMD_TYPE_MESH_SET_DEF_CHANNEL); if (ret) return ret; return strlen(buf); } /** * mesh_id_get - Get function for sysfs attribute mesh_id * @dev: the &struct device * @attr: device attributes * @buf: buffer where data will be returned */ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr, char *buf) { struct mrvl_mesh_defaults defs; int ret; ret = mesh_get_default_parameters(dev, &defs); if (ret) return ret; if (defs.meshie.val.mesh_id_len > IEEE80211_MAX_SSID_LEN) { dev_err(dev, "inconsistent mesh ID length\n"); defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN; } memcpy(buf, defs.meshie.val.mesh_id, defs.meshie.val.mesh_id_len); 
buf[defs.meshie.val.mesh_id_len] = '\n'; buf[defs.meshie.val.mesh_id_len + 1] = '\0'; return defs.meshie.val.mesh_id_len + 1; } /** * mesh_id_set - Set function for sysfs attribute mesh_id * @dev: the &struct device * @attr: device attributes * @buf: buffer that contains new attribute value * @count: size of buffer */ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cmd_ds_mesh_config cmd; struct mrvl_mesh_defaults defs; struct mrvl_meshie *ie; struct lbs_private *priv = to_net_dev(dev)->ml_priv; int len; int ret; if (count < 2 || count > IEEE80211_MAX_SSID_LEN + 1) return -EINVAL; memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config)); ie = (struct mrvl_meshie *) &cmd.data[0]; /* fetch all other Information Element parameters */ ret = mesh_get_default_parameters(dev, &defs); cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie)); /* transfer IE elements */ memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie)); len = count - 1; memcpy(ie->val.mesh_id, buf, len); /* SSID len */ ie->val.mesh_id_len = len; /* IE len */ ie->len = sizeof(struct mrvl_meshie_val) - IEEE80211_MAX_SSID_LEN + len; ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, CMD_TYPE_MESH_SET_MESH_IE); if (ret) return ret; return strlen(buf); } /** * protocol_id_get - Get function for sysfs attribute protocol_id * @dev: the &struct device * @attr: device attributes * @buf: buffer where data will be returned */ static ssize_t protocol_id_get(struct device *dev, struct device_attribute *attr, char *buf) { struct mrvl_mesh_defaults defs; int ret; ret = mesh_get_default_parameters(dev, &defs); if (ret) return ret; return snprintf(buf, 5, "%d\n", defs.meshie.val.active_protocol_id); } /** * protocol_id_set - Set function for sysfs attribute protocol_id * @dev: the &struct device * @attr: device attributes * @buf: buffer that contains new attribute value * @count: size of buffer */ static ssize_t protocol_id_set(struct device *dev, 
struct device_attribute *attr, const char *buf, size_t count) { struct cmd_ds_mesh_config cmd; struct mrvl_mesh_defaults defs; struct mrvl_meshie *ie; struct lbs_private *priv = to_net_dev(dev)->ml_priv; uint32_t datum; int ret; memset(&cmd, 0, sizeof(cmd)); ret = sscanf(buf, "%d", &datum); if ((ret != 1) || (datum > 255)) return -EINVAL; /* fetch all other Information Element parameters */ ret = mesh_get_default_parameters(dev, &defs); cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie)); /* transfer IE elements */ ie = (struct mrvl_meshie *) &cmd.data[0]; memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie)); /* update protocol id */ ie->val.active_protocol_id = datum; ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, CMD_TYPE_MESH_SET_MESH_IE); if (ret) return ret; return strlen(buf); } /** * metric_id_get - Get function for sysfs attribute metric_id * @dev: the &struct device * @attr: device attributes * @buf: buffer where data will be returned */ static ssize_t metric_id_get(struct device *dev, struct device_attribute *attr, char *buf) { struct mrvl_mesh_defaults defs; int ret; ret = mesh_get_default_parameters(dev, &defs); if (ret) return ret; return snprintf(buf, 5, "%d\n", defs.meshie.val.active_metric_id); } /** * metric_id_set - Set function for sysfs attribute metric_id * @dev: the &struct device * @attr: device attributes * @buf: buffer that contains new attribute value * @count: size of buffer */ static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cmd_ds_mesh_config cmd; struct mrvl_mesh_defaults defs; struct mrvl_meshie *ie; struct lbs_private *priv = to_net_dev(dev)->ml_priv; uint32_t datum; int ret; memset(&cmd, 0, sizeof(cmd)); ret = sscanf(buf, "%d", &datum); if ((ret != 1) || (datum > 255)) return -EINVAL; /* fetch all other Information Element parameters */ ret = mesh_get_default_parameters(dev, &defs); cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie)); /* 
transfer IE elements */ ie = (struct mrvl_meshie *) &cmd.data[0]; memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie)); /* update metric id */ ie->val.active_metric_id = datum; ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, CMD_TYPE_MESH_SET_MESH_IE); if (ret) return ret; return strlen(buf); } /** * capability_get - Get function for sysfs attribute capability * @dev: the &struct device * @attr: device attributes * @buf: buffer where data will be returned */ static ssize_t capability_get(struct device *dev, struct device_attribute *attr, char *buf) { struct mrvl_mesh_defaults defs; int ret; ret = mesh_get_default_parameters(dev, &defs); if (ret) return ret; return snprintf(buf, 5, "%d\n", defs.meshie.val.mesh_capability); } /** * capability_set - Set function for sysfs attribute capability * @dev: the &struct device * @attr: device attributes * @buf: buffer that contains new attribute value * @count: size of buffer */ static ssize_t capability_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cmd_ds_mesh_config cmd; struct mrvl_mesh_defaults defs; struct mrvl_meshie *ie; struct lbs_private *priv = to_net_dev(dev)->ml_priv; uint32_t datum; int ret; memset(&cmd, 0, sizeof(cmd)); ret = sscanf(buf, "%d", &datum); if ((ret != 1) || (datum > 255)) return -EINVAL; /* fetch all other Information Element parameters */ ret = mesh_get_default_parameters(dev, &defs); cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie)); /* transfer IE elements */ ie = (struct mrvl_meshie *) &cmd.data[0]; memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie)); /* update value */ ie->val.mesh_capability = datum; ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, CMD_TYPE_MESH_SET_MESH_IE); if (ret) return ret; return strlen(buf); } static DEVICE_ATTR(bootflag, 0644, bootflag_get, bootflag_set); static DEVICE_ATTR(boottime, 0644, boottime_get, boottime_set); static DEVICE_ATTR(channel, 0644, channel_get, channel_set); static 
DEVICE_ATTR(mesh_id, 0644, mesh_id_get, mesh_id_set); static DEVICE_ATTR(protocol_id, 0644, protocol_id_get, protocol_id_set); static DEVICE_ATTR(metric_id, 0644, metric_id_get, metric_id_set); static DEVICE_ATTR(capability, 0644, capability_get, capability_set); static struct attribute *boot_opts_attrs[] = { &dev_attr_bootflag.attr, &dev_attr_boottime.attr, &dev_attr_channel.attr, NULL }; static const struct attribute_group boot_opts_group = { .name = "boot_options", .attrs = boot_opts_attrs, }; static struct attribute *mesh_ie_attrs[] = { &dev_attr_mesh_id.attr, &dev_attr_protocol_id.attr, &dev_attr_metric_id.attr, &dev_attr_capability.attr, NULL }; static const struct attribute_group mesh_ie_group = { .name = "mesh_ie", .attrs = mesh_ie_attrs, }; static void lbs_persist_config_init(struct net_device *dev) { int ret; ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group); ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group); } static void lbs_persist_config_remove(struct net_device *dev) { sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group); sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group); } /*************************************************************************** * Initializing and starting, stopping mesh */ /* * Check mesh FW version and appropriately send the mesh start * command */ int lbs_init_mesh(struct lbs_private *priv) { int ret = 0; lbs_deb_enter(LBS_DEB_MESH); /* Determine mesh_fw_ver from fwrelease and fwcapinfo */ /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */ /* 5.110.22 have mesh command with 0xa3 command id */ /* 10.0.0.p0 FW brings in mesh config command with different id */ /* Check FW version MSB and initialize mesh_fw_ver */ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) { /* Enable mesh, if supported, and work out which TLV it uses. 
0x100 + 291 is an unofficial value used in 5.110.20.pXX 0x100 + 37 is the official value used in 5.110.21.pXX but we check them in that order because 20.pXX doesn't give an error -- it just silently fails. */ /* 5.110.20.pXX firmware will fail the command if the channel doesn't match the existing channel. But only if the TLV is correct. If the channel is wrong, _BOTH_ versions will give an error to 0x100+291, and allow 0x100+37 to succeed. It's just that 5.110.20.pXX will not have done anything useful */ priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID; if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1)) { priv->mesh_tlv = TLV_TYPE_MESH_ID; if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1)) priv->mesh_tlv = 0; } } else if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) && (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) { /* 10.0.0.pXX new firmwares should succeed with TLV * 0x100+37; Do not invoke command with old TLV. */ priv->mesh_tlv = TLV_TYPE_MESH_ID; if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1)) priv->mesh_tlv = 0; } /* Stop meshing until interface is brought up */ lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, 1); if (priv->mesh_tlv) { sprintf(priv->mesh_ssid, "mesh"); priv->mesh_ssid_len = 4; ret = 1; } lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret); return ret; } void lbs_start_mesh(struct lbs_private *priv) { lbs_add_mesh(priv); if (device_create_file(&priv->dev->dev, &dev_attr_lbs_mesh)) netdev_err(priv->dev, "cannot register lbs_mesh attribute\n"); } int lbs_deinit_mesh(struct lbs_private *priv) { struct net_device *dev = priv->dev; int ret = 0; lbs_deb_enter(LBS_DEB_MESH); if (priv->mesh_tlv) { device_remove_file(&dev->dev, &dev_attr_lbs_mesh); ret = 1; } lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret); return ret; } /** * lbs_mesh_stop - close the mshX interface * * @dev: A pointer to &net_device structure * returns: 0 */ static int lbs_mesh_stop(struct net_device *dev) { struct lbs_private *priv = dev->ml_priv; 
lbs_deb_enter(LBS_DEB_MESH); lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, lbs_mesh_get_channel(priv)); spin_lock_irq(&priv->driver_lock); netif_stop_queue(dev); netif_carrier_off(dev); spin_unlock_irq(&priv->driver_lock); lbs_update_mcast(priv); if (!lbs_iface_active(priv)) lbs_stop_iface(priv); lbs_deb_leave(LBS_DEB_MESH); return 0; } /** * lbs_mesh_dev_open - open the mshX interface * * @dev: A pointer to &net_device structure * returns: 0 or -EBUSY if monitor mode active */ static int lbs_mesh_dev_open(struct net_device *dev) { struct lbs_private *priv = dev->ml_priv; int ret = 0; lbs_deb_enter(LBS_DEB_NET); if (!priv->iface_running) { ret = lbs_start_iface(priv); if (ret) goto out; } spin_lock_irq(&priv->driver_lock); if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { ret = -EBUSY; spin_unlock_irq(&priv->driver_lock); goto out; } netif_carrier_on(dev); if (!priv->tx_pending_len) netif_wake_queue(dev); spin_unlock_irq(&priv->driver_lock); ret = lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, lbs_mesh_get_channel(priv)); out: lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); return ret; } static const struct net_device_ops mesh_netdev_ops = { .ndo_open = lbs_mesh_dev_open, .ndo_stop = lbs_mesh_stop, .ndo_start_xmit = lbs_hard_start_xmit, .ndo_set_mac_address = lbs_set_mac_address, .ndo_set_rx_mode = lbs_set_multicast_list, }; /** * lbs_add_mesh - add mshX interface * * @priv: A pointer to the &struct lbs_private structure * returns: 0 if successful, -X otherwise */ static int lbs_add_mesh(struct lbs_private *priv) { struct net_device *mesh_dev = NULL; struct wireless_dev *mesh_wdev; int ret = 0; lbs_deb_enter(LBS_DEB_MESH); /* Allocate a virtual mesh device */ mesh_wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); if (!mesh_wdev) { lbs_deb_mesh("init mshX wireless device failed\n"); ret = -ENOMEM; goto done; } mesh_dev = alloc_netdev(0, "msh%d", NET_NAME_UNKNOWN, ether_setup); if (!mesh_dev) { lbs_deb_mesh("init mshX device failed\n"); ret = -ENOMEM; 
goto err_free_wdev; } mesh_wdev->iftype = NL80211_IFTYPE_MESH_POINT; mesh_wdev->wiphy = priv->wdev->wiphy; mesh_wdev->netdev = mesh_dev; mesh_dev->ml_priv = priv; mesh_dev->ieee80211_ptr = mesh_wdev; priv->mesh_dev = mesh_dev; mesh_dev->netdev_ops = &mesh_netdev_ops; mesh_dev->ethtool_ops = &lbs_ethtool_ops; eth_hw_addr_inherit(mesh_dev, priv->dev); SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST; /* Register virtual mesh interface */ ret = register_netdev(mesh_dev); if (ret) { pr_err("cannot register mshX virtual interface\n"); goto err_free_netdev; } ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); if (ret) goto err_unregister; lbs_persist_config_init(mesh_dev); /* Everything successful */ ret = 0; goto done; err_unregister: unregister_netdev(mesh_dev); err_free_netdev: free_netdev(mesh_dev); err_free_wdev: kfree(mesh_wdev); done: lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret); return ret; } void lbs_remove_mesh(struct lbs_private *priv) { struct net_device *mesh_dev; mesh_dev = priv->mesh_dev; if (!mesh_dev) return; lbs_deb_enter(LBS_DEB_MESH); netif_stop_queue(mesh_dev); netif_carrier_off(mesh_dev); sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); lbs_persist_config_remove(mesh_dev); unregister_netdev(mesh_dev); priv->mesh_dev = NULL; kfree(mesh_dev->ieee80211_ptr); free_netdev(mesh_dev); lbs_deb_leave(LBS_DEB_MESH); } /*************************************************************************** * Sending and receiving */ struct net_device *lbs_mesh_set_dev(struct lbs_private *priv, struct net_device *dev, struct rxpd *rxpd) { if (priv->mesh_dev) { if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) { if (rxpd->rx_control & RxPD_MESH_FRAME) dev = priv->mesh_dev; } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) { if (rxpd->u.bss.bss_num == MESH_IFACE_ID) dev = priv->mesh_dev; } } return dev; } void lbs_mesh_set_txpd(struct lbs_private *priv, struct net_device *dev, struct txpd 
*txpd) { if (dev == priv->mesh_dev) { if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME); else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) txpd->u.bss.bss_num = MESH_IFACE_ID; } } /*************************************************************************** * Ethtool related */ static const char * const mesh_stat_strings[] = { "drop_duplicate_bcast", "drop_ttl_zero", "drop_no_fwd_route", "drop_no_buffers", "fwded_unicast_cnt", "fwded_bcast_cnt", "drop_blind_table", "tx_failed_cnt" }; void lbs_mesh_ethtool_get_stats(struct net_device *dev, struct ethtool_stats *stats, uint64_t *data) { struct lbs_private *priv = dev->ml_priv; struct cmd_ds_mesh_access mesh_access; int ret; lbs_deb_enter(LBS_DEB_ETHTOOL); /* Get Mesh Statistics */ ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access); if (ret) { memset(data, 0, MESH_STATS_NUM*(sizeof(uint64_t))); return; } priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); priv->mstats.fwd_drop_noroute = le32_to_cpu(mesh_access.data[2]); priv->mstats.fwd_drop_nobuf = le32_to_cpu(mesh_access.data[3]); priv->mstats.fwd_unicast_cnt = le32_to_cpu(mesh_access.data[4]); priv->mstats.fwd_bcast_cnt = le32_to_cpu(mesh_access.data[5]); priv->mstats.drop_blind = le32_to_cpu(mesh_access.data[6]); priv->mstats.tx_failed_cnt = le32_to_cpu(mesh_access.data[7]); data[0] = priv->mstats.fwd_drop_rbt; data[1] = priv->mstats.fwd_drop_ttl; data[2] = priv->mstats.fwd_drop_noroute; data[3] = priv->mstats.fwd_drop_nobuf; data[4] = priv->mstats.fwd_unicast_cnt; data[5] = priv->mstats.fwd_bcast_cnt; data[6] = priv->mstats.drop_blind; data[7] = priv->mstats.tx_failed_cnt; lbs_deb_enter(LBS_DEB_ETHTOOL); } int lbs_mesh_ethtool_get_sset_count(struct net_device *dev, int sset) { struct lbs_private *priv = dev->ml_priv; if (sset == ETH_SS_STATS && dev == priv->mesh_dev) return MESH_STATS_NUM; return -EOPNOTSUPP; } void 
lbs_mesh_ethtool_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *s) { int i; lbs_deb_enter(LBS_DEB_ETHTOOL); switch (stringset) { case ETH_SS_STATS: for (i = 0; i < MESH_STATS_NUM; i++) { memcpy(s + i * ETH_GSTRING_LEN, mesh_stat_strings[i], ETH_GSTRING_LEN); } break; } lbs_deb_enter(LBS_DEB_ETHTOOL); }
gpl-2.0
bju2000/android_kernel_lenovo_k30t
tools/perf/ui/browser.c
2193
15985
/*
 * Generic slang-based text-mode browser used by the perf TUI (hists,
 * annotate, map browsers, ...).  Provides color handling, scrolling,
 * keyboard navigation and refresh primitives over three backing stores:
 * list_head lists, rb-trees and argv-style string arrays.
 *
 * Review fixes vs. the previous revision:
 *  - ui_browser__warning(): '==' typo where '=' (assignment) was intended,
 *    so 'key' was never assigned and the function always returned 0.
 *  - __ui_browser__line_arrow_down(): stray double semicolon removed.
 *  - ui_browser__hide(): dropped bogus __maybe_unused on a used parameter.
 */
#include "../util.h"
#include "../cache.h"
#include "../../perf.h"
#include "libslang.h"
#include "ui.h"
#include "util.h"
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <stdlib.h>
#include <sys/ttydefaults.h>
#include "browser.h"
#include "helpline.h"
#include "keysyms.h"
#include "../color.h"

/* Map a percentage (and "is current row" state) to a colorset. */
static int ui_browser__percent_color(struct ui_browser *browser,
				     double percent, bool current)
{
	if (current && (!browser->use_navkeypressed || browser->navkeypressed))
		return HE_COLORSET_SELECTED;
	if (percent >= MIN_RED)
		return HE_COLORSET_TOP;
	if (percent >= MIN_GREEN)
		return HE_COLORSET_MEDIUM;
	return HE_COLORSET_NORMAL;
}

/* Set the current slang color, returning the previous one. */
int ui_browser__set_color(struct ui_browser *browser, int color)
{
	int ret = browser->current_color;

	browser->current_color = color;
	SLsmg_set_color(color);
	return ret;
}

void ui_browser__set_percent_color(struct ui_browser *browser,
				   double percent, bool current)
{
	int color = ui_browser__percent_color(browser, percent, current);

	ui_browser__set_color(browser, color);
}

/* Move the slang cursor to (y, x) relative to the browser's origin. */
void ui_browser__gotorc(struct ui_browser *browser, int y, int x)
{
	SLsmg_gotorc(browser->y + y, browser->x + x);
}

/*
 * Walk forward from @pos to the first entry not rejected by the
 * browser's filter, or NULL if none remains before wrapping.
 */
static struct list_head *
ui_browser__list_head_filter_entries(struct ui_browser *browser,
				     struct list_head *pos)
{
	do {
		if (!browser->filter || !browser->filter(browser, pos))
			return pos;
		pos = pos->next;
	} while (pos != browser->entries);

	return NULL;
}

/* Same as above, walking backwards. */
static struct list_head *
ui_browser__list_head_filter_prev_entries(struct ui_browser *browser,
					  struct list_head *pos)
{
	do {
		if (!browser->filter || !browser->filter(browser, pos))
			return pos;
		pos = pos->prev;
	} while (pos != browser->entries);

	return NULL;
}

/* Seek callback for browsers backed by a list_head list. */
void ui_browser__list_head_seek(struct ui_browser *browser,
				off_t offset, int whence)
{
	struct list_head *head = browser->entries;
	struct list_head *pos;

	if (browser->nr_entries == 0)
		return;

	switch (whence) {
	case SEEK_SET:
		pos = ui_browser__list_head_filter_entries(browser, head->next);
		break;
	case SEEK_CUR:
		pos = browser->top;
		break;
	case SEEK_END:
		pos = ui_browser__list_head_filter_prev_entries(browser,
								head->prev);
		break;
	default:
		return;
	}

	assert(pos != NULL);

	if (offset > 0) {
		while (offset-- != 0)
			pos = ui_browser__list_head_filter_entries(browser,
								   pos->next);
	} else {
		while (offset++ != 0)
			pos = ui_browser__list_head_filter_prev_entries(browser,
									pos->prev);
	}

	browser->top = pos;
}

/* Seek callback for browsers backed by an rb-tree. */
void ui_browser__rb_tree_seek(struct ui_browser *browser,
			      off_t offset, int whence)
{
	struct rb_root *root = browser->entries;
	struct rb_node *nd;

	switch (whence) {
	case SEEK_SET:
		nd = rb_first(root);
		break;
	case SEEK_CUR:
		nd = browser->top;
		break;
	case SEEK_END:
		nd = rb_last(root);
		break;
	default:
		return;
	}

	if (offset > 0) {
		while (offset-- != 0)
			nd = rb_next(nd);
	} else {
		while (offset++ != 0)
			nd = rb_prev(nd);
	}

	browser->top = nd;
}

/* Refresh callback for rb-tree browsers: redraw the visible rows. */
unsigned int ui_browser__rb_tree_refresh(struct ui_browser *browser)
{
	struct rb_node *nd;
	int row = 0;

	if (browser->top == NULL)
		browser->top = rb_first(browser->entries);

	nd = browser->top;

	while (nd != NULL) {
		ui_browser__gotorc(browser, row, 0);
		browser->write(browser, nd, row);
		if (++row == browser->height)
			break;
		nd = rb_next(nd);
	}

	return row;
}

bool ui_browser__is_current_entry(struct ui_browser *browser, unsigned row)
{
	return browser->top_idx + row == browser->index;
}

/* Fit the browser to the screen, reserving title and helpline rows. */
void ui_browser__refresh_dimensions(struct ui_browser *browser)
{
	browser->width = SLtt_Screen_Cols - 1;
	browser->height = SLtt_Screen_Rows - 2;
	browser->y = 1;
	browser->x = 0;
}

void ui_browser__handle_resize(struct ui_browser *browser)
{
	ui__refresh_dimensions(false);
	ui_browser__show(browser, browser->title, ui_helpline__current);
	ui_browser__refresh(browser);
}

/*
 * Pop up a warning window with printf-style formatting.  Returns the key
 * that dismissed it, or 0 if the message went to the helpline instead
 * (vasprintf failure).
 */
int ui_browser__warning(struct ui_browser *browser, int timeout,
			const char *format, ...)
{
	va_list args;
	char *text;
	int key = 0, err;

	va_start(args, format);
	err = vasprintf(&text, format, args);
	va_end(args);

	if (err < 0) {
		va_start(args, format);
		ui_helpline__vpush(format, args);
		va_end(args);
	} else {
		/*
		 * FIX: this used '==' instead of '=', so 'key' was never
		 * assigned and the function always returned 0.
		 */
		while ((key = ui__question_window("Warning!", text,
						  "Press any key...",
						  timeout)) == K_RESIZE)
			ui_browser__handle_resize(browser);
		free(text);
	}

	return key;
}

int ui_browser__help_window(struct ui_browser *browser, const char *text)
{
	int key;

	while ((key = ui__help_window(text)) == K_RESIZE)
		ui_browser__handle_resize(browser);

	return key;
}

bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text)
{
	int key;

	while ((key = ui__dialog_yesno(text)) == K_RESIZE)
		ui_browser__handle_resize(browser);

	return key == K_ENTER || toupper(key) == 'Y';
}

void ui_browser__reset_index(struct ui_browser *browser)
{
	browser->index = browser->top_idx = 0;
	browser->seek(browser, 0, SEEK_SET);
}

void __ui_browser__show_title(struct ui_browser *browser, const char *title)
{
	SLsmg_gotorc(0, 0);
	ui_browser__set_color(browser, HE_COLORSET_ROOT);
	slsmg_write_nstring(title, browser->width + 1);
}

void ui_browser__show_title(struct ui_browser *browser, const char *title)
{
	pthread_mutex_lock(&ui__lock);
	__ui_browser__show_title(browser, title);
	pthread_mutex_unlock(&ui__lock);
}

/*
 * Display the browser: paint the title and push a printf-formatted
 * helpline.  Returns the vasprintf() result (>= 0 on success) mapped to
 * 0, or -1 if formatting the helpline failed.
 */
int ui_browser__show(struct ui_browser *browser, const char *title,
		     const char *helpline, ...)
{
	int err;
	va_list ap;

	ui_browser__refresh_dimensions(browser);

	pthread_mutex_lock(&ui__lock);
	__ui_browser__show_title(browser, title);

	browser->title = title;
	free(browser->helpline);
	browser->helpline = NULL;

	va_start(ap, helpline);
	err = vasprintf(&browser->helpline, helpline, ap);
	va_end(ap);
	if (err > 0)
		ui_helpline__push(browser->helpline);
	pthread_mutex_unlock(&ui__lock);
	return err ? 0 : -1;
}

void ui_browser__hide(struct ui_browser *browser)
{
	pthread_mutex_lock(&ui__lock);
	ui_helpline__pop();
	free(browser->helpline);
	browser->helpline = NULL;
	pthread_mutex_unlock(&ui__lock);
}

/* Draw the right-hand scrollbar, with a diamond marking the position. */
static void ui_browser__scrollbar_set(struct ui_browser *browser)
{
	int height = browser->height, h = 0, pct = 0,
	    col = browser->width,
	    row = browser->y - 1;

	if (browser->nr_entries > 1) {
		pct = ((browser->index * (browser->height - 1)) /
		       (browser->nr_entries - 1));
	}

	SLsmg_set_char_set(1);

	while (h < height) {
		ui_browser__gotorc(browser, row++, col);
		SLsmg_write_char(h == pct ? SLSMG_DIAMOND_CHAR : SLSMG_CKBRD_CHAR);
		++h;
	}

	SLsmg_set_char_set(0);
}

/* Redraw entries, scrollbar and blank out the unused region. */
static int __ui_browser__refresh(struct ui_browser *browser)
{
	int row;
	int width = browser->width;

	row = browser->refresh(browser);
	ui_browser__set_color(browser, HE_COLORSET_NORMAL);

	if (!browser->use_navkeypressed || browser->navkeypressed)
		ui_browser__scrollbar_set(browser);
	else
		width += 1;

	SLsmg_fill_region(browser->y + row, browser->x,
			  browser->height - row, width, ' ');

	return 0;
}

int ui_browser__refresh(struct ui_browser *browser)
{
	pthread_mutex_lock(&ui__lock);
	__ui_browser__refresh(browser);
	pthread_mutex_unlock(&ui__lock);

	return 0;
}

/*
 * Here we're updating nr_entries _after_ we started browsing, i.e. we have to
 * forget about any reference to any entry in the underlying data structure,
 * that is why we do a SEEK_SET. Think about 'perf top' in the hists browser
 * after an output_resort and hist decay.
 */
void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries)
{
	off_t offset = nr_entries - browser->nr_entries;

	browser->nr_entries = nr_entries;

	if (offset < 0) {
		if (browser->top_idx < (u64)-offset)
			offset = -browser->top_idx;

		browser->index += offset;
		browser->top_idx += offset;
	}

	browser->top = NULL;
	browser->seek(browser, browser->top_idx, SEEK_SET);
}

/*
 * Main event loop: refresh, read a key, handle navigation keys locally
 * and hand any other key back to the caller.  Returns the unhandled key,
 * or -1 if a refresh failed.
 */
int ui_browser__run(struct ui_browser *browser, int delay_secs)
{
	int err, key;

	while (1) {
		off_t offset;

		pthread_mutex_lock(&ui__lock);
		err = __ui_browser__refresh(browser);
		SLsmg_refresh();
		pthread_mutex_unlock(&ui__lock);
		if (err < 0)
			break;

		key = ui__getch(delay_secs);

		if (key == K_RESIZE) {
			ui__refresh_dimensions(false);
			ui_browser__refresh_dimensions(browser);
			__ui_browser__show_title(browser, browser->title);
			ui_helpline__puts(browser->helpline);
			continue;
		}

		if (browser->use_navkeypressed && !browser->navkeypressed) {
			if (key == K_DOWN || key == K_UP ||
			    key == K_PGDN || key == K_PGUP ||
			    key == K_HOME || key == K_END ||
			    key == ' ') {
				browser->navkeypressed = true;
				continue;
			} else
				return key;
		}

		switch (key) {
		case K_DOWN:
			if (browser->index == browser->nr_entries - 1)
				break;
			++browser->index;
			if (browser->index == browser->top_idx + browser->height) {
				++browser->top_idx;
				browser->seek(browser, +1, SEEK_CUR);
			}
			break;
		case K_UP:
			if (browser->index == 0)
				break;
			--browser->index;
			if (browser->index < browser->top_idx) {
				--browser->top_idx;
				browser->seek(browser, -1, SEEK_CUR);
			}
			break;
		case K_PGDN:
		case ' ':
			if (browser->top_idx + browser->height > browser->nr_entries - 1)
				break;

			offset = browser->height;
			if (browser->index + offset > browser->nr_entries - 1)
				offset = browser->nr_entries - 1 - browser->index;
			browser->index += offset;
			browser->top_idx += offset;
			browser->seek(browser, +offset, SEEK_CUR);
			break;
		case K_PGUP:
			if (browser->top_idx == 0)
				break;

			if (browser->top_idx < browser->height)
				offset = browser->top_idx;
			else
				offset = browser->height;

			browser->index -= offset;
			browser->top_idx -= offset;
			browser->seek(browser, -offset, SEEK_CUR);
			break;
		case K_HOME:
			ui_browser__reset_index(browser);
			break;
		case K_END:
			offset = browser->height - 1;
			if (offset >= browser->nr_entries)
				offset = browser->nr_entries - 1;

			browser->index = browser->nr_entries - 1;
			browser->top_idx = browser->index - offset;
			browser->seek(browser, -offset, SEEK_END);
			break;
		default:
			return key;
		}
	}
	return -1;
}

/* Refresh callback for list_head browsers, honoring the filter. */
unsigned int ui_browser__list_head_refresh(struct ui_browser *browser)
{
	struct list_head *pos;
	struct list_head *head = browser->entries;
	int row = 0;

	if (browser->top == NULL || browser->top == browser->entries)
		browser->top = ui_browser__list_head_filter_entries(browser,
								    head->next);

	pos = browser->top;

	list_for_each_from(pos, head) {
		if (!browser->filter || !browser->filter(browser, pos)) {
			ui_browser__gotorc(browser, row, 0);
			browser->write(browser, pos, row);
			if (++row == browser->height)
				break;
		}
	}

	return row;
}

/* Default color scheme, overridable via 'colors.*' perf config keys. */
static struct ui_browser_colorset {
	const char *name, *fg, *bg;
	int colorset;
} ui_browser__colorsets[] = {
	{
		.colorset = HE_COLORSET_TOP,
		.name	  = "top",
		.fg	  = "red",
		.bg	  = "default",
	},
	{
		.colorset = HE_COLORSET_MEDIUM,
		.name	  = "medium",
		.fg	  = "green",
		.bg	  = "default",
	},
	{
		.colorset = HE_COLORSET_NORMAL,
		.name	  = "normal",
		.fg	  = "default",
		.bg	  = "default",
	},
	{
		.colorset = HE_COLORSET_SELECTED,
		.name	  = "selected",
		.fg	  = "black",
		.bg	  = "lightgray",
	},
	{
		.colorset = HE_COLORSET_CODE,
		.name	  = "code",
		.fg	  = "blue",
		.bg	  = "default",
	},
	{
		.colorset = HE_COLORSET_ADDR,
		.name	  = "addr",
		.fg	  = "magenta",
		.bg	  = "default",
	},
	{
		.colorset = HE_COLORSET_ROOT,
		.name	  = "root",
		.fg	  = "white",
		.bg	  = "blue",
	},
	{
		.name = NULL,
	}
};

/* perf_config() callback parsing 'colors.<name> = <fg>, <bg>' entries. */
static int ui_browser__color_config(const char *var, const char *value,
				    void *data __maybe_unused)
{
	char *fg = NULL, *bg;
	int i;

	/* same dir for all commands */
	if (prefixcmp(var, "colors.") != 0)
		return 0;

	for (i = 0; ui_browser__colorsets[i].name != NULL; ++i) {
		const char *name = var + 7;

		if (strcmp(ui_browser__colorsets[i].name, name) != 0)
			continue;

		fg = strdup(value);
		if (fg == NULL)
			break;

		bg = strchr(fg, ',');
		if (bg == NULL)
			break;

		*bg = '\0';
		while (isspace(*++bg));
		ui_browser__colorsets[i].bg = bg;
		ui_browser__colorsets[i].fg = fg;
		return 0;
	}

	free(fg);
	return -1;
}

/* Seek callback for argv (string array) browsers. */
void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
{
	switch (whence) {
	case SEEK_SET:
		browser->top = browser->entries;
		break;
	case SEEK_CUR:
		browser->top = browser->top + browser->top_idx + offset;
		break;
	case SEEK_END:
		browser->top = browser->top + browser->nr_entries + offset;
		break;
	default:
		return;
	}
}

/* Refresh callback for argv browsers. */
unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
{
	unsigned int row = 0, idx = browser->top_idx;
	char **pos;

	if (browser->top == NULL)
		browser->top = browser->entries;

	pos = (char **)browser->top;
	while (idx < browser->nr_entries) {
		if (!browser->filter || !browser->filter(browser, *pos)) {
			ui_browser__gotorc(browser, row, 0);
			browser->write(browser, pos, row);
			if (++row == browser->height)
				break;
		}

		++idx;
		++pos;
	}

	return row;
}

void __ui_browser__vline(struct ui_browser *browser, unsigned int column,
			 u16 start, u16 end)
{
	SLsmg_set_char_set(1);
	ui_browser__gotorc(browser, start, column);
	SLsmg_draw_vline(end - start + 1);
	SLsmg_set_char_set(0);
}

void ui_browser__write_graph(struct ui_browser *browser __maybe_unused,
			     int graph)
{
	SLsmg_set_char_set(1);
	SLsmg_write_char(graph);
	SLsmg_set_char_set(0);
}

/* Draw an upward jump arrow (start row below end row), clipped to view. */
static void __ui_browser__line_arrow_up(struct ui_browser *browser,
					unsigned int column,
					u64 start, u64 end)
{
	unsigned int row, end_row;

	SLsmg_set_char_set(1);

	if (start < browser->top_idx + browser->height) {
		row = start - browser->top_idx;
		ui_browser__gotorc(browser, row, column);
		SLsmg_write_char(SLSMG_LLCORN_CHAR);
		ui_browser__gotorc(browser, row, column + 1);
		SLsmg_draw_hline(2);

		if (row-- == 0)
			goto out;
	} else
		row = browser->height - 1;

	if (end > browser->top_idx)
		end_row = end - browser->top_idx;
	else
		end_row = 0;

	ui_browser__gotorc(browser, end_row, column);
	SLsmg_draw_vline(row - end_row + 1);

	ui_browser__gotorc(browser, end_row, column);
	if (end >= browser->top_idx) {
		SLsmg_write_char(SLSMG_ULCORN_CHAR);
		ui_browser__gotorc(browser, end_row, column + 1);
		SLsmg_write_char(SLSMG_HLINE_CHAR);
		ui_browser__gotorc(browser, end_row, column + 2);
		SLsmg_write_char(SLSMG_RARROW_CHAR);
	}
out:
	SLsmg_set_char_set(0);
}

/* Draw a downward jump arrow (start row above end row), clipped to view. */
static void __ui_browser__line_arrow_down(struct ui_browser *browser,
					  unsigned int column,
					  u64 start, u64 end)
{
	unsigned int row, end_row;

	SLsmg_set_char_set(1);

	if (start >= browser->top_idx) {
		row = start - browser->top_idx;
		ui_browser__gotorc(browser, row, column);
		SLsmg_write_char(SLSMG_ULCORN_CHAR);
		ui_browser__gotorc(browser, row, column + 1);
		SLsmg_draw_hline(2);

		if (row++ == 0)
			goto out;
	} else
		row = 0;

	if (end >= browser->top_idx + browser->height)
		end_row = browser->height - 1;
	else
		end_row = end - browser->top_idx;

	ui_browser__gotorc(browser, row, column);
	SLsmg_draw_vline(end_row - row + 1);

	ui_browser__gotorc(browser, end_row, column);
	if (end < browser->top_idx + browser->height) {
		SLsmg_write_char(SLSMG_LLCORN_CHAR);
		ui_browser__gotorc(browser, end_row, column + 1);
		SLsmg_write_char(SLSMG_HLINE_CHAR);
		ui_browser__gotorc(browser, end_row, column + 2);
		SLsmg_write_char(SLSMG_RARROW_CHAR);
	}
out:
	SLsmg_set_char_set(0);
}

void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
			      u64 start, u64 end)
{
	if (start > end)
		__ui_browser__line_arrow_up(browser, column, start, end);
	else
		__ui_browser__line_arrow_down(browser, column, start, end);
}

/* One-time init: load color config and install the color pairs. */
void ui_browser__init(void)
{
	int i = 0;

	perf_config(ui_browser__color_config, NULL);

	while (ui_browser__colorsets[i].name) {
		struct ui_browser_colorset *c = &ui_browser__colorsets[i++];
		sltt_set_color(c->colorset, c->name, c->fg, c->bg);
	}

	annotate_browser__init();
}
gpl-2.0
networkimprov/linux
drivers/gpu/drm/nouveau/core/core/gpuobj.c
2961
8037
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/object.h>
#include <core/gpuobj.h>

#include <subdev/instmem.h>
#include <subdev/bar.h>
#include <subdev/vm.h>

/*
 * Destructor counterpart of nouveau_gpuobj_create_(): optionally scrubs
 * the object's VRAM/instmem contents, releases its suballocation from the
 * parent's heap and tears down its own heap before destroying the base
 * object.
 */
void
nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
{
	int i;

	/* zero out backing storage on free when the object asked for it */
	if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0x00000000);
	}

	/* return our suballocated range to the parent gpuobj's heap */
	if (gpuobj->node) {
		nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap,
				&gpuobj->node);
	}

	/* a non-zero block_size means nouveau_mm_init() ran on our heap */
	if (gpuobj->heap.block_size)
		nouveau_mm_fini(&gpuobj->heap);

	nouveau_object_destroy(&gpuobj->base);
}

/*
 * Common gpuobj constructor.  Two allocation paths:
 *  - @pargpu != NULL: suballocate @size bytes from the nearest ancestor
 *    gpuobj that owns a heap;
 *  - @pargpu == NULL: allocate fresh backing storage from instmem, and
 *    if a BAR allocator exists, map it through the BAR for CPU access.
 * On success *pobject holds the new object (size @length, class
 * @pclass | NV_GPUOBJ_CLASS).
 *
 * NOTE(review): the refcount dance (atomic_inc on @parent, later
 * nouveau_object_ref(NULL, &parent)) transfers/releases references in a
 * specific order — do not reorder these calls.
 */
int
nouveau_gpuobj_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, u32 pclass,
		       struct nouveau_object *pargpu,
		       u32 size, u32 align, u32 flags,
		       int length, void **pobject)
{
	struct nouveau_instmem *imem = nouveau_instmem(parent);
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nouveau_gpuobj *gpuobj;
	struct nouveau_mm *heap = NULL;
	int ret, i;
	u64 addr;

	*pobject = NULL;

	if (pargpu) {
		/* find the closest ancestor gpuobj that has a heap */
		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
			if (nv_gpuobj(pargpu)->heap.block_size)
				break;
			pargpu = pargpu->parent;
		}

		if (unlikely(pargpu == NULL)) {
			nv_error(parent, "no gpuobj heap\n");
			return -EINVAL;
		}

		addr = nv_gpuobj(pargpu)->addr;
		heap = &nv_gpuobj(pargpu)->heap;
		atomic_inc(&parent->refcount);
	} else {
		/* no parent gpuobj: back the object with instmem directly */
		ret = imem->alloc(imem, parent, size, align, &parent);
		pargpu = parent;
		if (ret)
			return ret;

		addr = nv_memobj(pargpu)->addr;
		size = nv_memobj(pargpu)->size;

		/*
		 * If the chipset has a BAR allocator, prefer a BAR mapping
		 * of the instmem node as the object's parent.
		 */
		if (bar && bar->alloc) {
			struct nouveau_instobj *iobj = (void *)parent;
			struct nouveau_mem **mem = (void *)(iobj + 1);
			struct nouveau_mem *node = *mem;
			if (!bar->alloc(bar, parent, node, &pargpu)) {
				nouveau_object_ref(NULL, &parent);
				parent = pargpu;
			}
		}
	}

	ret = nouveau_object_create_(parent, engine, oclass, pclass |
				     NV_GPUOBJ_CLASS, length, pobject);
	nouveau_object_ref(NULL, &parent);
	gpuobj = *pobject;
	if (ret)
		return ret;

	gpuobj->parent = pargpu;
	gpuobj->flags = flags;
	gpuobj->addr = addr;
	gpuobj->size = size;

	if (heap) {
		/* carve our range out of the parent's heap */
		ret = nouveau_mm_head(heap, 1, size, size,
				      max(align, (u32)1), &gpuobj->node);
		if (ret)
			return ret;

		gpuobj->addr += gpuobj->node->offset;
	}

	/* the object may itself act as a heap for child gpuobjs */
	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
		ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
		if (ret)
			return ret;
	}

	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0x00000000);
	}

	return ret;
}

/* ctor arguments marshalled through nouveau_object_ctor() */
struct nouveau_gpuobj_class {
	struct nouveau_object *pargpu;
	u64 size;
	u32 align;
	u32 flags;
};

/* Generic ofuncs wrappers around the gpuobj create/destroy/init/fini. */
static int
_nouveau_gpuobj_ctor(struct nouveau_object *parent,
		     struct nouveau_object *engine,
		     struct nouveau_oclass *oclass, void *data, u32 size,
		     struct nouveau_object **pobject)
{
	struct nouveau_gpuobj_class *args = data;
	struct nouveau_gpuobj *object;
	int ret;

	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
				    args->size, args->align, args->flags,
				    &object);
	*pobject = nv_object(object);
	if (ret)
		return ret;

	return 0;
}

void
_nouveau_gpuobj_dtor(struct nouveau_object *object)
{
	nouveau_gpuobj_destroy(nv_gpuobj(object));
}

int
_nouveau_gpuobj_init(struct nouveau_object *object)
{
	return nouveau_gpuobj_init(nv_gpuobj(object));
}

int
_nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
{
	return nouveau_gpuobj_fini(nv_gpuobj(object), suspend);
}

/*
 * rd32/wr32 forward to the parent after adding the suballocation offset,
 * so reads/writes are relative to this object's own range.
 */
u32
_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
{
	struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
	struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
	if (gpuobj->node)
		addr += gpuobj->node->offset;
	return pfuncs->rd32(gpuobj->parent, addr);
}

void
_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
	struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
	struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
	if (gpuobj->node)
		addr += gpuobj->node->offset;
	pfuncs->wr32(gpuobj->parent, addr, data);
}

static struct nouveau_oclass
_nouveau_gpuobj_oclass = {
	.handle = 0x00000000,
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = _nouveau_gpuobj_ctor,
		.dtor = _nouveau_gpuobj_dtor,
		.init = _nouveau_gpuobj_init,
		.fini = _nouveau_gpuobj_fini,
		.rd32 = _nouveau_gpuobj_rd32,
		.wr32 = _nouveau_gpuobj_wr32,
	},
};

/*
 * Public helper: allocate a plain gpuobj of @size/@align under @pargpu.
 * @parent must be (or hang off) a subdev so an engine can be derived.
 */
int
nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
		   u32 size, u32 align, u32 flags,
		   struct nouveau_gpuobj **pgpuobj)
{
	struct nouveau_object *engine = parent;
	struct nouveau_gpuobj_class args = {
		.pargpu = pargpu,
		.size = size,
		.align = align,
		.flags = flags,
	};

	if (!nv_iclass(engine, NV_SUBDEV_CLASS))
		engine = engine->engine;
	BUG_ON(engine == NULL);

	return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass,
				   &args, sizeof(args),
				   (struct nouveau_object **)pgpuobj);
}

/* Map the gpuobj's backing memory through the BAR into @vma. */
int
nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access,
		   struct nouveau_vma *vma)
{
	struct nouveau_bar *bar = nouveau_bar(gpuobj);
	int ret = -EINVAL;

	if (bar && bar->umap) {
		struct nouveau_instobj *iobj = (void *)
			nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
		struct nouveau_mem **mem = (void *)(iobj + 1);
		ret = bar->umap(bar, *mem, access, vma);
	}

	return ret;
}

/* Map the gpuobj into a channel VM (12-bit = 4KiB page shift). */
int
nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
		      u32 access, struct nouveau_vma *vma)
{
	struct nouveau_instobj *iobj = (void *)
		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
	struct nouveau_mem **mem = (void *)(iobj + 1);
	int ret;

	ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, *mem);
	return 0;
}

void
nouveau_gpuobj_unmap(struct nouveau_vma *vma)
{
	if (vma->node) {
		nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
	}
}

/* the below is basically only here to support sharing the paged dma object
 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
 * anywhere else.
 */
static void
nouveau_gpudup_dtor(struct nouveau_object *object)
{
	struct nouveau_gpuobj *gpuobj = (void *)object;
	nouveau_object_ref(NULL, &gpuobj->parent);
	nouveau_object_destroy(&gpuobj->base);
}

static struct nouveau_oclass
nouveau_gpudup_oclass = {
	.handle = NV_GPUOBJ_CLASS,
	.ofuncs = &(struct nouveau_ofuncs) {
		.dtor = nouveau_gpudup_dtor,
		.init = nouveau_object_init,
		.fini = nouveau_object_fini,
	},
};

/*
 * Create a lightweight alias of @base: shares addr/size and holds a
 * reference on @base as its parent, no backing storage of its own.
 */
int
nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base,
		   struct nouveau_gpuobj **pgpuobj)
{
	struct nouveau_gpuobj *gpuobj;
	int ret;

	ret = nouveau_object_create(parent, parent->engine,
				    &nouveau_gpudup_oclass, 0, &gpuobj);
	*pgpuobj = gpuobj;
	if (ret)
		return ret;

	nouveau_object_ref(nv_object(base), &gpuobj->parent);
	gpuobj->addr = base->addr;
	gpuobj->size = base->size;
	return 0;
}
gpl-2.0
xDARKMATT3Rx/xDARKMATT3Rx_davidskernel
drivers/tty/hvc/hvc_console.c
3985
23665
/* * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM * Copyright (C) 2004 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. * Copyright (C) 2004 IBM Corporation * * Additional Author(s): * Ryan S. Arnold <rsa@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/console.h> #include <linux/cpumask.h> #include <linux/init.h> #include <linux/kbd_kern.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/module.h> #include <linux/major.h> #include <linux/sysrq.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/freezer.h> #include <linux/slab.h> #include <linux/serial_core.h> #include <asm/uaccess.h> #include "hvc_console.h" #define HVC_MAJOR 229 #define HVC_MINOR 0 /* * Wait this long per iteration while trying to push buffered data to the * hypervisor before allowing the tty to complete a close operation. */ #define HVC_CLOSE_WAIT (HZ/100) /* 1/10 of a second */ /* * These sizes are most efficient for vio, because they are the * native transfer size. We could make them selectable in the * future to better deal with backends that want other buffer sizes. 
*/ #define N_OUTBUF 16 #define N_INBUF 16 #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) static struct tty_driver *hvc_driver; static struct task_struct *hvc_task; /* Picks up late kicks after list walk but before schedule() */ static int hvc_kicked; static int hvc_init(void); #ifdef CONFIG_MAGIC_SYSRQ static int sysrq_pressed; #endif /* dynamic list of hvc_struct instances */ static LIST_HEAD(hvc_structs); /* * Protect the list of hvc_struct instances from inserts and removals during * list traversal. */ static DEFINE_SPINLOCK(hvc_structs_lock); /* * This value is used to assign a tty->index value to a hvc_struct based * upon order of exposure via hvc_probe(), when we can not match it to * a console candidate registered with hvc_instantiate(). */ static int last_hvc = -1; /* * Do not call this function with either the hvc_structs_lock or the hvc_struct * lock held. If successful, this function increments the kref reference * count against the target hvc_struct so it should be released when finished. */ static struct hvc_struct *hvc_get_by_index(int index) { struct hvc_struct *hp; unsigned long flags; spin_lock(&hvc_structs_lock); list_for_each_entry(hp, &hvc_structs, next) { spin_lock_irqsave(&hp->lock, flags); if (hp->index == index) { kref_get(&hp->kref); spin_unlock_irqrestore(&hp->lock, flags); spin_unlock(&hvc_structs_lock); return hp; } spin_unlock_irqrestore(&hp->lock, flags); } hp = NULL; spin_unlock(&hvc_structs_lock); return hp; } /* * Initial console vtermnos for console API usage prior to full console * initialization. Any vty adapter outside this range will not have usable * console interfaces but can still be used as a tty device. This has to be * static because kmalloc will not work during early console init. */ static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] = {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1}; /* * Console APIs, NOT TTY. 
These APIs are available immediately when * hvc_console_setup() finds adapters. */ static void hvc_console_print(struct console *co, const char *b, unsigned count) { char c[N_OUTBUF] __ALIGNED__; unsigned i = 0, n = 0; int r, donecr = 0, index = co->index; /* Console access attempt outside of acceptable console range. */ if (index >= MAX_NR_HVC_CONSOLES) return; /* This console adapter was removed so it is not usable. */ if (vtermnos[index] == -1) return; while (count > 0 || i > 0) { if (count > 0 && i < sizeof(c)) { if (b[n] == '\n' && !donecr) { c[i++] = '\r'; donecr = 1; } else { c[i++] = b[n++]; donecr = 0; --count; } } else { r = cons_ops[index]->put_chars(vtermnos[index], c, i); if (r <= 0) { /* throw away characters on error * but spin in case of -EAGAIN */ if (r != -EAGAIN) i = 0; } else if (r > 0) { i -= r; if (i > 0) memmove(c, c+r, i); } } } } static struct tty_driver *hvc_console_device(struct console *c, int *index) { if (vtermnos[c->index] == -1) return NULL; *index = c->index; return hvc_driver; } static int __init hvc_console_setup(struct console *co, char *options) { if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES) return -ENODEV; if (vtermnos[co->index] == -1) return -ENODEV; return 0; } static struct console hvc_console = { .name = "hvc", .write = hvc_console_print, .device = hvc_console_device, .setup = hvc_console_setup, .flags = CON_PRINTBUFFER, .index = -1, }; /* * Early console initialization. Precedes driver initialization. * * (1) we are first, and the user specified another driver * -- index will remain -1 * (2) we are first and the user specified no driver * -- index will be set to 0, then we will fail setup. 
* (3) we are first and the user specified our driver * -- index will be set to user specified driver, and we will fail * (4) we are after driver, and this initcall will register us * -- if the user didn't specify a driver then the console will match * * Note that for cases 2 and 3, we will match later when the io driver * calls hvc_instantiate() and call register again. */ static int __init hvc_console_init(void) { register_console(&hvc_console); return 0; } console_initcall(hvc_console_init); /* callback when the kboject ref count reaches zero. */ static void destroy_hvc_struct(struct kref *kref) { struct hvc_struct *hp = container_of(kref, struct hvc_struct, kref); unsigned long flags; spin_lock(&hvc_structs_lock); spin_lock_irqsave(&hp->lock, flags); list_del(&(hp->next)); spin_unlock_irqrestore(&hp->lock, flags); spin_unlock(&hvc_structs_lock); kfree(hp); } /* * hvc_instantiate() is an early console discovery method which locates * consoles * prior to the vio subsystem discovering them. Hotplugged * vty adapters do NOT get an hvc_instantiate() callback since they * appear after early console init. */ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) { struct hvc_struct *hp; if (index < 0 || index >= MAX_NR_HVC_CONSOLES) return -1; if (vtermnos[index] != -1) return -1; /* make sure no no tty has been registered in this index */ hp = hvc_get_by_index(index); if (hp) { kref_put(&hp->kref, destroy_hvc_struct); return -1; } vtermnos[index] = vtermno; cons_ops[index] = ops; /* reserve all indices up to and including this index */ if (last_hvc < index) last_hvc = index; /* if this index is what the user requested, then register * now (setup won't fail at this point). It's ok to just * call register again if previously .setup failed. 
*/ if (index == hvc_console.index) register_console(&hvc_console); return 0; } EXPORT_SYMBOL_GPL(hvc_instantiate); /* Wake the sleeping khvcd */ void hvc_kick(void) { hvc_kicked = 1; wake_up_process(hvc_task); } EXPORT_SYMBOL_GPL(hvc_kick); static void hvc_unthrottle(struct tty_struct *tty) { hvc_kick(); } /* * The TTY interface won't be used until after the vio layer has exposed the vty * adapter to the kernel. */ static int hvc_open(struct tty_struct *tty, struct file * filp) { struct hvc_struct *hp; unsigned long flags; int rc = 0; /* Auto increments kref reference if found. */ if (!(hp = hvc_get_by_index(tty->index))) return -ENODEV; spin_lock_irqsave(&hp->lock, flags); /* Check and then increment for fast path open. */ if (hp->count++ > 0) { tty_kref_get(tty); spin_unlock_irqrestore(&hp->lock, flags); hvc_kick(); return 0; } /* else count == 0 */ tty->driver_data = hp; hp->tty = tty_kref_get(tty); spin_unlock_irqrestore(&hp->lock, flags); if (hp->ops->notifier_add) rc = hp->ops->notifier_add(hp, hp->data); /* * If the notifier fails we return an error. The tty layer * will call hvc_close() after a failed open but we don't want to clean * up there so we'll clean up here and clear out the previously set * tty fields and return the kref reference. */ if (rc) { spin_lock_irqsave(&hp->lock, flags); hp->tty = NULL; spin_unlock_irqrestore(&hp->lock, flags); tty_kref_put(tty); tty->driver_data = NULL; kref_put(&hp->kref, destroy_hvc_struct); printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); } /* Force wakeup of the polling thread */ hvc_kick(); return rc; } static void hvc_close(struct tty_struct *tty, struct file * filp) { struct hvc_struct *hp; unsigned long flags; if (tty_hung_up_p(filp)) return; /* * No driver_data means that this close was issued after a failed * hvc_open by the tty layer's release_dev() function and we can just * exit cleanly because the kref reference wasn't made. 
*/ if (!tty->driver_data) return; hp = tty->driver_data; spin_lock_irqsave(&hp->lock, flags); if (--hp->count == 0) { /* We are done with the tty pointer now. */ hp->tty = NULL; spin_unlock_irqrestore(&hp->lock, flags); if (hp->ops->notifier_del) hp->ops->notifier_del(hp, hp->data); /* cancel pending tty resize work */ cancel_work_sync(&hp->tty_resize); /* * Chain calls chars_in_buffer() and returns immediately if * there is no buffered data otherwise sleeps on a wait queue * waking periodically to check chars_in_buffer(). */ tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT); } else { if (hp->count < 0) printk(KERN_ERR "hvc_close %X: oops, count is %d\n", hp->vtermno, hp->count); spin_unlock_irqrestore(&hp->lock, flags); } tty_kref_put(tty); kref_put(&hp->kref, destroy_hvc_struct); } static void hvc_hangup(struct tty_struct *tty) { struct hvc_struct *hp = tty->driver_data; unsigned long flags; int temp_open_count; if (!hp) return; /* cancel pending tty resize work */ cancel_work_sync(&hp->tty_resize); spin_lock_irqsave(&hp->lock, flags); /* * The N_TTY line discipline has problems such that in a close vs * open->hangup case this can be called after the final close so prevent * that from happening for now. */ if (hp->count <= 0) { spin_unlock_irqrestore(&hp->lock, flags); return; } temp_open_count = hp->count; hp->count = 0; hp->n_outbuf = 0; hp->tty = NULL; spin_unlock_irqrestore(&hp->lock, flags); if (hp->ops->notifier_hangup) hp->ops->notifier_hangup(hp, hp->data); while(temp_open_count) { --temp_open_count; tty_kref_put(tty); kref_put(&hp->kref, destroy_hvc_struct); } } /* * Push buffered characters whether they were just recently buffered or waiting * on a blocked hypervisor. Call this function with hp->lock held. 
*/ static int hvc_push(struct hvc_struct *hp) { int n; n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf); if (n <= 0) { if (n == 0 || n == -EAGAIN) { hp->do_wakeup = 1; return 0; } /* throw away output on error; this happens when there is no session connected to the vterm. */ hp->n_outbuf = 0; } else hp->n_outbuf -= n; if (hp->n_outbuf > 0) memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf); else hp->do_wakeup = 1; return n; } static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct hvc_struct *hp = tty->driver_data; unsigned long flags; int rsize, written = 0; /* This write was probably executed during a tty close. */ if (!hp) return -EPIPE; if (hp->count <= 0) return -EIO; spin_lock_irqsave(&hp->lock, flags); /* Push pending writes */ if (hp->n_outbuf > 0) hvc_push(hp); while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) { if (rsize > count) rsize = count; memcpy(hp->outbuf + hp->n_outbuf, buf, rsize); count -= rsize; buf += rsize; hp->n_outbuf += rsize; written += rsize; hvc_push(hp); } spin_unlock_irqrestore(&hp->lock, flags); /* * Racy, but harmless, kick thread if there is still pending data. */ if (hp->n_outbuf) hvc_kick(); return written; } /** * hvc_set_winsz() - Resize the hvc tty terminal window. * @work: work structure. * * The routine shall not be called within an atomic context because it * might sleep. 
* * Locking: hp->lock */ static void hvc_set_winsz(struct work_struct *work) { struct hvc_struct *hp; unsigned long hvc_flags; struct tty_struct *tty; struct winsize ws; hp = container_of(work, struct hvc_struct, tty_resize); spin_lock_irqsave(&hp->lock, hvc_flags); if (!hp->tty) { spin_unlock_irqrestore(&hp->lock, hvc_flags); return; } ws = hp->ws; tty = tty_kref_get(hp->tty); spin_unlock_irqrestore(&hp->lock, hvc_flags); tty_do_resize(tty, &ws); tty_kref_put(tty); } /* * This is actually a contract between the driver and the tty layer outlining * how much write room the driver can guarantee will be sent OR BUFFERED. This * driver MUST honor the return value. */ static int hvc_write_room(struct tty_struct *tty) { struct hvc_struct *hp = tty->driver_data; if (!hp) return -1; return hp->outbuf_size - hp->n_outbuf; } static int hvc_chars_in_buffer(struct tty_struct *tty) { struct hvc_struct *hp = tty->driver_data; if (!hp) return 0; return hp->n_outbuf; } /* * timeout will vary between the MIN and MAX values defined here. By default * and during console activity we will use a default MIN_TIMEOUT of 10. When * the console is idle, we increase the timeout value on each pass through * msleep until we reach the max. This may be noticeable as a brief (average * one second) delay on the console before the console responds to input when * there has been no input for some time. 
*/ #define MIN_TIMEOUT (10) #define MAX_TIMEOUT (2000) static u32 timeout = MIN_TIMEOUT; #define HVC_POLL_READ 0x00000001 #define HVC_POLL_WRITE 0x00000002 int hvc_poll(struct hvc_struct *hp) { struct tty_struct *tty; int i, n, poll_mask = 0; char buf[N_INBUF] __ALIGNED__; unsigned long flags; int read_total = 0; int written_total = 0; spin_lock_irqsave(&hp->lock, flags); /* Push pending writes */ if (hp->n_outbuf > 0) written_total = hvc_push(hp); /* Reschedule us if still some write pending */ if (hp->n_outbuf > 0) { poll_mask |= HVC_POLL_WRITE; /* If hvc_push() was not able to write, sleep a few msecs */ timeout = (written_total) ? 0 : MIN_TIMEOUT; } /* No tty attached, just skip */ tty = tty_kref_get(hp->tty); if (tty == NULL) goto bail; /* Now check if we can get data (are we throttled ?) */ if (test_bit(TTY_THROTTLED, &tty->flags)) goto throttled; /* If we aren't notifier driven and aren't throttled, we always * request a reschedule */ if (!hp->irq_requested) poll_mask |= HVC_POLL_READ; /* Read data if any */ for (;;) { int count = tty_buffer_request_room(tty, N_INBUF); /* If flip is full, just reschedule a later read */ if (count == 0) { poll_mask |= HVC_POLL_READ; break; } n = hp->ops->get_chars(hp->vtermno, buf, count); if (n <= 0) { /* Hangup the tty when disconnected from host */ if (n == -EPIPE) { spin_unlock_irqrestore(&hp->lock, flags); tty_hangup(tty); spin_lock_irqsave(&hp->lock, flags); } else if ( n == -EAGAIN ) { /* * Some back-ends can only ensure a certain min * num of bytes read, which may be > 'count'. * Let the tty clear the flip buff to make room. 
*/ poll_mask |= HVC_POLL_READ; } break; } for (i = 0; i < n; ++i) { #ifdef CONFIG_MAGIC_SYSRQ if (hp->index == hvc_console.index) { /* Handle the SysRq Hack */ /* XXX should support a sequence */ if (buf[i] == '\x0f') { /* ^O */ /* if ^O is pressed again, reset * sysrq_pressed and flip ^O char */ sysrq_pressed = !sysrq_pressed; if (sysrq_pressed) continue; } else if (sysrq_pressed) { handle_sysrq(buf[i]); sysrq_pressed = 0; continue; } } #endif /* CONFIG_MAGIC_SYSRQ */ tty_insert_flip_char(tty, buf[i], 0); } read_total += n; } throttled: /* Wakeup write queue if necessary */ if (hp->do_wakeup) { hp->do_wakeup = 0; tty_wakeup(tty); } bail: spin_unlock_irqrestore(&hp->lock, flags); if (read_total) { /* Activity is occurring, so reset the polling backoff value to a minimum for performance. */ timeout = MIN_TIMEOUT; tty_flip_buffer_push(tty); } if (tty) tty_kref_put(tty); return poll_mask; } EXPORT_SYMBOL_GPL(hvc_poll); /** * __hvc_resize() - Update terminal window size information. * @hp: HVC console pointer * @ws: Terminal window size structure * * Stores the specified window size information in the hvc structure of @hp. * The function schedule the tty resize update. * * Locking: Locking free; the function MUST be called holding hp->lock */ void __hvc_resize(struct hvc_struct *hp, struct winsize ws) { hp->ws = ws; schedule_work(&hp->tty_resize); } EXPORT_SYMBOL_GPL(__hvc_resize); /* * This kthread is either polling or interrupt driven. This is determined by * calling hvc_poll() who determines whether a console adapter support * interrupts. 
*/ static int khvcd(void *unused) { int poll_mask; struct hvc_struct *hp; set_freezable(); do { poll_mask = 0; hvc_kicked = 0; try_to_freeze(); wmb(); if (!cpus_are_in_xmon()) { spin_lock(&hvc_structs_lock); list_for_each_entry(hp, &hvc_structs, next) { poll_mask |= hvc_poll(hp); } spin_unlock(&hvc_structs_lock); } else poll_mask |= HVC_POLL_READ; if (hvc_kicked) continue; set_current_state(TASK_INTERRUPTIBLE); if (!hvc_kicked) { if (poll_mask == 0) schedule(); else { if (timeout < MAX_TIMEOUT) timeout += (timeout >> 6) + 1; msleep_interruptible(timeout); } } __set_current_state(TASK_RUNNING); } while (!kthread_should_stop()); return 0; } static int hvc_tiocmget(struct tty_struct *tty) { struct hvc_struct *hp = tty->driver_data; if (!hp || !hp->ops->tiocmget) return -EINVAL; return hp->ops->tiocmget(hp); } static int hvc_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct hvc_struct *hp = tty->driver_data; if (!hp || !hp->ops->tiocmset) return -EINVAL; return hp->ops->tiocmset(hp, set, clear); } #ifdef CONFIG_CONSOLE_POLL int hvc_poll_init(struct tty_driver *driver, int line, char *options) { return 0; } static int hvc_poll_get_char(struct tty_driver *driver, int line) { struct tty_struct *tty = driver->ttys[0]; struct hvc_struct *hp = tty->driver_data; int n; char ch; n = hp->ops->get_chars(hp->vtermno, &ch, 1); if (n == 0) return NO_POLL_CHAR; return ch; } static void hvc_poll_put_char(struct tty_driver *driver, int line, char ch) { struct tty_struct *tty = driver->ttys[0]; struct hvc_struct *hp = tty->driver_data; int n; do { n = hp->ops->put_chars(hp->vtermno, &ch, 1); } while (n <= 0); } #endif static const struct tty_operations hvc_ops = { .open = hvc_open, .close = hvc_close, .write = hvc_write, .hangup = hvc_hangup, .unthrottle = hvc_unthrottle, .write_room = hvc_write_room, .chars_in_buffer = hvc_chars_in_buffer, .tiocmget = hvc_tiocmget, .tiocmset = hvc_tiocmset, #ifdef CONFIG_CONSOLE_POLL .poll_init = hvc_poll_init, 
.poll_get_char = hvc_poll_get_char, .poll_put_char = hvc_poll_put_char, #endif }; struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, const struct hv_ops *ops, int outbuf_size) { struct hvc_struct *hp; int i; /* We wait until a driver actually comes along */ if (!hvc_driver) { int err = hvc_init(); if (err) return ERR_PTR(err); } hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size, GFP_KERNEL); if (!hp) return ERR_PTR(-ENOMEM); hp->vtermno = vtermno; hp->data = data; hp->ops = ops; hp->outbuf_size = outbuf_size; hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))]; kref_init(&hp->kref); INIT_WORK(&hp->tty_resize, hvc_set_winsz); spin_lock_init(&hp->lock); spin_lock(&hvc_structs_lock); /* * find index to use: * see if this vterm id matches one registered for console. */ for (i=0; i < MAX_NR_HVC_CONSOLES; i++) if (vtermnos[i] == hp->vtermno && cons_ops[i] == hp->ops) break; /* no matching slot, just use a counter */ if (i >= MAX_NR_HVC_CONSOLES) i = ++last_hvc; hp->index = i; list_add_tail(&(hp->next), &hvc_structs); spin_unlock(&hvc_structs_lock); return hp; } EXPORT_SYMBOL_GPL(hvc_alloc); int hvc_remove(struct hvc_struct *hp) { unsigned long flags; struct tty_struct *tty; spin_lock_irqsave(&hp->lock, flags); tty = tty_kref_get(hp->tty); if (hp->index < MAX_NR_HVC_CONSOLES) vtermnos[hp->index] = -1; /* Don't whack hp->irq because tty_hangup() will need to free the irq. */ spin_unlock_irqrestore(&hp->lock, flags); /* * We 'put' the instance that was grabbed when the kref instance * was initialized using kref_init(). Let the last holder of this * kref cause it to be removed, which will probably be the tty_vhangup * below. */ kref_put(&hp->kref, destroy_hvc_struct); /* * This function call will auto chain call hvc_hangup. */ if (tty) { tty_vhangup(tty); tty_kref_put(tty); } return 0; } EXPORT_SYMBOL_GPL(hvc_remove); /* Driver initialization: called as soon as someone uses hvc_alloc(). 
*/ static int hvc_init(void) { struct tty_driver *drv; int err; /* We need more than hvc_count adapters due to hotplug additions. */ drv = alloc_tty_driver(HVC_ALLOC_TTY_ADAPTERS); if (!drv) { err = -ENOMEM; goto out; } drv->driver_name = "hvc"; drv->name = "hvc"; drv->major = HVC_MAJOR; drv->minor_start = HVC_MINOR; drv->type = TTY_DRIVER_TYPE_SYSTEM; drv->init_termios = tty_std_termios; drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS; tty_set_operations(drv, &hvc_ops); /* Always start the kthread because there can be hotplug vty adapters * added later. */ hvc_task = kthread_run(khvcd, NULL, "khvcd"); if (IS_ERR(hvc_task)) { printk(KERN_ERR "Couldn't create kthread for console.\n"); err = PTR_ERR(hvc_task); goto put_tty; } err = tty_register_driver(drv); if (err) { printk(KERN_ERR "Couldn't register hvc console driver\n"); goto stop_thread; } /* * Make sure tty is fully registered before allowing it to be * found by hvc_console_device. */ smp_mb(); hvc_driver = drv; return 0; stop_thread: kthread_stop(hvc_task); hvc_task = NULL; put_tty: put_tty_driver(drv); out: return err; } /* This isn't particularly necessary due to this being a console driver * but it is nice to be thorough. */ static void __exit hvc_exit(void) { if (hvc_driver) { kthread_stop(hvc_task); tty_unregister_driver(hvc_driver); /* return tty_struct instances allocated in hvc_init(). */ put_tty_driver(hvc_driver); unregister_console(&hvc_console); } } module_exit(hvc_exit);
gpl-2.0
samurai0000000/linux
arch/blackfin/mach-common/cpufreq.c
4497
5568
/* * Blackfin core clock scaling * * Copyright 2008-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/fs.h> #include <linux/delay.h> #include <asm/blackfin.h> #include <asm/time.h> #include <asm/dpmc.h> /* this is the table of CCLK frequencies, in Hz */ /* .index is the entry in the auxiliary dpm_state_table[] */ static struct cpufreq_frequency_table bfin_freq_table[] = { { .frequency = CPUFREQ_TABLE_END, .index = 0, }, { .frequency = CPUFREQ_TABLE_END, .index = 1, }, { .frequency = CPUFREQ_TABLE_END, .index = 2, }, { .frequency = CPUFREQ_TABLE_END, .index = 0, }, }; static struct bfin_dpm_state { unsigned int csel; /* system clock divider */ unsigned int tscale; /* change the divider on the core timer interrupt */ } dpm_state_table[3]; #if defined(CONFIG_CYCLES_CLOCKSOURCE) /* * normalized to maximum frequency offset for CYCLES, * used in time-ts cycles clock source, but could be used * somewhere also. 
*/ unsigned long long __bfin_cycles_off; unsigned int __bfin_cycles_mod; #endif /**************************************************************************/ static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk) { unsigned long csel, min_cclk; int index; /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */ #if ANOMALY_05000273 || ANOMALY_05000274 || \ (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE)) min_cclk = sclk * 2; #else min_cclk = sclk; #endif csel = ((bfin_read_PLL_DIV() & CSEL) >> 4); for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) { bfin_freq_table[index].frequency = cclk >> index; dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */ dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1; pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n", bfin_freq_table[index].frequency, dpm_state_table[index].csel, dpm_state_table[index].tscale); } return; } static void bfin_adjust_core_timer(void *info) { unsigned int tscale; unsigned int index = *(unsigned int *)info; /* we have to adjust the core timer, because it is using cclk */ tscale = dpm_state_table[index].tscale; bfin_write_TSCALE(tscale); return; } static unsigned int bfin_getfreq_khz(unsigned int cpu) { /* Both CoreA/B have the same core clock */ return get_cclk() / 1000; } static int bfin_target(struct cpufreq_policy *poli, unsigned int target_freq, unsigned int relation) { unsigned int index, plldiv, cpu; unsigned long flags, cclk_hz; struct cpufreq_freqs freqs; static unsigned long lpj_ref; static unsigned int lpj_ref_freq; #if defined(CONFIG_CYCLES_CLOCKSOURCE) cycles_t cycles; #endif for_each_online_cpu(cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); if (!policy) continue; if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq, relation, &index)) return -EINVAL; cclk_hz = bfin_freq_table[index].frequency; freqs.old = bfin_getfreq_khz(0); freqs.new = cclk_hz; 
freqs.cpu = cpu; pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", cclk_hz, target_freq, freqs.old); cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); if (cpu == CPUFREQ_CPU) { flags = hard_local_irq_save(); plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; bfin_write_PLL_DIV(plldiv); on_each_cpu(bfin_adjust_core_timer, &index, 1); #if defined(CONFIG_CYCLES_CLOCKSOURCE) cycles = get_cycles(); SSYNC(); cycles += 10; /* ~10 cycles we lose after get_cycles() */ __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index); __bfin_cycles_mod = index; #endif if (!lpj_ref_freq) { lpj_ref = loops_per_jiffy; lpj_ref_freq = freqs.old; } if (freqs.new != freqs.old) { loops_per_jiffy = cpufreq_scale(lpj_ref, lpj_ref_freq, freqs.new); } hard_local_irq_restore(flags); } /* TODO: just test case for cycles clock source, remove later */ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } pr_debug("cpufreq: done\n"); return 0; } static int bfin_verify_speed(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, bfin_freq_table); } static int __init __bfin_cpu_init(struct cpufreq_policy *policy) { unsigned long cclk, sclk; cclk = get_cclk() / 1000; sclk = get_sclk() / 1000; if (policy->cpu == CPUFREQ_CPU) bfin_init_tables(cclk, sclk); policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ policy->cur = cclk; cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); } static struct freq_attr *bfin_freq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver bfin_driver = { .verify = bfin_verify_speed, .target = bfin_target, .get = bfin_getfreq_khz, .init = __bfin_cpu_init, .name = "bfin cpufreq", .owner = THIS_MODULE, .attr = bfin_freq_attr, }; static int __init bfin_cpu_init(void) { return cpufreq_register_driver(&bfin_driver); } static void __exit bfin_cpu_exit(void) { 
cpufreq_unregister_driver(&bfin_driver); } MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("cpufreq driver for Blackfin"); MODULE_LICENSE("GPL"); module_init(bfin_cpu_init); module_exit(bfin_cpu_exit);
gpl-2.0
gauravdatir/linux
drivers/parisc/hppb.c
4497
2745
/* ** hppb.c: ** HP-PB bus driver for the NOVA and K-Class systems. ** ** (c) Copyright 2002 Ryan Bradetich ** (c) Copyright 2002 Hewlett-Packard Company ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/ioport.h> #include <asm/io.h> #include <asm/hardware.h> #include <asm/parisc-device.h> struct hppb_card { unsigned long hpa; struct resource mmio_region; struct hppb_card *next; }; static struct hppb_card hppb_card_head = { .hpa = 0, .next = NULL, }; #define IO_IO_LOW offsetof(struct bc_module, io_io_low) #define IO_IO_HIGH offsetof(struct bc_module, io_io_high) /** * hppb_probe - Determine if the hppb driver should claim this device. * @dev: The device which has been found * * Determine if hppb driver should claim this chip (return 0) or not * (return 1). If so, initialize the chip and tell other partners in crime * they have work to do. 
*/ static int hppb_probe(struct parisc_device *dev) { int status; struct hppb_card *card = &hppb_card_head; while(card->next) { card = card->next; } if(card->hpa) { card->next = kzalloc(sizeof(struct hppb_card), GFP_KERNEL); if(!card->next) { printk(KERN_ERR "HP-PB: Unable to allocate memory.\n"); return 1; } card = card->next; } printk(KERN_INFO "Found GeckoBoa at 0x%llx\n", (unsigned long long) dev->hpa.start); card->hpa = dev->hpa.start; card->mmio_region.name = "HP-PB Bus"; card->mmio_region.flags = IORESOURCE_MEM; card->mmio_region.start = gsc_readl(dev->hpa.start + IO_IO_LOW); card->mmio_region.end = gsc_readl(dev->hpa.start + IO_IO_HIGH) - 1; status = ccio_request_resource(dev, &card->mmio_region); if(status < 0) { printk(KERN_ERR "%s: failed to claim HP-PB bus space (%pR)\n", __FILE__, &card->mmio_region); } return 0; } static struct parisc_device_id hppb_tbl[] = { { HPHW_BCPORT, HVERSION_REV_ANY_ID, 0x500, 0xc }, /* E25 and K */ { HPHW_BCPORT, 0x0, 0x501, 0xc }, /* E35 */ { HPHW_BCPORT, 0x0, 0x502, 0xc }, /* E45 */ { HPHW_BCPORT, 0x0, 0x503, 0xc }, /* E55 */ { 0, } }; static struct parisc_driver hppb_driver = { .name = "gecko_boa", .id_table = hppb_tbl, .probe = hppb_probe, }; /** * hppb_init - HP-PB bus initialization procedure. * * Register this driver. */ void __init hppb_init(void) { register_parisc_driver(&hppb_driver); }
gpl-2.0
mericon/Xperia-S-msm8660
drivers/input/touchscreen/htcpen.c
5009
5955
/* * HTC Shift touchscreen driver * * Copyright (C) 2008 Pau Oliva Fora <pof@eslack.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/isa.h> #include <linux/ioport.h> #include <linux/dmi.h> MODULE_AUTHOR("Pau Oliva Fora <pau@eslack.org>"); MODULE_DESCRIPTION("HTC Shift touchscreen driver"); MODULE_LICENSE("GPL"); #define HTCPEN_PORT_IRQ_CLEAR 0x068 #define HTCPEN_PORT_INIT 0x06c #define HTCPEN_PORT_INDEX 0x0250 #define HTCPEN_PORT_DATA 0x0251 #define HTCPEN_IRQ 3 #define DEVICE_ENABLE 0xa2 #define DEVICE_DISABLE 0xa3 #define X_INDEX 3 #define Y_INDEX 5 #define TOUCH_INDEX 0xb #define LSB_XY_INDEX 0xc #define X_AXIS_MAX 2040 #define Y_AXIS_MAX 2040 static int invert_x; module_param(invert_x, bool, 0644); MODULE_PARM_DESC(invert_x, "If set, X axis is inverted"); static int invert_y; module_param(invert_y, bool, 0644); MODULE_PARM_DESC(invert_y, "If set, Y axis is inverted"); static struct pnp_device_id pnp_ids[] = { { .id = "PNP0cc0" }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, pnp_ids); static irqreturn_t htcpen_interrupt(int irq, void *handle) { struct input_dev *htcpen_dev = handle; unsigned short x, y, xy; /* 0 = press; 1 = release */ outb_p(TOUCH_INDEX, HTCPEN_PORT_INDEX); if (inb_p(HTCPEN_PORT_DATA)) { input_report_key(htcpen_dev, BTN_TOUCH, 0); } else { outb_p(X_INDEX, HTCPEN_PORT_INDEX); x = inb_p(HTCPEN_PORT_DATA); outb_p(Y_INDEX, HTCPEN_PORT_INDEX); y = inb_p(HTCPEN_PORT_DATA); outb_p(LSB_XY_INDEX, HTCPEN_PORT_INDEX); xy = inb_p(HTCPEN_PORT_DATA); /* get high resolution value of X and Y using LSB */ x = X_AXIS_MAX - ((x * 8) + ((xy >> 4) & 0xf)); y = (y * 8) + (xy & 0xf); if (invert_x) x = X_AXIS_MAX - x; 
if (invert_y) y = Y_AXIS_MAX - y; if (x != X_AXIS_MAX && x != 0) { input_report_key(htcpen_dev, BTN_TOUCH, 1); input_report_abs(htcpen_dev, ABS_X, x); input_report_abs(htcpen_dev, ABS_Y, y); } } input_sync(htcpen_dev); inb_p(HTCPEN_PORT_IRQ_CLEAR); return IRQ_HANDLED; } static int htcpen_open(struct input_dev *dev) { outb_p(DEVICE_ENABLE, HTCPEN_PORT_INIT); return 0; } static void htcpen_close(struct input_dev *dev) { outb_p(DEVICE_DISABLE, HTCPEN_PORT_INIT); synchronize_irq(HTCPEN_IRQ); } static int __devinit htcpen_isa_probe(struct device *dev, unsigned int id) { struct input_dev *htcpen_dev; int err = -EBUSY; if (!request_region(HTCPEN_PORT_IRQ_CLEAR, 1, "htcpen")) { printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n", HTCPEN_PORT_IRQ_CLEAR); goto request_region1_failed; } if (!request_region(HTCPEN_PORT_INIT, 1, "htcpen")) { printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n", HTCPEN_PORT_INIT); goto request_region2_failed; } if (!request_region(HTCPEN_PORT_INDEX, 2, "htcpen")) { printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n", HTCPEN_PORT_INDEX); goto request_region3_failed; } htcpen_dev = input_allocate_device(); if (!htcpen_dev) { printk(KERN_ERR "htcpen: can't allocate device\n"); err = -ENOMEM; goto input_alloc_failed; } htcpen_dev->name = "HTC Shift EC TouchScreen"; htcpen_dev->id.bustype = BUS_ISA; htcpen_dev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); htcpen_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(htcpen_dev, ABS_X, 0, X_AXIS_MAX, 0, 0); input_set_abs_params(htcpen_dev, ABS_Y, 0, Y_AXIS_MAX, 0, 0); htcpen_dev->open = htcpen_open; htcpen_dev->close = htcpen_close; err = request_irq(HTCPEN_IRQ, htcpen_interrupt, 0, "htcpen", htcpen_dev); if (err) { printk(KERN_ERR "htcpen: irq busy\n"); goto request_irq_failed; } inb_p(HTCPEN_PORT_IRQ_CLEAR); err = input_register_device(htcpen_dev); if (err) goto input_register_failed; dev_set_drvdata(dev, htcpen_dev); return 0; input_register_failed: 
free_irq(HTCPEN_IRQ, htcpen_dev); request_irq_failed: input_free_device(htcpen_dev); input_alloc_failed: release_region(HTCPEN_PORT_INDEX, 2); request_region3_failed: release_region(HTCPEN_PORT_INIT, 1); request_region2_failed: release_region(HTCPEN_PORT_IRQ_CLEAR, 1); request_region1_failed: return err; } static int __devexit htcpen_isa_remove(struct device *dev, unsigned int id) { struct input_dev *htcpen_dev = dev_get_drvdata(dev); input_unregister_device(htcpen_dev); free_irq(HTCPEN_IRQ, htcpen_dev); release_region(HTCPEN_PORT_INDEX, 2); release_region(HTCPEN_PORT_INIT, 1); release_region(HTCPEN_PORT_IRQ_CLEAR, 1); dev_set_drvdata(dev, NULL); return 0; } #ifdef CONFIG_PM static int htcpen_isa_suspend(struct device *dev, unsigned int n, pm_message_t state) { outb_p(DEVICE_DISABLE, HTCPEN_PORT_INIT); return 0; } static int htcpen_isa_resume(struct device *dev, unsigned int n) { outb_p(DEVICE_ENABLE, HTCPEN_PORT_INIT); return 0; } #endif static struct isa_driver htcpen_isa_driver = { .probe = htcpen_isa_probe, .remove = __devexit_p(htcpen_isa_remove), #ifdef CONFIG_PM .suspend = htcpen_isa_suspend, .resume = htcpen_isa_resume, #endif .driver = { .owner = THIS_MODULE, .name = "htcpen", } }; static struct dmi_system_id __initdata htcshift_dmi_table[] = { { .ident = "Shift", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "High Tech Computer Corp"), DMI_MATCH(DMI_PRODUCT_NAME, "Shift"), }, }, { } }; static int __init htcpen_isa_init(void) { if (!dmi_check_system(htcshift_dmi_table)) return -ENODEV; return isa_register_driver(&htcpen_isa_driver, 1); } static void __exit htcpen_isa_exit(void) { isa_unregister_driver(&htcpen_isa_driver); } module_init(htcpen_isa_init); module_exit(htcpen_isa_exit);
gpl-2.0
Shabbypenguin/Photon-Kernel
drivers/input/touchscreen/htcpen.c
5009
5955
/* * HTC Shift touchscreen driver * * Copyright (C) 2008 Pau Oliva Fora <pof@eslack.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/isa.h> #include <linux/ioport.h> #include <linux/dmi.h> MODULE_AUTHOR("Pau Oliva Fora <pau@eslack.org>"); MODULE_DESCRIPTION("HTC Shift touchscreen driver"); MODULE_LICENSE("GPL"); #define HTCPEN_PORT_IRQ_CLEAR 0x068 #define HTCPEN_PORT_INIT 0x06c #define HTCPEN_PORT_INDEX 0x0250 #define HTCPEN_PORT_DATA 0x0251 #define HTCPEN_IRQ 3 #define DEVICE_ENABLE 0xa2 #define DEVICE_DISABLE 0xa3 #define X_INDEX 3 #define Y_INDEX 5 #define TOUCH_INDEX 0xb #define LSB_XY_INDEX 0xc #define X_AXIS_MAX 2040 #define Y_AXIS_MAX 2040 static int invert_x; module_param(invert_x, bool, 0644); MODULE_PARM_DESC(invert_x, "If set, X axis is inverted"); static int invert_y; module_param(invert_y, bool, 0644); MODULE_PARM_DESC(invert_y, "If set, Y axis is inverted"); static struct pnp_device_id pnp_ids[] = { { .id = "PNP0cc0" }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, pnp_ids); static irqreturn_t htcpen_interrupt(int irq, void *handle) { struct input_dev *htcpen_dev = handle; unsigned short x, y, xy; /* 0 = press; 1 = release */ outb_p(TOUCH_INDEX, HTCPEN_PORT_INDEX); if (inb_p(HTCPEN_PORT_DATA)) { input_report_key(htcpen_dev, BTN_TOUCH, 0); } else { outb_p(X_INDEX, HTCPEN_PORT_INDEX); x = inb_p(HTCPEN_PORT_DATA); outb_p(Y_INDEX, HTCPEN_PORT_INDEX); y = inb_p(HTCPEN_PORT_DATA); outb_p(LSB_XY_INDEX, HTCPEN_PORT_INDEX); xy = inb_p(HTCPEN_PORT_DATA); /* get high resolution value of X and Y using LSB */ x = X_AXIS_MAX - ((x * 8) + ((xy >> 4) & 0xf)); y = (y * 8) + (xy & 0xf); if (invert_x) x = X_AXIS_MAX - x; 
if (invert_y) y = Y_AXIS_MAX - y; if (x != X_AXIS_MAX && x != 0) { input_report_key(htcpen_dev, BTN_TOUCH, 1); input_report_abs(htcpen_dev, ABS_X, x); input_report_abs(htcpen_dev, ABS_Y, y); } } input_sync(htcpen_dev); inb_p(HTCPEN_PORT_IRQ_CLEAR); return IRQ_HANDLED; } static int htcpen_open(struct input_dev *dev) { outb_p(DEVICE_ENABLE, HTCPEN_PORT_INIT); return 0; } static void htcpen_close(struct input_dev *dev) { outb_p(DEVICE_DISABLE, HTCPEN_PORT_INIT); synchronize_irq(HTCPEN_IRQ); } static int __devinit htcpen_isa_probe(struct device *dev, unsigned int id) { struct input_dev *htcpen_dev; int err = -EBUSY; if (!request_region(HTCPEN_PORT_IRQ_CLEAR, 1, "htcpen")) { printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n", HTCPEN_PORT_IRQ_CLEAR); goto request_region1_failed; } if (!request_region(HTCPEN_PORT_INIT, 1, "htcpen")) { printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n", HTCPEN_PORT_INIT); goto request_region2_failed; } if (!request_region(HTCPEN_PORT_INDEX, 2, "htcpen")) { printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n", HTCPEN_PORT_INDEX); goto request_region3_failed; } htcpen_dev = input_allocate_device(); if (!htcpen_dev) { printk(KERN_ERR "htcpen: can't allocate device\n"); err = -ENOMEM; goto input_alloc_failed; } htcpen_dev->name = "HTC Shift EC TouchScreen"; htcpen_dev->id.bustype = BUS_ISA; htcpen_dev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); htcpen_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(htcpen_dev, ABS_X, 0, X_AXIS_MAX, 0, 0); input_set_abs_params(htcpen_dev, ABS_Y, 0, Y_AXIS_MAX, 0, 0); htcpen_dev->open = htcpen_open; htcpen_dev->close = htcpen_close; err = request_irq(HTCPEN_IRQ, htcpen_interrupt, 0, "htcpen", htcpen_dev); if (err) { printk(KERN_ERR "htcpen: irq busy\n"); goto request_irq_failed; } inb_p(HTCPEN_PORT_IRQ_CLEAR); err = input_register_device(htcpen_dev); if (err) goto input_register_failed; dev_set_drvdata(dev, htcpen_dev); return 0; input_register_failed: 
free_irq(HTCPEN_IRQ, htcpen_dev); request_irq_failed: input_free_device(htcpen_dev); input_alloc_failed: release_region(HTCPEN_PORT_INDEX, 2); request_region3_failed: release_region(HTCPEN_PORT_INIT, 1); request_region2_failed: release_region(HTCPEN_PORT_IRQ_CLEAR, 1); request_region1_failed: return err; } static int __devexit htcpen_isa_remove(struct device *dev, unsigned int id) { struct input_dev *htcpen_dev = dev_get_drvdata(dev); input_unregister_device(htcpen_dev); free_irq(HTCPEN_IRQ, htcpen_dev); release_region(HTCPEN_PORT_INDEX, 2); release_region(HTCPEN_PORT_INIT, 1); release_region(HTCPEN_PORT_IRQ_CLEAR, 1); dev_set_drvdata(dev, NULL); return 0; } #ifdef CONFIG_PM static int htcpen_isa_suspend(struct device *dev, unsigned int n, pm_message_t state) { outb_p(DEVICE_DISABLE, HTCPEN_PORT_INIT); return 0; } static int htcpen_isa_resume(struct device *dev, unsigned int n) { outb_p(DEVICE_ENABLE, HTCPEN_PORT_INIT); return 0; } #endif static struct isa_driver htcpen_isa_driver = { .probe = htcpen_isa_probe, .remove = __devexit_p(htcpen_isa_remove), #ifdef CONFIG_PM .suspend = htcpen_isa_suspend, .resume = htcpen_isa_resume, #endif .driver = { .owner = THIS_MODULE, .name = "htcpen", } }; static struct dmi_system_id __initdata htcshift_dmi_table[] = { { .ident = "Shift", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "High Tech Computer Corp"), DMI_MATCH(DMI_PRODUCT_NAME, "Shift"), }, }, { } }; static int __init htcpen_isa_init(void) { if (!dmi_check_system(htcshift_dmi_table)) return -ENODEV; return isa_register_driver(&htcpen_isa_driver, 1); } static void __exit htcpen_isa_exit(void) { isa_unregister_driver(&htcpen_isa_driver); } module_init(htcpen_isa_init); module_exit(htcpen_isa_exit);
gpl-2.0
blitzmohit/dragonboard-rtlinux-3.4
arch/mips/kernel/watch.c
9361
4633
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 David Daney */ #include <linux/sched.h> #include <asm/processor.h> #include <asm/watch.h> /* * Install the watch registers for the current thread. A maximum of * four registers are installed although the machine may have more. */ void mips_install_watch_registers(void) { struct mips3264_watch_reg_state *watches = &current->thread.watch.mips3264; switch (current_cpu_data.watch_reg_use_cnt) { default: BUG(); case 4: write_c0_watchlo3(watches->watchlo[3]); /* Write 1 to the I, R, and W bits to clear them, and 1 to G so all ASIDs are trapped. */ write_c0_watchhi3(0x40000007 | watches->watchhi[3]); case 3: write_c0_watchlo2(watches->watchlo[2]); write_c0_watchhi2(0x40000007 | watches->watchhi[2]); case 2: write_c0_watchlo1(watches->watchlo[1]); write_c0_watchhi1(0x40000007 | watches->watchhi[1]); case 1: write_c0_watchlo0(watches->watchlo[0]); write_c0_watchhi0(0x40000007 | watches->watchhi[0]); } } /* * Read back the watchhi registers so the user space debugger has * access to the I, R, and W bits. A maximum of four registers are * read although the machine may have more. */ void mips_read_watch_registers(void) { struct mips3264_watch_reg_state *watches = &current->thread.watch.mips3264; switch (current_cpu_data.watch_reg_use_cnt) { default: BUG(); case 4: watches->watchhi[3] = (read_c0_watchhi3() & 0x0fff); case 3: watches->watchhi[2] = (read_c0_watchhi2() & 0x0fff); case 2: watches->watchhi[1] = (read_c0_watchhi1() & 0x0fff); case 1: watches->watchhi[0] = (read_c0_watchhi0() & 0x0fff); } if (current_cpu_data.watch_reg_use_cnt == 1 && (watches->watchhi[0] & 7) == 0) { /* Pathological case of release 1 architecture that * doesn't set the condition bits. 
We assume that * since we got here, the watch condition was met and * signal that the conditions requested in watchlo * were met. */ watches->watchhi[0] |= (watches->watchlo[0] & 7); } } /* * Disable all watch registers. Although only four registers are * installed, all are cleared to eliminate the possibility of endless * looping in the watch handler. */ void mips_clear_watch_registers(void) { switch (current_cpu_data.watch_reg_count) { default: BUG(); case 8: write_c0_watchlo7(0); case 7: write_c0_watchlo6(0); case 6: write_c0_watchlo5(0); case 5: write_c0_watchlo4(0); case 4: write_c0_watchlo3(0); case 3: write_c0_watchlo2(0); case 2: write_c0_watchlo1(0); case 1: write_c0_watchlo0(0); } } __cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) { unsigned int t; if ((c->options & MIPS_CPU_WATCH) == 0) return; /* * Check which of the I,R and W bits are supported, then * disable the register. */ write_c0_watchlo0(7); t = read_c0_watchlo0(); write_c0_watchlo0(0); c->watch_reg_masks[0] = t & 7; /* Write the mask bits and read them back to determine which * can be used. 
*/ c->watch_reg_count = 1; c->watch_reg_use_cnt = 1; t = read_c0_watchhi0(); write_c0_watchhi0(t | 0xff8); t = read_c0_watchhi0(); c->watch_reg_masks[0] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo1(7); t = read_c0_watchlo1(); write_c0_watchlo1(0); c->watch_reg_masks[1] = t & 7; c->watch_reg_count = 2; c->watch_reg_use_cnt = 2; t = read_c0_watchhi1(); write_c0_watchhi1(t | 0xff8); t = read_c0_watchhi1(); c->watch_reg_masks[1] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo2(7); t = read_c0_watchlo2(); write_c0_watchlo2(0); c->watch_reg_masks[2] = t & 7; c->watch_reg_count = 3; c->watch_reg_use_cnt = 3; t = read_c0_watchhi2(); write_c0_watchhi2(t | 0xff8); t = read_c0_watchhi2(); c->watch_reg_masks[2] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; write_c0_watchlo3(7); t = read_c0_watchlo3(); write_c0_watchlo3(0); c->watch_reg_masks[3] = t & 7; c->watch_reg_count = 4; c->watch_reg_use_cnt = 4; t = read_c0_watchhi3(); write_c0_watchhi3(t | 0xff8); t = read_c0_watchhi3(); c->watch_reg_masks[3] |= (t & 0xff8); if ((t & 0x80000000) == 0) return; /* We use at most 4, but probe and report up to 8. */ c->watch_reg_count = 5; t = read_c0_watchhi4(); if ((t & 0x80000000) == 0) return; c->watch_reg_count = 6; t = read_c0_watchhi5(); if ((t & 0x80000000) == 0) return; c->watch_reg_count = 7; t = read_c0_watchhi6(); if ((t & 0x80000000) == 0) return; c->watch_reg_count = 8; }
gpl-2.0
Pafcholini/Nadia-kernel-LL-N910F-EUR-LL-OpenSource
drivers/media/usb/dvb-usb/friio.c
9617
12421
/* DVB USB compliant Linux driver for the Friio USB2.0 ISDB-T receiver.
 *
 * Copyright (C) 2009 Akihiro Tsukada <tskd2@yahoo.co.jp>
 *
 * This module is based off the the gl861 and vp702x modules.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation, version 2.
 *
 * see Documentation/dvb/README.dvb-usb for more information
 */
#include "friio.h"

/* debug */
int dvb_usb_friio_debug;
module_param_named(debug, dvb_usb_friio_debug, int, 0644);
MODULE_PARM_DESC(debug,
		 "set debugging level (1=info,2=xfer,4=rc,8=fe (or-able))."
		 DVB_USB_DEBUG_STATUS);

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/**
 * Indirect I2C access to the PLL via FE.
 * whole I2C protocol data to the PLL is sent via the FE's I2C register.
 * This is done by a control msg to the FE with the I2C data accompanied, and
 * a specific USB request number is assigned for that purpose.
 *
 * this func sends wbuf[1..] to the I2C register wbuf[0] at addr (= at FE).
 * TODO: refoctored, smarter i2c functions.
 */
static int gl861_i2c_ctrlmsg_data(struct dvb_usb_device *d, u8 addr,
				  u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
	u16 index = wbuf[0];	/* must be JDVBT90502_2ND_I2C_REG(=0xFE) */
	u16 value = addr << (8 + 1);
	int wo = (rbuf == NULL || rlen == 0);	/* write only */
	u8 req, type;

	deb_xfer("write to PLL:0x%02x via FE reg:0x%02x, len:%d\n",
		 wbuf[1], wbuf[0], wlen - 1);
	if (wo && wlen >= 2) {
		req = GL861_REQ_I2C_DATA_CTRL_WRITE;
		type = GL861_WRITE;
		udelay(20);
		return usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
				       req, type, value, index,
				       &wbuf[1], wlen - 1, 2000);
	}

	/* only write transactions are supported on this indirect path */
	deb_xfer("not supported ctrl-msg, aborting.");
	return -EINVAL;
}

/* normal I2C access (without extra data arguments).
 * write to the register wbuf[0] at I2C address addr with the value wbuf[1],
 * or read from the register wbuf[0].
 * register address can be 16bit (wbuf[2]<<8 | wbuf[0]) if wlen==3
 */
static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
			 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
	u16 index;
	u16 value = addr << (8 + 1);
	int wo = (rbuf == NULL || rlen == 0);	/* write-only */
	u8 req, type;
	unsigned int pipe;

	/* special case for the indirect I2C access to the PLL via FE, */
	if (addr == friio_fe_config.demod_address &&
	    wbuf[0] == JDVBT90502_2ND_I2C_REG)
		return gl861_i2c_ctrlmsg_data(d, addr, wbuf, wlen, rbuf, rlen);

	if (wo) {
		req = GL861_REQ_I2C_WRITE;
		type = GL861_WRITE;
		pipe = usb_sndctrlpipe(d->udev, 0);
	} else {		/* rw */
		req = GL861_REQ_I2C_READ;
		type = GL861_READ;
		pipe = usb_rcvctrlpipe(d->udev, 0);
	}

	switch (wlen) {
	case 1:
		index = wbuf[0];
		break;
	case 2:
		index = wbuf[0];
		value = value + wbuf[1];
		break;
	case 3:
		/* special case for 16bit register-address */
		index = (wbuf[2] << 8) | wbuf[0];
		value = value + wbuf[1];
		break;
	default:
		deb_xfer("wlen = %x, aborting.", wlen);
		return -EINVAL;
	}
	msleep(1);
	return usb_control_msg(d->udev, pipe, req, type,
			       value, index, rbuf, rlen, 2000);
}

/* I2C */
static int gl861_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
			  int num)
{
	struct dvb_usb_device *d = i2c_get_adapdata(adap);
	int i;

	if (num > 2)
		return -EINVAL;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EAGAIN;

	for (i = 0; i < num; i++) {
		/* write/read request */
		if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) {
			if (gl861_i2c_msg(d, msg[i].addr,
					  msg[i].buf, msg[i].len,
					  msg[i + 1].buf, msg[i + 1].len) < 0)
				break;
			i++;	/* consumed the following read msg too */
		} else
			if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
					  msg[i].len, NULL, 0) < 0)
				break;
	}

	mutex_unlock(&d->i2c_mutex);
	return i;
}

static u32 gl861_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C;
}

/*
 * Bit-bang the external control shift register: 2-bit header, then a
 * 32-bit (saturation, R, G, B) LED word, clocking each bit in with a
 * strobe/clock pair of I2C writes.  Returns 1 when all 70 transfers
 * succeeded, 0 otherwise.
 */
static int friio_ext_ctl(struct dvb_usb_adapter *adap,
			 u32 sat_color, int lnb_on)
{
	int i;
	int ret;
	struct i2c_msg msg;
	u8 *buf;
	u32 mask;
	u8 lnb = (lnb_on) ? FRIIO_CTL_LNB : 0;

	buf = kmalloc(2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	msg.addr = 0x00;
	msg.flags = 0;
	msg.len = 2;
	msg.buf = buf;

	buf[0] = 0x00;

	/* send 2bit header (&B10) */
	buf[1] = lnb | FRIIO_CTL_LED | FRIIO_CTL_STROBE;
	ret = gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
	buf[1] |= FRIIO_CTL_CLK;
	ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);

	buf[1] = lnb | FRIIO_CTL_STROBE;
	ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
	buf[1] |= FRIIO_CTL_CLK;
	ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);

	/* send 32bit(satur, R, G, B) data in serial */
	mask = 1 << 31;
	for (i = 0; i < 32; i++) {
		buf[1] = lnb | FRIIO_CTL_STROBE;
		if (sat_color & mask)
			buf[1] |= FRIIO_CTL_LED;
		ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
		buf[1] |= FRIIO_CTL_CLK;
		ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
		mask >>= 1;
	}

	/* set the strobe off */
	buf[1] = lnb;
	ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
	buf[1] |= FRIIO_CTL_CLK;
	ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);

	kfree(buf);
	/* 4 + 64 + 2 transfers, each returning 1 on success */
	return (ret == 70);
}


static int friio_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff);

/* TODO: move these init cmds to the FE's init routine? */
static u8 streaming_init_cmds[][2] = {
	{0x33, 0x08},
	{0x37, 0x40},
	{0x3A, 0x1F},
	{0x3B, 0xFF},
	{0x3C, 0x1F},
	{0x3D, 0xFF},
	{0x38, 0x00},
	{0x35, 0x00},
	{0x39, 0x00},
	{0x36, 0x00},
};
static int cmdlen = sizeof(streaming_init_cmds) / 2;

/*
 * Command sequence in this init function is a replay
 * of the captured USB commands from the Windows proprietary driver.
 *
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
static int friio_initialize(struct dvb_usb_device *d)
{
	int ret;
	int i;
	int retry = 0;
	u8 *rbuf, *wbuf;

	deb_info("%s called.\n", __func__);

	wbuf = kmalloc(3, GFP_KERNEL);
	if (!wbuf)
		return -ENOMEM;

	rbuf = kmalloc(2, GFP_KERNEL);
	if (!rbuf) {
		kfree(wbuf);
		return -ENOMEM;
	}

	/* use gl861_i2c_msg instead of gl861_i2c_xfer(), */
	/* because the i2c device is not set up yet. */
	wbuf[0] = 0x11;
	wbuf[1] = 0x02;
	ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
	if (ret < 0)
		goto error;
	msleep(2);

	wbuf[0] = 0x11;
	wbuf[1] = 0x00;
	ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
	if (ret < 0)
		goto error;
	msleep(1);

	/* following msgs should be in the FE's init code? */
	/* cmd sequence to identify the device type? (friio black/white) */
	wbuf[0] = 0x03;
	wbuf[1] = 0x80;
	/* can't use gl861_i2c_cmd, as the register-addr is 16bit(0x0100) */
	ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
			      GL861_REQ_I2C_DATA_CTRL_WRITE, GL861_WRITE,
			      0x1200, 0x0100, wbuf, 2, 2000);
	if (ret < 0)
		goto error;

	msleep(2);
	wbuf[0] = 0x00;
	wbuf[2] = 0x01;		/* reg.0x0100 */
	wbuf[1] = 0x00;
	ret = gl861_i2c_msg(d, 0x12 >> 1, wbuf, 3, rbuf, 2);
	/* my Friio White returns 0xffff. */
	if (ret < 0 || rbuf[0] != 0xff || rbuf[1] != 0xff)
		goto error;

	msleep(2);
	wbuf[0] = 0x03;
	wbuf[1] = 0x80;
	ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
			      GL861_REQ_I2C_DATA_CTRL_WRITE, GL861_WRITE,
			      0x9000, 0x0100, wbuf, 2, 2000);
	if (ret < 0)
		goto error;

	msleep(2);
	wbuf[0] = 0x00;
	wbuf[2] = 0x01;		/* reg.0x0100 */
	wbuf[1] = 0x00;
	ret = gl861_i2c_msg(d, 0x90 >> 1, wbuf, 3, rbuf, 2);
	/* my Friio White returns 0xffff again. */
	if (ret < 0 || rbuf[0] != 0xff || rbuf[1] != 0xff)
		goto error;

	msleep(1);

restart:
	/* ============ start DEMOD init cmds ================== */
	/* read PLL status to clear the POR bit */
	wbuf[0] = JDVBT90502_2ND_I2C_REG;
	wbuf[1] = (FRIIO_PLL_ADDR << 1) + 1;	/* +1 for reading */
	ret = gl861_i2c_msg(d, FRIIO_DEMOD_ADDR, wbuf, 2, NULL, 0);
	if (ret < 0)
		goto error;

	msleep(5);
	/* note: DEMODULATOR has 16bit register-address. */
	wbuf[0] = 0x00;
	wbuf[2] = 0x01;		/* reg addr: 0x0100 */
	wbuf[1] = 0x00;		/* val: not used */
	ret = gl861_i2c_msg(d, FRIIO_DEMOD_ADDR, wbuf, 3, rbuf, 1);
	if (ret < 0)
		goto error;

/*
	msleep(1);
	wbuf[0] = 0x80;
	wbuf[1] = 0x00;
	ret = gl861_i2c_msg(d, FRIIO_DEMOD_ADDR, wbuf, 2, rbuf, 1);
	if (ret < 0)
		goto error;
 */
	if (rbuf[0] & 0x80) {	/* still in PowerOnReset state? */
		if (++retry > 3) {
			deb_info("failed to get the correct"
				 " FE demod status:0x%02x\n", rbuf[0]);
			goto error;
		}
		msleep(100);
		goto restart;
	}

	/* TODO: check return value in rbuf */
	/* =========== end DEMOD init cmds ===================== */
	msleep(1);

	wbuf[0] = 0x30;
	wbuf[1] = 0x04;
	ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
	if (ret < 0)
		goto error;

	msleep(2);
	/* following 2 cmds unnecessary? */
	wbuf[0] = 0x00;
	wbuf[1] = 0x01;
	ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
	if (ret < 0)
		goto error;

	wbuf[0] = 0x06;
	wbuf[1] = 0x0F;
	ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
	if (ret < 0)
		goto error;

	/* some streaming ctl cmds (maybe) */
	msleep(10);
	for (i = 0; i < cmdlen; i++) {
		ret = gl861_i2c_msg(d, 0x00, streaming_init_cmds[i], 2,
				    NULL, 0);
		if (ret < 0)
			goto error;
		msleep(1);
	}
	msleep(20);

	/* change the LED color etc. */
	ret = friio_streaming_ctrl(&d->adapter[0], 0);
	if (ret < 0)
		goto error;

	/*
	 * BUGFIX: the original code returned here without freeing wbuf and
	 * rbuf, leaking both buffers on every successful initialization.
	 */
	kfree(wbuf);
	kfree(rbuf);
	return 0;

error:
	kfree(wbuf);
	kfree(rbuf);
	deb_info("%s:ret == %d\n", __func__, ret);
	return -EIO;
}

/* Callbacks for DVB USB */

/* Select the LED color/saturation (and keep the LNB on) per stream state. */
static int friio_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
	int ret;

	deb_info("%s called.(%d)\n", __func__, onoff);

	/* set the LED color and saturation (and LNB on) */
	if (onoff)
		ret = friio_ext_ctl(adap, 0x6400ff64, 1);
	else
		ret = friio_ext_ctl(adap, 0x96ff00ff, 1);

	if (ret != 1) {
		deb_info("%s failed to send cmdx. ret==%d\n",
			 __func__, ret);
		return -EREMOTEIO;
	}
	return 0;
}

static int friio_frontend_attach(struct dvb_usb_adapter *adap)
{
	if (friio_initialize(adap->dev) < 0)
		return -EIO;

	adap->fe_adap[0].fe = jdvbt90502_attach(adap->dev);
	if (adap->fe_adap[0].fe == NULL)
		return -EIO;

	return 0;
}

/* DVB USB Driver stuff */
static struct dvb_usb_device_properties friio_properties;

static int friio_probe(struct usb_interface *intf,
		       const struct usb_device_id *id)
{
	struct dvb_usb_device *d;
	struct usb_host_interface *alt;
	int ret;

	if (intf->num_altsetting < GL861_ALTSETTING_COUNT)
		return -ENODEV;

	alt = usb_altnum_to_altsetting(intf, FRIIO_BULK_ALTSETTING);
	if (alt == NULL) {
		deb_rc("not alt found!\n");
		return -ENODEV;
	}
	ret = usb_set_interface(interface_to_usbdev(intf),
				alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
	if (ret != 0) {
		deb_rc("failed to set alt-setting!\n");
		return ret;
	}

	ret = dvb_usb_device_init(intf, &friio_properties,
				  THIS_MODULE, &d, adapter_nr);
	if (ret == 0)
		friio_streaming_ctrl(&d->adapter[0], 1);

	return ret;
}


struct jdvbt90502_config friio_fe_config = {
	.demod_address = FRIIO_DEMOD_ADDR,
	.pll_address = FRIIO_PLL_ADDR,
};

static struct i2c_algorithm gl861_i2c_algo = {
	.master_xfer = gl861_i2c_xfer,
	.functionality = gl861_i2c_func,
};

static struct usb_device_id friio_table[] = {
	{ USB_DEVICE(USB_VID_774, USB_PID_FRIIO_WHITE) },
	{ }			/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, friio_table);


static struct dvb_usb_device_properties friio_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,

	.size_of_priv = 0,

	.num_adapters = 1,
	.adapter = {
		/* caps:0 =>  no pid filter, 188B TS packet */
		/* GL861 has a HW pid filter, but no info available. */
		{
		.num_frontends = 1,
		.fe = {{
			.caps = 0,
			.frontend_attach = friio_frontend_attach,
			.streaming_ctrl = friio_streaming_ctrl,

			.stream = {
				.type = USB_BULK,
				/* count <= MAX_NO_URBS_FOR_DATA_STREAM(10) */
				.count = 8,
				.endpoint = 0x01,
				.u = {
					/* GL861 has 6KB buf inside */
					.bulk = {
						.buffersize = 16384,
					}
				}
			},
		}},
		}
	},
	.i2c_algo = &gl861_i2c_algo,

	.num_device_descs = 1,
	.devices = {
		{
			.name = "774 Friio ISDB-T USB2.0",
			.cold_ids = { NULL },
			.warm_ids = { &friio_table[0], NULL },
		},
	}
};

static struct usb_driver friio_driver = {
	.name = "dvb_usb_friio",
	.probe = friio_probe,
	.disconnect = dvb_usb_device_exit,
	.id_table = friio_table,
};

module_usb_driver(friio_driver);

MODULE_AUTHOR("Akihiro Tsukada <tskd2@yahoo.co.jp>");
MODULE_DESCRIPTION("Driver for Friio ISDB-T USB2.0 Receiver");
MODULE_VERSION("0.2");
MODULE_LICENSE("GPL");
gpl-2.0
geekboxzone/lollipop_kernel
drivers/media/usb/tm6000/tm6000-i2c.c
9617
8434
/*
 * tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices
 *
 * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
 *
 * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
 *	- Fix SMBus Read Byte command
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/i2c.h>

#include "tm6000.h"
#include "tm6000-regs.h"
#include <media/v4l2-common.h>
#include <media/tuner.h>
#include "tuner-xc2028.h"

/* ----------------------------------------------------------- */

static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");

/* NOTE(review): unbraced `if` wrapping a do/while — callers must not
 * follow i2c_dprintk() with a dangling `else`. */
#define i2c_dprintk(lvl, fmt, args...) if (i2c_debug >= lvl) do { \
	printk(KERN_DEBUG "%s at %s: " fmt, \
	       dev->name, __func__, ##args); } while (0)

/*
 * Write `len` bytes from `buf` to I2C register `reg` at 7-bit-shifted
 * address `addr`, via a single vendor control request.  Returns the
 * usb_control_msg() result, or -1 on invalid arguments.
 */
static int tm6000_i2c_send_regs(struct tm6000_core *dev, unsigned char addr,
				__u8 reg, char *buf, int len)
{
	int rc;
	unsigned int i2c_packet_limit = 16;

	if (dev->dev_type == TM6010)
		i2c_packet_limit = 80;

	if (!buf)
		return -1;

	if (len < 1 || len > i2c_packet_limit) {
		printk(KERN_ERR "Incorrect length of i2c packet = %d, limit set to %d\n",
		       len, i2c_packet_limit);
		return -1;
	}

	/* capture mutex */
	rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR |
		USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN,
		addr | reg << 8, 0, buf, len);

	if (rc < 0) {
		/* release mutex */
		return rc;
	}

	/* release mutex */
	return rc;
}

/* Generic read - doesn't work fine with 16bit registers */
static int tm6000_i2c_recv_regs(struct tm6000_core *dev, unsigned char addr,
				__u8 reg, char *buf, int len)
{
	int rc;
	u8 b[2];
	unsigned int i2c_packet_limit = 16;

	if (dev->dev_type == TM6010)
		i2c_packet_limit = 64;

	if (!buf)
		return -1;

	if (len < 1 || len > i2c_packet_limit) {
		printk(KERN_ERR "Incorrect length of i2c packet = %d, limit set to %d\n",
		       len, i2c_packet_limit);
		return -1;
	}

	/* capture mutex */
	if ((dev->caps.has_zl10353) && (dev->demod_addr << 1 == addr) &&
	    (reg % 2 == 0)) {
		/*
		 * Workaround an I2C bug when reading from zl10353
		 *
		 * NOTE(review): reads reg-1 with len+1 into b[2] and keeps
		 * only b[1]; looks like it assumes len == 1 here — verify
		 * no caller hits this path with len > 1.
		 */
		reg -= 1;
		len += 1;

		rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR |
			USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN,
			addr | reg << 8, 0, b, len);

		*buf = b[1];
	} else {
		rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR |
			USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN,
			addr | reg << 8, 0, buf, len);
	}

	/* release mutex */
	return rc;
}

/*
 * read from a 16bit register
 * for example xc2028, xc3028 or xc3028L
 */
static int tm6000_i2c_recv_regs16(struct tm6000_core *dev, unsigned char addr,
				  __u16 reg, char *buf, int len)
{
	int rc;
	unsigned char ureg;

	if (!buf || len != 2)
		return -1;

	/* capture mutex */
	if (dev->dev_type == TM6010) {
		/* TM6010: write the low register byte first, then issue a
		 * dedicated tuner-read request for the data. */
		ureg = reg & 0xFF;
		rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR |
			USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN,
			addr | (reg & 0xFF00), 0, &ureg, 1);

		if (rc < 0) {
			/* release mutex */
			return rc;
		}

		rc = tm6000_read_write_usb(dev, USB_DIR_IN |
			USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			REQ_35_AFTEK_TUNER_READ,
			reg, 0, buf, len);
	} else {
		rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR |
			USB_RECIP_DEVICE, REQ_14_SET_GET_I2C_WR2_RDN,
			addr, reg, buf, len);
	}

	/* release mutex */
	return rc;
}

/*
 * i2c_algorithm master_xfer: the TM6000 can only read immediately after
 * a 1- or 2-byte register-select write, so bare reads are rejected and
 * write+read pairs are collapsed into one device transaction.
 */
static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
			   struct i2c_msg msgs[], int num)
{
	struct tm6000_core *dev = i2c_adap->algo_data;
	int addr, rc, i, byte;

	if (num <= 0)
		return 0;
	for (i = 0; i < num; i++) {
		addr = (msgs[i].addr << 1) & 0xff;
		i2c_dprintk(2, "%s %s addr=0x%x len=%d:",
			    (msgs[i].flags & I2C_M_RD) ? "read" : "write",
			    i == num - 1 ? "stop" : "nonstop",
			    addr, msgs[i].len);
		if (msgs[i].flags & I2C_M_RD) {
			/* read request without preceding register selection */
			/*
			 * The TM6000 only supports a read transaction
			 * immediately after a 1 or 2 byte write to select
			 * a register.  We cannot fulfil this request.
			 */
			i2c_dprintk(2, " read without preceding write not"
				       " supported");
			rc = -EOPNOTSUPP;
			goto err;
		} else if (i + 1 < num && msgs[i].len <= 2 &&
			   (msgs[i + 1].flags & I2C_M_RD) &&
			   msgs[i].addr == msgs[i + 1].addr) {
			/* 1 or 2 byte write followed by a read */
			if (i2c_debug >= 2)
				for (byte = 0; byte < msgs[i].len; byte++)
					printk(KERN_CONT " %02x", msgs[i].buf[byte]);
			i2c_dprintk(2, "; joined to read %s len=%d:",
				    i == num - 2 ? "stop" : "nonstop",
				    msgs[i + 1].len);

			if (msgs[i].len == 2) {
				rc = tm6000_i2c_recv_regs16(dev, addr,
					msgs[i].buf[0] << 8 | msgs[i].buf[1],
					msgs[i + 1].buf, msgs[i + 1].len);
			} else {
				rc = tm6000_i2c_recv_regs(dev, addr, msgs[i].buf[0],
					msgs[i + 1].buf, msgs[i + 1].len);
			}

			i++;

			if (addr == dev->tuner_addr << 1) {
				tm6000_set_reg(dev, REQ_50_SET_START, 0, 0);
				tm6000_set_reg(dev, REQ_51_SET_STOP, 0, 0);
			}
			if (i2c_debug >= 2)
				for (byte = 0; byte < msgs[i].len; byte++)
					printk(KERN_CONT " %02x", msgs[i].buf[byte]);
		} else {
			/* write bytes */
			if (i2c_debug >= 2)
				for (byte = 0; byte < msgs[i].len; byte++)
					printk(KERN_CONT " %02x", msgs[i].buf[byte]);
			rc = tm6000_i2c_send_regs(dev, addr, msgs[i].buf[0],
				msgs[i].buf + 1, msgs[i].len - 1);
		}
		if (i2c_debug >= 2)
			printk(KERN_CONT "\n");
		if (rc < 0)
			goto err;
	}

	return num;
err:
	i2c_dprintk(2, " ERROR: %i\n", rc);
	return rc;
}

/*
 * Read the on-board EEPROM at I2C address 0xa0 byte-by-byte into
 * dev->eedata, dumping a hex+ASCII listing to the kernel log.
 */
static int tm6000_i2c_eeprom(struct tm6000_core *dev)
{
	int i, rc;
	unsigned char *p = dev->eedata;
	unsigned char bytes[17];

	dev->i2c_client.addr = 0xa0 >> 1;
	dev->eedata_size = 0;

	bytes[16] = '\0';
	for (i = 0; i < sizeof(dev->eedata); ) {
		*p = i;
		rc = tm6000_i2c_recv_regs(dev, 0xa0, i, p, 1);
		if (rc < 1) {
			if (p == dev->eedata)
				goto noeeprom;
			else {
				printk(KERN_WARNING
				       "%s: i2c eeprom read error (err=%d)\n",
				       dev->name, rc);
			}
			return -EINVAL;
		}
		dev->eedata_size++;
		p++;
		if (0 == (i % 16))
			printk(KERN_INFO "%s: i2c eeprom %02x:", dev->name, i);
		printk(KERN_CONT " %02x", dev->eedata[i]);
		if ((dev->eedata[i] >= ' ') && (dev->eedata[i] <= 'z'))
			bytes[i%16] = dev->eedata[i];
		else
			bytes[i%16] = '.';

		i++;

		if (0 == (i % 16)) {
			bytes[16] = '\0';
			printk(KERN_CONT "  %s\n", bytes);
		}
	}
	/* flush the final partial ASCII column, padded for alignment */
	if (0 != (i%16)) {
		bytes[i%16] = '\0';
		for (i %= 16; i < 16; i++)
			printk(KERN_CONT "  ");
		printk(KERN_CONT "  %s\n", bytes);
	}

	return 0;

noeeprom:
	printk(KERN_INFO "%s: Huh, no eeprom present (err=%d)?\n",
	       dev->name, rc);
	return -EINVAL;
}

/* ----------------------------------------------------------- */

/*
 * functionality()
 */
static u32 functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm tm6000_algo = {
	.master_xfer   = tm6000_i2c_xfer,
	.functionality = functionality,
};

/* ----------------------------------------------------------- */

/*
 * tm6000_i2c_register()
 * register i2c bus
 */
int tm6000_i2c_register(struct tm6000_core *dev)
{
	int rc;

	dev->i2c_adap.owner = THIS_MODULE;
	dev->i2c_adap.algo = &tm6000_algo;
	dev->i2c_adap.dev.parent = &dev->udev->dev;
	strlcpy(dev->i2c_adap.name, dev->name, sizeof(dev->i2c_adap.name));
	dev->i2c_adap.algo_data = dev;
	i2c_set_adapdata(&dev->i2c_adap, &dev->v4l2_dev);
	rc = i2c_add_adapter(&dev->i2c_adap);
	if (rc)
		return rc;

	dev->i2c_client.adapter = &dev->i2c_adap;
	strlcpy(dev->i2c_client.name, "tm6000 internal", I2C_NAME_SIZE);
	tm6000_i2c_eeprom(dev);

	return 0;
}

/*
 * tm6000_i2c_unregister()
 * unregister i2c_bus
 */
int tm6000_i2c_unregister(struct tm6000_core *dev)
{
	i2c_del_adapter(&dev->i2c_adap);
	return 0;
}
gpl-2.0
Slayjay78/android_kernel_lge_hammerhead
drivers/input/joystick/grip.c
9873
11537
/*
 * Copyright (c) 1998-2001 Vojtech Pavlik
 */

/*
 * Gravis/Kensington GrIP protocol joystick and gamepad driver for Linux
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gameport.h>
#include <linux/input.h>
#include <linux/jiffies.h>

#define DRIVER_DESC	"Gravis GrIP protocol joystick driver"

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

#define GRIP_MODE_GPP		1
#define GRIP_MODE_BD		2
#define GRIP_MODE_XT		3
#define GRIP_MODE_DC		4

#define GRIP_LENGTH_GPP		24
#define GRIP_STROBE_GPP		200	/* 200 us */
#define GRIP_LENGTH_XT		4
#define GRIP_STROBE_XT		64	/* 64 us */
#define GRIP_MAX_CHUNKS_XT	10
#define GRIP_MAX_BITS_XT	30

/* Per-gameport state: up to two devices share one port. */
struct grip {
	struct gameport *gameport;
	struct input_dev *dev[2];
	unsigned char mode[2];
	int reads;	/* total poll attempts */
	int bads;	/* failed packet reads */
	char phys[2][32];
};

/* Button/axis tables, indexed by GRIP_MODE_*; -1 terminates, 0 = unused slot. */
static int grip_btn_gpp[] = { BTN_START, BTN_SELECT, BTN_TR2, BTN_Y, 0,
			      BTN_TL2, BTN_A, BTN_B, BTN_X, 0, BTN_TL, BTN_TR, -1 };
static int grip_btn_bd[] = { BTN_THUMB, BTN_THUMB2, BTN_TRIGGER, BTN_TOP,
			     BTN_BASE, -1 };
static int grip_btn_xt[] = { BTN_TRIGGER, BTN_THUMB, BTN_A, BTN_B, BTN_C, BTN_X,
			     BTN_Y, BTN_Z, BTN_SELECT, BTN_START, BTN_MODE, -1 };
static int grip_btn_dc[] = { BTN_TRIGGER, BTN_THUMB, BTN_TOP, BTN_TOP2, BTN_BASE,
			     BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, -1 };

static int grip_abs_gpp[] = { ABS_X, ABS_Y, -1 };
static int grip_abs_bd[] = { ABS_X, ABS_Y, ABS_THROTTLE, ABS_HAT0X, ABS_HAT0Y, -1 };
static int grip_abs_xt[] = { ABS_X, ABS_Y, ABS_BRAKE, ABS_GAS, ABS_THROTTLE,
			     ABS_HAT0X, ABS_HAT0Y, ABS_HAT1X, ABS_HAT1Y, -1 };
static int grip_abs_dc[] = { ABS_X, ABS_Y, ABS_RX, ABS_RY, ABS_THROTTLE,
			     ABS_HAT0X, ABS_HAT0Y, -1 };

static char *grip_name[] = { NULL, "Gravis GamePad Pro", "Gravis Blackhawk Digital",
			     "Gravis Xterminator Digital", "Gravis Xterminator DualControl" };
static int *grip_abs[] = { NULL, grip_abs_gpp, grip_abs_bd, grip_abs_xt, grip_abs_dc };
static int *grip_btn[] = { NULL, grip_btn_gpp, grip_btn_bd, grip_btn_xt, grip_btn_dc };
/* grip_anx/grip_cen: per mode, index boundaries between centered axes,
 * analog axes and hat (digital) axes in the abs tables above. */
static char grip_anx[] = { 0, 0, 3, 5, 5 };
static char grip_cen[] = { 0, 0, 2, 2, 4 };

/*
 * grip_gpp_read_packet() reads a Gravis GamePad Pro packet.
 */

static int grip_gpp_read_packet(struct gameport *gameport, int shift, unsigned int *data)
{
	unsigned long flags;
	unsigned char u, v;
	unsigned int t;
	int i;

	int strobe = gameport_time(gameport, GRIP_STROBE_GPP);

	data[0] = 0;
	t = strobe;
	i = 0;

	/* Bit-bang sample with IRQs off: each falling edge of bit 0 clocks
	 * in one data bit from bit 1. */
	local_irq_save(flags);

	v = gameport_read(gameport) >> shift;

	do {
		t--;
		u = v;
		v = (gameport_read(gameport) >> shift) & 3;
		if (~v & u & 1) {
			data[0] |= (v >> 1) << i++;
			t = strobe;
		}
	} while (i < GRIP_LENGTH_GPP && t > 0);

	local_irq_restore(flags);

	if (i < GRIP_LENGTH_GPP)
		return -1;

	/* Rotate until the fixed sync pattern lines up; fail after a full turn. */
	for (i = 0; i < GRIP_LENGTH_GPP && (data[0] & 0xfe4210) ^ 0x7c0000; i++)
		data[0] = data[0] >> 1 | (data[0] & 1) << (GRIP_LENGTH_GPP - 1);

	return -(i == GRIP_LENGTH_GPP);
}

/*
 * grip_xt_read_packet() reads a Gravis Xterminator packet.
 */

static int grip_xt_read_packet(struct gameport *gameport, int shift, unsigned int *data)
{
	unsigned int i, j, buf, crc;
	unsigned char u, v, w;
	unsigned long flags;
	unsigned int t;
	char status;

	int strobe = gameport_time(gameport, GRIP_STROBE_XT);

	data[0] = data[1] = data[2] = data[3] = 0;
	status = buf = i = j = 0;
	t = strobe;

	local_irq_save(flags);

	v = w = (gameport_read(gameport) >> shift) & 3;

	do {
		t--;
		u = (gameport_read(gameport) >> shift) & 3;

		if (u ^ v) {

			if ((u ^ v) & 1) {
				/* data-line edge: shift in one bit */
				buf = (buf << 1) | (u >> 1);
				t = strobe;
				i++;
			} else

			if ((((u ^ v) & (v ^ w)) >> 1) & ~(u | v | w) & 1) {
				/* chunk boundary: validate 20-bit chunk by CRC
				 * and store it, keyed by its 2-bit channel id */
				if (i == 20) {
					crc = buf ^ (buf >> 7) ^ (buf >> 14);
					if (!((crc ^ (0x25cb9e70 >> ((crc >> 2) & 0x1c))) & 0xf)) {
						data[buf >> 18] = buf >> 4;
						status |= 1 << (buf >> 18);
					}
					j++;
				}
				t = strobe;
				buf = 0;
				i = 0;
			}
			w = v;
			v = u;
		}

	} while (status != 0xf && i < GRIP_MAX_BITS_XT && j < GRIP_MAX_CHUNKS_XT && t > 0);

	local_irq_restore(flags);

	return -(status != 0xf);
}

/*
 * grip_timer() repeatedly polls the joysticks and generates events.
 */

static void grip_poll(struct gameport *gameport)
{
	struct grip *grip = gameport_get_drvdata(gameport);
	unsigned int data[GRIP_LENGTH_XT];
	struct input_dev *dev;
	int i, j;

	for (i = 0; i < 2; i++) {

		dev = grip->dev[i];
		if (!dev)
			continue;

		grip->reads++;

		switch (grip->mode[i]) {

			case GRIP_MODE_GPP:

				if (grip_gpp_read_packet(grip->gameport, (i << 1) + 4, data)) {
					grip->bads++;
					break;
				}

				input_report_abs(dev, ABS_X, ((*data >> 15) & 1) - ((*data >> 16) & 1));
				input_report_abs(dev, ABS_Y, ((*data >> 13) & 1) - ((*data >> 12) & 1));

				for (j = 0; j < 12; j++)
					if (grip_btn_gpp[j])
						input_report_key(dev, grip_btn_gpp[j], (*data >> j) & 1);

				break;

			case GRIP_MODE_BD:

				if (grip_xt_read_packet(grip->gameport, (i << 1) + 4, data)) {
					grip->bads++;
					break;
				}

				input_report_abs(dev, ABS_X,        (data[0] >> 2) & 0x3f);
				input_report_abs(dev, ABS_Y,  63 - ((data[0] >> 8) & 0x3f));
				input_report_abs(dev, ABS_THROTTLE, (data[2] >> 8) & 0x3f);

				input_report_abs(dev, ABS_HAT0X, ((data[2] >> 1) & 1) - ( data[2]       & 1));
				input_report_abs(dev, ABS_HAT0Y, ((data[2] >> 2) & 1) - ((data[2] >> 3) & 1));

				for (j = 0; j < 5; j++)
					input_report_key(dev, grip_btn_bd[j], (data[3] >> (j + 4)) & 1);

				break;

			case GRIP_MODE_XT:

				if (grip_xt_read_packet(grip->gameport, (i << 1) + 4, data)) {
					grip->bads++;
					break;
				}

				input_report_abs(dev, ABS_X,        (data[0] >> 2) & 0x3f);
				input_report_abs(dev, ABS_Y,  63 - ((data[0] >> 8) & 0x3f));
				input_report_abs(dev, ABS_BRAKE,    (data[1] >> 2) & 0x3f);
				input_report_abs(dev, ABS_GAS,      (data[1] >> 8) & 0x3f);
				input_report_abs(dev, ABS_THROTTLE, (data[2] >> 8) & 0x3f);

				input_report_abs(dev, ABS_HAT0X, ((data[2] >> 1) & 1) - ( data[2]       & 1));
				input_report_abs(dev, ABS_HAT0Y, ((data[2] >> 2) & 1) - ((data[2] >> 3) & 1));
				input_report_abs(dev, ABS_HAT1X, ((data[2] >> 5) & 1) - ((data[2] >> 4) & 1));
				input_report_abs(dev, ABS_HAT1Y, ((data[2] >> 6) & 1) - ((data[2] >> 7) & 1));

				for (j = 0; j < 11; j++)
					input_report_key(dev, grip_btn_xt[j], (data[3] >> (j + 3)) & 1);

				break;

			case GRIP_MODE_DC:

				if (grip_xt_read_packet(grip->gameport, (i << 1) + 4, data)) {
					grip->bads++;
					break;
				}

				input_report_abs(dev, ABS_X,        (data[0] >> 2) & 0x3f);
				input_report_abs(dev, ABS_Y,        (data[0] >> 8) & 0x3f);
				input_report_abs(dev, ABS_RX,       (data[1] >> 2) & 0x3f);
				input_report_abs(dev, ABS_RY,       (data[1] >> 8) & 0x3f);
				input_report_abs(dev, ABS_THROTTLE, (data[2] >> 8) & 0x3f);

				input_report_abs(dev, ABS_HAT0X, ((data[2] >> 1) & 1) - ( data[2]       & 1));
				input_report_abs(dev, ABS_HAT0Y, ((data[2] >> 2) & 1) - ((data[2] >> 3) & 1));

				for (j = 0; j < 9; j++)
					input_report_key(dev, grip_btn_dc[j], (data[3] >> (j + 3)) & 1);

				break;

		}

		input_sync(dev);
	}
}

static int grip_open(struct input_dev *dev)
{
	struct grip *grip = input_get_drvdata(dev);

	gameport_start_polling(grip->gameport);
	return 0;
}

static void grip_close(struct input_dev *dev)
{
	struct grip *grip = input_get_drvdata(dev);

	gameport_stop_polling(grip->gameport);
}

/*
 * Detect which GrIP device (if any) is on each of the two port halves,
 * then allocate, configure and register an input device per detected
 * device.  Cleanup on failure uses the fail1..fail4 ladder.
 */
static int grip_connect(struct gameport *gameport, struct gameport_driver *drv)
{
	struct grip *grip;
	struct input_dev *input_dev;
	unsigned int data[GRIP_LENGTH_XT];
	int i, j, t;
	int err;

	if (!(grip = kzalloc(sizeof(struct grip), GFP_KERNEL)))
		return -ENOMEM;

	grip->gameport = gameport;

	gameport_set_drvdata(gameport, grip);

	err = gameport_open(gameport, drv, GAMEPORT_MODE_RAW);
	if (err)
		goto fail1;

	for (i = 0; i < 2; i++) {
		if (!grip_gpp_read_packet(gameport, (i << 1) + 4, data)) {
			grip->mode[i] = GRIP_MODE_GPP;
			continue;
		}
		if (!grip_xt_read_packet(gameport, (i << 1) + 4, data)) {
			/* distinguish BD / XT / DC by unused packet bits */
			if (!(data[3] & 7)) {
				grip->mode[i] = GRIP_MODE_BD;
				continue;
			}
			if (!(data[2] & 0xf0)) {
				grip->mode[i] = GRIP_MODE_XT;
				continue;
			}
			grip->mode[i] = GRIP_MODE_DC;
			continue;
		}
	}

	if (!grip->mode[0] && !grip->mode[1]) {
		err = -ENODEV;
		goto fail2;
	}

	gameport_set_poll_handler(gameport, grip_poll);
	gameport_set_poll_interval(gameport, 20);

	for (i = 0; i < 2; i++) {
		if (!grip->mode[i])
			continue;

		grip->dev[i] = input_dev = input_allocate_device();
		if (!input_dev) {
			err = -ENOMEM;
			goto fail3;
		}

		snprintf(grip->phys[i], sizeof(grip->phys[i]),
			 "%s/input%d", gameport->phys, i);

		input_dev->name = grip_name[grip->mode[i]];
		input_dev->phys = grip->phys[i];
		input_dev->id.bustype = BUS_GAMEPORT;
		input_dev->id.vendor = GAMEPORT_ID_VENDOR_GRAVIS;
		input_dev->id.product = grip->mode[i];
		input_dev->id.version = 0x0100;
		input_dev->dev.parent = &gameport->dev;

		input_set_drvdata(input_dev, grip);

		input_dev->open = grip_open;
		input_dev->close = grip_close;

		input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);

		for (j = 0; (t = grip_abs[grip->mode[i]][j]) >= 0; j++) {

			if (j < grip_cen[grip->mode[i]])
				/* centered analog axis */
				input_set_abs_params(input_dev, t, 14, 52, 1, 2);
			else if (j < grip_anx[grip->mode[i]])
				/* non-centered analog axis (pedal/throttle) */
				input_set_abs_params(input_dev, t, 3, 57, 1, 0);
			else
				/* digital hat axis */
				input_set_abs_params(input_dev, t, -1, 1, 0, 0);
		}

		for (j = 0; (t = grip_btn[grip->mode[i]][j]) >= 0; j++)
			if (t > 0)
				set_bit(t, input_dev->keybit);

		err = input_register_device(grip->dev[i]);
		if (err)
			goto fail4;
	}

	return 0;

 fail4:	input_free_device(grip->dev[i]);
 fail3:	while (--i >= 0)
		if (grip->dev[i])
			input_unregister_device(grip->dev[i]);
 fail2:	gameport_close(gameport);
 fail1:	gameport_set_drvdata(gameport, NULL);
	kfree(grip);
	return err;
}

static void grip_disconnect(struct gameport *gameport)
{
	struct grip *grip = gameport_get_drvdata(gameport);
	int i;

	for (i = 0; i < 2; i++)
		if (grip->dev[i])
			input_unregister_device(grip->dev[i]);
	gameport_close(gameport);
	gameport_set_drvdata(gameport, NULL);
	kfree(grip);
}

static struct gameport_driver grip_drv = {
	.driver		= {
		.name	= "grip",
		.owner	= THIS_MODULE,
	},
	.description	= DRIVER_DESC,
	.connect	= grip_connect,
	.disconnect	= grip_disconnect,
};

static int __init grip_init(void)
{
	return gameport_register_driver(&grip_drv);
}

static void __exit grip_exit(void)
{
	gameport_unregister_driver(&grip_drv);
}

module_init(grip_init);
module_exit(grip_exit);
gpl-2.0
jokerfr9/DragonsKernel_Kylessopen
arch/sh/boards/mach-microdev/fdc37c93xapm.c
13969
6415
/*
 *
 * Setup for the SMSC FDC37C93xAPM
 *
 * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com)
 * Copyright (C) 2003, 2004 SuperH, Inc.
 * Copyright (C) 2004, 2005 Paul Mundt
 *
 * SuperH SH4-202 MicroDev board support.
 *
 * May be copied or modified under the terms of the GNU General Public
 * License. See linux/COPYING for more information.
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/err.h>
#include <mach/microdev.h>

/*
 * The Super I/O chip is programmed through an index/data register pair
 * at 0x3F0/0x3F1: write the register index to the index port, then
 * read/write the value through the data port.
 */
#define SMSC_CONFIG_PORT_ADDR	(0x3F0)
#define SMSC_INDEX_PORT_ADDR	SMSC_CONFIG_PORT_ADDR
#define SMSC_DATA_PORT_ADDR	(SMSC_INDEX_PORT_ADDR + 1)

/* Magic bytes that move the chip between run and configuration state */
#define SMSC_ENTER_CONFIG_KEY	0x55
#define SMSC_EXIT_CONFIG_KEY	0xaa

/*
 * Global and per-logical-device configuration register indices.
 * NOTE(review): "SMCS" in SMCS_LOGICAL_DEV_INDEX looks like a typo for
 * "SMSC", but the identifier is kept as-is since it is used throughout.
 */
#define SMCS_LOGICAL_DEV_INDEX	0x07	/* Logical Device Number */
#define SMSC_DEVICE_ID_INDEX	0x20	/* Device ID */
#define SMSC_DEVICE_REV_INDEX	0x21	/* Device Revision */
#define SMSC_ACTIVATE_INDEX	0x30	/* Activate */
#define SMSC_PRIMARY_BASE_INDEX	0x60	/* Primary Base Address */
#define SMSC_SECONDARY_BASE_INDEX 0x62	/* Secondary Base Address */
#define SMSC_PRIMARY_INT_INDEX	0x70	/* Primary Interrupt Select */
#define SMSC_SECONDARY_INT_INDEX 0x72	/* Secondary Interrupt Select */
#define SMSC_HDCS0_INDEX	0xf0	/* HDCS0 Address Decoder */
#define SMSC_HDCS1_INDEX	0xf1	/* HDCS1 Address Decoder */

/* Logical device numbers selected via SMCS_LOGICAL_DEV_INDEX */
#define SMSC_IDE1_DEVICE	1	/* IDE #1 logical device */
#define SMSC_IDE2_DEVICE	2	/* IDE #2 logical device */
#define SMSC_PARALLEL_DEVICE	3	/* Parallel Port logical device */
#define SMSC_SERIAL1_DEVICE	4	/* Serial #1 logical device */
#define SMSC_SERIAL2_DEVICE	5	/* Serial #2 logical device */
#define SMSC_KEYBOARD_DEVICE	7	/* Keyboard logical device */
#define SMSC_CONFIG_REGISTERS	8	/* Configuration Registers (Aux I/O) */

/*
 * Indexed register accessors (GCC statement expressions): select the
 * register through the index port, then transfer through the data port.
 */
#define SMSC_READ_INDEXED(index) ({ \
	outb((index), SMSC_INDEX_PORT_ADDR); \
	inb(SMSC_DATA_PORT_ADDR); })
#define SMSC_WRITE_INDEXED(val, index) ({ \
	outb((index), SMSC_INDEX_PORT_ADDR); \
	outb((val), SMSC_DATA_PORT_ADDR); })

/* Legacy PC I/O port bases programmed into the Super I/O decoders */
#define	IDE1_PRIMARY_BASE	0x01f0	/* Task File Register base for IDE #1 */
#define	IDE1_SECONDARY_BASE	0x03f6	/* Miscellaneous AT registers for IDE #1 */
#define	IDE2_PRIMARY_BASE	0x0170	/* Task File Register base for IDE #2 */
#define	IDE2_SECONDARY_BASE	0x0376	/* Miscellaneous AT registers for IDE #2 */

#define SERIAL1_PRIMARY_BASE	0x03f8
#define SERIAL2_PRIMARY_BASE	0x02f8

/* Split a 16-bit port base into the two 8-bit base-address registers */
#define	MSB(x)		( (x) >> 8 )
#define	LSB(x)		( (x) & 0xff )

	/* General-Purpose base address on CPU-board FPGA */
	/* NOTE(review): not referenced in this translation unit as seen here */
#define	MICRODEV_FPGA_GP_BASE		0xa6100000ul

/*
 * smsc_superio_setup - probe and configure the FDC37C93xAPM Super I/O chip.
 *
 * Enters the chip's configuration state, verifies the device ID/revision
 * (0x30 / 0x01), then programs the keyboard, both serial ports and both
 * IDE channels with their legacy port bases and board (FPGA) interrupt
 * lines, enables the GPIO pins needed for IDE, and returns to run state.
 *
 * Returns 0 on success, -ENODEV if the expected chip is not found.
 * Runs once at boot via device_initcall().
 */
static int __init smsc_superio_setup(void)
{
	unsigned char devid, devrev;

	/* Initially the chip is in run state */
	/* Put it into configuration state */
	outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);

	/* Read device ID info */
	devid = SMSC_READ_INDEXED(SMSC_DEVICE_ID_INDEX);
	devrev = SMSC_READ_INDEXED(SMSC_DEVICE_REV_INDEX);

	if ((devid == 0x30) && (devrev == 0x01))
		printk("SMSC FDC37C93xAPM SuperIO device detected\n");
	else
		/* chip absent or unexpected revision: bail out while still
		 * in configuration state (no devices were activated yet) */
		return -ENODEV;

	/* Select the keyboard device */
	SMSC_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* enable the interrupts (keyboard on primary, mouse on secondary) */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_KEYBOARD, SMSC_PRIMARY_INT_INDEX);
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_MOUSE, SMSC_SECONDARY_INT_INDEX);

	/* Select the Serial #1 device */
	SMSC_WRITE_INDEXED(SMSC_SERIAL1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses (MSB then LSB of 0x3f8) */
	SMSC_WRITE_INDEXED(MSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(SERIAL1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX);
	/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL1, SMSC_PRIMARY_INT_INDEX);

	/* Select the Serial #2 device */
	SMSC_WRITE_INDEXED(SMSC_SERIAL2_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses (MSB then LSB of 0x2f8) */
	SMSC_WRITE_INDEXED(MSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(SERIAL2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS0_INDEX);
	/* enable the interrupts */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_SERIAL2, SMSC_PRIMARY_INT_INDEX);

	/* Select the IDE#1 device */
	SMSC_WRITE_INDEXED(SMSC_IDE1_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses: task file at 0x1f0, control at 0x3f6 */
	SMSC_WRITE_INDEXED(MSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE1_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(MSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE1_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(0x0c, SMSC_HDCS0_INDEX);
	SMSC_WRITE_INDEXED(0x00, SMSC_HDCS1_INDEX);
	/* select the interrupt */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE1, SMSC_PRIMARY_INT_INDEX);

	/* Select the IDE#2 device */
	SMSC_WRITE_INDEXED(SMSC_IDE2_DEVICE, SMCS_LOGICAL_DEV_INDEX);
	/* enable it */
	SMSC_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX);
	/* program with port addresses: task file at 0x170, control at 0x376 */
	SMSC_WRITE_INDEXED(MSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE2_PRIMARY_BASE), SMSC_PRIMARY_BASE_INDEX+1);
	SMSC_WRITE_INDEXED(MSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+0);
	SMSC_WRITE_INDEXED(LSB(IDE2_SECONDARY_BASE), SMSC_SECONDARY_BASE_INDEX+1);
	/* select the interrupt */
	SMSC_WRITE_INDEXED(MICRODEV_FPGA_IRQ_IDE2, SMSC_PRIMARY_INT_INDEX);

	/* Select the configuration registers */
	SMSC_WRITE_INDEXED(SMSC_CONFIG_REGISTERS, SMCS_LOGICAL_DEV_INDEX);
	/* enable the appropriate GPIO pins for IDE functionality:
	 * bit[0]   In/Out           1==input;  0==output
	 * bit[1]   Polarity         1==invert; 0==no invert
	 * bit[2]   Int Enb #1       1==Enable Combined IRQ #1; 0==disable
	 * bit[3:4] Function Select  00==original; 01==Alternate Function #1
	 */
	SMSC_WRITE_INDEXED(0x00, 0xc2);	/* GP42 = nIDE1_OE */
	SMSC_WRITE_INDEXED(0x01, 0xc5);	/* GP45 = IDE1_IRQ */
	SMSC_WRITE_INDEXED(0x00, 0xc6);	/* GP46 = nIOROP */
	SMSC_WRITE_INDEXED(0x00, 0xc7);	/* GP47 = nIOWOP */
	SMSC_WRITE_INDEXED(0x08, 0xe8);	/* GP20 = nIDE2_OE */

	/* Exit the configuration state (back to run state) */
	outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);

	return 0;
}
device_initcall(smsc_superio_setup);
gpl-2.0
pio-masaki/kernel_at1s0
lib/vsprintf.c
146
48335
/* * linux/lib/vsprintf.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */ /* * Wirzenius wrote this portably, Torvalds fucked it up :-) */ /* * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> * - changed to provide snprintf and vsnprintf functions * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> * - scnprintf and vscnprintf */ #include <stdarg.h> #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/kallsyms.h> #include <linux/uaccess.h> #include <linux/ioport.h> #include <net/addrconf.h> #include <asm/page.h> /* for PAGE_SIZE */ #include <asm/div64.h> #include <asm/sections.h> /* for dereference_function_descriptor() */ /* Works only for digits and letters, but small and fast */ #define TOLOWER(x) ((x) | 0x20) static unsigned int simple_guess_base(const char *cp) { if (cp[0] == '0') { if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2])) return 16; else return 8; } else { return 10; } } /** * simple_strtoull - convert a string to an unsigned long long * @cp: The start of the string * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) { unsigned long long result = 0; if (!base) base = simple_guess_base(cp); if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') cp += 2; while (isxdigit(*cp)) { unsigned int value; value = isdigit(*cp) ? 
*cp - '0' : TOLOWER(*cp) - 'a' + 10; if (value >= base) break; result = result * base + value; cp++; } if (endp) *endp = (char *)cp; return result; } EXPORT_SYMBOL(simple_strtoull); /** * simple_strtoul - convert a string to an unsigned long * @cp: The start of the string * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) { return simple_strtoull(cp, endp, base); } EXPORT_SYMBOL(simple_strtoul); /** * simple_strtol - convert a string to a signed long * @cp: The start of the string * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ long simple_strtol(const char *cp, char **endp, unsigned int base) { if (*cp == '-') return -simple_strtoul(cp + 1, endp, base); return simple_strtoul(cp, endp, base); } EXPORT_SYMBOL(simple_strtol); /** * simple_strtoll - convert a string to a signed long long * @cp: The start of the string * @endp: A pointer to the end of the parsed string will be placed here * @base: The number base to use */ long long simple_strtoll(const char *cp, char **endp, unsigned int base) { if (*cp == '-') return -simple_strtoull(cp + 1, endp, base); return simple_strtoull(cp, endp, base); } EXPORT_SYMBOL(simple_strtoll); static noinline_for_stack int skip_atoi(const char **s) { int i = 0; while (isdigit(**s)) i = i*10 + *((*s)++) - '0'; return i; } /* Decimal conversion is by far the most typical, and is used * for /proc and /sys data. This directly impacts e.g. top performance * with many processes running. We optimize it for speed * using code from * http://www.cs.uiowa.edu/~jones/bcd/decimal.html * (with permission from the author, Douglas W. Jones). */ /* Formats correctly any integer in [0,99999]. * Outputs from one to five digits depending on input. * On i386 gcc 4.1.2 -O2: ~250 bytes of code. 
*/ static noinline_for_stack char *put_dec_trunc(char *buf, unsigned q) { unsigned d3, d2, d1, d0; d1 = (q>>4) & 0xf; d2 = (q>>8) & 0xf; d3 = (q>>12); d0 = 6*(d3 + d2 + d1) + (q & 0xf); q = (d0 * 0xcd) >> 11; d0 = d0 - 10*q; *buf++ = d0 + '0'; /* least significant digit */ d1 = q + 9*d3 + 5*d2 + d1; if (d1 != 0) { q = (d1 * 0xcd) >> 11; d1 = d1 - 10*q; *buf++ = d1 + '0'; /* next digit */ d2 = q + 2*d2; if ((d2 != 0) || (d3 != 0)) { q = (d2 * 0xd) >> 7; d2 = d2 - 10*q; *buf++ = d2 + '0'; /* next digit */ d3 = q + 4*d3; if (d3 != 0) { q = (d3 * 0xcd) >> 11; d3 = d3 - 10*q; *buf++ = d3 + '0'; /* next digit */ if (q != 0) *buf++ = q + '0'; /* most sign. digit */ } } } return buf; } /* Same with if's removed. Always emits five digits */ static noinline_for_stack char *put_dec_full(char *buf, unsigned q) { /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ /* but anyway, gcc produces better code with full-sized ints */ unsigned d3, d2, d1, d0; d1 = (q>>4) & 0xf; d2 = (q>>8) & 0xf; d3 = (q>>12); /* * Possible ways to approx. 
divide by 10 * gcc -O2 replaces multiply with shifts and adds * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) * (x * 0x67) >> 10: 1100111 * (x * 0x34) >> 9: 110100 - same * (x * 0x1a) >> 8: 11010 - same * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) */ d0 = 6*(d3 + d2 + d1) + (q & 0xf); q = (d0 * 0xcd) >> 11; d0 = d0 - 10*q; *buf++ = d0 + '0'; d1 = q + 9*d3 + 5*d2 + d1; q = (d1 * 0xcd) >> 11; d1 = d1 - 10*q; *buf++ = d1 + '0'; d2 = q + 2*d2; q = (d2 * 0xd) >> 7; d2 = d2 - 10*q; *buf++ = d2 + '0'; d3 = q + 4*d3; q = (d3 * 0xcd) >> 11; /* - shorter code */ /* q = (d3 * 0x67) >> 10; - would also work */ d3 = d3 - 10*q; *buf++ = d3 + '0'; *buf++ = q + '0'; return buf; } /* No inlining helps gcc to use registers better */ static noinline_for_stack char *put_dec(char *buf, unsigned long long num) { while (1) { unsigned rem; if (num < 100000) return put_dec_trunc(buf, num); rem = do_div(num, 100000); buf = put_dec_full(buf, rem); } } #define ZEROPAD 1 /* pad with zero */ #define SIGN 2 /* unsigned/signed long */ #define PLUS 4 /* show plus */ #define SPACE 8 /* space if plus */ #define LEFT 16 /* left justified */ #define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */ #define SPECIAL 64 /* prefix hex with "0x", octal with "0" */ enum format_type { FORMAT_TYPE_NONE, /* Just a string part */ FORMAT_TYPE_WIDTH, FORMAT_TYPE_PRECISION, FORMAT_TYPE_CHAR, FORMAT_TYPE_STR, FORMAT_TYPE_PTR, FORMAT_TYPE_PERCENT_CHAR, FORMAT_TYPE_INVALID, FORMAT_TYPE_LONG_LONG, FORMAT_TYPE_ULONG, FORMAT_TYPE_LONG, FORMAT_TYPE_UBYTE, FORMAT_TYPE_BYTE, FORMAT_TYPE_USHORT, FORMAT_TYPE_SHORT, FORMAT_TYPE_UINT, FORMAT_TYPE_INT, FORMAT_TYPE_NRCHARS, FORMAT_TYPE_SIZE_T, FORMAT_TYPE_PTRDIFF }; struct printf_spec { u8 type; /* format_type enum */ u8 flags; /* flags to number() */ u8 base; /* number base, 8, 10 or 16 only */ u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */ s16 field_width; /* width of output field */ s16 precision; /* # of digits/chars */ }; static 
noinline_for_stack char *number(char *buf, char *end, unsigned long long num, struct printf_spec spec) { /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ char tmp[66]; char sign; char locase; int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10); int i; /* locase = 0 or 0x20. ORing digits or letters with 'locase' * produces same digits or (maybe lowercased) letters */ locase = (spec.flags & SMALL); if (spec.flags & LEFT) spec.flags &= ~ZEROPAD; sign = 0; if (spec.flags & SIGN) { if ((signed long long)num < 0) { sign = '-'; num = -(signed long long)num; spec.field_width--; } else if (spec.flags & PLUS) { sign = '+'; spec.field_width--; } else if (spec.flags & SPACE) { sign = ' '; spec.field_width--; } } if (need_pfx) { spec.field_width--; if (spec.base == 16) spec.field_width--; } /* generate full string in tmp[], in reverse order */ i = 0; if (num == 0) tmp[i++] = '0'; /* Generic code, for any base: else do { tmp[i++] = (digits[do_div(num,base)] | locase); } while (num != 0); */ else if (spec.base != 10) { /* 8 or 16 */ int mask = spec.base - 1; int shift = 3; if (spec.base == 16) shift = 4; do { tmp[i++] = (digits[((unsigned char)num) & mask] | locase); num >>= shift; } while (num); } else { /* base 10 */ i = put_dec(tmp, num) - tmp; } /* printing 100 using %2d gives "100", not "00" */ if (i > spec.precision) spec.precision = i; /* leading space padding */ spec.field_width -= spec.precision; if (!(spec.flags & (ZEROPAD+LEFT))) { while (--spec.field_width >= 0) { if (buf < end) *buf = ' '; ++buf; } } /* sign */ if (sign) { if (buf < end) *buf = sign; ++buf; } /* "0x" / "0" prefix */ if (need_pfx) { if (buf < end) *buf = '0'; ++buf; if (spec.base == 16) { if (buf < end) *buf = ('X' | locase); ++buf; } } /* zero or space padding */ if (!(spec.flags & LEFT)) { char c = (spec.flags & ZEROPAD) ? 
'0' : ' '; while (--spec.field_width >= 0) { if (buf < end) *buf = c; ++buf; } } /* hmm even more zero padding? */ while (i <= --spec.precision) { if (buf < end) *buf = '0'; ++buf; } /* actual digits of result */ while (--i >= 0) { if (buf < end) *buf = tmp[i]; ++buf; } /* trailing space padding */ while (--spec.field_width >= 0) { if (buf < end) *buf = ' '; ++buf; } return buf; } static noinline_for_stack char *string(char *buf, char *end, const char *s, struct printf_spec spec) { int len, i; if ((unsigned long)s < PAGE_SIZE) s = "(null)"; len = strnlen(s, spec.precision); if (!(spec.flags & LEFT)) { while (len < spec.field_width--) { if (buf < end) *buf = ' '; ++buf; } } for (i = 0; i < len; ++i) { if (buf < end) *buf = *s; ++buf; ++s; } while (len < spec.field_width--) { if (buf < end) *buf = ' '; ++buf; } return buf; } static noinline_for_stack char *symbol_string(char *buf, char *end, void *ptr, struct printf_spec spec, char ext) { unsigned long value = (unsigned long) ptr; #ifdef CONFIG_KALLSYMS char sym[KSYM_SYMBOL_LEN]; if (ext == 'B') sprint_backtrace(sym, value); else if (ext != 'f' && ext != 's') sprint_symbol(sym, value); else kallsyms_lookup(value, NULL, NULL, NULL, sym); return string(buf, end, sym, spec); #else spec.field_width = 2 * sizeof(void *); spec.flags |= SPECIAL | SMALL | ZEROPAD; spec.base = 16; return number(buf, end, value, spec); #endif } static noinline_for_stack char *resource_string(char *buf, char *end, struct resource *res, struct printf_spec spec, const char *fmt) { #ifndef IO_RSRC_PRINTK_SIZE #define IO_RSRC_PRINTK_SIZE 6 #endif #ifndef MEM_RSRC_PRINTK_SIZE #define MEM_RSRC_PRINTK_SIZE 10 #endif static const struct printf_spec io_spec = { .base = 16, .field_width = IO_RSRC_PRINTK_SIZE, .precision = -1, .flags = SPECIAL | SMALL | ZEROPAD, }; static const struct printf_spec mem_spec = { .base = 16, .field_width = MEM_RSRC_PRINTK_SIZE, .precision = -1, .flags = SPECIAL | SMALL | ZEROPAD, }; static const struct printf_spec bus_spec = 
{ .base = 16, .field_width = 2, .precision = -1, .flags = SMALL | ZEROPAD, }; static const struct printf_spec dec_spec = { .base = 10, .precision = -1, .flags = 0, }; static const struct printf_spec str_spec = { .field_width = -1, .precision = 10, .flags = LEFT, }; static const struct printf_spec flag_spec = { .base = 16, .precision = -1, .flags = SPECIAL | SMALL, }; /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8) * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ #define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4) #define FLAG_BUF_SIZE (2 * sizeof(res->flags)) #define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]") #define RAW_BUF_SIZE sizeof("[mem - flags 0x]") char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; char *p = sym, *pend = sym + sizeof(sym); int decode = (fmt[0] == 'R') ? 1 : 0; const struct printf_spec *specp; *p++ = '['; if (res->flags & IORESOURCE_IO) { p = string(p, pend, "io ", str_spec); specp = &io_spec; } else if (res->flags & IORESOURCE_MEM) { p = string(p, pend, "mem ", str_spec); specp = &mem_spec; } else if (res->flags & IORESOURCE_IRQ) { p = string(p, pend, "irq ", str_spec); specp = &dec_spec; } else if (res->flags & IORESOURCE_DMA) { p = string(p, pend, "dma ", str_spec); specp = &dec_spec; } else if (res->flags & IORESOURCE_BUS) { p = string(p, pend, "bus ", str_spec); specp = &bus_spec; } else { p = string(p, pend, "??? 
", str_spec); specp = &mem_spec; decode = 0; } p = number(p, pend, res->start, *specp); if (res->start != res->end) { *p++ = '-'; p = number(p, pend, res->end, *specp); } if (decode) { if (res->flags & IORESOURCE_MEM_64) p = string(p, pend, " 64bit", str_spec); if (res->flags & IORESOURCE_PREFETCH) p = string(p, pend, " pref", str_spec); if (res->flags & IORESOURCE_WINDOW) p = string(p, pend, " window", str_spec); if (res->flags & IORESOURCE_DISABLED) p = string(p, pend, " disabled", str_spec); } else { p = string(p, pend, " flags ", str_spec); p = number(p, pend, res->flags, flag_spec); } *p++ = ']'; *p = '\0'; return string(buf, end, sym, spec); } static noinline_for_stack char *mac_address_string(char *buf, char *end, u8 *addr, struct printf_spec spec, const char *fmt) { char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; char *p = mac_addr; int i; char separator; if (fmt[1] == 'F') { /* FDDI canonical format */ separator = '-'; } else { separator = ':'; } for (i = 0; i < 6; i++) { p = pack_hex_byte(p, addr[i]); if (fmt[0] == 'M' && i != 5) *p++ = separator; } *p = '\0'; return string(buf, end, mac_addr, spec); } static noinline_for_stack char *ip4_string(char *p, const u8 *addr, const char *fmt) { int i; bool leading_zeros = (fmt[0] == 'i'); int index; int step; switch (fmt[2]) { case 'h': #ifdef __BIG_ENDIAN index = 0; step = 1; #else index = 3; step = -1; #endif break; case 'l': index = 3; step = -1; break; case 'n': case 'b': default: index = 0; step = 1; break; } for (i = 0; i < 4; i++) { char temp[3]; /* hold each IP quad in reverse order */ int digits = put_dec_trunc(temp, addr[index]) - temp; if (leading_zeros) { if (digits < 3) *p++ = '0'; if (digits < 2) *p++ = '0'; } /* reverse the digits in the quad */ while (digits--) *p++ = temp[digits]; if (i < 3) *p++ = '.'; index += step; } *p = '\0'; return p; } static noinline_for_stack char *ip6_compressed_string(char *p, const char *addr) { int i, j, range; unsigned char zerolength[8]; int longest = 1; int colonpos 
= -1; u16 word; u8 hi, lo; bool needcolon = false; bool useIPv4; struct in6_addr in6; memcpy(&in6, addr, sizeof(struct in6_addr)); useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6); memset(zerolength, 0, sizeof(zerolength)); if (useIPv4) range = 6; else range = 8; /* find position of longest 0 run */ for (i = 0; i < range; i++) { for (j = i; j < range; j++) { if (in6.s6_addr16[j] != 0) break; zerolength[i]++; } } for (i = 0; i < range; i++) { if (zerolength[i] > longest) { longest = zerolength[i]; colonpos = i; } } /* emit address */ for (i = 0; i < range; i++) { if (i == colonpos) { if (needcolon || i == 0) *p++ = ':'; *p++ = ':'; needcolon = false; i += longest - 1; continue; } if (needcolon) { *p++ = ':'; needcolon = false; } /* hex u16 without leading 0s */ word = ntohs(in6.s6_addr16[i]); hi = word >> 8; lo = word & 0xff; if (hi) { if (hi > 0x0f) p = pack_hex_byte(p, hi); else *p++ = hex_asc_lo(hi); p = pack_hex_byte(p, lo); } else if (lo > 0x0f) p = pack_hex_byte(p, lo); else *p++ = hex_asc_lo(lo); needcolon = true; } if (useIPv4) { if (needcolon) *p++ = ':'; p = ip4_string(p, &in6.s6_addr[12], "I4"); } *p = '\0'; return p; } static noinline_for_stack char *ip6_string(char *p, const char *addr, const char *fmt) { int i; for (i = 0; i < 8; i++) { p = pack_hex_byte(p, *addr++); p = pack_hex_byte(p, *addr++); if (fmt[0] == 'I' && i != 7) *p++ = ':'; } *p = '\0'; return p; } static noinline_for_stack char *ip6_addr_string(char *buf, char *end, const u8 *addr, struct printf_spec spec, const char *fmt) { char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")]; if (fmt[0] == 'I' && fmt[2] == 'c') ip6_compressed_string(ip6_addr, addr); else ip6_string(ip6_addr, addr, fmt); return string(buf, end, ip6_addr, spec); } static noinline_for_stack char *ip4_addr_string(char *buf, char *end, const u8 *addr, struct printf_spec spec, const char *fmt) { char ip4_addr[sizeof("255.255.255.255")]; ip4_string(ip4_addr, addr, fmt); return string(buf, end, 
ip4_addr, spec); } static noinline_for_stack char *uuid_string(char *buf, char *end, const u8 *addr, struct printf_spec spec, const char *fmt) { char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")]; char *p = uuid; int i; static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15}; const u8 *index = be; bool uc = false; switch (*(++fmt)) { case 'L': uc = true; /* fall-through */ case 'l': index = le; break; case 'B': uc = true; break; } for (i = 0; i < 16; i++) { p = pack_hex_byte(p, addr[index[i]]); switch (i) { case 3: case 5: case 7: case 9: *p++ = '-'; break; } } *p = 0; if (uc) { p = uuid; do { *p = toupper(*p); } while (*(++p)); } return string(buf, end, uuid, spec); } int kptr_restrict __read_mostly; /* * Show a '%p' thing. A kernel extension is that the '%p' is followed * by an extra set of alphanumeric characters that are extended format * specifiers. * * Right now we handle: * * - 'F' For symbolic function descriptor pointers with offset * - 'f' For simple symbolic function names without offset * - 'S' For symbolic direct pointers with offset * - 's' For symbolic direct pointers without offset * - 'B' For backtraced symbolic direct pointers with offset * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] * - 'M' For a 6-byte MAC address, it prints the address in the * usual colon-separated hex notation * - 'm' For a 6-byte MAC address, it prints the hex address without colons * - 'MF' For a 6-byte MAC FDDI address, it prints the address * with a dash-separated hex notation * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) * IPv6 uses colon separated network-order 16 bit hex with leading 0's * - 'i' [46] for 'raw' IPv4/IPv6 addresses * IPv6 omits the colons (01020304...0f) * IPv4 uses dot-separated decimal with leading 0's 
(010.123.045.006) * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order * - 'I6c' for IPv6 addresses printed as specified by * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" * Options for %pU are: * b big endian lower case hex (default) * B big endian UPPER case hex * l little endian lower case hex * L little endian UPPER case hex * big endian output byte order is: * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] * little endian output byte order is: * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] * - 'V' For a struct va_format which contains a format string * and va_list *, * call vsnprintf(->format, *->va_list). * Implements a "recursive vsnprintf". * Do not use this feature without some mechanism to verify the * correctness of the format string and va_list arguments. * - 'K' For a kernel pointer that should be hidden from unprivileged users * * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a * pointer to the real address. */ static noinline_for_stack char *pointer(const char *fmt, char *buf, char *end, void *ptr, struct printf_spec spec) { if (!ptr && *fmt != 'K') { /* * Print (null) with the same width as a pointer so it makes * tabular output look nice. 
*/ if (spec.field_width == -1) spec.field_width = 2 * sizeof(void *); return string(buf, end, "(null)", spec); } switch (*fmt) { case 'F': case 'f': ptr = dereference_function_descriptor(ptr); /* Fallthrough */ case 'S': case 's': case 'B': return symbol_string(buf, end, ptr, spec, *fmt); case 'R': case 'r': return resource_string(buf, end, ptr, spec, fmt); case 'M': /* Colon separated: 00:01:02:03:04:05 */ case 'm': /* Contiguous: 000102030405 */ /* [mM]F (FDDI, bit reversed) */ return mac_address_string(buf, end, ptr, spec, fmt); case 'I': /* Formatted IP supported * 4: 1.2.3.4 * 6: 0001:0203:...:0708 * 6c: 1::708 or 1::1.2.3.4 */ case 'i': /* Contiguous: * 4: 001.002.003.004 * 6: 000102...0f */ switch (fmt[1]) { case '6': return ip6_addr_string(buf, end, ptr, spec, fmt); case '4': return ip4_addr_string(buf, end, ptr, spec, fmt); } break; case 'U': return uuid_string(buf, end, ptr, spec, fmt); case 'V': return buf + vsnprintf(buf, end - buf, ((struct va_format *)ptr)->fmt, *(((struct va_format *)ptr)->va)); case 'K': /* * %pK cannot be used in IRQ context because its test * for CAP_SYSLOG would be meaningless. */ if (in_irq() || in_serving_softirq() || in_nmi()) { if (spec.field_width == -1) spec.field_width = 2 * sizeof(void *); return string(buf, end, "pK-error", spec); } if (!((kptr_restrict == 0) || (kptr_restrict == 1 && has_capability_noaudit(current, CAP_SYSLOG)))) ptr = NULL; break; } spec.flags |= SMALL; if (spec.field_width == -1) { spec.field_width = 2 * sizeof(void *); spec.flags |= ZEROPAD; } spec.base = 16; return number(buf, end, (unsigned long) ptr, spec); } /* * Helper function to decode printf style format. * Each call decode a token from the format and return the * number of characters read (or likely the delta where it wants * to go on the next call). * The decoded token is returned through the parameters * * 'h', 'l', or 'L' for integer fields * 'z' support added 23/7/1999 S.H. 
* 'z' changed to 'Z' --davidm 1/25/99 * 't' added for ptrdiff_t * * @fmt: the format string * @type of the token returned * @flags: various flags such as +, -, # tokens.. * @field_width: overwritten width * @base: base of the number (octal, hex, ...) * @precision: precision of a number * @qualifier: qualifier of a number (long, size_t, ...) */ static noinline_for_stack int format_decode(const char *fmt, struct printf_spec *spec) { const char *start = fmt; /* we finished early by reading the field width */ if (spec->type == FORMAT_TYPE_WIDTH) { if (spec->field_width < 0) { spec->field_width = -spec->field_width; spec->flags |= LEFT; } spec->type = FORMAT_TYPE_NONE; goto precision; } /* we finished early by reading the precision */ if (spec->type == FORMAT_TYPE_PRECISION) { if (spec->precision < 0) spec->precision = 0; spec->type = FORMAT_TYPE_NONE; goto qualifier; } /* By default */ spec->type = FORMAT_TYPE_NONE; for (; *fmt ; ++fmt) { if (*fmt == '%') break; } /* Return the current non-format string */ if (fmt != start || !*fmt) return fmt - start; /* Process flags */ spec->flags = 0; while (1) { /* this also skips first '%' */ bool found = true; ++fmt; switch (*fmt) { case '-': spec->flags |= LEFT; break; case '+': spec->flags |= PLUS; break; case ' ': spec->flags |= SPACE; break; case '#': spec->flags |= SPECIAL; break; case '0': spec->flags |= ZEROPAD; break; default: found = false; } if (!found) break; } /* get field width */ spec->field_width = -1; if (isdigit(*fmt)) spec->field_width = skip_atoi(&fmt); else if (*fmt == '*') { /* it's the next argument */ spec->type = FORMAT_TYPE_WIDTH; return ++fmt - start; } precision: /* get the precision */ spec->precision = -1; if (*fmt == '.') { ++fmt; if (isdigit(*fmt)) { spec->precision = skip_atoi(&fmt); if (spec->precision < 0) spec->precision = 0; } else if (*fmt == '*') { /* it's the next argument */ spec->type = FORMAT_TYPE_PRECISION; return ++fmt - start; } } qualifier: /* get the conversion qualifier */ 
spec->qualifier = -1; if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || TOLOWER(*fmt) == 'z' || *fmt == 't') { spec->qualifier = *fmt++; if (unlikely(spec->qualifier == *fmt)) { if (spec->qualifier == 'l') { spec->qualifier = 'L'; ++fmt; } else if (spec->qualifier == 'h') { spec->qualifier = 'H'; ++fmt; } } } /* default base */ spec->base = 10; switch (*fmt) { case 'c': spec->type = FORMAT_TYPE_CHAR; return ++fmt - start; case 's': spec->type = FORMAT_TYPE_STR; return ++fmt - start; case 'p': spec->type = FORMAT_TYPE_PTR; return fmt - start; /* skip alnum */ case 'n': spec->type = FORMAT_TYPE_NRCHARS; return ++fmt - start; case '%': spec->type = FORMAT_TYPE_PERCENT_CHAR; return ++fmt - start; /* integer number formats - set up the flags and "break" */ case 'o': spec->base = 8; break; case 'x': spec->flags |= SMALL; case 'X': spec->base = 16; break; case 'd': case 'i': spec->flags |= SIGN; case 'u': break; default: spec->type = FORMAT_TYPE_INVALID; return fmt - start; } if (spec->qualifier == 'L') spec->type = FORMAT_TYPE_LONG_LONG; else if (spec->qualifier == 'l') { if (spec->flags & SIGN) spec->type = FORMAT_TYPE_LONG; else spec->type = FORMAT_TYPE_ULONG; } else if (TOLOWER(spec->qualifier) == 'z') { spec->type = FORMAT_TYPE_SIZE_T; } else if (spec->qualifier == 't') { spec->type = FORMAT_TYPE_PTRDIFF; } else if (spec->qualifier == 'H') { if (spec->flags & SIGN) spec->type = FORMAT_TYPE_BYTE; else spec->type = FORMAT_TYPE_UBYTE; } else if (spec->qualifier == 'h') { if (spec->flags & SIGN) spec->type = FORMAT_TYPE_SHORT; else spec->type = FORMAT_TYPE_USHORT; } else { if (spec->flags & SIGN) spec->type = FORMAT_TYPE_INT; else spec->type = FORMAT_TYPE_UINT; } return ++fmt - start; } /** * vsnprintf - Format a string and place it in a buffer * @buf: The buffer to place the result into * @size: The size of the buffer, including the trailing null space * @fmt: The format string to use * @args: Arguments for the format string * * This function follows C99 vsnprintf, but has 
some extensions: * %pS output the name of a text symbol with offset * %ps output the name of a text symbol without offset * %pF output the name of a function pointer with its offset * %pf output the name of a function pointer without its offset * %pB output the name of a backtrace symbol with its offset * %pR output the address range in a struct resource with decoded flags * %pr output the address range in a struct resource with raw flags * %pM output a 6-byte MAC address with colons * %pm output a 6-byte MAC address without colons * %pI4 print an IPv4 address without leading zeros * %pi4 print an IPv4 address with leading zeros * %pI6 print an IPv6 address with colons * %pi6 print an IPv6 address without colons * %pI6c print an IPv6 address as specified by * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper * case. * %n is ignored * * The return value is the number of characters which would * be generated for the given input, excluding the trailing * '\0', as per ISO C99. If you want to have the exact * number of characters written into @buf as return value * (not including the trailing '\0'), use vscnprintf(). If the * return is greater than or equal to @size, the resulting * string is truncated. * * Call this function if you are already dealing with a va_list. * You probably want snprintf() instead. */ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) { unsigned long long num; char *str, *end; struct printf_spec spec = {0}; /* Reject out-of-range values early. Large positive sizes are used for unknown buffer sizes. 
*/ if (WARN_ON_ONCE((int) size < 0)) return 0; str = buf; end = buf + size; /* Make sure end is always >= buf */ if (end < buf) { end = ((void *)-1); size = end - buf; } while (*fmt) { const char *old_fmt = fmt; int read = format_decode(fmt, &spec); fmt += read; switch (spec.type) { case FORMAT_TYPE_NONE: { int copy = read; if (str < end) { if (copy > end - str) copy = end - str; memcpy(str, old_fmt, copy); } str += read; break; } case FORMAT_TYPE_WIDTH: spec.field_width = va_arg(args, int); break; case FORMAT_TYPE_PRECISION: spec.precision = va_arg(args, int); break; case FORMAT_TYPE_CHAR: { char c; if (!(spec.flags & LEFT)) { while (--spec.field_width > 0) { if (str < end) *str = ' '; ++str; } } c = (unsigned char) va_arg(args, int); if (str < end) *str = c; ++str; while (--spec.field_width > 0) { if (str < end) *str = ' '; ++str; } break; } case FORMAT_TYPE_STR: str = string(str, end, va_arg(args, char *), spec); break; case FORMAT_TYPE_PTR: str = pointer(fmt+1, str, end, va_arg(args, void *), spec); while (isalnum(*fmt)) fmt++; break; case FORMAT_TYPE_PERCENT_CHAR: if (str < end) *str = '%'; ++str; break; case FORMAT_TYPE_INVALID: if (str < end) *str = '%'; ++str; break; case FORMAT_TYPE_NRCHARS: { u8 qualifier = spec.qualifier; if (qualifier == 'l') { long *ip = va_arg(args, long *); *ip = (str - buf); } else if (TOLOWER(qualifier) == 'z') { size_t *ip = va_arg(args, size_t *); *ip = (str - buf); } else { int *ip = va_arg(args, int *); *ip = (str - buf); } break; } default: switch (spec.type) { case FORMAT_TYPE_LONG_LONG: num = va_arg(args, long long); break; case FORMAT_TYPE_ULONG: num = va_arg(args, unsigned long); break; case FORMAT_TYPE_LONG: num = va_arg(args, long); break; case FORMAT_TYPE_SIZE_T: num = va_arg(args, size_t); break; case FORMAT_TYPE_PTRDIFF: num = va_arg(args, ptrdiff_t); break; case FORMAT_TYPE_UBYTE: num = (unsigned char) va_arg(args, int); break; case FORMAT_TYPE_BYTE: num = (signed char) va_arg(args, int); break; case 
FORMAT_TYPE_USHORT:
				num = (unsigned short) va_arg(args, int);
				break;
			case FORMAT_TYPE_SHORT:
				num = (short) va_arg(args, int);
				break;
			case FORMAT_TYPE_INT:
				num = (int) va_arg(args, int);
				break;
			default:
				/* FORMAT_TYPE_UINT and anything unhandled */
				num = va_arg(args, unsigned int);
			}

			str = number(str, end, num, spec);
		}
	}

	/* NUL-terminate only if the caller gave us any room at all */
	if (size > 0) {
		if (str < end)
			*str = '\0';
		else
			/* output was truncated: terminate at the last byte */
			end[-1] = '\0';
	}

	/* the trailing null byte doesn't count towards the total */
	return str-buf;
}
EXPORT_SYMBOL(vsnprintf);

/**
 * vscnprintf - Format a string and place it in a buffer
 * @buf: The buffer to place the result into
 * @size: The size of the buffer, including the trailing null space
 * @fmt: The format string to use
 * @args: Arguments for the format string
 *
 * The return value is the number of characters which have been written into
 * the @buf not including the trailing '\0'. If @size is == 0 the function
 * returns 0.
 *
 * Call this function if you are already dealing with a va_list.
 * You probably want scnprintf() instead.
 *
 * See the vsnprintf() documentation for format string extensions over C99.
 */
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
	int i;

	i = vsnprintf(buf, size, fmt, args);

	/* Whole string fit (vsnprintf() already rejected size < 0, so
	 * i >= 0 and the signed/unsigned comparison below is safe). */
	if (likely(i < size))
		return i;
	/* Truncated: report the count actually stored, minus the NUL */
	if (size != 0)
		return size - 1;
	return 0;
}
EXPORT_SYMBOL(vscnprintf);

/**
 * snprintf - Format a string and place it in a buffer
 * @buf: The buffer to place the result into
 * @size: The size of the buffer, including the trailing null space
 * @fmt: The format string to use
 * @...: Arguments for the format string
 *
 * The return value is the number of characters which would be
 * generated for the given input, excluding the trailing null,
 * as per ISO C99.  If the return is greater than or equal to
 * @size, the resulting string is truncated.
 *
 * See the vsnprintf() documentation for format string extensions over C99.
 */
int snprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	return i;
}
EXPORT_SYMBOL(snprintf);

/**
 * scnprintf - Format a string and place it in a buffer
 * @buf: The buffer to place the result into
 * @size: The size of the buffer, including the trailing null space
 * @fmt: The format string to use
 * @...: Arguments for the format string
 *
 * The return value is the number of characters written into @buf not including
 * the trailing '\0'. If @size is == 0 the function returns 0.
 */
int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	/* unlike snprintf(), report chars actually stored, not needed */
	i = vscnprintf(buf, size, fmt, args);
	va_end(args);

	return i;
}
EXPORT_SYMBOL(scnprintf);

/**
 * vsprintf - Format a string and place it in a buffer
 * @buf: The buffer to place the result into
 * @fmt: The format string to use
 * @args: Arguments for the format string
 *
 * The function returns the number of characters written
 * into @buf. Use vsnprintf() or vscnprintf() in order to avoid
 * buffer overflows.
 *
 * Call this function if you are already dealing with a va_list.
 * You probably want sprintf() instead.
 *
 * See the vsnprintf() documentation for format string extensions over C99.
 */
int vsprintf(char *buf, const char *fmt, va_list args)
{
	/* No bound on @buf: the only limit is vsnprintf()'s INT_MAX cap */
	return vsnprintf(buf, INT_MAX, fmt, args);
}
EXPORT_SYMBOL(vsprintf);

/**
 * sprintf - Format a string and place it in a buffer
 * @buf: The buffer to place the result into
 * @fmt: The format string to use
 * @...: Arguments for the format string
 *
 * The function returns the number of characters written
 * into @buf. Use snprintf() or scnprintf() in order to avoid
 * buffer overflows.
 *
 * See the vsnprintf() documentation for format string extensions over C99.
 */
int sprintf(char *buf, const char *fmt, ...)
{ va_list args; int i; va_start(args, fmt); i = vsnprintf(buf, INT_MAX, fmt, args); va_end(args); return i; } EXPORT_SYMBOL(sprintf); #ifdef CONFIG_BINARY_PRINTF /* * bprintf service: * vbin_printf() - VA arguments to binary data * bstr_printf() - Binary data to text string */ /** * vbin_printf - Parse a format string and place args' binary value in a buffer * @bin_buf: The buffer to place args' binary value * @size: The size of the buffer(by words(32bits), not characters) * @fmt: The format string to use * @args: Arguments for the format string * * The format follows C99 vsnprintf, except %n is ignored, and its argument * is skiped. * * The return value is the number of words(32bits) which would be generated for * the given input. * * NOTE: * If the return value is greater than @size, the resulting bin_buf is NOT * valid for bstr_printf(). */ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args) { struct printf_spec spec = {0}; char *str, *end; str = (char *)bin_buf; end = (char *)(bin_buf + size); #define save_arg(type) \ do { \ if (sizeof(type) == 8) { \ unsigned long long value; \ str = PTR_ALIGN(str, sizeof(u32)); \ value = va_arg(args, unsigned long long); \ if (str + sizeof(type) <= end) { \ *(u32 *)str = *(u32 *)&value; \ *(u32 *)(str + 4) = *((u32 *)&value + 1); \ } \ } else { \ unsigned long value; \ str = PTR_ALIGN(str, sizeof(type)); \ value = va_arg(args, int); \ if (str + sizeof(type) <= end) \ *(typeof(type) *)str = (type)value; \ } \ str += sizeof(type); \ } while (0) while (*fmt) { int read = format_decode(fmt, &spec); fmt += read; switch (spec.type) { case FORMAT_TYPE_NONE: case FORMAT_TYPE_INVALID: case FORMAT_TYPE_PERCENT_CHAR: break; case FORMAT_TYPE_WIDTH: case FORMAT_TYPE_PRECISION: save_arg(int); break; case FORMAT_TYPE_CHAR: save_arg(char); break; case FORMAT_TYPE_STR: { const char *save_str = va_arg(args, char *); size_t len; if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE || (unsigned long)save_str < PAGE_SIZE) 
save_str = "(null)"; len = strlen(save_str) + 1; if (str + len < end) memcpy(str, save_str, len); str += len; break; } case FORMAT_TYPE_PTR: save_arg(void *); /* skip all alphanumeric pointer suffixes */ while (isalnum(*fmt)) fmt++; break; case FORMAT_TYPE_NRCHARS: { /* skip %n 's argument */ u8 qualifier = spec.qualifier; void *skip_arg; if (qualifier == 'l') skip_arg = va_arg(args, long *); else if (TOLOWER(qualifier) == 'z') skip_arg = va_arg(args, size_t *); else skip_arg = va_arg(args, int *); break; } default: switch (spec.type) { case FORMAT_TYPE_LONG_LONG: save_arg(long long); break; case FORMAT_TYPE_ULONG: case FORMAT_TYPE_LONG: save_arg(unsigned long); break; case FORMAT_TYPE_SIZE_T: save_arg(size_t); break; case FORMAT_TYPE_PTRDIFF: save_arg(ptrdiff_t); break; case FORMAT_TYPE_UBYTE: case FORMAT_TYPE_BYTE: save_arg(char); break; case FORMAT_TYPE_USHORT: case FORMAT_TYPE_SHORT: save_arg(short); break; default: save_arg(int); } } } return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf; #undef save_arg } EXPORT_SYMBOL_GPL(vbin_printf); /** * bstr_printf - Format a string from binary arguments and place it in a buffer * @buf: The buffer to place the result into * @size: The size of the buffer, including the trailing null space * @fmt: The format string to use * @bin_buf: Binary arguments for the format string * * This function like C99 vsnprintf, but the difference is that vsnprintf gets * arguments from stack, and bstr_printf gets arguments from @bin_buf which is * a binary buffer that generated by vbin_printf. * * The format follows C99 vsnprintf, but has some extensions: * see vsnprintf comment for details. * * The return value is the number of characters which would * be generated for the given input, excluding the trailing * '\0', as per ISO C99. If you want to have the exact * number of characters written into @buf as return value * (not including the trailing '\0'), use vscnprintf(). 
If the * return is greater than or equal to @size, the resulting * string is truncated. */ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) { struct printf_spec spec = {0}; char *str, *end; const char *args = (const char *)bin_buf; if (WARN_ON_ONCE((int) size < 0)) return 0; str = buf; end = buf + size; #define get_arg(type) \ ({ \ typeof(type) value; \ if (sizeof(type) == 8) { \ args = PTR_ALIGN(args, sizeof(u32)); \ *(u32 *)&value = *(u32 *)args; \ *((u32 *)&value + 1) = *(u32 *)(args + 4); \ } else { \ args = PTR_ALIGN(args, sizeof(type)); \ value = *(typeof(type) *)args; \ } \ args += sizeof(type); \ value; \ }) /* Make sure end is always >= buf */ if (end < buf) { end = ((void *)-1); size = end - buf; } while (*fmt) { const char *old_fmt = fmt; int read = format_decode(fmt, &spec); fmt += read; switch (spec.type) { case FORMAT_TYPE_NONE: { int copy = read; if (str < end) { if (copy > end - str) copy = end - str; memcpy(str, old_fmt, copy); } str += read; break; } case FORMAT_TYPE_WIDTH: spec.field_width = get_arg(int); break; case FORMAT_TYPE_PRECISION: spec.precision = get_arg(int); break; case FORMAT_TYPE_CHAR: { char c; if (!(spec.flags & LEFT)) { while (--spec.field_width > 0) { if (str < end) *str = ' '; ++str; } } c = (unsigned char) get_arg(char); if (str < end) *str = c; ++str; while (--spec.field_width > 0) { if (str < end) *str = ' '; ++str; } break; } case FORMAT_TYPE_STR: { const char *str_arg = args; args += strlen(str_arg) + 1; str = string(str, end, (char *)str_arg, spec); break; } case FORMAT_TYPE_PTR: str = pointer(fmt+1, str, end, get_arg(void *), spec); while (isalnum(*fmt)) fmt++; break; case FORMAT_TYPE_PERCENT_CHAR: case FORMAT_TYPE_INVALID: if (str < end) *str = '%'; ++str; break; case FORMAT_TYPE_NRCHARS: /* skip */ break; default: { unsigned long long num; switch (spec.type) { case FORMAT_TYPE_LONG_LONG: num = get_arg(long long); break; case FORMAT_TYPE_ULONG: case FORMAT_TYPE_LONG: num = get_arg(unsigned 
long); break; case FORMAT_TYPE_SIZE_T: num = get_arg(size_t); break; case FORMAT_TYPE_PTRDIFF: num = get_arg(ptrdiff_t); break; case FORMAT_TYPE_UBYTE: num = get_arg(unsigned char); break; case FORMAT_TYPE_BYTE: num = get_arg(signed char); break; case FORMAT_TYPE_USHORT: num = get_arg(unsigned short); break; case FORMAT_TYPE_SHORT: num = get_arg(short); break; case FORMAT_TYPE_UINT: num = get_arg(unsigned int); break; default: num = get_arg(int); } str = number(str, end, num, spec); } /* default: */ } /* switch(spec.type) */ } /* while(*fmt) */ if (size > 0) { if (str < end) *str = '\0'; else end[-1] = '\0'; } #undef get_arg /* the trailing null byte doesn't count towards the total */ return str - buf; } EXPORT_SYMBOL_GPL(bstr_printf); /** * bprintf - Parse a format string and place args' binary value in a buffer * @bin_buf: The buffer to place args' binary value * @size: The size of the buffer(by words(32bits), not characters) * @fmt: The format string to use * @...: Arguments for the format string * * The function returns the number of words(u32) written * into @bin_buf. */ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) { va_list args; int ret; va_start(args, fmt); ret = vbin_printf(bin_buf, size, fmt, args); va_end(args); return ret; } EXPORT_SYMBOL_GPL(bprintf); #endif /* CONFIG_BINARY_PRINTF */ /** * vsscanf - Unformat a buffer into a list of arguments * @buf: input buffer * @fmt: format of buffer * @args: arguments */ int vsscanf(const char *buf, const char *fmt, va_list args) { const char *str = buf; char *next; char digit; int num = 0; u8 qualifier; u8 base; s16 field_width; bool is_sign; while (*fmt && *str) { /* skip any white space in format */ /* white space in format matchs any amount of * white space, including none, in the input. 
*/ if (isspace(*fmt)) { fmt = skip_spaces(++fmt); str = skip_spaces(str); } /* anything that is not a conversion must match exactly */ if (*fmt != '%' && *fmt) { if (*fmt++ != *str++) break; continue; } if (!*fmt) break; ++fmt; /* skip this conversion. * advance both strings to next white space */ if (*fmt == '*') { while (!isspace(*fmt) && *fmt != '%' && *fmt) fmt++; while (!isspace(*str) && *str) str++; continue; } /* get field width */ field_width = -1; if (isdigit(*fmt)) field_width = skip_atoi(&fmt); /* get conversion qualifier */ qualifier = -1; if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || TOLOWER(*fmt) == 'z') { qualifier = *fmt++; if (unlikely(qualifier == *fmt)) { if (qualifier == 'h') { qualifier = 'H'; fmt++; } else if (qualifier == 'l') { qualifier = 'L'; fmt++; } } } if (!*fmt || !*str) break; base = 10; is_sign = 0; switch (*fmt++) { case 'c': { char *s = (char *)va_arg(args, char*); if (field_width == -1) field_width = 1; do { *s++ = *str++; } while (--field_width > 0 && *str); num++; } continue; case 's': { char *s = (char *)va_arg(args, char *); if (field_width == -1) field_width = SHRT_MAX; /* first, skip leading white space in buffer */ str = skip_spaces(str); /* now copy until next white space */ while (*str && !isspace(*str) && field_width--) *s++ = *str++; *s = '\0'; num++; } continue; case 'n': /* return number of characters read so far */ { int *i = (int *)va_arg(args, int*); *i = str - buf; } continue; case 'o': base = 8; break; case 'x': case 'X': base = 16; break; case 'i': base = 0; case 'd': is_sign = 1; case 'u': break; case '%': /* looking for '%' in str */ if (*str++ != '%') return num; continue; default: /* invalid format; stop here */ return num; } /* have some sort of integer conversion. * first, skip white space in buffer. 
*/ str = skip_spaces(str); digit = *str; if (is_sign && digit == '-') digit = *(str + 1); if (!digit || (base == 16 && !isxdigit(digit)) || (base == 10 && !isdigit(digit)) || (base == 8 && (!isdigit(digit) || digit > '7')) || (base == 0 && !isdigit(digit))) break; switch (qualifier) { case 'H': /* that's 'hh' in format */ if (is_sign) { signed char *s = (signed char *)va_arg(args, signed char *); *s = (signed char)simple_strtol(str, &next, base); } else { unsigned char *s = (unsigned char *)va_arg(args, unsigned char *); *s = (unsigned char)simple_strtoul(str, &next, base); } break; case 'h': if (is_sign) { short *s = (short *)va_arg(args, short *); *s = (short)simple_strtol(str, &next, base); } else { unsigned short *s = (unsigned short *)va_arg(args, unsigned short *); *s = (unsigned short)simple_strtoul(str, &next, base); } break; case 'l': if (is_sign) { long *l = (long *)va_arg(args, long *); *l = simple_strtol(str, &next, base); } else { unsigned long *l = (unsigned long *)va_arg(args, unsigned long *); *l = simple_strtoul(str, &next, base); } break; case 'L': if (is_sign) { long long *l = (long long *)va_arg(args, long long *); *l = simple_strtoll(str, &next, base); } else { unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *); *l = simple_strtoull(str, &next, base); } break; case 'Z': case 'z': { size_t *s = (size_t *)va_arg(args, size_t *); *s = (size_t)simple_strtoul(str, &next, base); } break; default: if (is_sign) { int *i = (int *)va_arg(args, int *); *i = (int)simple_strtol(str, &next, base); } else { unsigned int *i = (unsigned int *)va_arg(args, unsigned int*); *i = (unsigned int)simple_strtoul(str, &next, base); } break; } num++; if (!next) break; str = next; } /* * Now we've come all the way through so either the input string or the * format ended. In the former case, there can be a %n at the current * position in the format that needs to be filled. 
*/
	/* trailing %n after input ran out: store the consumed-char count */
	if (*fmt == '%' && *(fmt + 1) == 'n') {
		int *p = (int *)va_arg(args, int *);
		*p = str - buf;
	}

	return num;
}
EXPORT_SYMBOL(vsscanf);

/**
 * sscanf - Unformat a buffer into a list of arguments
 * @buf: input buffer
 * @fmt: formatting of buffer
 * @...: resulting arguments
 *
 * Returns the number of input items successfully matched and assigned
 * (see vsscanf()).
 */
int sscanf(const char *buf, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsscanf(buf, fmt, args);
	va_end(args);

	return i;
}
EXPORT_SYMBOL(sscanf);
gpl-2.0
hchunhui/newlib
libgloss/xstormy16/eva_app.c
146
1923
/* eva_app.c -- Glue code for linking apps to run under GDB debugger control.
 *
 * Copyright (c) 2001 Red Hat, Inc.
 *
 * The authors hereby grant permission to use, copy, modify, distribute,
 * and license this software and its documentation for any purpose, provided
 * that existing copyright notices are retained in all copies and that this
 * notice is included verbatim in any distributions. No written agreement,
 * license, or royalty fee is required for any of the authorized uses.
 * Modifications to this software may be copyrighted by their authors
 * and need not follow the licensing terms described here, provided that
 * the new terms are clearly indicated on the first page of each file where
 * they apply.
 */

#include "glue.h"

/* Function-pointer shapes of the debug stub's I/O entry points. */
typedef void (*write_proc_t)(char *buf, int nbytes);
typedef int (*read_proc_t)(char *buf, int nbytes);

/* There is no "syscall", so we just call directly into the stub code
   at fixed addresses. */
#define STUB_WRITE(p,n) ((write_proc_t)0x8084)((p),(n))
#define STUB_READ(p,n) ((read_proc_t)0x8088)((p),(n))

/*
 * print -- do a raw print of a NUL-terminated string via the stub.
 */
void print(char *ptr)
{
  STUB_WRITE(ptr, strlen(ptr));
}

/*
 * write -- write bytes to the serial port. Ignore fd, since
 * stdout and stderr are the same. Since we have no filesystem,
 * open will only return an error.
 */
int _write (int fd, char *buf, int nbytes)
{
  STUB_WRITE(buf, nbytes);
  /* Always reports full success; the stub write has no error path here. */
  return (nbytes);
}

/*
 * read -- hand the request straight to the stub; fd is ignored
 * (no filesystem). Returns whatever the stub's read routine returns.
 */
int _read (int fd, char *buf, int nbytes)
{
  return STUB_READ(buf, nbytes);
}

/* End of the program's data/bss, provided by the linker; the heap grows
   upward from here toward HEAP_LIMIT. */
extern char _end[];
#define HEAP_LIMIT ((char *)0xffff)

/*
 * sbrk -- grow the heap by inc bytes. Returns the previous break on
 * success, or (void *)-1 if the request would pass HEAP_LIMIT.
 * NOTE(review): a negative inc moves the break downward unchecked --
 * confirm callers never shrink the heap.
 */
void * _sbrk(int inc)
{
  static char *heap_ptr = _end;  /* current break, persists across calls */
  void *base;

  if (inc > (HEAP_LIMIT - heap_ptr))
    return (void *)-1;

  base = heap_ptr;
  heap_ptr += inc;
  return base;
}

/*
 * exit -- there is no OS to return to, so spin forever hitting a
 * breakpoint so the debugger regains control. The exit code n is
 * discarded.
 */
void _exit(int n)
{
  while (1)
    {
      asm volatile ("nop");
      asm volatile (".hword 0x0006"); /* breakpoint (special illegal insn) */
    }
}
gpl-2.0
kmihelich/linux-espressobin
drivers/media/usb/stk1160/stk1160-v4l.c
146
21336
/* * STK1160 driver * * Copyright (C) 2012 Ezequiel Garcia * <elezegarcia--a.t--gmail.com> * * Based on Easycap driver by R.M. Thomas * Copyright (C) 2010 R.M. Thomas * <rmthomas--a.t--sciolus.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/usb.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-fh.h> #include <media/v4l2-event.h> #include <media/videobuf2-vmalloc.h> #include <media/saa7115.h> #include "stk1160.h" #include "stk1160-reg.h" static bool keep_buffers; module_param(keep_buffers, bool, 0644); MODULE_PARM_DESC(keep_buffers, "don't release buffers upon stop streaming"); enum stk1160_decimate_mode { STK1160_DECIMATE_MORE_THAN_HALF, STK1160_DECIMATE_LESS_THAN_HALF, }; struct stk1160_decimate_ctrl { bool col_en, row_en; enum stk1160_decimate_mode col_mode, row_mode; unsigned int col_n, row_n; }; /* supported video standards */ static struct stk1160_fmt format[] = { { .name = "16 bpp YUY2, 4:2:2, packed", .fourcc = V4L2_PIX_FMT_UYVY, .depth = 16, } }; /* * Helper to find the next divisor that results in modulo being zero. * This is required to guarantee valid decimation unit counts. 
*/ static unsigned int div_round_integer(unsigned int x, unsigned int y) { for (;; y++) { if (x % y == 0) return x / y; } } static void stk1160_set_std(struct stk1160 *dev) { int i; static struct regval std525[] = { /* 720x480 */ /* Frame start */ {STK116_CFSPO_STX_L, 0x0000}, {STK116_CFSPO_STX_H, 0x0000}, {STK116_CFSPO_STY_L, 0x0003}, {STK116_CFSPO_STY_H, 0x0000}, /* Frame end */ {STK116_CFEPO_ENX_L, 0x05a0}, {STK116_CFEPO_ENX_H, 0x0005}, {STK116_CFEPO_ENY_L, 0x00f3}, {STK116_CFEPO_ENY_H, 0x0000}, {0xffff, 0xffff} }; static struct regval std625[] = { /* 720x576 */ /* TODO: Each line of frame has some junk at the end */ /* Frame start */ {STK116_CFSPO, 0x0000}, {STK116_CFSPO+1, 0x0000}, {STK116_CFSPO+2, 0x0001}, {STK116_CFSPO+3, 0x0000}, /* Frame end */ {STK116_CFEPO, 0x05a0}, {STK116_CFEPO+1, 0x0005}, {STK116_CFEPO+2, 0x0121}, {STK116_CFEPO+3, 0x0001}, {0xffff, 0xffff} }; if (dev->norm & V4L2_STD_525_60) { stk1160_dbg("registers to NTSC like standard\n"); for (i = 0; std525[i].reg != 0xffff; i++) stk1160_write_reg(dev, std525[i].reg, std525[i].val); } else { stk1160_dbg("registers to PAL like standard\n"); for (i = 0; std625[i].reg != 0xffff; i++) stk1160_write_reg(dev, std625[i].reg, std625[i].val); } } static void stk1160_set_fmt(struct stk1160 *dev, struct stk1160_decimate_ctrl *ctrl) { u32 val = 0; if (ctrl) { /* * Since the format is UYVY, the device must skip or send * a number of rows/columns multiple of four. This way, the * colour format is preserved. The STK1160_DEC_UNIT_SIZE bit * does exactly this. */ val |= STK1160_DEC_UNIT_SIZE; val |= ctrl->col_en ? STK1160_H_DEC_EN : 0; val |= ctrl->row_en ? STK1160_V_DEC_EN : 0; val |= ctrl->col_mode == STK1160_DECIMATE_MORE_THAN_HALF ? STK1160_H_DEC_MODE : 0; val |= ctrl->row_mode == STK1160_DECIMATE_MORE_THAN_HALF ? 
STK1160_V_DEC_MODE : 0; /* Horizontal count units */ stk1160_write_reg(dev, STK1160_DMCTRL_H_UNITS, ctrl->col_n); /* Vertical count units */ stk1160_write_reg(dev, STK1160_DMCTRL_V_UNITS, ctrl->row_n); stk1160_dbg("decimate 0x%x, column units %d, row units %d\n", val, ctrl->col_n, ctrl->row_n); } /* Decimation control */ stk1160_write_reg(dev, STK1160_DMCTRL, val); } /* * Set a new alternate setting. * Returns true is dev->max_pkt_size has changed, false otherwise. */ static bool stk1160_set_alternate(struct stk1160 *dev) { int i, prev_alt = dev->alt; unsigned int min_pkt_size; bool new_pkt_size; /* * If we don't set right alternate, * then we will get a green screen with junk. */ min_pkt_size = STK1160_MIN_PKT_SIZE; for (i = 0; i < dev->num_alt; i++) { /* stop when the selected alt setting offers enough bandwidth */ if (dev->alt_max_pkt_size[i] >= min_pkt_size) { dev->alt = i; break; /* * otherwise make sure that we end up with the maximum bandwidth * because the min_pkt_size equation might be wrong... */ } else if (dev->alt_max_pkt_size[i] > dev->alt_max_pkt_size[dev->alt]) dev->alt = i; } stk1160_dbg("setting alternate %d\n", dev->alt); if (dev->alt != prev_alt) { stk1160_dbg("minimum isoc packet size: %u (alt=%d)\n", min_pkt_size, dev->alt); stk1160_dbg("setting alt %d with wMaxPacketSize=%u\n", dev->alt, dev->alt_max_pkt_size[dev->alt]); usb_set_interface(dev->udev, 0, dev->alt); } new_pkt_size = dev->max_pkt_size != dev->alt_max_pkt_size[dev->alt]; dev->max_pkt_size = dev->alt_max_pkt_size[dev->alt]; return new_pkt_size; } static int stk1160_start_streaming(struct stk1160 *dev) { bool new_pkt_size; int rc = 0; int i; /* Check device presence */ if (!dev->udev) return -ENODEV; if (mutex_lock_interruptible(&dev->v4l_lock)) return -ERESTARTSYS; /* * For some reason it is mandatory to set alternate *first* * and only *then* initialize isoc urbs. 
* Someone please explain me why ;) */ new_pkt_size = stk1160_set_alternate(dev); /* * We (re)allocate isoc urbs if: * there is no allocated isoc urbs, OR * a new dev->max_pkt_size is detected */ if (!dev->isoc_ctl.num_bufs || new_pkt_size) { rc = stk1160_alloc_isoc(dev); if (rc < 0) goto out_stop_hw; } /* submit urbs and enables IRQ */ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) { rc = usb_submit_urb(dev->isoc_ctl.urb[i], GFP_KERNEL); if (rc) { stk1160_err("cannot submit urb[%d] (%d)\n", i, rc); goto out_uninit; } } /* Start saa711x */ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1); dev->sequence = 0; /* Start stk1160 */ stk1160_write_reg(dev, STK1160_DCTRL, 0xb3); stk1160_write_reg(dev, STK1160_DCTRL+3, 0x00); stk1160_dbg("streaming started\n"); mutex_unlock(&dev->v4l_lock); return 0; out_uninit: stk1160_uninit_isoc(dev); out_stop_hw: usb_set_interface(dev->udev, 0, 0); stk1160_clear_queue(dev); mutex_unlock(&dev->v4l_lock); return rc; } /* Must be called with v4l_lock hold */ static void stk1160_stop_hw(struct stk1160 *dev) { /* If the device is not physically present, there is nothing to do */ if (!dev->udev) return; /* set alternate 0 */ dev->alt = 0; stk1160_dbg("setting alternate %d\n", dev->alt); usb_set_interface(dev->udev, 0, 0); /* Stop stk1160 */ stk1160_write_reg(dev, STK1160_DCTRL, 0x00); stk1160_write_reg(dev, STK1160_DCTRL+3, 0x00); /* Stop saa711x */ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0); } static int stk1160_stop_streaming(struct stk1160 *dev) { if (mutex_lock_interruptible(&dev->v4l_lock)) return -ERESTARTSYS; /* * Once URBs are cancelled, the URB complete handler * won't be running. This is required to safely release the * current buffer (dev->isoc_ctl.buf). */ stk1160_cancel_isoc(dev); /* * It is possible to keep buffers around using a module parameter. * This is intended to avoid memory fragmentation. 
*/ if (!keep_buffers) stk1160_free_isoc(dev); stk1160_stop_hw(dev); stk1160_clear_queue(dev); stk1160_dbg("streaming stopped\n"); mutex_unlock(&dev->v4l_lock); return 0; } static struct v4l2_file_operations stk1160_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .read = vb2_fop_read, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, .unlocked_ioctl = video_ioctl2, }; /* * vidioc ioctls */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct stk1160 *dev = video_drvdata(file); strcpy(cap->driver, "stk1160"); strcpy(cap->card, "stk1160"); usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index != 0) return -EINVAL; strlcpy(f->description, format[f->index].name, sizeof(f->description)); f->pixelformat = format[f->index].fourcc; return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct stk1160 *dev = video_drvdata(file); f->fmt.pix.width = dev->width; f->fmt.pix.height = dev->height; f->fmt.pix.field = V4L2_FIELD_INTERLACED; f->fmt.pix.pixelformat = dev->fmt->fourcc; f->fmt.pix.bytesperline = dev->width * 2; f->fmt.pix.sizeimage = dev->height * f->fmt.pix.bytesperline; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } static int stk1160_try_fmt(struct stk1160 *dev, struct v4l2_format *f, struct stk1160_decimate_ctrl *ctrl) { unsigned int width, height; unsigned int base_width, base_height; unsigned int col_n, row_n; enum stk1160_decimate_mode col_mode, row_mode; bool col_en, row_en; base_width = 720; base_height = (dev->norm & V4L2_STD_525_60) ? 
480 : 576; /* Minimum width and height is 5% the frame size */ width = clamp_t(unsigned int, f->fmt.pix.width, base_width / 20, base_width); height = clamp_t(unsigned int, f->fmt.pix.height, base_height / 20, base_height); /* Let's set default no decimation values */ col_n = 0; row_n = 0; col_en = false; row_en = false; f->fmt.pix.width = base_width; f->fmt.pix.height = base_height; row_mode = STK1160_DECIMATE_LESS_THAN_HALF; col_mode = STK1160_DECIMATE_LESS_THAN_HALF; if (width < base_width && width > base_width / 2) { /* * The device will send count units for each * unit skipped. This means count unit is: * * n = width / (frame width - width) * * And the width is: * * width = (n / n + 1) * frame width */ col_n = div_round_integer(width, base_width - width); if (col_n > 0 && col_n <= 255) { col_en = true; col_mode = STK1160_DECIMATE_LESS_THAN_HALF; f->fmt.pix.width = (base_width * col_n) / (col_n + 1); } } else if (width <= base_width / 2) { /* * The device will skip count units for each * unit sent. 
This means count is: * * n = (frame width / width) - 1 * * And the width is: * * width = frame width / (n + 1) */ col_n = div_round_integer(base_width, width) - 1; if (col_n > 0 && col_n <= 255) { col_en = true; col_mode = STK1160_DECIMATE_MORE_THAN_HALF; f->fmt.pix.width = base_width / (col_n + 1); } } if (height < base_height && height > base_height / 2) { row_n = div_round_integer(height, base_height - height); if (row_n > 0 && row_n <= 255) { row_en = true; row_mode = STK1160_DECIMATE_LESS_THAN_HALF; f->fmt.pix.height = (base_height * row_n) / (row_n + 1); } } else if (height <= base_height / 2) { row_n = div_round_integer(base_height, height) - 1; if (row_n > 0 && row_n <= 255) { row_en = true; row_mode = STK1160_DECIMATE_MORE_THAN_HALF; f->fmt.pix.height = base_height / (row_n + 1); } } f->fmt.pix.pixelformat = dev->fmt->fourcc; f->fmt.pix.field = V4L2_FIELD_INTERLACED; f->fmt.pix.bytesperline = f->fmt.pix.width * 2; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; if (ctrl) { ctrl->col_en = col_en; ctrl->col_n = col_n; ctrl->col_mode = col_mode; ctrl->row_en = row_en; ctrl->row_n = row_n; ctrl->row_mode = row_mode; } stk1160_dbg("width %d, height %d\n", f->fmt.pix.width, f->fmt.pix.height); return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct stk1160 *dev = video_drvdata(file); return stk1160_try_fmt(dev, f, NULL); } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct stk1160 *dev = video_drvdata(file); struct vb2_queue *q = &dev->vb_vidq; struct stk1160_decimate_ctrl ctrl; int rc; if (vb2_is_busy(q)) return -EBUSY; rc = stk1160_try_fmt(dev, f, &ctrl); if (rc < 0) return rc; dev->width = f->fmt.pix.width; dev->height = f->fmt.pix.height; stk1160_set_fmt(dev, &ctrl); return 0; } static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *norm) { struct stk1160 *dev = 
video_drvdata(file); v4l2_device_call_all(&dev->v4l2_dev, 0, video, querystd, norm); return 0; } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm) { struct stk1160 *dev = video_drvdata(file); *norm = dev->norm; return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm) { struct stk1160 *dev = video_drvdata(file); struct vb2_queue *q = &dev->vb_vidq; if (dev->norm == norm) return 0; if (vb2_is_busy(q)) return -EBUSY; /* Check device presence */ if (!dev->udev) return -ENODEV; /* We need to set this now, before we call stk1160_set_std */ dev->width = 720; dev->height = (norm & V4L2_STD_525_60) ? 480 : 576; dev->norm = norm; stk1160_set_std(dev); /* Calling with NULL disables frame decimation */ stk1160_set_fmt(dev, NULL); v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_std, dev->norm); return 0; } static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct stk1160 *dev = video_drvdata(file); if (i->index > STK1160_MAX_INPUT) return -EINVAL; /* S-Video special handling */ if (i->index == STK1160_SVIDEO_INPUT) sprintf(i->name, "S-Video"); else sprintf(i->name, "Composite%d", i->index); i->type = V4L2_INPUT_TYPE_CAMERA; i->std = dev->vdev.tvnorms; return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { struct stk1160 *dev = video_drvdata(file); *i = dev->ctl_input; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { struct stk1160 *dev = video_drvdata(file); if (i > STK1160_MAX_INPUT) return -EINVAL; dev->ctl_input = i; stk1160_select_input(dev); return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int vidioc_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg) { struct stk1160 *dev = video_drvdata(file); int rc; u8 val; /* Match host */ rc = stk1160_read_reg(dev, reg->reg, &val); reg->val = val; reg->size = 1; return rc; } static int vidioc_s_register(struct file *file, void *priv, const struct 
v4l2_dbg_register *reg) { struct stk1160 *dev = video_drvdata(file); /* Match host */ return stk1160_write_reg(dev, reg->reg, reg->val); } #endif static const struct v4l2_ioctl_ops stk1160_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_querystd = vidioc_querystd, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, /* vb2 takes care of these */ .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; /********************************************************************/ /* * Videobuf2 operations */ static int queue_setup(struct vb2_queue *vq, const void *parg, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct stk1160 *dev = vb2_get_drv_priv(vq); unsigned long size; size = dev->width * dev->height * 2; /* * Here we can change the number of buffers being requested. 
* So, we set a minimum and a maximum like this: */ *nbuffers = clamp_t(unsigned int, *nbuffers, STK1160_MIN_VIDEO_BUFFERS, STK1160_MAX_VIDEO_BUFFERS); /* This means a packed colorformat */ *nplanes = 1; sizes[0] = size; stk1160_dbg("%s: buffer count %d, each %ld bytes\n", __func__, *nbuffers, size); return 0; } static void buffer_queue(struct vb2_buffer *vb) { unsigned long flags; struct stk1160 *dev = vb2_get_drv_priv(vb->vb2_queue); struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct stk1160_buffer *buf = container_of(vbuf, struct stk1160_buffer, vb); spin_lock_irqsave(&dev->buf_lock, flags); if (!dev->udev) { /* * If the device is disconnected return the buffer to userspace * directly. The next QBUF call will fail with -ENODEV. */ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); } else { buf->mem = vb2_plane_vaddr(vb, 0); buf->length = vb2_plane_size(vb, 0); buf->bytesused = 0; buf->pos = 0; /* * If buffer length is less from expected then we return * the buffer to userspace directly. 
*/ if (buf->length < dev->width * dev->height * 2) vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); else list_add_tail(&buf->list, &dev->avail_bufs); } spin_unlock_irqrestore(&dev->buf_lock, flags); } static int start_streaming(struct vb2_queue *vq, unsigned int count) { struct stk1160 *dev = vb2_get_drv_priv(vq); return stk1160_start_streaming(dev); } /* abort streaming and wait for last buffer */ static void stop_streaming(struct vb2_queue *vq) { struct stk1160 *dev = vb2_get_drv_priv(vq); stk1160_stop_streaming(dev); } static struct vb2_ops stk1160_video_qops = { .queue_setup = queue_setup, .buf_queue = buffer_queue, .start_streaming = start_streaming, .stop_streaming = stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; static struct video_device v4l_template = { .name = "stk1160", .tvnorms = V4L2_STD_525_60 | V4L2_STD_625_50, .fops = &stk1160_fops, .ioctl_ops = &stk1160_ioctl_ops, .release = video_device_release_empty, }; /********************************************************************/ /* Must be called with both v4l_lock and vb_queue_lock hold */ void stk1160_clear_queue(struct stk1160 *dev) { struct stk1160_buffer *buf; unsigned long flags; /* Release all active buffers */ spin_lock_irqsave(&dev->buf_lock, flags); while (!list_empty(&dev->avail_bufs)) { buf = list_first_entry(&dev->avail_bufs, struct stk1160_buffer, list); list_del(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); stk1160_dbg("buffer [%p/%d] aborted\n", buf, buf->vb.vb2_buf.index); } /* It's important to release the current buffer */ if (dev->isoc_ctl.buf) { buf = dev->isoc_ctl.buf; dev->isoc_ctl.buf = NULL; vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); stk1160_dbg("buffer [%p/%d] aborted\n", buf, buf->vb.vb2_buf.index); } spin_unlock_irqrestore(&dev->buf_lock, flags); } int stk1160_vb2_setup(struct stk1160 *dev) { int rc; struct vb2_queue *q; q = &dev->vb_vidq; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 
q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF; q->drv_priv = dev; q->buf_struct_size = sizeof(struct stk1160_buffer); q->ops = &stk1160_video_qops; q->mem_ops = &vb2_vmalloc_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; rc = vb2_queue_init(q); if (rc < 0) return rc; /* initialize video dma queue */ INIT_LIST_HEAD(&dev->avail_bufs); return 0; } int stk1160_video_register(struct stk1160 *dev) { int rc; /* Initialize video_device with a template structure */ dev->vdev = v4l_template; dev->vdev.queue = &dev->vb_vidq; /* * Provide mutexes for v4l2 core and for videobuf2 queue. * It will be used to protect *only* v4l2 ioctls. */ dev->vdev.lock = &dev->v4l_lock; dev->vdev.queue->lock = &dev->vb_queue_lock; /* This will be used to set video_device parent */ dev->vdev.v4l2_dev = &dev->v4l2_dev; /* NTSC is default */ dev->norm = V4L2_STD_NTSC_M; dev->width = 720; dev->height = 480; /* set default format */ dev->fmt = &format[0]; stk1160_set_std(dev); v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_std, dev->norm); video_set_drvdata(&dev->vdev, dev); rc = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1); if (rc < 0) { stk1160_err("video_register_device failed (%d)\n", rc); return rc; } v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n", video_device_node_name(&dev->vdev)); return 0; }
gpl-2.0
adafruit/adafruit-raspberrypi-linux
drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
146
13756
/****************************************************************************** * * Copyright(c) 2009-2014 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../core.h" #include "../pci.h" #include "reg.h" #include "def.h" #include "phy.h" #include "../rtl8723com/phy_common.h" #include "dm.h" #include "../rtl8723com/dm_common.h" #include "hw.h" #include "fw.h" #include "../rtl8723com/fw_common.h" #include "sw.h" #include "trx.h" #include "led.h" #include "table.h" #include "../btcoexist/rtl_btc.h" #include <linux/vmalloc.h> #include <linux/module.h> static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); /*close ASPM for AMD defaultly */ rtlpci->const_amdpci_aspm = 0; /* ASPM PS mode. * 0 - Disable ASPM, * 1 - Enable ASPM without Clock Req, * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. * set defult to RTL8192CE:3 RTL8192E:2 */ rtlpci->const_pci_aspm = 3; /*Setting for PCI-E device */ rtlpci->const_devicepci_aspm_setting = 0x03; /*Setting for PCI-E bridge */ rtlpci->const_hostpci_aspm_setting = 0x02; /* In Hw/Sw Radio Off situation. 
* 0 - Default, * 1 - From ASPM setting without low Mac Pwr, * 2 - From ASPM setting with low Mac Pwr, * 3 - Bus D3 * set default to RTL8192CE:0 RTL8192SE:2 */ rtlpci->const_hwsw_rfoff_d3 = 0; /* This setting works for those device with * backdoor ASPM setting such as EPHY setting. * 0 - Not support ASPM, * 1 - Support ASPM, * 2 - According to chipset. */ rtlpci->const_support_pciaspm = 1; } int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) { int err = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); rtl8723be_bt_reg_init(hw); rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25); rtlpriv->phy.lck_inprogress = false; mac->ht_enable = true; /* compatible 5G band 88ce just 2.4G band & smsp */ rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; rtlpriv->rtlhal.bandset = BAND_ON_2_4G; rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; rtlpci->receive_config = (RCR_APPFCS | RCR_APP_MIC | RCR_APP_ICV | RCR_APP_PHYST_RXFF | RCR_HTC_LOC_CTRL | RCR_AMF | RCR_ACF | RCR_ADF | RCR_AICV | RCR_AB | RCR_AM | RCR_APM | 0); rtlpci->irq_mask[0] = (u32) (IMR_PSTIMEOUT | IMR_HSISR_IND_ON_INT | IMR_C2HCMD | IMR_HIGHDOK | IMR_MGNTDOK | IMR_BKDOK | IMR_BEDOK | IMR_VIDOK | IMR_VODOK | IMR_RDU | IMR_ROK | 0); rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0); rtlpci->sys_irq_mask = (u32)(HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN | 0); /* for debug level */ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug; /* for LPS & IPS */ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; 
rtlpriv->cfg->mod_params->sw_crypto = rtlpriv->cfg->mod_params->sw_crypto; rtlpriv->cfg->mod_params->disable_watchdog = rtlpriv->cfg->mod_params->disable_watchdog; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; rtlpriv->psc.reg_max_lps_awakeintvl = 5; /* for ASPM, you can close aspm through * set const_support_pciaspm = 0 */ rtl8723be_init_aspm_vars(hw); if (rtlpriv->psc.reg_fwctrl_lps == 1) rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 2) rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; else if (rtlpriv->psc.reg_fwctrl_lps == 3) rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; /*low power: Disable 32k */ rtlpriv->psc.low_power_enable = false; rtlpriv->rtlhal.earlymode_enable = false; /* for firmware buf */ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.pfirmware) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't alloc buffer for fw.\n"); return 1; } rtlpriv->max_fw_size = 0x8000; pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to request firmware!\n"); return 1; } return 0; } void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->rtlhal.pfirmware) { vfree(rtlpriv->rtlhal.pfirmware); rtlpriv->rtlhal.pfirmware = NULL; } } /* get bt coexist status */ bool rtl8723be_get_btc_status(void) { return true; } static bool is_fw_header(struct rtlwifi_firmware_header *hdr) { return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x5300; } static struct rtl_hal_ops rtl8723be_hal_ops = { .init_sw_vars = rtl8723be_init_sw_vars, .deinit_sw_vars = rtl8723be_deinit_sw_vars, .read_eeprom_info = rtl8723be_read_eeprom_info, .interrupt_recognized = rtl8723be_interrupt_recognized, .hw_init = rtl8723be_hw_init, .hw_disable = rtl8723be_card_disable, 
.hw_suspend = rtl8723be_suspend, .hw_resume = rtl8723be_resume, .enable_interrupt = rtl8723be_enable_interrupt, .disable_interrupt = rtl8723be_disable_interrupt, .set_network_type = rtl8723be_set_network_type, .set_chk_bssid = rtl8723be_set_check_bssid, .set_qos = rtl8723be_set_qos, .set_bcn_reg = rtl8723be_set_beacon_related_registers, .set_bcn_intv = rtl8723be_set_beacon_interval, .update_interrupt_mask = rtl8723be_update_interrupt_mask, .get_hw_reg = rtl8723be_get_hw_reg, .set_hw_reg = rtl8723be_set_hw_reg, .update_rate_tbl = rtl8723be_update_hal_rate_tbl, .fill_tx_desc = rtl8723be_tx_fill_desc, .fill_tx_cmddesc = rtl8723be_tx_fill_cmddesc, .query_rx_desc = rtl8723be_rx_query_desc, .set_channel_access = rtl8723be_update_channel_access_setting, .radio_onoff_checking = rtl8723be_gpio_radio_on_off_checking, .set_bw_mode = rtl8723be_phy_set_bw_mode, .switch_channel = rtl8723be_phy_sw_chnl, .dm_watchdog = rtl8723be_dm_watchdog, .scan_operation_backup = rtl8723be_phy_scan_operation_backup, .set_rf_power_state = rtl8723be_phy_set_rf_power_state, .led_control = rtl8723be_led_control, .set_desc = rtl8723be_set_desc, .get_desc = rtl8723be_get_desc, .is_tx_desc_closed = rtl8723be_is_tx_desc_closed, .tx_polling = rtl8723be_tx_polling, .enable_hw_sec = rtl8723be_enable_hw_security_config, .set_key = rtl8723be_set_key, .init_sw_leds = rtl8723be_init_sw_leds, .get_bbreg = rtl8723_phy_query_bb_reg, .set_bbreg = rtl8723_phy_set_bb_reg, .get_rfreg = rtl8723be_phy_query_rf_reg, .set_rfreg = rtl8723be_phy_set_rf_reg, .fill_h2c_cmd = rtl8723be_fill_h2c_cmd, .get_btc_status = rtl8723be_get_btc_status, .rx_command_packet = rtl8723be_rx_command_packet, .is_fw_header = is_fw_header, }; static struct rtl_mod_params rtl8723be_mod_params = { .sw_crypto = false, .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, .msi_support = false, .disable_watchdog = false, .debug = DBG_EMERG, .ant_sel = 0, }; static struct rtl_hal_cfg rtl8723be_hal_cfg = { .bar_id = 2, .write_readback = true, 
.name = "rtl8723be_pci", .fw_name = "rtlwifi/rtl8723befw.bin", .ops = &rtl8723be_hal_ops, .mod_params = &rtl8723be_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, .maps[SYS_CLK] = REG_SYS_CLKR, .maps[MAC_RCR_AM] = AM, .maps[MAC_RCR_AB] = AB, .maps[MAC_RCR_ACRC32] = ACRC32, .maps[MAC_RCR_ACF] = ACF, .maps[MAC_RCR_AAP] = AAP, .maps[MAC_HIMR] = REG_HIMR, .maps[MAC_HIMRE] = REG_HIMRE, .maps[MAC_HSISR] = REG_HSISR, .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS, .maps[EFUSE_TEST] = REG_EFUSE_TEST, .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, .maps[EFUSE_CLK] = 0, .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL, .maps[EFUSE_PWC_EV12V] = PWC_EV12V, .maps[EFUSE_FEN_ELDR] = FEN_ELDR, .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN, .maps[EFUSE_ANA8M] = ANA8M, .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES, .maps[RWCAM] = REG_CAMCMD, .maps[WCAMI] = REG_CAMWRITE, .maps[RCAMO] = REG_CAMREAD, .maps[CAMDBG] = REG_CAMDBG, .maps[SECR] = REG_SECCFG, .maps[SEC_CAM_NONE] = CAM_NONE, .maps[SEC_CAM_WEP40] = CAM_WEP40, .maps[SEC_CAM_TKIP] = CAM_TKIP, .maps[SEC_CAM_AES] = CAM_AES, .maps[SEC_CAM_WEP104] = CAM_WEP104, .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6, .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5, .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4, .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, /* .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, */ /*need check*/ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4, .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, /* .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/ /* .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/ .maps[RTL_IMR_TXFOVW] = 
IMR_TXFOVW, .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, .maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0, .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, .maps[RTL_IMR_RDU] = IMR_RDU, .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, .maps[RTL_IMR_BDOK] = IMR_BCNDOK0, .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK, .maps[RTL_IMR_TBDER] = IMR_TBDER, .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK, .maps[RTL_IMR_TBDOK] = IMR_TBDOK, .maps[RTL_IMR_BKDOK] = IMR_BKDOK, .maps[RTL_IMR_BEDOK] = IMR_BEDOK, .maps[RTL_IMR_VIDOK] = IMR_VIDOK, .maps[RTL_IMR_VODOK] = IMR_VODOK, .maps[RTL_IMR_ROK] = IMR_ROK, .maps[RTL_IMR_HSISR_IND] = IMR_HSISR_IND_ON_INT, .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER), .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M, .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M, .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M, .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M, .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M, .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M, .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M, .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M, .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M, .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M, .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M, .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M, .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7, .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; static struct pci_device_id rtl8723be_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB723, rtl8723be_hal_cfg)}, {}, }; MODULE_DEVICE_TABLE(pci, rtl8723be_pci_ids); MODULE_AUTHOR("PageHe <page_he@realsil.com.cn>"); MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin"); module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444); module_param_named(debug, rtl8723be_mod_params.debug, int, 0444); module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); 
module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, bool, 0444); module_param_named(ant_sel, rtl8723be_mod_params.ant_sel, int, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); static struct pci_driver rtl8723be_driver = { .name = KBUILD_MODNAME, .id_table = rtl8723be_pci_ids, .probe = rtl_pci_probe, .remove = rtl_pci_disconnect, .driver.pm = &rtlwifi_pm_ops, }; module_pci_driver(rtl8723be_driver);
gpl-2.0
ParanoidAndroid/android_kernel_grouper
drivers/media/video/tegra/ov14810.c
146
59066
/* * ov14810.c - ov14810 sensor driver * * Copyright (c) 2011-2012, NVIDIA, All Rights Reserved. * * Contributors: * Krupal Divvela <kdivvela@nvidia.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/delay.h> #include <linux/fs.h> #include <linux/i2c.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <media/ov14810.h> #include <linux/module.h> #include <linux/moduleparam.h> #define OV14810_I2C_WRITE8(ADDR, OFFSET, VAL) do { \ if (0 != ov14810_write8(ADDR, OFFSET, VAL)) \ return 1; \ } while(0) #define OV14810_FRAME_LENGTH_REG_ADDR0 0x380e #define OV14810_FRAME_LENGTH_REG_ADDR1 0x380f #define OV14810_COARSE_TIME_REG_ADDR0 0x3500 #define OV14810_COARSE_TIME_REG_ADDR1 0x3501 #define OV14810_COARSE_TIME_REG_ADDR2 0x3502 #define OV14810_GAIN_REG_ADDR0 0x350b #define OV14810_GROUP_ACCESS_REG_ADDR 0x3212 static u8 uCProgram[] = { 0x02,0x03,0x6E,0x02,0x19,0x74,0xBB,0x01,0x06,0x89,0x82,0x8A,0x83,0xE0,0x22,0x50 ,0x02,0xE7,0x22,0xBB,0xFE,0x02,0xE3,0x22,0x89,0x82,0x8A,0x83,0xE4,0x93,0x22,0xBB ,0x01,0x0C,0xE5,0x82,0x29,0xF5,0x82,0xE5,0x83,0x3A,0xF5,0x83,0xE0,0x22,0x50,0x06 ,0xE9,0x25,0x82,0xF8,0xE6,0x22,0xBB,0xFE,0x06,0xE9,0x25,0x82,0xF8,0xE2,0x22,0xE5 ,0x82,0x29,0xF5,0x82,0xE5,0x83,0x3A,0xF5,0x83,0xE4,0x93,0x22,0xBB,0x01,0x06,0x89 ,0x82,0x8A,0x83,0xF0,0x22,0x50,0x02,0xF7,0x22,0xBB,0xFE,0x01,0xF3,0x22,0xEF,0x8D ,0xF0,0xA4,0xA8,0xF0,0xCF,0x8C,0xF0,0xA4,0x28,0xCE,0x8D,0xF0,0xA4,0x2E,0xFE,0x22 ,0xBC,0x00,0x0B,0xBE,0x00,0x29,0xEF,0x8D,0xF0,0x84,0xFF,0xAD,0xF0,0x22,0xE4,0xCC ,0xF8,0x75,0xF0,0x08,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xEC,0x33,0xFC,0xEE,0x9D,0xEC ,0x98,0x40,0x05,0xFC,0xEE,0x9D,0xFE,0x0F,0xD5,0xF0,0xE9,0xE4,0xCE,0xFD,0x22,0xED ,0xF8,0xF5,0xF0,0xEE,0x84,0x20,0xD2,0x1C,0xFE,0xAD,0xF0,0x75,0xF0,0x08,0xEF,0x2F 
,0xFF,0xED,0x33,0xFD,0x40,0x07,0x98,0x50,0x06,0xD5,0xF0,0xF2,0x22,0xC3,0x98,0xFD ,0x0F,0xD5,0xF0,0xEA,0x22,0xC2,0xD5,0xEC,0x30,0xE7,0x09,0xB2,0xD5,0xE4,0xC3,0x9D ,0xFD,0xE4,0x9C,0xFC,0xEE,0x30,0xE7,0x15,0xB2,0xD5,0xE4,0xC3,0x9F,0xFF,0xE4,0x9E ,0xFE,0x12,0x00,0x70,0xC3,0xE4,0x9D,0xFD,0xE4,0x9C,0xFC,0x80,0x03,0x12,0x00,0x70 ,0x30,0xD5,0x07,0xC3,0xE4,0x9F,0xFF,0xE4,0x9E,0xFE,0x22,0xC5,0xF0,0xF8,0xA3,0xE0 ,0x28,0xF0,0xC5,0xF0,0xF8,0xE5,0x82,0x15,0x82,0x70,0x02,0x15,0x83,0xE0,0x38,0xF0 ,0x22,0xBB,0x01,0x0A,0x89,0x82,0x8A,0x83,0xE0,0xF5,0xF0,0xA3,0xE0,0x22,0x50,0x06 ,0x87,0xF0,0x09,0xE7,0x19,0x22,0xBB,0xFE,0x07,0xE3,0xF5,0xF0,0x09,0xE3,0x19,0x22 ,0x89,0x82,0x8A,0x83,0xE4,0x93,0xF5,0xF0,0x74,0x01,0x93,0x22,0xBB,0x01,0x10,0xE5 ,0x82,0x29,0xF5,0x82,0xE5,0x83,0x3A,0xF5,0x83,0xE0,0xF5,0xF0,0xA3,0xE0,0x22,0x50 ,0x09,0xE9,0x25,0x82,0xF8,0x86,0xF0,0x08,0xE6,0x22,0xBB,0xFE,0x0A,0xE9,0x25,0x82 ,0xF8,0xE2,0xF5,0xF0,0x08,0xE2,0x22,0xE5,0x83,0x2A,0xF5,0x83,0xE9,0x93,0xF5,0xF0 ,0xA3,0xE9,0x93,0x22,0xE8,0x8F,0xF0,0xA4,0xCC,0x8B,0xF0,0xA4,0x2C,0xFC,0xE9,0x8E ,0xF0,0xA4,0x2C,0xFC,0x8A,0xF0,0xED,0xA4,0x2C,0xFC,0xEA,0x8E,0xF0,0xA4,0xCD,0xA8 ,0xF0,0x8B,0xF0,0xA4,0x2D,0xCC,0x38,0x25,0xF0,0xFD,0xE9,0x8F,0xF0,0xA4,0x2C,0xCD ,0x35,0xF0,0xFC,0xEB,0x8E,0xF0,0xA4,0xFE,0xA9,0xF0,0xEB,0x8F,0xF0,0xA4,0xCF,0xC5 ,0xF0,0x2E,0xCD,0x39,0xFE,0xE4,0x3C,0xFC,0xEA,0xA4,0x2D,0xCE,0x35,0xF0,0xFD,0xE4 ,0x3C,0xFC,0x22,0x75,0xF0,0x08,0x75,0x82,0x00,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xCD ,0x33,0xCD,0xCC,0x33,0xCC,0xC5,0x82,0x33,0xC5,0x82,0x9B,0xED,0x9A,0xEC,0x99,0xE5 ,0x82,0x98,0x40,0x0C,0xF5,0x82,0xEE,0x9B,0xFE,0xED,0x9A,0xFD,0xEC,0x99,0xFC,0x0F ,0xD5,0xF0,0xD6,0xE4,0xCE,0xFB,0xE4,0xCD,0xFA,0xE4,0xCC,0xF9,0xA8,0x82,0x22,0xB8 ,0x00,0xC1,0xB9,0x00,0x59,0xBA,0x00,0x2D,0xEC,0x8B,0xF0,0x84,0xCF,0xCE,0xCD,0xFC ,0xE5,0xF0,0xCB,0xF9,0x78,0x18,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,0xFD,0xEC ,0x33,0xFC,0xEB,0x33,0xFB,0x10,0xD7,0x03,0x99,0x40,0x04,0xEB,0x99,0xFB,0x0F,0xD8 
,0xE5,0xE4,0xF9,0xFA,0x22,0x78,0x18,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,0xFD ,0xEC,0x33,0xFC,0xC9,0x33,0xC9,0x10,0xD7,0x05,0x9B,0xE9,0x9A,0x40,0x07,0xEC,0x9B ,0xFC,0xE9,0x9A,0xF9,0x0F,0xD8,0xE0,0xE4,0xC9,0xFA,0xE4,0xCC,0xFB,0x22,0x75,0xF0 ,0x10,0xEF,0x2F,0xFF,0xEE,0x33,0xFE,0xED,0x33,0xFD,0xCC,0x33,0xCC,0xC8,0x33,0xC8 ,0x10,0xD7,0x07,0x9B,0xEC,0x9A,0xE8,0x99,0x40,0x0A,0xED,0x9B,0xFD,0xEC,0x9A,0xFC ,0xE8,0x99,0xF8,0x0F,0xD5,0xF0,0xDA,0xE4,0xCD,0xFB,0xE4,0xCC,0xFA,0xE4,0xC8,0xF9 ,0x22,0xEB,0x9F,0xF5,0xF0,0xEA,0x9E,0x42,0xF0,0xE9,0x9D,0x42,0xF0,0xE8,0x9C,0x45 ,0xF0,0x22,0xE8,0x60,0x0F,0xEC,0xC3,0x13,0xFC,0xED,0x13,0xFD,0xEE,0x13,0xFE,0xEF ,0x13,0xFF,0xD8,0xF1,0x22,0xE8,0x60,0x0F,0xEF,0xC3,0x33,0xFF,0xEE,0x33,0xFE,0xED ,0x33,0xFD,0xEC,0x33,0xFC,0xD8,0xF1,0x22,0xE0,0xFC,0xA3,0xE0,0xFD,0xA3,0xE0,0xFE ,0xA3,0xE0,0xFF,0x22,0xE0,0xF8,0xA3,0xE0,0xF9,0xA3,0xE0,0xFA,0xA3,0xE0,0xFB,0x22 ,0xEC,0xF0,0xA3,0xED,0xF0,0xA3,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0xE0,0xFB,0xA3,0xE0 ,0xFA,0xA3,0xE0,0xF9,0x22,0xF8,0xE0,0xFB,0xA3,0xA3,0xE0,0xF9,0x25,0xF0,0xF0,0xE5 ,0x82,0x15,0x82,0x70,0x02,0x15,0x83,0xE0,0xFA,0x38,0xF0,0x22,0xEB,0xF0,0xA3,0xEA ,0xF0,0xA3,0xE9,0xF0,0x22,0xD0,0x83,0xD0,0x82,0xF8,0xE4,0x93,0x70,0x12,0x74,0x01 ,0x93,0x70,0x0D,0xA3,0xA3,0x93,0xF8,0x74,0x01,0x93,0xF5,0x82,0x88,0x83,0xE4,0x73 ,0x74,0x02,0x93,0x68,0x60,0xEF,0xA3,0xA3,0xA3,0x80,0xDF,0x8A,0x83,0x89,0x82,0xE4 ,0x73,0xEC,0x8E,0xF0,0xA4,0xCC,0xC5,0xF0,0xCC,0xCD,0xF8,0xEF,0xA4,0xCE,0xC5,0xF0 ,0x2D,0xFD,0xE4,0x3C,0xFC,0xE8,0xA4,0x2E,0xC8,0xC5,0xF0,0x3D,0xFD,0xE4,0x3C,0xFC ,0xEF,0xA4,0xFF,0xE5,0xF0,0x28,0xFE,0xE4,0x3D,0xFD,0xE4,0x3C,0xFC,0x22,0x78,0x7F ,0xE4,0xF6,0xD8,0xFD,0x75,0x81,0x7F,0x02,0x03,0xB5,0x02,0x17,0x7F,0xE4,0x93,0xA3 ,0xF8,0xE4,0x93,0xA3,0x40,0x03,0xF6,0x80,0x01,0xF2,0x08,0xDF,0xF4,0x80,0x29,0xE4 ,0x93,0xA3,0xF8,0x54,0x07,0x24,0x0C,0xC8,0xC3,0x33,0xC4,0x54,0x0F,0x44,0x20,0xC8 ,0x83,0x40,0x04,0xF4,0x56,0x80,0x01,0x46,0xF6,0xDF,0xE4,0x80,0x0B,0x01,0x02,0x04 
,0x08,0x10,0x20,0x40,0x80,0x90,0x03,0xFA,0xE4,0x7E,0x01,0x93,0x60,0xBC,0xA3,0xFF ,0x54,0x3F,0x30,0xE5,0x09,0x54,0x1F,0xFE,0xE4,0x93,0xA3,0x60,0x01,0x0E,0xCF,0x54 ,0xC0,0x25,0xE0,0x60,0xA8,0x40,0xB8,0xE4,0x93,0xA3,0xFA,0xE4,0x93,0xA3,0xF8,0xE4 ,0x93,0xA3,0xC8,0xC5,0x82,0xC8,0xCA,0xC5,0x83,0xCA,0xF0,0xA3,0xC8,0xC5,0x82,0xC8 ,0xCA,0xC5,0x83,0xCA,0xDF,0xE9,0xDE,0xE7,0x80,0xBE,0x41,0x1B,0x5F,0x00,0x60,0x26 ,0x1B,0x23,0x07,0x83,0x07,0xE6,0x08,0x0D,0x1F,0x5D,0x1F,0x98,0x07,0x5F,0x04,0x73 ,0xFC,0x18,0x03,0xE8,0xFF,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x01,0x00,0x0B ,0x00,0x01,0x00,0x04,0x00,0xA0,0x00,0x64,0xC1,0x45,0xC1,0x46,0xC1,0x48,0xC1,0x47 ,0x00,0xE4,0xFD,0xFC,0xC2,0x8D,0xC2,0xA9,0xD2,0x8C,0xED,0x6F,0x70,0x02,0xEC,0x6E ,0x60,0x0C,0x30,0x8D,0xFD,0x0D,0xBD,0x00,0x01,0x0C,0xC2,0x8D,0x80,0xEC,0xC2,0x8C ,0x22,0xC2,0x8C,0xC2,0xA9,0x53,0x89,0xF0,0x43,0x89,0x02,0x75,0x8C,0x9C,0x75,0x8A ,0x9C,0xC2,0x8D,0x22,0xD0,0xE0,0xD0,0xE0,0xE4,0xC0,0xE0,0xC0,0xE0,0x32,0x22,0x02 ,0xFF,0xFC,0x22,0x42,0x30,0x00,0x47,0x30,0x03,0x47,0x30,0x0C,0x41,0x30,0x18,0x42 ,0x30,0x1B,0x41,0x30,0x1E,0x41,0x30,0x20,0x41,0x31,0x06,0x41,0x35,0x03,0x42,0x36 ,0x00,0x42,0x36,0x03,0x42,0x36,0x0A,0x41,0x36,0x0F,0x41,0x36,0x11,0x41,0x36,0x13 ,0x41,0x36,0x15,0x44,0x37,0x02,0x43,0x37,0x07,0x41,0x37,0x0E,0x42,0x37,0x10,0x42 ,0x37,0x14,0x43,0x37,0x17,0x44,0x37,0x1B,0x45,0x37,0x22,0x44,0x38,0x08,0x41,0x38 ,0x19,0x41,0x3B,0x09,0x41,0x3C,0x01,0x41,0x40,0x00,0x44,0x40,0x02,0x41,0x40,0x09 ,0x41,0x40,0x0C,0x41,0x40,0x4F,0x43,0x43,0x00,0x48,0x47,0x00,0x41,0x47,0x09,0x42 ,0x47,0x0B,0x42,0x48,0x00,0x41,0x48,0x03,0x41,0x48,0x06,0x41,0x48,0x37,0x41,0x48 ,0x42,0x41,0x48,0x4A,0x42,0x50,0x00,0x41,0x50,0x1F,0x41,0x50,0x25,0x42,0x50,0x3B ,0x41,0x50,0x41,0x41,0x50,0x43,0x41,0x5B,0x01,0x41,0x5B,0x03,0x42,0x38,0x2C,0x41 ,0x01,0x00,0x41,0x32,0x12,0x41,0x30,0x13,0x41,0x36,0x02,0x41,0x36,0x05,0x42,0x36 ,0x0C,0x41,0x36,0x14,0x44,0x37,0x0A,0x41,0x37,0x0F,0x41,0x37,0x13,0x41,0x37,0x16 
,0x41,0x37,0x21,0x42,0x37,0x27,0x45,0x38,0x03,0x46,0x38,0x0C,0x42,0x38,0x17,0x46 ,0x38,0x1C,0x42,0x38,0x2C,0x41,0x40,0x01,0x42,0x40,0x50,0x41,0x40,0x53,0x41,0x50 ,0x02,0x41,0x50,0x3D,0x41,0x50,0x42,0x41,0x50,0x47,0x41,0x59,0x01,0xEF,0xFF,0xFF ,0x41,0x32,0x12,0x41,0x32,0x12,0x84,0xEE,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x09 ,0x00,0xA7,0xA0,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x04,0xE0,0xF8 ,0xF0,0x01,0x05,0x03,0x2D,0x1F,0x20,0x80,0x2E,0x00,0x24,0x6C,0x84,0x13,0x20,0x3D ,0x28,0xD1,0x73,0x01,0x00,0x04,0x40,0x16,0x5F,0x58,0x80,0x11,0x11,0xA0,0x46,0x40 ,0x2C,0x1A,0x30,0x2E,0x2E,0x70,0x00,0x40,0x00,0xF0,0x80,0x0A,0x80,0x29,0xC5,0x08 ,0x02,0x10,0x40,0x00,0xFF,0xFF,0x00,0xF0,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00 ,0x00,0x00,0x00,0x04,0x0F,0x50,0x80,0x1B,0x01,0x00,0x5F,0x4E,0x00,0x10,0x01,0x10 ,0x0E,0x08,0x03,0x00,0x02,0x02,0x00,0x00,0x1F,0x53,0x11,0x42,0x13,0x0F,0x83,0x00 ,0x04,0x81,0x00,0x57,0xB6,0x08,0x66,0x02,0x07,0x02,0x88,0x01,0xE6,0x0F,0xA0,0x01 ,0xF4,0x22,0x02,0x24,0x4A,0x32,0xAC,0x07,0xA4,0x03,0xA0,0x10,0x10,0x00,0xC0,0x00 ,0xA1,0x00,0x00,0x21,0x00,0x00,0xFF,0xFF,0x10,0xA0,0x02,0x88,0x01,0xE6,0xFF,0xFF ,0xFF,0xFF,0xFF,0x00,0x00,0x09,0x00,0xA7,0xA0,0x08,0x00,0x00,0x00,0x00,0x00,0x00 ,0x00,0x00,0x40,0x04,0xE0,0xF8,0xF0,0x01,0x05,0x03,0x2D,0x1F,0x20,0x80,0x2E,0x00 ,0x24,0x6C,0x84,0x13,0x20,0x3D,0x28,0xD1,0x73,0x01,0x00,0x04,0x40,0x16,0x5F,0x58 ,0x80,0x11,0x11,0xA0,0x46,0x40,0x2C,0x1A,0x30,0x2E,0x2E,0x70,0x00,0x40,0x00,0xF0 ,0x80,0x0A,0x80,0x29,0xC5,0x08,0x02,0x10,0x40,0x00,0xFF,0xFF,0x00,0xF0,0x04,0x01 ,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x0F,0x50,0x80,0x1B,0x01,0x00 ,0x5F,0x4E,0x00,0x10,0x01,0x10,0x0E,0x08,0x03,0x00,0x02,0x02,0x00,0x00,0x1F,0x42 ,0x11,0x42,0x13,0x30,0x80,0x00,0x84,0x01,0x61,0xF6,0x17,0x08,0x66,0x0C,0x0B,0x11 ,0x40,0x0C,0xF0,0x1F,0x00,0x0D,0x08,0x44,0x96,0x24,0x40,0x30,0x0C,0x0C,0xFC,0x00 ,0x08,0x04,0x04,0x02,0xC0,0x00,0xA1,0x00,0x00,0x21,0x00,0x00,0xFF,0xFF,0x10,0xA0 
,0x11,0x40,0x0C,0xF0,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x09,0x00,0xA7,0xA0,0x08 ,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x04,0xE0,0xF8,0xF0,0x01,0x05,0x03 ,0x2D,0x1F,0x20,0x80,0x2E,0x00,0x24,0x6C,0x84,0x13,0x20,0x3D,0x28,0xD1,0x73,0x01 ,0x00,0x04,0x40,0x16,0x5F,0x58,0x80,0x11,0x11,0xA0,0x46,0x40,0x2C,0x1A,0x30,0x2E ,0x2E,0x70,0x00,0x40,0x00,0xF0,0x80,0x0A,0x80,0x29,0xC5,0x08,0x02,0x10,0x40,0x00 ,0xFF,0xFF,0x00,0xF0,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04 ,0x0F,0x50,0x80,0x1B,0x01,0x00,0x5F,0x4E,0x00,0x10,0x01,0x10,0x0E,0x08,0x03,0x00 ,0x02,0x02,0x00,0x00,0x1F,0x42,0x11,0x42,0x13,0x30,0x80,0x00,0x84,0x01,0x61,0xF6 ,0x17,0x08,0x66,0x0C,0x0B,0x02,0x88,0x01,0xE6,0x0A,0x40,0x01,0xFC,0x44,0x96,0x24 ,0x40,0x35,0x85,0x01,0xF2,0x07,0x6C,0x04,0x04,0x02,0xC0,0x00,0xA1,0x00,0x00,0x31 ,0x00,0x00,0xFF,0xFF,0x10,0xA0,0x02,0x88,0x01,0xE6,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0x00,0x05,0x6D,0x07,0x18,0x06,0x13,0xFF,0x05,0x06,0xB9,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0x01,0x0D,0x4C,0x02,0x08,0xB9,0x03,0x0F,0xF5,0x05,0x0D,0xC7,0x0A ,0x14,0x6D,0x0B,0x14,0xB6,0x0C,0x0F,0xD2,0x0D,0x14,0xED,0x0F,0x15,0x59,0x10,0x15 ,0x5D,0x14,0x15,0x70,0x18,0x11,0xAF,0x19,0x12,0x79,0x1F,0x15,0x61,0x20,0x17,0x29 ,0x22,0x14,0x06,0x23,0x14,0x2D,0x29,0x18,0xAF,0x2A,0x10,0x9F,0x39,0x17,0x9F,0x3B ,0x0E,0xA6,0x3C,0x17,0xEB,0x4A,0x18,0x92,0x4B,0x14,0x88,0x50,0x17,0x42,0x57,0x15 ,0x61,0x59,0x11,0x3D,0x60,0x17,0x65,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x22,0xA2,0xC1,0xCC,0xC1,0xCB,0x25,0x11,0x26,0x88 ,0x60,0x00,0x64,0x45,0x30,0x03,0x41,0x01,0x00,0x60,0x00,0x64,0x41,0x01,0x00,0xEF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x0B,0x02,0xF7 ,0x00,0x00,0x00,0x00,0x00,0x04,0x14,0x28,0x36,0x64,0x04,0x00,0x09,0x00,0x9D,0xC3 ,0x0D,0x01,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF 
,0x90,0x1B,0x23,0x12,0x18,0x70,0x90,0x1B,0x68,0x12,0x03,0x0C,0x90,0x1B,0x68,0xF1 ,0xE3,0xFF,0xF4,0x60,0x34,0xEF,0x14,0xB5,0xC5,0x23,0x75,0xC5,0xFE,0xE4,0xFD,0x7F ,0x8E,0x11,0x89,0x90,0x1B,0x68,0x12,0x02,0xEC,0x90,0x00,0x01,0x12,0x01,0x3C,0xAA ,0xF0,0xF9,0x12,0x03,0x3B,0x7D,0x04,0x7F,0x8E,0x11,0x89,0x80,0x0F,0x90,0x1B,0x69 ,0xE4,0x75,0xF0,0x03,0x12,0x00,0xFB,0x80,0xC3,0x75,0xC1,0x06,0x75,0xC5,0xFF,0x22 ,0xFD,0x7F,0x20,0x11,0x89,0xE4,0xFD,0x7F,0x12,0x8F,0xFA,0x8D,0xFB,0xE4,0xF5,0xF8 ,0x90,0x1B,0x35,0xE0,0xFE,0xA3,0xE0,0xFF,0x43,0xF8,0x01,0xEF,0x4E,0x70,0x08,0xE5 ,0xF9,0x64,0x0F,0x60,0x13,0x80,0xF8,0xE5,0xF9,0x64,0x0F,0x60,0x0B,0xEF,0x1F,0xAC ,0x06,0x70,0x01,0x1E,0x4C,0x70,0xF0,0x22,0x22,0xAF,0xC1,0xEF,0x14,0x60,0x15,0x24 ,0xFE,0x60,0x3E,0x24,0x03,0x70,0x63,0xE5,0xC2,0xB4,0x01,0x06,0x12,0x17,0x92,0x02 ,0x04,0x64,0x80,0x56,0xE5,0xC2,0x64,0x01,0x70,0x25,0x31,0x35,0xE4,0xFD,0x7F,0x20 ,0x11,0x89,0x7D,0x03,0x7F,0x12,0x11,0x89,0x7D,0x07,0x7F,0x14,0x11,0x89,0x7D,0x03 ,0x7F,0x13,0x11,0x89,0x75,0xC5,0xFF,0xE4,0xF5,0xC1,0xFD,0x7F,0x18,0x80,0x8A,0x80 ,0x29,0xE5,0xC2,0x70,0x12,0x31,0x35,0x7F,0x20,0xB1,0x22,0x90,0x1B,0x5F,0xEF,0xF0 ,0xE4,0x11,0x80,0x7D,0x03,0x80,0x0D,0xE5,0xC2,0xB4,0x01,0x0C,0x90,0x1B,0x5F,0xE0 ,0x11,0x80,0xE4,0xFD,0x7F,0x13,0xA1,0xAD,0x80,0x00,0x75,0xC1,0x01,0x22,0xE4,0xFD ,0xFF,0x7E,0x01,0x80,0x0E,0x31,0x2E,0x7F,0x0A,0x7E,0x00,0x02,0x04,0x31,0xFD,0x7F ,0x02,0x7E,0x35,0xAB,0x07,0xAA,0x06,0xEA,0xF5,0xFA,0xEB,0xF5,0xFB,0x8D,0xFC,0x75 ,0xF8,0x10,0x01,0x90,0xE4,0xFD,0x7F,0x12,0x7E,0x32,0x31,0x43,0xAD,0x43,0xAC,0x42 ,0x7F,0x00,0x7E,0x35,0x31,0x84,0xE5,0x45,0x31,0x3E,0xAD,0x49,0xAC,0x48,0x7F,0x0A ,0x7E,0x35,0x31,0x84,0x7D,0x10,0x7F,0x12,0x7E,0x32,0x31,0x43,0x7D,0xA0,0x7F,0x12 ,0x7E,0x32,0x80,0xBF,0x8E,0x31,0x8F,0x32,0x8C,0x33,0x8D,0x34,0xE5,0x33,0xFD,0x31 ,0x43,0xE5,0x32,0x24,0x01,0xFF,0xE4,0x35,0x31,0xFE,0xAD,0x34,0x80,0xA5,0x8E,0x2F ,0x8F,0x30,0x90,0x1B,0x10,0xE0,0xFF,0x90,0x1B,0x0F,0xE0,0xFD,0x12,0x18,0x35,0xAA 
,0x06,0xA9,0x07,0x7B,0xFF,0x90,0x1B,0x4E,0x12,0x03,0x0C,0x90,0x00,0x80,0x12,0x00 ,0x1F,0xFE,0x90,0x00,0x81,0x12,0x00,0x1F,0x7C,0x00,0x90,0x1B,0x5B,0xB1,0x18,0x90 ,0x00,0x82,0x12,0x00,0x1F,0xFE,0x90,0x00,0x83,0x12,0x00,0x1F,0x90,0x1B,0x5D,0xB1 ,0x18,0xB1,0x05,0x7C,0x41,0x7D,0x1A,0x12,0x03,0x41,0x7B,0x00,0x7A,0x71,0x79,0x02 ,0x78,0x00,0x12,0x01,0xFF,0x90,0x1B,0x51,0xEE,0xF0,0xA3,0xEF,0xF0,0xAE,0x2F,0xAF ,0x30,0x7C,0x27,0x7D,0x10,0x12,0x03,0x41,0xC0,0x06,0xC0,0x07,0x90,0x1B,0x51,0xE0 ,0xFE,0xA3,0xE0,0xFB,0xAA,0x06,0xE4,0xF9,0xF8,0xD0,0x07,0xD0,0x06,0x12,0x01,0xFF ,0x90,0x1B,0x53,0x12,0x02,0xE0,0xB1,0x05,0xE4,0xFC,0xFD,0x90,0x1B,0x53,0x12,0x02 ,0xD4,0x12,0x01,0x74,0xE4,0x7B,0x20,0xFA,0xF9,0xF8,0x12,0x01,0x74,0xE4,0x7B,0xE0 ,0x7A,0x2E,0xF9,0xF8,0x12,0x01,0xFF,0x90,0x1B,0x57,0x12,0x02,0xE0,0x90,0x1B,0x57 ,0x12,0x02,0xC8,0x78,0x08,0x12,0x02,0xA2,0x90,0x1B,0x11,0xEE,0xF0,0xA3,0xEF,0xF0 ,0x90,0x1B,0x57,0x12,0x02,0xC8,0xE4,0x90,0x1B,0x13,0xEF,0xF0,0x7F,0x18,0x7E,0x38 ,0xB1,0x3A,0xEF,0x30,0xE0,0x0D,0x51,0xD8,0x78,0x01,0x12,0x02,0xA2,0x90,0x1B,0x53 ,0x12,0x02,0xE0,0x51,0xD8,0xEF,0x54,0xF0,0xFF,0xE4,0xF5,0x44,0x8F,0x45,0x51,0xD8 ,0x78,0x08,0x12,0x02,0xA2,0xE4,0x8E,0x42,0x8F,0x43,0x51,0xD8,0x78,0x04,0x12,0x02 ,0xA2,0xF1,0x35,0x90,0x1B,0x5E,0xE0,0x24,0xF8,0xFF,0x90,0x1B,0x5D,0xE0,0x34,0xFF ,0xFE,0xE4,0xFC,0xFD,0xD3,0x12,0x02,0x91,0x40,0x10,0x51,0xD8,0x78,0x04,0x12,0x02 ,0xA2,0xEF,0x24,0x08,0xFD,0xE4,0x3E,0xFC,0x80,0x08,0x90,0x1B,0x5D,0xE0,0xFC,0xA3 ,0xE0,0xFD,0x7F,0x0E,0x7E,0x38,0x21,0x84,0x90,0x1B,0x53,0x02,0x02,0xC8,0x90,0x1B ,0x2F,0xE0,0xFE,0xA3,0xE0,0xFF,0x90,0x1B,0x49,0x12,0x02,0xEC,0xAC,0x02,0xAD,0x01 ,0x8E,0x33,0x8F,0x34,0x8C,0x35,0x8D,0x36,0x8F,0x82,0x8E,0x83,0xE4,0x93,0x91,0xFC ,0x70,0x02,0x05,0x33,0x90,0x1B,0x4C,0xE0,0xFF,0xF4,0x70,0x02,0x81,0x19,0xEF,0x54 ,0xE0,0xFB,0x70,0x24,0xE0,0x54,0x1F,0xB1,0x0E,0x05,0x34,0xE5,0x34,0x70,0x02,0x05 ,0x33,0xF5,0x82,0x85,0x33,0x83,0xE4,0x93,0x90,0x1B,0x4C,0xF0,0x60,0xEB,0x05,0x34 
,0xE5,0x34,0x70,0x02,0x05,0x33,0x80,0xCC,0x90,0x1B,0x4C,0xE0,0x54,0x1F,0xA3,0xF0 ,0x90,0x1B,0x4C,0xEB,0xF0,0x64,0x20,0x60,0x0A,0xE0,0xFF,0x64,0x80,0x60,0x04,0xEF ,0xB4,0xC0,0x15,0x91,0x2A,0xFF,0x90,0x1B,0x4E,0xE4,0xF0,0xA3,0xEF,0xF0,0x05,0x34 ,0xE5,0x34,0x70,0x02,0x05,0x33,0x80,0x19,0x91,0x2A,0xFF,0x74,0x01,0x93,0x90,0x1B ,0x4E,0xCF,0xF0,0xA3,0xEF,0xF0,0x74,0x02,0x25,0x34,0xF5,0x34,0xE4,0x35,0x33,0xF5 ,0x33,0x90,0x1B,0x4C,0xE0,0xB4,0x60,0x08,0x91,0x33,0xFF,0x12,0x04,0x31,0x80,0x7F ,0x90,0x1B,0x4C,0xE0,0xB4,0xE0,0x21,0xA3,0xE0,0xB4,0x02,0x16,0xAA,0x35,0xA9,0x36 ,0x7B,0xFF,0x12,0x01,0x11,0x85,0xF0,0x35,0xF5,0x36,0x91,0x33,0x8E,0x33,0xF5,0x34 ,0x80,0x5D,0x74,0x02,0xB1,0x0E,0x80,0x57,0x90,0x1B,0x4D,0xE0,0xD3,0x94,0x00,0x40 ,0x4E,0x90,0x1B,0x4C,0xE0,0xB4,0xC0,0x07,0x91,0x1A,0x12,0x18,0x7A,0x80,0x26,0x90 ,0x1B,0x4C,0xE0,0xB4,0x80,0x09,0x91,0x1A,0x7B,0x01,0x12,0x15,0x27,0x80,0x16,0x90 ,0x1B,0x4C,0xE0,0x90,0x1B,0x4E,0xB4,0x40,0x08,0xE0,0xFE,0x91,0x1D,0x31,0x43,0x80 ,0x04,0x91,0x1D,0x11,0x89,0x05,0x36,0xE5,0x36,0x70,0x02,0x05,0x35,0x90,0x1B,0x4E ,0xE4,0x75,0xF0,0x01,0x12,0x00,0xFB,0x90,0x1B,0x4D,0xE0,0x14,0xF0,0x80,0xA9,0x91 ,0x2A,0x91,0xFC,0x70,0x02,0x05,0x33,0x61,0x04,0x22,0x90,0x1B,0x4E,0xA3,0xE0,0xFF ,0x85,0x36,0x82,0x85,0x35,0x83,0xE4,0x93,0xFD,0x22,0x85,0x34,0x82,0x85,0x33,0x83 ,0xE4,0x93,0x22,0x90,0x1B,0x4E,0xE0,0xFE,0xA3,0xE0,0x22,0xE4,0x90,0x1B,0x4E,0xF0 ,0xA3,0xF0,0x30,0x47,0x05,0x90,0x1B,0x01,0x80,0x03,0x90,0x1B,0x45,0xE0,0xFA,0xA3 ,0xE0,0xFB,0x90,0x1B,0x44,0xE0,0x2B,0xFE,0x90,0x1B,0x43,0xE0,0x3A,0x90,0x1B,0x50 ,0xF0,0xA3,0xCE,0xF0,0xA3,0xEA,0xF0,0xA3,0xEB,0xF0,0x30,0x48,0x6B,0xD2,0x45,0x30 ,0x47,0x53,0x91,0xF3,0x90,0x1B,0x43,0xE0,0xFE,0xA3,0xE0,0xFF,0xD3,0x94,0x00,0xEE ,0x94,0x00,0x40,0x0A,0xEF,0x24,0x06,0xFF,0xE4,0x3E,0xFE,0x12,0x04,0x31,0x91,0x33 ,0xFF,0x90,0x1B,0x52,0xD1,0x25,0x40,0x54,0xB1,0x2C,0xC3,0x90,0x1B,0x38,0xE0,0x9F ,0x90,0x1B,0x37,0xE0,0x9E,0x50,0x45,0x90,0x1B,0x39,0xA3,0xE0,0xFF,0xBF,0xFF,0x02 
,0x80,0x22,0x90,0x1B,0x37,0xF1,0xBE,0x90,0x1B,0x4D,0xE0,0x9F,0xFF,0x90,0x1B,0x4C ,0xE0,0x9E,0xFE,0x80,0x24,0x91,0xF3,0x91,0x33,0xFF,0xA3,0xD1,0x25,0x40,0x1D,0xB1 ,0x2C,0x4E,0x60,0x18,0xF1,0xB2,0x80,0x11,0x91,0xF3,0x90,0x1B,0x50,0xE0,0xFE,0xA3 ,0xE0,0xFF,0x7C,0x00,0x7D,0x05,0x12,0x00,0x5E,0x12,0x04,0x31,0x31,0x37,0x53,0xCB ,0xFB,0x21,0x2E,0xF1,0xAA,0x30,0x48,0x03,0x43,0xCB,0x04,0x22,0x90,0x1B,0x4C,0xF0 ,0x05,0x34,0xE5,0x34,0x22,0x90,0x1B,0x5B,0xE0,0xFE,0xA3,0xE0,0xFF,0x22,0x25,0x36 ,0xF5,0x36,0xE4,0x35,0x35,0xF5,0x35,0x22,0x24,0x00,0xFF,0xEC,0x3E,0xF0,0xA3,0xEF ,0xF0,0x22,0x8F,0xFA,0x75,0xF8,0x02,0x11,0x90,0xAF,0xFB,0x22,0xED,0x9F,0xFF,0xEC ,0x9E,0xFE,0x90,0x1B,0x4C,0xF0,0xA3,0xEF,0xF0,0x22,0xAD,0x07,0xAC,0x06,0xEC,0xF5 ,0xFA,0xED,0xF5,0xFB,0x75,0xF8,0x12,0x11,0x90,0xAF,0xFB,0x22,0x12,0x04,0x51,0x90 ,0x1B,0x25,0xF1,0xC5,0x90,0x1F,0xF8,0xE4,0x93,0xF4,0x70,0x4D,0x7F,0x05,0x7E,0x3D ,0xB1,0x3A,0xEF,0x60,0x44,0x75,0x2F,0x3D,0x75,0x30,0x0E,0x75,0x31,0x1F,0x75,0x32 ,0x83,0xE4,0x90,0x1B,0x49,0xF0,0xB1,0xB3,0x70,0x02,0x05,0x31,0x05,0x30,0xE5,0x30 ,0x70,0x02,0x05,0x2F,0xD1,0x11,0x94,0x04,0x40,0xEC,0x75,0x31,0x1F,0x75,0x32,0xB8 ,0xE4,0x90,0x1B,0x49,0xF0,0xB1,0xB3,0x70,0x02,0x05,0x31,0x05,0x30,0xE5,0x30,0x70 ,0x02,0x05,0x2F,0xD1,0x11,0x94,0x1E,0x40,0xEC,0x7D,0x04,0x7F,0x8E,0x11,0x89,0xE4 ,0xF5,0xC1,0x22,0xAF,0x30,0xAE,0x2F,0xB1,0x3A,0xEF,0xF4,0x85,0x32,0x82,0x85,0x31 ,0x83,0xF0,0x05,0x32,0xE5,0x32,0x22,0x7F,0x03,0x7E,0x30,0xB1,0x3A,0x8F,0x2F,0x7F ,0x06,0x7E,0x30,0xB1,0x3A,0x8F,0x30,0xAF,0xC1,0xEF,0x24,0xFC,0x60,0x12,0x24,0x02 ,0x70,0x1C,0x53,0x2F,0xFC,0x43,0x2F,0x01,0x53,0x30,0xDF,0x43,0x30,0x20,0x80,0x09 ,0x53,0x2F,0xFC,0x43,0x2F,0x02,0x53,0x30,0xDF,0xE4,0xF5,0xC1,0x80,0x03,0x75,0xC1 ,0x01,0xAD,0x2F,0x7F,0x03,0x7E,0x30,0x31,0x43,0xAD,0x30,0x7F,0x06,0x7E,0x30,0x21 ,0x43,0x90,0x1B,0x49,0xE0,0x04,0xF0,0xE0,0xC3,0x22,0x90,0x1B,0x41,0xE0,0xFE,0xA3 ,0xE0,0xFF,0x90,0x1B,0x3F,0xE0,0xFC,0xA3,0xE0,0xFD,0xD3,0x9F,0xEC,0x9E,0x22,0x30 
,0x47,0x22,0xD1,0x1A,0x40,0x11,0xED,0x9F,0xFF,0xEC,0x9E,0xFE,0x90,0x1B,0x02,0xE0 ,0x90,0x1B,0x01,0xF1,0x15,0x80,0x30,0xF1,0x0C,0x90,0x1B,0x02,0xE0,0x2F,0xFF,0x90 ,0x1B,0x01,0x80,0x23,0x30,0x48,0x27,0xD1,0x1A,0x40,0x11,0xED,0x9F,0xFF,0xEC,0x9E ,0xFE,0x90,0x1B,0x46,0xE0,0x90,0x1B,0x45,0xF1,0x15,0x80,0x0B,0xF1,0x0C,0x90,0x1B ,0x46,0xE0,0x2F,0xFF,0x90,0x1B,0x45,0xE0,0x3E,0x90,0x1B,0x4C,0x80,0x0B,0x90,0x1B ,0x45,0xE0,0xFF,0xA3,0xE0,0x90,0x1B,0x4C,0xCF,0xF0,0xA3,0xEF,0xF0,0xF1,0xB2,0x31 ,0x9E,0x30,0x47,0x05,0x90,0x1B,0x03,0x80,0x03,0x90,0x1B,0x47,0xE0,0xFE,0xA3,0xE0 ,0xFF,0x12,0x15,0xB2,0x21,0x54,0x7D,0x01,0xAF,0xC1,0x12,0x18,0x35,0x12,0x19,0xB7 ,0x70,0x06,0xE9,0xF4,0x70,0x02,0xEA,0xF4,0x70,0x04,0x75,0xC1,0x01,0x22,0xD2,0x46 ,0x12,0x19,0x3B,0x90,0x1B,0x0F,0xE0,0x70,0x06,0x31,0x2E,0xF1,0x5C,0x51,0xDE,0x90 ,0x1B,0x0F,0x74,0x01,0xF0,0xA3,0xE5,0xC1,0xF0,0x30,0x47,0x05,0x90,0x1B,0x01,0x80 ,0x03,0x90,0x1B,0x45,0xE0,0xFF,0xA3,0xE0,0x90,0x1B,0x17,0xCF,0xF0,0xA3,0xEF,0xF0 ,0xD1,0x2F,0xE4,0x90,0x1B,0x19,0xF0,0x90,0x1B,0x14,0xE0,0xFF,0xA3,0xE0,0x90,0x1B ,0x1A,0xCF,0xF0,0xA3,0xEF,0xF0,0x91,0x3B,0xE4,0xF5,0xC1,0x22,0x90,0x1B,0x3D,0xE0 ,0xFE,0xA3,0xE0,0xFF,0x22,0x2F,0xFF,0xE0,0x3E,0xFE,0x90,0x1B,0x3E,0xE0,0x2F,0xFF ,0x90,0x1B,0x3D,0x22,0xE4,0x33,0x24,0x01,0xFF,0xE4,0x33,0xFE,0xE4,0x33,0xFD,0xE4 ,0x33,0xFC,0x12,0x01,0x74,0xA8,0x04,0xA9,0x05,0xAA,0x06,0xAB,0x07,0x22,0xAB,0x07 ,0xAA,0x06,0xB1,0x3A,0x90,0x1B,0x4C,0xEF,0xF0,0xEB,0x24,0x01,0xFF,0xE4,0x3A,0xFE ,0xB1,0x3A,0xEF,0xFD,0x90,0x1B,0x4C,0xE0,0xFE,0xED,0xFF,0x22,0x7F,0x00,0x7E,0x35 ,0xF1,0x3E,0x90,0x1B,0x1D,0xEE,0xF0,0xA3,0xEF,0xF0,0x7F,0x02,0x7E,0x35,0xB1,0x3A ,0x90,0x1B,0x1F,0xE4,0xF0,0xA3,0xEF,0xF0,0x7F,0x0A,0x7E,0x35,0xF1,0x3E,0x90,0x1B ,0x21,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0x90,0x1B,0x1D,0xE0,0xFC,0xA3,0xE0,0xFD,0x7F ,0x00,0x7E,0x35,0x31,0x84,0x90,0x1B,0x1F,0xA3,0xE0,0x31,0x3E,0x90,0x1B,0x21,0xE0 ,0xFC,0xA3,0xE0,0xFD,0x7F,0x0A,0x7E,0x35,0x21,0x84,0x7D,0x01,0x7F,0x00,0x7E,0x01 
,0x21,0x43,0x90,0x1B,0x4C,0xE0,0xFE,0xA3,0xE0,0xFF,0x22,0x90,0x1B,0x3B,0xE0,0xFE ,0xA3,0xE0,0xFF,0xC3,0x22,0xE0,0xFE,0xA3,0xE0,0xFF,0xA3,0xE0,0xFC,0xA3,0xE0,0xFD ,0x41,0xF0,0xE5,0xC1,0xC3,0x94,0xC1,0x50,0x06,0xAD,0xC4,0xAF,0xC1,0xA1,0xAD,0x75 ,0xC1,0x01,0x22,0x12,0x02,0xEC,0x02,0x00,0x06,0x7F,0x0A,0x7E,0x30,0xE1,0x3E,0x7F ,0x2A,0x7E,0x30,0xA1,0x3A,0x90,0x1B,0x4B,0xE5,0xC1,0xF0,0xE4,0xF5,0xC1,0xE0,0x12 ,0x03,0x15,0x10,0x21,0x00,0x10,0x2B,0x01,0x10,0x40,0x02,0x10,0x52,0x03,0x10,0x5C ,0x04,0x10,0x63,0x05,0x10,0x6A,0x07,0x10,0x7B,0x08,0x10,0x7F,0x09,0x00,0x00,0x10 ,0x88,0x75,0xC2,0x4B,0xE4,0xF5,0xC3,0x75,0xC4,0x01,0x22,0x12,0x0F,0xE9,0x11,0x8C ,0x12,0x0F,0xEF,0x8F,0xC2,0x90,0x1B,0x49,0x11,0x98,0xF5,0xC3,0xED,0xF5,0xC4,0x22 ,0x75,0xC2,0x08,0xE4,0xFF,0x12,0x0D,0x22,0x8F,0xC3,0x7F,0x01,0x12,0x0D,0x22,0x8F ,0xC4,0x22,0xE4,0xF5,0xC2,0x75,0xC3,0x2C,0x75,0xC4,0x38,0x22,0x75,0xC2,0x07,0xE4 ,0xF5,0xC3,0x22,0x75,0xC2,0x5D,0x75,0xC3,0xC0,0x22,0x7F,0x02,0x71,0x44,0x11,0x8C ,0x90,0x1B,0x49,0x11,0x98,0xF5,0xC2,0xED,0xF5,0xC3,0x22,0x75,0xC2,0x0A,0x22,0x31 ,0x36,0x8F,0xC2,0x31,0x29,0x8F,0xC3,0x22,0x75,0xC1,0x01,0x22,0x90,0x1B,0x49,0xEE ,0xF0,0xA3,0xEF,0xF0,0x22,0x90,0x1B,0x11,0xE0,0xFC,0xA3,0xE0,0xFD,0xEC,0x22,0xE4 ,0x90,0x1B,0x49,0xF0,0xA3,0xF0,0xA3,0xF0,0xA3,0xF0,0xAF,0xC1,0xEF,0x12,0x03,0x15 ,0x10,0xC9,0x00,0x10,0xD2,0x01,0x10,0xD7,0x05,0x10,0xDC,0x10,0x10,0xED,0x11,0x10 ,0xF6,0x15,0x11,0x00,0x20,0x00,0x00,0x11,0x08,0x11,0x95,0x31,0x21,0x90,0x1B,0x13 ,0x80,0x14,0x90,0x1B,0x14,0x80,0x19,0x90,0x1B,0x16,0x80,0x1D,0x90,0x1B,0x17,0x11 ,0x98,0x31,0x21,0x90,0x1B,0x19,0xE0,0x90,0x1B,0x4C,0xF0,0x80,0x21,0x90,0x1B,0x1A ,0x11,0x98,0x31,0x21,0x80,0x18,0x90,0x1B,0x1C,0xE0,0x90,0x1B,0x4A,0xF0,0x80,0x0E ,0x90,0x1B,0x49,0x74,0x04,0xF0,0x80,0x06,0x90,0x1B,0x49,0x74,0x01,0xF0,0x90,0x1B ,0x49,0xE0,0xF5,0xC1,0xA3,0xE0,0xF5,0xC2,0xA3,0xE0,0xF5,0xC3,0xA3,0xE0,0xF5,0xC4 ,0x22,0x90,0x1B,0x4A,0xF0,0xED,0xA3,0xF0,0x22,0x7F,0x01,0x8F,0xFA,0x75,0xF8,0x22 
,0x12,0x08,0x90,0xAF,0xFB,0x22,0xE4,0xFF,0x31,0x2B,0x7E,0x00,0x22,0xE4,0xF5,0x2F ,0xAF,0xC1,0xEF,0x14,0x60,0x11,0x14,0x60,0x18,0x14,0x60,0x15,0x14,0x60,0x17,0x24 ,0x04,0x70,0x1F,0x31,0x8B,0x80,0x1E,0x90,0x1B,0x64,0x11,0x98,0xFF,0x8F,0xC3,0x80 ,0x0C,0x75,0x2F,0x04,0x80,0x0F,0x90,0x1B,0x66,0x11,0x98,0xF5,0xC3,0xED,0xF5,0xC4 ,0x80,0x03,0x75,0x2F,0x01,0x85,0x2F,0xC1,0x22,0xE0,0x54,0x1F,0xFC,0xA3,0xE0,0xFD ,0x7F,0x50,0x51,0x62,0x7F,0xE8,0x7E,0x03,0x12,0x04,0x31,0x71,0x3C,0xC3,0x33,0xCE ,0x33,0xCE,0xD8,0xF9,0xFF,0x7C,0x00,0x7D,0x08,0x12,0x00,0xC5,0x90,0x1B,0x64,0xEE ,0xF0,0xA3,0xEF,0xF0,0x71,0xAA,0x90,0x1B,0x66,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0x90 ,0x1B,0x2B,0xE0,0xFE,0xA3,0xE0,0x8E,0x2F,0xF5,0x30,0x8E,0x31,0xF5,0x32,0x90,0x1B ,0x29,0x12,0x0F,0xC5,0xAF,0xC1,0xEF,0x24,0xFD,0x60,0x0B,0x04,0x70,0x10,0x74,0x36 ,0x51,0x58,0x74,0x38,0x80,0x0E,0x74,0x3A,0x51,0x58,0x74,0x3C,0x80,0x06,0x74,0x32 ,0x51,0x58,0x74,0x34,0x25,0x32,0xF5,0x32,0xE4,0x35,0x31,0xF5,0x31,0xE5,0xC1,0x60 ,0x3A,0x85,0x30,0x82,0x85,0x2F,0x83,0xE4,0x93,0xFE,0x05,0x30,0xE5,0x30,0x70,0x02 ,0x05,0x2F,0xF5,0x82,0x85,0x2F,0x83,0xE4,0x93,0x90,0x1B,0x31,0x51,0x70,0x85,0x32 ,0x82,0x85,0x31,0x83,0xE4,0x93,0xFE,0x05,0x32,0xE5,0x32,0x70,0x02,0x05,0x31,0xF5 ,0x82,0x85,0x31,0x83,0xE4,0x93,0x90,0x1B,0x33,0x51,0x70,0x90,0x1B,0x31,0x31,0x79 ,0x90,0x1B,0x33,0x31,0x79,0x90,0x1B,0x31,0xE0,0xF5,0x3E,0xA3,0xE0,0xF5,0x3F,0xA3 ,0xE0,0xF5,0x40,0xA3,0xE0,0xF5,0x41,0xE4,0xF5,0x3A,0xF5,0x3B,0x75,0xC1,0x01,0x75 ,0xC2,0x44,0x51,0x79,0xE4,0xF5,0xC1,0x22,0x25,0x30,0xF5,0x30,0xE4,0x35,0x2F,0xF5 ,0x2F,0x22,0x8F,0xFA,0xED,0xF5,0xFB,0xEC,0xF5,0xFC,0x75,0xF8,0x04,0x02,0x08,0x90 ,0xFD,0xED,0xFF,0xEE,0xF0,0xA3,0xEF,0xF0,0x22,0xE4,0xF5,0x33,0xAF,0xC1,0xEF,0xAD ,0xC2,0xF5,0x3A,0xED,0xF5,0x3B,0xD3,0x94,0x00,0xE5,0x3A,0x94,0x02,0x40,0x09,0x75 ,0x33,0x01,0x75,0x3A,0x02,0x75,0x3B,0x00,0xC3,0xE5,0x3A,0x94,0x00,0x50,0x08,0x75 ,0x33,0x01,0xE4,0xF5,0x3A,0xF5,0x3B,0xAF,0x3B,0xAE,0x3A,0x71,0x6A,0x8E,0x3C,0x8F 
,0x3D,0x53,0xCB,0xF7,0xE5,0x3C,0x54,0x1F,0xFC,0xAD,0x3D,0x7F,0x50,0x51,0x62,0x75 ,0x34,0x00,0x75,0x35,0xFA,0x7F,0x05,0x7E,0x00,0x12,0x04,0x31,0x85,0x3C,0x36,0x85 ,0x3D,0x37,0x71,0x3C,0xC3,0x33,0xCE,0x33,0xCE,0xD8,0xF9,0x78,0x03,0xCE,0xA2,0xE7 ,0x13,0xCE,0x13,0xD8,0xF8,0xFF,0xC3,0xE5,0x37,0x9F,0xF5,0x37,0xE5,0x36,0x9E,0xF5 ,0x36,0xC3,0x64,0x80,0x94,0x80,0x50,0x0F,0xAE,0x36,0xAF,0x37,0x7C,0xFF,0x7D,0xFF ,0x12,0x00,0x5E,0x8E,0x36,0x8F,0x37,0xE5,0x35,0x15,0x35,0x70,0x02,0x15,0x34,0xC3 ,0xE5,0x37,0x94,0x14,0xE5,0x36,0x64,0x80,0x94,0x80,0x40,0x06,0xE5,0x35,0x45,0x34 ,0x70,0xA3,0xE5,0x35,0x45,0x34,0x70,0x03,0x75,0x33,0x04,0x71,0x55,0xE4,0xF5,0xC1 ,0x85,0x33,0xC2,0xE5,0x34,0xF5,0xC3,0xE5,0x35,0xF5,0xC4,0x22,0x7F,0x5A,0x71,0x44 ,0xEF,0x78,0x03,0x22,0x8F,0xFA,0x75,0xF8,0x06,0x12,0x08,0x90,0xAF,0xFC,0xEF,0xFE ,0xAD,0xFB,0xED,0xFF,0x22,0xE5,0x3C,0x54,0x1F,0xFE,0xE4,0x25,0x3D,0xFD,0xEE,0x34 ,0x20,0xFC,0x7F,0x50,0x51,0x62,0x43,0xCB,0x08,0x22,0x8E,0x38,0x8F,0x39,0x71,0xFB ,0x78,0x02,0x71,0xF2,0x12,0x0F,0x35,0xAE,0x38,0xAF,0x39,0xE4,0xFC,0xFD,0x12,0x01 ,0x74,0x78,0x09,0x12,0x02,0xA2,0xAD,0x07,0xAC,0x06,0xE5,0x41,0xAE,0x40,0x78,0x02 ,0xC3,0x33,0xCE,0x33,0xCE,0xD8,0xF9,0xC3,0x9D,0xFF,0xEE,0x9C,0xFE,0xEF,0x78,0x02 ,0xCE,0xA2,0xE7,0x13,0xCE,0x13,0xD8,0xF8,0xFF,0x22,0xD3,0xEF,0x95,0x41,0xE5,0x40 ,0x64,0x80,0xF8,0xEE,0x64,0x80,0x98,0x40,0x04,0xE4,0xFE,0xFF,0x22,0xC3,0xEF,0x95 ,0x3F,0xE5,0x3E,0x64,0x80,0xF8,0xEE,0x64,0x80,0x98,0x50,0x05,0x7E,0x02,0x7F,0x00 ,0x22,0xC3,0xE5,0x41,0x9F,0xFF,0xE5,0x40,0x9E,0x78,0x09,0x71,0xF2,0xC0,0x06,0xC0 ,0x07,0x71,0xFB,0xAB,0x07,0xFA,0x33,0x95,0xE0,0xF9,0xF8,0xD0,0x07,0xD0,0x06,0x02 ,0x01,0xFF,0xFE,0x33,0x95,0xE0,0xFD,0xFC,0x02,0x02,0xB5,0xC3,0xE5,0x41,0x95,0x3F ,0xFF,0xE5,0x40,0x95,0x3E,0x22,0x91,0x55,0x4E,0x70,0x05,0x75,0xC1,0x01,0x80,0x05 ,0x90,0x1B,0x45,0x91,0x21,0x11,0x95,0xF5,0xC2,0xED,0xF5,0xC3,0xA3,0xE0,0xF5,0xC4 ,0x22,0xEE,0xF0,0xA3,0xEF,0xF0,0x12,0x0E,0x2F,0xE4,0xF5,0xC1,0x22,0x91,0x55,0xD3 
,0x94,0x80,0xEE,0x94,0x0C,0x50,0x09,0xC3,0xEF,0x94,0x32,0xEE,0x94,0x00,0x50,0x05 ,0x75,0xC1,0x01,0x80,0x05,0x90,0x1B,0x47,0x91,0x21,0x90,0x1B,0x14,0x11,0x98,0xF5 ,0xC2,0xED,0xF5,0xC3,0x22,0xAF,0xC1,0xEF,0xFF,0xAD,0xC2,0xED,0x90,0x1B,0x49,0xCF ,0xF0,0xA3,0xEF,0xF0,0x90,0x1B,0x49,0xE0,0xFE,0xA3,0xE0,0xFF,0x22,0x91,0x55,0xC3 ,0x94,0xFF,0xEE,0x94,0x5F,0x50,0x0A,0xAD,0xC4,0x12,0x09,0x43,0xE4,0xF5,0xC1,0x80 ,0x03,0x75,0xC1,0x01,0xE4,0xF5,0xC2,0x22,0x90,0x1B,0x49,0xE5,0xC2,0xF0,0xE4,0xA3 ,0xF0,0x90,0x1B,0x49,0xE0,0xFF,0xC3,0x94,0xFF,0x50,0x0C,0x31,0x2B,0x90,0x1B,0x4A ,0xEF,0xF0,0xE4,0xF5,0xC1,0x80,0x03,0x75,0xC1,0x01,0xE4,0xF5,0xC2,0xF5,0xC3,0x90 ,0x1B,0x4A,0xE0,0xF5,0xC4,0x22,0xAF,0xC1,0xEF,0xFF,0xAD,0xC2,0xED,0x90,0x1B,0x49 ,0xCF,0xF0,0xA3,0xEF,0xF0,0xE4,0xA3,0x91,0x63,0xC3,0x94,0xFF,0xEE,0x94,0x5F,0x50 ,0x0D,0x12,0x0D,0x3A,0x90,0x1B,0x4B,0xEF,0xF0,0xE4,0xF5,0xC1,0x80,0x03,0x75,0xC1 ,0x01,0xE4,0xF5,0xC2,0xF5,0xC3,0x90,0x1B,0x4B,0xE0,0xF5,0xC4,0x22,0xE4,0xFF,0xE5 ,0xC1,0xC3,0x94,0xC1,0x50,0x0A,0xAF,0xC1,0x12,0x0D,0x22,0xE4,0xF5,0xC1,0x80,0x03 ,0x75,0xC1,0x01,0x8F,0xC4,0xE4,0xF5,0xC3,0xF5,0xC2,0x22,0x8F,0x2F,0xAB,0x2F,0xAD ,0xC3,0xAF,0xC2,0xB1,0x27,0x8F,0xC3,0x05,0xC2,0xAB,0x2F,0xAD,0xC4,0xAF,0xC2,0xB1 ,0x27,0x8F,0xC4,0xE4,0xF5,0xC1,0x22,0x8D,0x37,0xAE,0x03,0xEF,0x7C,0x00,0x7B,0x01 ,0x24,0x23,0xF9,0xEC,0x34,0x1B,0xFA,0x90,0x1B,0x50,0x12,0x03,0x0C,0xEE,0x60,0x11 ,0xEF,0xC3,0x94,0x26,0x50,0x0B,0x90,0x1B,0x50,0x12,0x02,0xEC,0xE5,0x37,0x12,0x00 ,0x4C,0x90,0x1B,0x50,0x12,0x0F,0xE3,0xFF,0x22,0xE4,0xFF,0x80,0xAE,0x7F,0x01,0x80 ,0xAA,0xAD,0x3B,0xAC,0x3A,0xE4,0xF5,0xC1,0x8C,0xC3,0xAF,0x05,0xEF,0xF5,0xC4,0x22 ,0xAF,0xC2,0x7E,0x00,0xEF,0x78,0x10,0xC3,0x33,0xCE,0x33,0xCE,0xD8,0xF9,0xFD,0xAC ,0x06,0xAF,0xC1,0x7E,0x00,0xEF,0x78,0x18,0xC3,0x33,0xCE,0x33,0xCE,0xD8,0xF9,0xB1 ,0xAA,0xAF,0xC3,0xEF,0x4C,0xFE,0xED,0xFF,0xAD,0xC4,0xEF,0x4D,0xFF,0xE4,0xFC,0xFD ,0x90,0x1B,0x60,0x12,0x02,0xE0,0xE4,0xF5,0xC1,0x22,0xFF,0xEE,0x4C,0xFC,0xEF,0x4D 
,0xFD,0x22,0x8E,0x2F,0x8F,0x30,0xC3,0xE5,0x30,0x94,0x64,0xE5,0x2F,0x94,0x00,0x50 ,0x0C,0xF1,0x20,0xC2,0x44,0xF1,0x17,0x7E,0x40,0x7D,0x06,0x80,0x6D,0xC3,0xE5,0x30 ,0x94,0xC8,0xE5,0x2F,0x94,0x00,0x50,0x0A,0xF1,0x20,0xF1,0x11,0x7E,0x80,0x7D,0x0C ,0x80,0x58,0xC3,0xE5,0x30,0x94,0x90,0xE5,0x2F,0x94,0x01,0x50,0x0F,0xC2,0x40,0xD2 ,0x41,0xC2,0x42,0xC2,0x43,0xF1,0x11,0xFE,0x7D,0x19,0x80,0x3E,0xC3,0xE5,0x30,0x94 ,0x20,0xE5,0x2F,0x94,0x03,0x50,0x0D,0xD2,0x40,0xC2,0x41,0xC2,0x42,0xF1,0x0F,0xFE ,0x7D,0x32,0x80,0x26,0xC3,0xE5,0x30,0x94,0x40,0xE5,0x2F,0x94,0x06,0x50,0x09,0xC2 ,0x40,0xF1,0x0B,0xFE,0x7D,0x64,0x80,0x12,0xC3,0xE5,0x30,0x94,0x80,0xE5,0x2F,0x94 ,0x0C,0x50,0x0F,0xD2,0x40,0xF1,0x0B,0xFE,0x7D,0xC8,0xFC,0x12,0x01,0xFF,0x8E,0x31 ,0x8F,0x32,0xC3,0xE4,0x95,0x32,0xF5,0x34,0x74,0x20,0x95,0x31,0xF5,0x33,0xA2,0x41 ,0xE4,0x33,0x24,0x01,0xFB,0xE4,0x33,0xFA,0xE4,0x33,0xF9,0xE4,0x33,0xF8,0xA2,0x40 ,0x12,0x0F,0x24,0xA2,0x42,0x12,0x0F,0x24,0xA2,0x43,0x12,0x0F,0x24,0xA2,0x44,0xE4 ,0x33,0x24,0x01,0xFF,0xE4,0x33,0xFE,0xE4,0x33,0xFD,0xE4,0x33,0xFC,0x12,0x01,0x74 ,0xC0,0x04,0x12,0x0F,0x37,0xE5,0x33,0xFF,0xC3,0x74,0x20,0x9F,0xFD,0xE4,0x94,0x00 ,0xFC,0x7E,0x06,0x7F,0x40,0x12,0x00,0x70,0xE4,0xFC,0xFD,0xD0,0x00,0x12,0x01,0x74 ,0x90,0x1B,0x14,0xEE,0xF0,0xA3,0xEF,0xF0,0xA2,0x41,0xE4,0xFE,0x33,0x54,0x01,0x78 ,0x07,0xC3,0x33,0xCE,0x33,0xCE,0xD8,0xF9,0xFD,0xAC,0x06,0xA2,0x40,0xE4,0x33,0x54 ,0x01,0x4C,0xFC,0xA2,0x42,0xE4,0xFE,0x33,0x54,0x01,0x78,0x06,0xC3,0x33,0xCE,0x33 ,0xCE,0xD8,0xF9,0xB1,0xAA,0xA2,0x43,0xE4,0xFE,0x33,0x54,0x01,0x78,0x05,0xC3,0x33 ,0xCE,0x33,0xCE,0xD8,0xF9,0xB1,0xAA,0xA2,0x44,0xE4,0x33,0x54,0x01,0xC4,0xF8,0x54 ,0x0F,0xC8,0x68,0xFF,0xE4,0xC4,0x54,0xF0,0x48,0x4C,0xFC,0xEF,0x4D,0xFD,0xE5,0x33 ,0x54,0x0F,0xFF,0xEC,0xF5,0x48,0xEF,0x4D,0xF5,0x49,0x22,0xD2,0x41,0xD2,0x42,0xD2 ,0x43,0xD2,0x44,0xAE,0x2F,0xAF,0x30,0xAB,0x07,0xAA,0x06,0xE4,0xF9,0xF8,0xFF,0x22 ,0xC2,0x40,0xC2,0x41,0xC2,0x42,0xC2,0x43,0x22,0xE4,0xF5,0xC1,0xF1,0x31,0x8F,0xC2 
,0x22,0x7F,0x20,0x12,0x0D,0x22,0xEF,0x54,0x03,0x64,0x03,0x7F,0x00,0x60,0x02,0x7F ,0x01,0x22,0xAF,0xC1,0xEF,0x14,0x60,0x0C,0x04,0x70,0x16,0x74,0xFF,0x90,0x1B,0x3B ,0xF0,0xA3,0x80,0x08,0x90,0x1B,0x3B,0xE4,0xF0,0xA3,0x74,0x0A,0xF0,0xE4,0xF5,0xC1 ,0x22,0x75,0xC1,0x01,0x22,0xAF,0xC1,0xEF,0xFE,0xAD,0xC2,0xED,0xFF,0xE4,0xF5,0xC1 ,0x8F,0x82,0x8E,0x83,0x93,0xFC,0x74,0x01,0x93,0xF5,0xC3,0xEC,0xF5,0xC4,0x22,0xE4 ,0xF5,0x8F,0xF1,0x92,0x75,0xA8,0x81,0x53,0xC9,0xFE,0x75,0xC5,0xFF,0x12,0x04,0x6F ,0x80,0xFB,0x75,0xC9,0xFF,0x75,0xCA,0xFF,0x75,0xC7,0xFF,0x75,0xC8,0xFF,0x22,0xE5 ,0xC1,0xB4,0x80,0x05,0x12,0x09,0x2E,0x80,0x3E,0xE4,0xFD,0xAF,0xC1,0x12,0x18,0x35 ,0x12,0x19,0xB7,0x70,0x06,0xE9,0xF4,0x70,0x02,0xEA,0xF4,0x70,0x04,0x75,0xC1,0x01 ,0x22,0x90,0x1B,0x0F,0xE0,0x60,0x03,0x12,0x0F,0x87,0xE4,0x90,0x1B,0x0F,0xF0,0xA3 ,0xE5,0xC1,0xF0,0xC2,0x48,0xC2,0x47,0x12,0x0A,0xDE,0x12,0x0F,0xAA,0x7F,0x03,0x7E ,0x00,0x12,0x04,0x31,0x12,0x0E,0x2F,0xE4,0xF5,0xC1,0x22,0xAD,0xC2,0xAF,0xC3,0x12 ,0x18,0x35,0x12,0x19,0xB7,0x70,0x06,0xE9,0xF4,0x70,0x02,0xEA,0xF4,0x70,0x08,0x75 ,0xC1,0x01,0xF5,0xC2,0xF5,0xC3,0x22,0xE5,0xC1,0x90,0x1B,0x4A,0x70,0x05,0x75,0xF0 ,0x9D,0x80,0x04,0xE4,0x75,0xF0,0x9F,0x12,0x00,0xFB,0x90,0x1B,0x49,0xE4,0x75,0xF0 ,0x01,0x12,0x02,0xF5,0x12,0x00,0x06,0xF5,0xC2,0x90,0x1B,0x49,0x12,0x0F,0xE3,0xF5 ,0xC3,0xE4,0xF5,0xC1,0x22,0xAC,0x07,0x90,0x1B,0x2D,0x11,0x70,0x12,0x01,0x11,0xFF ,0xAE,0xF0,0xF4,0x70,0x02,0xEE,0xF4,0x60,0x1D,0xED,0x70,0x04,0xEF,0x6C,0x60,0x16 ,0xBD,0x01,0x0A,0x12,0x01,0x11,0xE5,0xF0,0xB5,0x04,0x02,0x80,0x09,0x74,0x04,0x29 ,0xF9,0xE4,0x3A,0xFA,0x80,0xD6,0x90,0x00,0x02,0x12,0x01,0x3C,0xFF,0xAE,0xF0,0x22 ,0xE0,0xFE,0xA3,0xE0,0xAA,0x06,0xF9,0x7B,0xFF,0x22,0xEF,0x24,0x34,0x60,0x06,0x04 ,0x70,0x05,0x8D,0xCB,0x22,0x8D,0xCC,0x22,0x8F,0xFA,0x8D,0xFB,0x75,0xF8,0x20,0x02 ,0x08,0x90,0x90,0x1B,0x49,0xE5,0xC2,0xF0,0xE0,0xFF,0xC3,0x94,0xFF,0x50,0x09,0xAD ,0xC4,0x11,0x88,0xE4,0xF5,0xC1,0x80,0x03,0x75,0xC1,0x01,0xE4,0xF5,0xC2,0x22,0xE4 
,0xFD,0xAF,0xC1,0xEF,0x14,0x60,0x25,0x14,0x60,0x39,0x24,0xFC,0x60,0x51,0x14,0x60 ,0x56,0x14,0x60,0x5B,0x24,0x08,0x70,0x63,0xE5,0xC2,0xD3,0x94,0x01,0x40,0x04,0x7D ,0x01,0x80,0x5A,0xAF,0xC2,0x90,0x1B,0x00,0xEF,0xF0,0x80,0x51,0xAF,0xC3,0xAC,0xC2 ,0xEC,0x2F,0xFF,0xE4,0x33,0x4F,0x70,0x04,0x7D,0x01,0x80,0x41,0x31,0x30,0x90,0x1B ,0x01,0x80,0x29,0x31,0x30,0xD3,0x94,0x80,0xEE,0x94,0x0C,0x50,0x09,0xC3,0xEF,0x94 ,0x32,0xEE,0x94,0x00,0x50,0x04,0x7D,0x01,0x80,0x23,0x90,0x1B,0x03,0x80,0x0D,0x90 ,0x1B,0x09,0xE5,0xC2,0xF0,0x80,0x16,0x31,0x30,0x90,0x1B,0x37,0xEE,0x80,0x06,0xAF ,0xC2,0x90,0x1B,0x39,0xE4,0xF0,0xA3,0xEF,0xF0,0x80,0x02,0x7D,0x01,0x8D,0xC1,0x22 ,0xAF,0xC2,0xEF,0xFE,0xAC,0xC3,0xEC,0xFB,0xEB,0xFF,0x22,0x90,0x1B,0x00,0xE0,0x60 ,0x04,0xD2,0x47,0x80,0x02,0xC2,0x47,0x30,0x47,0x15,0x12,0x0F,0xBB,0x90,0x1B,0x02 ,0xE0,0x9F,0x90,0x1B,0x01,0xE0,0x9E,0x40,0x03,0xD2,0x48,0x22,0xC2,0x48,0x22,0x12 ,0x0F,0xBB,0x90,0x1B,0x46,0xE0,0x9F,0x90,0x1B,0x45,0xE0,0x9E,0x40,0x03,0xD2,0x48 ,0x22,0xC2,0x48,0x22,0xC0,0xE0,0xC0,0xF0,0xC0,0x83,0xC0,0x82,0xC0,0xD0,0x75,0xD0 ,0x00,0xC0,0x00,0xC0,0x01,0xC0,0x02,0xC0,0x03,0xC0,0x04,0xC0,0x05,0xC0,0x06,0xC0 ,0x07,0xE5,0xC7,0x30,0xE0,0x06,0x12,0x08,0x30,0x53,0xC7,0x01,0xD0,0x07,0xD0,0x06 ,0xD0,0x05,0xD0,0x04,0xD0,0x03,0xD0,0x02,0xD0,0x01,0xD0,0x00,0xD0,0xD0,0xD0,0x82 ,0xD0,0x83,0xD0,0xF0,0xD0,0xE0,0x32,0xAA,0x06,0xA9,0x07,0x7B,0xFF,0x90,0x1B,0x49 ,0x12,0x03,0x0C,0x90,0x1B,0x49,0x12,0x02,0xEC,0x74,0xFF,0xF5,0x83,0xF5,0x82,0x6B ,0x22,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF 
,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF 
,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF 
,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF 
,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xEF,0xFF,0xFF ,0x21,0x20,0x24,0x90,0x21,0xB2,0x21,0x95,0x21,0x94,0x60,0x00,0x64,0x21,0x95,0x26 ,0x5E,0x25,0x66,0x2A,0x22,0x21,0x2E,0x26,0x30,0x22,0x38,0x24,0x40,0x23,0x9E,0x22 ,0x9A,0x84,0x0E,0x84,0xEE,0x84,0xEE,0x22,0x52,0x21,0x20,0x21,0x58,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x40,0x03,0x00,0x01 ,0x03,0x02,0x01,0x00,0xFF,0x60,0x03,0x18,0x00,0xE1,0x0F,0xF8,0xF4,0xF8,0x28,0x24 ,0x0C,0x26,0x00,0x27,0x0F,0x00,0x0E,0x02,0x01,0xD0,0x07,0x64,0x00,0x94,0x11,0xE8 ,0x03,0x64,0x00,0xF4,0x01,0x02,0x11,0x00,0xE8,0x03,0xFC,0x18,0x03,0xE9,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x60,0x03,0x07,0x06,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF ,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF }; struct ov14810_reg { u16 addr; u16 val; }; struct ov14810_sensor { struct i2c_client *i2c_client; struct ov14810_platform_data *pdata; }; struct ov14810_info { int mode; int uC_programmed; struct ov14810_sensor sensor; struct ov14810_sensor uC; struct ov14810_sensor slaveDev; }; static struct 
ov14810_info *info;	/* single driver-wide instance; completes the
			 * "static struct" begun on the previous line */

/*
 * Pseudo register addresses used inside the mode tables below.
 * Entries with these addresses are control directives, not real
 * sensor registers: WAIT_MS delays for `val` milliseconds, END
 * terminates the table.
 */
#define OV14810_TABLE_WAIT_MS 0
#define OV14810_TABLE_END 1

/*
 * Full-resolution (4416x3312) mode initialization sequence.
 * Register values come from the vendor init sequence; individual
 * register meanings are per the OV14810 datasheet and are not
 * verifiable here. 0x0103=0x01 is a software reset; the final
 * 0x0100=0x01 entry starts streaming.
 */
static struct ov14810_reg mode_4416x3312[] = {
	{0x0103, 0x01}, {0x3003, 0x09}, {0x3004, 0x00}, {0x3005, 0xa7},
	{0x3006, 0x80}, {0x3007, 0x08}, {0x3013, 0x1f}, {0x3018, 0x04},
	{0x301b, 0xe0}, {0x301c, 0xf8}, {0x3020, 0x01}, {0x3106, 0x05},
	{0x3600, 0x2d}, {0x3601, 0x1f}, {0x360a, 0x2e}, {0x360f, 0x24},
	{0x3611, 0x6c}, {0x3613, 0x84}, {0x3705, 0xd1}, {0x3707, 0x73},
	{0x3708, 0x01}, {0x370e, 0x04}, {0x3710, 0x40}, {0x3711, 0x1c},
	{0x3717, 0x80}, {0x3718, 0x11}, {0x3719, 0x11}, {0x371b, 0xa0},
	{0x371e, 0x2c}, {0x3723, 0x30}, {0x3726, 0x70}, {0x3808, 0x00},
	{0x380a, 0x00}, {0x3817, 0x24}, {0x3819, 0x80}, {0x3a00, 0x78},
	{0x3a13, 0x46}, {0x3a18, 0x00}, {0x3a19, 0x7f}, {0x3a1a, 0x06},
	{0x3a25, 0x83}, {0x3b09, 0x0a}, {0x4002, 0xc5}, {0x4004, 0x02},
	{0x4005, 0x10}, {0x4009, 0x40}, {0x404f, 0xff}, {0x4709, 0x00},
	{0x4801, 0x0f}, {0x4806, 0x80}, {0x4842, 0x01}, {0x5000, 0x00},
	{0x5001, 0x00}, {0x5002, 0x00}, {0x503b, 0x01}, {0x503c, 0x10},
	{0x5041, 0x0e}, {0x5780, 0xfc}, {0x5b01, 0x03}, {0x5b03, 0x00},
	{0x3003, 0x0a}, {0x3005, 0xa7}, {0x3006, 0x80}, {0x3007, 0x08},
	{0x3013, 0x1f}, {0x3602, 0x42}, {0x3604, 0x80}, {0x3605, 0x11},
	{0x360c, 0x42}, {0x360d, 0x13}, {0x3614, 0x05}, {0x3702, 0x10},
	{0x3704, 0x14}, {0x3707, 0x73}, {0x370a, 0x80}, {0x370b, 0x00},
	{0x370c, 0x04}, {0x370d, 0x0d}, {0x370f, 0x61}, {0x3713, 0xfa},
	{0x3714, 0x2f}, {0x3715, 0x2c}, {0x3716, 0x0b}, {0x371c, 0x28},
	{0x371d, 0x20}, {0x3721, 0x08}, {0x3724, 0x18}, {0x3725, 0x17},
	{0x3727, 0x65}, {0x3728, 0x0c}, {0x3803, 0x0b}, {0x3804, 0x11},
	{0x3805, 0x40}, {0x3806, 0x0c}, {0x3807, 0xf9}, {0x380c, 0x09},
	{0x380d, 0x5c}, {0x380e, 0x0d}, {0x380f, 0x08}, {0x3810, 0x44},
	{0x3811, 0x96}, {0x3818, 0x40}, {0x381c, 0x30}, {0x381d, 0x10},
	{0x381e, 0x0c}, {0x381f, 0xf8}, {0x3820, 0x00}, {0x3821, 0x0c},
	{0x3503, 0x13}, {0x4050, 0xc0}, {0x4051, 0x00}, {0x4053, 0xa1},
	{0x4837, 0x1b}, {0x503d, 0x00}, {0x5042, 0x21}, {0x5047, 0x00},
	{0x3a08, 0x1f}, {0x3a09, 0x40}, {0x3a0a, 0x1a}, {0x3a0b, 0x00},
	{0x3a0d, 0x08}, {0x3a0e, 0x06}, {0x503d, 0x00}, {0x0100, 0x01},
	{OV14810_TABLE_END, 0x0000}
};

/*
 * 1280x720 mode initialization sequence.  Same structure as above;
 * begins with a software reset (0x0103=0x01) followed by a 20 ms
 * settle delay.
 */
static struct ov14810_reg mode_1280x720[] = {
	{0x0103, 0x01},
	{OV14810_TABLE_WAIT_MS, 20},
	{0x3003, 0x0a}, {0x3004, 0x00}, {0x3005, 0xa7}, {0x3006, 0x80},
	{0x3007, 0x08}, {0x3018, 0x04}, {0x301b, 0xe0}, {0x301c, 0xf8},
	{0x3020, 0x01}, {0x3106, 0x05}, {0x3600, 0x2d}, {0x3601, 0x1f},
	{0x3609, 0x00}, {0x360a, 0x2e}, {0x360f, 0x24}, {0x3611, 0x6c},
	{0x3613, 0x84}, {0x3702, 0x20}, {0x3704, 0x28}, {0x3705, 0xd1},
	{0x3708, 0x01}, {0x370e, 0x04}, {0x3710, 0x40}, {0x3711, 0x1c},
	{0x3714, 0x5f}, {0x3715, 0x58}, {0x3717, 0x80}, {0x3718, 0x11},
	{0x3719, 0x11}, {0x371b, 0xa0}, {0x371c, 0x46}, {0x371d, 0x40},
	{0x371e, 0x2c}, {0x3723, 0x30}, {0x3725, 0x2e}, {0x3726, 0x70},
	{0x3808, 0x00}, {0x380a, 0x00}, {0x3817, 0x24}, {0x3819, 0x80},
	{0x382c, 0x02}, {0x382d, 0x01}, {0x3a00, 0x78}, {0x3a13, 0x46},
	{0x3a18, 0x00}, {0x3a19, 0x7f}, {0x3a1a, 0x06}, {0x3a25, 0x83},
	{0x3b09, 0x0a}, {0x4002, 0xc5}, {0x4004, 0x02}, {0x4005, 0x10},
	{0x4009, 0x40}, {0x404f, 0xff}, {0x4709, 0x00}, {0x4801, 0x0f},
	{0x4806, 0x80}, {0x4842, 0x01}, {0x5000, 0x00}, {0x5001, 0x00},
	{0x5002, 0x00}, {0x503b, 0x01}, {0x503c, 0x10}, {0x5041, 0x0e},
	{0x5780, 0xfc}, {0x5b00, 0x10}, {0x5b01, 0x5b}, {0x5b03, 0x00},
	{0x3005, 0xa7}, {0x3006, 0x80}, {0x3007, 0x08}, {0x3013, 0x1f},
	{0x3602, 0x53}, {0x3604, 0x80}, {0x3605, 0x01}, {0x360b, 0x0c},
	{0x360c, 0x45}, {0x360d, 0x03}, {0x3614, 0x05}, {0x3707, 0x73},
	{0x370a, 0x81}, {0x370b, 0x20}, {0x370c, 0x04}, {0x370d, 0x01},
	{0x370f, 0x00}, {0x3713, 0xe6}, {0x3716, 0xf0}, {0x3721, 0x08},
	{0x3724, 0x2e}, {0x3727, 0x60}, {0x3728, 0x02}, {0x3803, 0x07},
	{0x3804, 0x05}, /* width */
	{0x3805, 0x09},
	{0x3806, 0x02}, /* height */
	{0x3807, 0xd8},
	{0x380c, 0x05}, {0x380d, 0x66}, {0x380e, 0x02}, {0x380f, 0xe4},
	{0x3810, 0x22}, {0x3811, 0x02}, {0x3818, 0x45}, {0x381c, 0x13},
	{0x381d, 0xb8},
	{0x381e, 0x05}, /* height w/o skipping */
	{0x381f, 0xc0},
{0x3820, 0x03}, {0x3821, 0xa8}, {0x3503, 0x13}, /* Manual exposure, gain control */ {0x4050, 0xc0}, {0x4051, 0x00}, {0x4053, 0xa1}, {0x4837, 0x1b}, {0x503d, 0x00}, {0x5042, 0x31}, {0x5047, 0x00}, {0x100, 0x01}, {OV14810_TABLE_END, 0x0000} }; enum { OV14810_MODE_4416x3312, OV14810_MODE_1280x720 }; static struct ov14810_reg *mode_table[] = { [OV14810_MODE_4416x3312] = mode_4416x3312, [OV14810_MODE_1280x720] = mode_1280x720 }; static inline void ov14810_get_frame_length_regs(struct ov14810_reg *regs, u32 frame_length) { regs->addr = OV14810_FRAME_LENGTH_REG_ADDR0; regs->val = (frame_length >> 8) & 0xff; (regs + 1)->addr = OV14810_FRAME_LENGTH_REG_ADDR1; (regs + 1)->val = (frame_length) & 0xff; } static inline void ov14810_get_coarse_time_regs(struct ov14810_reg *regs, u32 coarse_time) { regs->addr = OV14810_COARSE_TIME_REG_ADDR0; regs->val = (coarse_time >> 12) & 0xff; (regs + 1)->addr = OV14810_COARSE_TIME_REG_ADDR1; (regs + 1)->val = (coarse_time >> 4) & 0xff; (regs + 2)->addr = OV14810_COARSE_TIME_REG_ADDR2; (regs + 2)->val = (coarse_time & 0xf) << 4; } static inline void ov14810_get_gain_reg(struct ov14810_reg *regs, u16 gain) { regs->addr = OV14810_GAIN_REG_ADDR0; regs->val = gain; } static int ov14810_write16(struct i2c_client *client, u16 addr, u8 val) { int err; struct i2c_msg msg; unsigned char data[3]; if (!client->adapter) return -ENODEV; data[0] = (u8) (addr >> 8); data[1] = (u8) (addr & 0xff); data[2] = (u8) (val & 0xff); msg.addr = client->addr; msg.flags = 0; msg.len = 3; msg.buf = data; err = i2c_transfer(client->adapter, &msg, 1); if (err != 1) { pr_err("ov14810: i2c transfer failed %x %x\n", addr, val); return -EIO; } return 0; } static int ov14810_write8(struct i2c_client *client, u8 addr, u8 val) { int err; struct i2c_msg msg; unsigned char data[2]; if (!client->adapter) return -ENODEV; data[0] = (u8) (addr); data[1] = (u8) (val & 0xff); msg.addr = client->addr; msg.flags = 0; msg.len = 2; msg.buf = data; err = i2c_transfer(client->adapter, &msg, 
1); if (err != 1) { pr_err("ov14810: i2c transfer failed %x %x\n",addr, val); return -EIO; } return 0; } static int ov14810_write_reg_helper(struct ov14810_info *info, u16 addr, u8 val) { return ov14810_write16(info->sensor.i2c_client, addr, val); } static int ov14810_write_table(struct ov14810_info *info, const struct ov14810_reg table[], const struct ov14810_reg override_list[], int num_override_regs) { int err; const struct ov14810_reg *next; int i; u16 val; for (next = table; next->addr != OV14810_TABLE_END; next++) { val = next->val; if (next->addr == OV14810_TABLE_WAIT_MS) { msleep(val); continue; } /* When an override list is passed in, replace the reg */ /* value to write if the reg is in the list */ if (override_list) { for (i = 0; i < num_override_regs; i++) { if (next->addr == override_list[i].addr) { val = override_list[i].val; break; } } } err = ov14810_write_reg_helper(info, next->addr, val); } return err; } static int ov14810_set_mode(struct ov14810_info *info, struct ov14810_mode *mode) { int sensor_mode; int err; struct ov14810_reg reg_list[6]; pr_info("%s: xres %u yres %u framelength %u coarsetime %u gain %u\n", __func__, mode->xres, mode->yres, mode->frame_length, mode->coarse_time, mode->gain); if (mode->xres == 1280 && mode->yres == 720) sensor_mode = OV14810_MODE_1280x720; else if (mode->xres == 4416 && mode->yres == 3312) sensor_mode = OV14810_MODE_4416x3312; else { pr_err("%s: invalid resolution supplied to set mode %d %d\n", __func__, mode->xres, mode->yres); return -EINVAL; } /* get a list of override regs for the asking frame length, */ /* coarse integration time, and gain. 
*/ ov14810_get_frame_length_regs(reg_list, mode->frame_length); ov14810_get_coarse_time_regs(reg_list + 2, mode->coarse_time); ov14810_get_gain_reg(reg_list + 5, mode->gain); err = ov14810_write_table(info, mode_table[sensor_mode], reg_list, 6); if (err) return err; info->mode = sensor_mode; return 0; } static int ov14810_set_frame_length(struct ov14810_info *info, u32 frame_length) { struct ov14810_reg reg_list[2]; int i; int ret; ov14810_get_frame_length_regs(reg_list, frame_length); for (i = 0; i < 2; i++) { ret = ov14810_write_reg_helper(info, reg_list[i].addr, reg_list[i].val); if (ret) return ret; } return 0; } static int ov14810_set_coarse_time(struct ov14810_info *info, u32 coarse_time) { int ret; struct ov14810_reg reg_list[3]; int i; ov14810_get_coarse_time_regs(reg_list, coarse_time); ret = ov14810_write_reg_helper(info, OV14810_GROUP_ACCESS_REG_ADDR, 0x01); if (ret) return ret; for (i = 0; i < 3; i++) { ret = ov14810_write_reg_helper(info, reg_list[i].addr, reg_list[i].val); if (ret) return ret; } ret = ov14810_write_reg_helper(info, OV14810_GROUP_ACCESS_REG_ADDR, 0x11); if (ret) return ret; ret = ov14810_write_reg_helper(info, OV14810_GROUP_ACCESS_REG_ADDR, 0xa1); if (ret) return ret; return 0; } static int ov14810_set_gain(struct ov14810_info *info, u16 gain) { int ret; struct ov14810_reg reg_list; ov14810_get_gain_reg(&reg_list, gain); ret = ov14810_write_reg_helper(info, reg_list.addr, reg_list.val); return ret; } static int ov14810_set_power(int powerLevel) { pr_info("%s: powerLevel=%d \n", __func__, powerLevel); if (info->sensor.pdata) { if (powerLevel && info->sensor.pdata->power_on) { info->sensor.pdata->power_on(); msleep(1000); } else if (info->sensor.pdata->power_off) { info->sensor.pdata->power_off(); } } return 0; } static long ov14810_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ov14810_info *info = file->private_data; int err; switch (cmd) { case OV14810_IOCTL_SET_MODE: { struct ov14810_mode mode; err = 
copy_from_user(&mode,(const void __user *)arg, sizeof(struct ov14810_mode)); if (err) { pr_err("%s %d\n", __func__, __LINE__); return err; } return ov14810_set_mode(info, &mode); } case OV14810_IOCTL_SET_FRAME_LENGTH: return ov14810_set_frame_length(info, (u32)arg); case OV14810_IOCTL_SET_COARSE_TIME: return ov14810_set_coarse_time(info, (u32)arg); case OV14810_IOCTL_SET_GAIN: return ov14810_set_gain(info, (u16)arg); case OV14810_IOCTL_GET_STATUS: { u16 status = 0; err = copy_to_user((void __user *)arg, &status,2); if (err) { pr_err("%s %d\n", __func__, __LINE__); return err; } return 0; } default: return -EINVAL; } return 0; } static int ov14810_slavedev_open(void) { pr_info("%s\n", __func__); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0x19, 0x67); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0x18, 0x02); return 0; } static int ov14810_slavedev_reset(void) { pr_info("%s\n", __func__); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0x18, 0x03); msleep(1000); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc1, 0x0); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc2, 0x0); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc3, 0x0); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc4, 0x0); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc5, 0x0); msleep(1000); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc1, 0x0); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc2, 0x0); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc3, 0x0); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc4, 0x0); OV14810_I2C_WRITE8(info->slaveDev.i2c_client, 0xc5, 0x17); msleep(1000); return 0; } static int ov14810uC_open(void) { int i; int err; pr_info("ov14810uC programmming started \n"); for (i = 0; i < sizeof(uCProgram); i++) { ov14810_write16(info->uC.i2c_client, ( ( (i & 0xff) << 8) | ( (i & 0xff00) >> 8) ), uCProgram[i]); } pr_info("ov14810uC programmming finished \n"); err = ov14810_slavedev_reset(); return err; } static int ov14810_open(struct inode *inode, struct file *file) { int 
err; pr_info("%s\n", __func__); file->private_data = info; err = ov14810_set_power(1); if (err) return err; if (info->uC_programmed == 0) { err = ov14810_slavedev_open(); if (err) return err; err = ov14810uC_open(); if (!err) info->uC_programmed = 1; } return err; } int ov14810_release(struct inode *inode, struct file *file) { pr_info("%s\n", __func__); ov14810_set_power(0); file->private_data = NULL; return 0; } static const struct file_operations ov14810_fileops = { .owner = THIS_MODULE, .open = ov14810_open, .unlocked_ioctl = ov14810_ioctl, .release = ov14810_release, }; static struct miscdevice ov14810_device = { .minor = MISC_DYNAMIC_MINOR, .name = "ov14810", .fops = &ov14810_fileops, }; static int ov14810_probe(struct i2c_client *client, const struct i2c_device_id *id) { int err; pr_info("%s: probing sensor.\n", __func__); if (!info) { info = kzalloc(sizeof(struct ov14810_info), GFP_KERNEL); if (!info) { pr_err("ov14810: Unable to allocate memory!\n"); return -ENOMEM; } } err = misc_register(&ov14810_device); if (err) { pr_err("ov14810: Unable to register misc device!\n"); kfree(info); return err; } info->sensor.pdata = client->dev.platform_data; info->sensor.i2c_client = client; return 0; } static int ov14810_remove(struct i2c_client *client) { misc_deregister(&ov14810_device); kfree(info); return 0; } static int ov14810_uC_probe(struct i2c_client *client, const struct i2c_device_id *id) { if (!info) { info = kzalloc(sizeof(struct ov14810_sensor), GFP_KERNEL); if (!info) { pr_err("ov14810uC: Unable to allocate memory!\n"); return -ENOMEM; } } info->uC.pdata = client->dev.platform_data; info->uC.i2c_client = client; return 0; } static int ov14810_uC_remove(struct i2c_client *client) { return 0; } static int ov14810_slavedev_probe(struct i2c_client *client, const struct i2c_device_id *id) { pr_info("%s: probing slave Dev of sensor.\n", __func__); if (!info) { info = kzalloc(sizeof(struct ov14810_sensor), GFP_KERNEL); if (!info) { pr_err("ov14810uC: Unable to 
allocate memory!\n"); return -ENOMEM; } } info->slaveDev.pdata = client->dev.platform_data; info->slaveDev.i2c_client = client; info->uC_programmed = 0; return 0; } static int ov14810_slavedev_remove(struct i2c_client *client) { return 0; } static const struct i2c_device_id ov14810_id[] = { { "ov14810", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, ov14810_id); static struct i2c_driver ov14810_i2c_driver = { .driver = { .name = "ov14810", .owner = THIS_MODULE, }, .probe = ov14810_probe, .remove = ov14810_remove, .id_table = ov14810_id, }; static const struct i2c_device_id ov14810_uC_id[] = { { "ov14810uC", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, ov14810_uC_id); static struct i2c_driver ov14810_uC_i2c_driver = { .driver = { .name = "ov14810uC", .owner = THIS_MODULE, }, .probe = ov14810_uC_probe, .remove = ov14810_uC_remove, .id_table = ov14810_uC_id, }; static const struct i2c_device_id ov14810_slavedev_id[] = { { "ov14810SlaveDev", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, ov14810_slavedev_id); static struct i2c_driver ov14810_slavedev_i2c_driver = { .driver = { .name = "ov14810SlaveDev", .owner = THIS_MODULE, }, .probe = ov14810_slavedev_probe, .remove = ov14810_slavedev_remove, .id_table = ov14810_slavedev_id, }; static int __init ov14810_init(void) { int ret; pr_info("ov14810 sensor driver loading\n"); ret = i2c_add_driver(&ov14810_i2c_driver); if (ret) return ret; ret = i2c_add_driver(&ov14810_uC_i2c_driver); if (ret) return ret; return i2c_add_driver(&ov14810_slavedev_i2c_driver); } static void __exit ov14810_exit(void) { i2c_del_driver(&ov14810_slavedev_i2c_driver); i2c_del_driver(&ov14810_uC_i2c_driver); i2c_del_driver(&ov14810_i2c_driver); } module_init(ov14810_init); module_exit(ov14810_exit);
gpl-2.0